file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
transaction.component.ts | import { LocationComponent } from './location/location.component';
import { NgbModal, NgbModalRef } from '@ng-bootstrap/ng-bootstrap';
import { GooleMapsService } from './../../../../service/googlemaps.service';
import { CheckValueSevice } from './../../../../service/check-value.sevice';
import { WalletService } from './../../../../service/wallet.service';
import { FomatDateService } from './../../../../service/fomatDate.service';
import { ITransaction } from './../../../../model/transaction.model';
import { IDate } from './../../../../model/date.model';
import { ToastsManager } from 'ng2-toastr/ng2-toastr';
import { Component, ViewChild, ViewContainerRef } from '@angular/core';
import { ActivatedRoute } from '@angular/router';
import { TransactionService } from '../../../../service/transaction.service';
import { } from '@types/googlemaps';
import { FormControl, ReactiveFormsModule } from '@angular/forms';
declare var $: any;
declare var google: any;
@Component({
selector: 'app-transaction',
styleUrls: ['./transaction.component.scss'],
templateUrl: './transaction.component.html',
})
export class TransactionComponent {
dataIncome: Array<any>;
dataExpense: Array<any>;
dataDebtLoan: Array<any>;
// hiện thị phần thêm chi tiết
public adddetail = true;
// KHỞI TẠO CÁC BIẾN VỊ TRÍ
lat: number = 10.812035;
lng: number = 106.7119887
zoom: number = 14;
// DANH SÁCH TẤT CẢ CÁC ĐỊA ĐIỂM
allPlace: any[] = [];
// OBJCET ĐỊA ĐIỂM
objLocation = {
lat: 10.812035,
lng: 106.7119887,
name: "Đặt vị trí",
}
dataWallets: Array<any>;
infoCheckMoney: any = {};
public modalCheckMoney: NgbModalRef;
titleTransaction: String = "Thêm Giao Dịch";
nameButtonTransaction: String = "Thêm Giao Dịch";
dateCurrent = new Date();
nameWallet: String = '';
// TRANSACTION DEFAULT
transaction: ITransaction = {
groupcategory: '',
idcategory: '',
datecreatetransaction: new Date().toDateString(),
moneytransaction: '',
imagecategory: 'default',
categorytransaction: 'Chọn Danh Mục',
idwallet: '',
}
// URL HÌNH ẢNH
public url: String = '';
private fileToUpload: File = null;
ngOnInit() {
// LẤY TẤT CẢ CÁC VÍ HIỂN THỊ LÊN
this.getDataWallets();
// LẤY TOẠ ĐỘ Ở VỊ TRÍ HIỆN TẠI
this.setCurrentPosition();
}
constructor(private FomatDateService: FomatDateService,
private WalletService: WalletService,
private modalService: NgbModal,
private checkvalue: CheckValueSevice,
private TransactionService: TransactionService,
private ActivatedRoute: ActivatedRoute,
private GooleMapsService: GooleMapsService,
public toastr: ToastsManager,
vcr: ViewContainerRef,
) {
this.toastr.setRootViewContainerRef(vcr);
// LẤY TÊN VÍ HIỆN THỊ LÊN GIAO DIỆN
this.paramIdWalletURL();
// PHẦN CHỨC NĂNG TAG USER
let thisglob = this;
window.onload = function () {
$('#taguser').tagEditor({
autocomplete: {
delay: 0.15,
position: { collision: 'flip' },
source: ['ActionScript', 'AppleScript', 'Asp', 'BASIC', 'C', 'C++', 'CSS', 'Clojure', 'COBOL', 'ColdFusion', 'Erlang', 'Fortran', 'Groovy', 'Haskell', 'HTML', 'Java', 'JavaScript', 'Lisp', 'Perl', 'PHP', 'Python', 'Ruby', 'Scala', 'Scheme']
},
forceLowercase: false,
placeholder: 'Với',
onChange: (field, editor, tags) => {
thisglob.transaction.taguser = tags;
}
});
}
}
// LẤY FILE
onSelectFile(event) {
if (event.target.files && event.target.files[0]) {
var reader = new FileReader();
this.fileToUpload = event.target.files[0];
reader.readAsDataURL(event.target.files[0]);
reader.onload = (event: any) => {
this.url = event.target.result;
}
}
}
// HÀM LẤY DATA TẤT CÁ CẢ VÍ
getDataWallets() {
this.WalletService.getDataWallets();
this.WalletService.getAllWallet.subscribe((wallet) => {
this.dataWallets = wallet;
})
}
changeMoneyWallet() {
let obj = {
_id: this.transaction.idwallet,
money: this.infoCheckMoney.moneytrnasction,
namewallet: this.infoCheckMoney.namewallet
}
this.WalletService.updateDataWallet(obj)
.then((result) => {
this.modalCheckMoney.close();
// CHỈNH SỬA XONG CẬP NHẬT LẠI GIAO DIỆN MỚI
this.reloadData();
this.toastr.success('Điều chỉnh số tiền trong ví thành công ! ', 'Success ! ');
});
}
changeMoneyTransaction() {
this.transaction.moneytransaction = this.infoCheckMoney.moneywallet;
this.modalCheckMoney.close();
}
// SUMMIT GỬI GIAO DỊCH
submitTransaction(modalCheckMoney) {
if (this.transaction.groupcategory == '') {
this.toastr.warning('Vui lòng chọn category ! ', 'Cảnh báo ! ');
} else if (this.transaction.moneytransaction == '') {
this.toastr.warning('Vui lòng nhập số tiền vào ! ', 'Cảnh báo ! ');
} else if (isNaN(Number.parseInt(this.transaction.moneytransaction.toString()))) {
this.toastr.warning('Số tiền phải là 1 số ! ', 'Waring ! ');
} else {
let checkMoney = true;
if (this.transaction.groupcategory == "expense") {
this.dataWallets.forEach((wallet) => {
if (wallet._id == this.transaction.idwallet) {
if ((Number.parseInt(this.transaction.moneytransaction.toString())) > wallet.money) {
this.infoCheckMoney['moneywallet'] = wallet.money;
this.infoCheckMoney['moneytrnasction'] = this.transaction.moneytransaction;
checkMoney = false;
}
}
})
}
if (checkMoney == true) {
// thay đổi dấu
if (this.transaction.groupcategory == "income" || this.transaction.groupcategory == "debt") {
if (Number(this.transaction.moneytransaction) < 0) {
this.transaction.moneytransaction = (Number(this.transaction.moneytransaction) * -1).toString();
}
}
if (this.transaction.groupcategory == "expense" || this.transaction.groupcategory == "loan") {
if (Number(this.transaction.moneytransaction) > 0) {
this.transaction.moneytransaction = (Number(this.transaction.moneytransaction) * -1).toString();
}
}
// tạo một giao dịch
this.TransactionService.createTransaction(this.transaction)
.then((result) => {
// upload hình ảnh
if (this.fileToUpload != null) {
this.TransactionService.uploadImage(result._id, this.fileToUpload)
.then((data) => {
this.toastr.success('Thêm giao dịch thành công ! ', 'Thành công ! ');
this.reloadData();
this.resetData();
})
} else {
this.toastr.success('Thêm giao dịch thành công ! ', 'Thành công ! ');
this.reloadData();
this.resetData();
}
})
.catch((err) => {
this.toastr.error(err, 'Thất bại ! ');
})
} else {
this.modalCheckMoney = this.modalService.open(modalCheckMoney, { windowClass: 'modalCheckMoney' });
}
}
}
// CHỌN THU NHẬP, CHI TIÊU, HAY NỢ
chooseCategory(event) {
this.transaction.groupcategory = event.detect;
this.transaction.imagecategory = event.image;
this.transaction.categorytransaction = event.name;
this.transaction.idcategory = event._id;
if (this.transaction.groupcategory == 'income') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Thu Nhập';
} else if (this.transaction.groupcategory == 'expense') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Chi Tiêu';
} else if (this.transaction.groupcategory == 'debt-loan') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Nợ/Vay';
}
}
// XOÁ HÌNH ẢNH
deleteImage() {
this.url = null;
this.fileToUpload = null;
}
// KHI USER CHỌN NGÀY
changeDate(event) {
this.dateCurrent = new Date(event.value.toDateString());
this.transaction.datecreatetransaction = new Date(event.value.toDateString()).toString();
}
// LẤY 1 VÍ CÓ ID LÀ
paramIdWalletURL() {
//LẤY ID WALLET TỪ URL
this.ActivatedRoute.paramMap
.subscribe((params) => {
if (params['params'].idwallet != undefined) {
this.WalletService.getDataWalletId(params['params'].idwallet).then((data) => {
this.nameWallet = data.namewallet;
this.infoCheckMoney['namewallet'] = data.namewallet;
this.transaction.idwallet = data._id;
})
.catch((err) => { })
}
})
}
// LẤY DỮ LIỆU KHI NGƯỜI DÙNG CHỌN VÍ NÀO
outputIdWallet(event) {
this.nameWallet = event.namewallet;
this.infoCheckMoney['namewallet'] = event.namewallet;
this.transaction.idwallet = event._id;
}
// LOAD LẠI DATA
reloadData() {
let urlIdWallet = (this.ActivatedRoute.snapshot.params.idwallet == undefined) ? '' : this.ActivatedRoute.snapshot.params.idwallet;
// LOAD LẠI CẬP NHẬT BÁO CÁO
this.TransactionService.getTransactions(urlIdWallet);
// LOAD CẬP NHẬT LẠI TẤT CẢ CÁC VÍ
this.WalletService.getDataWallets();
}
// RESET DATA
resetData() {
this.titleTransaction = "Thêm Giao Dịch";
this.nameButtonTransaction = "Thêm Giao Dịch";
this.transaction = {
idcategory: '',
groupcategory: '',
notetransaction: '',
datecreatetransaction: new Date().toDateString(),
moneytransaction: '',
imagecategory: 'default',
categorytransaction: 'Chọn Danh Mục',
idwallet: '',
}
// RESET TẤT CẢ CÁC TAGS
if(this.transaction.taguser != null){
let tags = $('#taguser').tagEditor('getTags')[0].tags;
for (let i = 0; i < tags.length; i++) {
$('#taguser').tagEditor('removeTag', tags[i]);
}
}
this.url = null;
this.fileToUpload = null;
delete this.transaction.location;
this.objLocation.name = "Đặt vị trí";
// RESET WALLET
this.paramIdWalletURL();
// RESET IMAGE
this.url = null;
this.fileToUpload = null;
}
private setCurrentPosition() {
if ("geolocation" in navigator) {
navigator.geolocation.getCurrentPosition((position) => {
this.lat = position.coords.latitude;
this.lng = position.coords.longitude;
this.zoom = 14;
});
}
}
/ | ce) {
this.objLocation = {
lat: place.geometry.location.lat,
lng: place.geometry.location.lng,
name: place.name
}
this.transaction.location = this.objLocation;
}
// XOÁ ĐI VỊ CHÍ ĐÃ CHỌN
deleteLocation() {
delete this.transaction.location;
this.objLocation.name = "Đặt vị trí";
}
} | / MỞ MODAL CHỌN ĐỊA ĐIỂM GOOGLE MAP
open(content) {
this.GooleMapsService.getPlaceNear(this.lat, this.lng).then((data) => {
this.allPlace = data.results;
})
this.modalService.open(content);
}
// SUBMIT ĐỊA ĐIỂM
submitLocation(pla | identifier_body |
transaction.component.ts | import { LocationComponent } from './location/location.component';
import { NgbModal, NgbModalRef } from '@ng-bootstrap/ng-bootstrap';
import { GooleMapsService } from './../../../../service/googlemaps.service';
import { CheckValueSevice } from './../../../../service/check-value.sevice';
import { WalletService } from './../../../../service/wallet.service';
import { FomatDateService } from './../../../../service/fomatDate.service';
import { ITransaction } from './../../../../model/transaction.model';
import { IDate } from './../../../../model/date.model';
import { ToastsManager } from 'ng2-toastr/ng2-toastr';
import { Component, ViewChild, ViewContainerRef } from '@angular/core';
import { ActivatedRoute } from '@angular/router';
import { TransactionService } from '../../../../service/transaction.service';
import { } from '@types/googlemaps';
import { FormControl, ReactiveFormsModule } from '@angular/forms';
declare var $: any;
declare var google: any;
@Component({
selector: 'app-transaction',
styleUrls: ['./transaction.component.scss'],
templateUrl: './transaction.component.html',
})
export class TransactionComponent {
dataIncome: Array<any>;
dataExpense: Array<any>;
dataDebtLoan: Array<any>;
// hiện thị phần thêm chi tiết
public adddetail = true;
// KHỞI TẠO CÁC BIẾN VỊ TRÍ
lat: number = 10.812035;
lng: number = 106.7119887
zoom: number = 14;
// DANH SÁCH TẤT CẢ CÁC ĐỊA ĐIỂM
allPlace: any[] = [];
// OBJCET ĐỊA ĐIỂM
objLocation = {
lat: 10.812035,
lng: 106.7119887,
name: "Đặt vị trí",
}
dataWallets: Array<any>;
infoCheckMoney: any = {};
public modalCheckMoney: NgbModalRef;
titleTransaction: String = "Thêm Giao Dịch";
nameButtonTransaction: String = "Thêm Giao Dịch";
dateCurrent = new Date();
nameWallet: String = '';
// TRANSACTION DEFAULT
transaction: ITransaction = {
groupcategory: '',
idcategory: '',
datecreatetransaction: new Date().toDateString(),
moneytransaction: '',
imagecategory: 'default',
categorytransaction: 'Chọn Danh Mục',
idwallet: '',
}
// URL HÌNH ẢNH
public url: String = '';
private fileToUpload: File = null;
ngOnInit() {
// LẤY TẤT CẢ CÁC VÍ HIỂN THỊ LÊN
this.getDataWallets();
// LẤY TOẠ ĐỘ Ở VỊ TRÍ HIỆN TẠI
this.setCurrentPosition();
}
constructor(private FomatDateService: FomatDateService,
private WalletService: WalletService,
private modalService: NgbModal,
private checkvalue: CheckValueSevice,
private TransactionService: TransactionService,
private ActivatedRoute: ActivatedRoute,
private GooleMapsService: GooleMapsService,
public toastr: ToastsManager,
vcr: ViewContainerRef,
) {
this.toastr.setRootViewContainerRef(vcr);
// LẤY TÊN VÍ HIỆN THỊ LÊN GIAO DIỆN
this.paramIdWalletURL();
// PHẦN CHỨC NĂNG TAG USER
let thisglob = this;
window.onload = function () {
$('#taguser').tagEditor({
autocomplete: {
delay: 0.15,
position: { collision: 'flip' },
source: ['ActionScript', 'AppleScript', 'Asp', 'BASIC', 'C', 'C++', 'CSS', 'Clojure', 'COBOL', 'ColdFusion', 'Erlang', 'Fortran', 'Groovy', 'Haskell', 'HTML', 'Java', 'JavaScript', 'Lisp', 'Perl', 'PHP', 'Python', 'Ruby', 'Scala', 'Scheme']
},
forceLowercase: false,
placeholder: 'Với',
onChange: (field, editor, tags) => {
thisglob.transaction.taguser = tags;
}
});
}
}
// LẤY FILE
onSelectFile(event) {
if (event.target.files && event.target.files[0]) {
var reader = new FileReader();
this.fileToUpload = event.target.files[0];
reader.readAsDataURL(event.target.files[0]);
reader.onload = (event: any) => {
this.url = event.target.result;
}
}
}
// HÀM LẤY DATA TẤT CÁ CẢ VÍ
getDataWallets() {
this.WalletService.getDataWallets();
this.WalletService.getAllWallet.subscribe((wallet) => {
this.dataWallets = wallet;
})
}
changeMoneyWallet() {
let obj = {
_id: this.transaction.idwallet,
money: this.infoCheckMoney.moneytrnasction,
namewallet: this.infoCheckMoney.namewallet
}
this.WalletService.updateDataWallet(obj)
.then((result) => {
this.modalCheckMoney.close();
// CHỈNH SỬA XONG CẬP NHẬT LẠI GIAO DIỆN MỚI
this.reloadData();
this.toastr.success('Điều chỉnh số tiền trong ví thành công ! ', 'Success ! ');
});
}
changeMoneyTransaction() {
this.transaction.moneytransaction = this.infoCheckMoney.moneywallet;
this.modalCheckMoney.close();
}
// SUMMIT GỬI GIAO DỊCH
submitTransaction(modalCheckMoney) {
if (this.transaction.groupcategory == '') {
this.toastr.warning('Vui lòng chọn category ! ', 'Cảnh báo ! ');
} else if (this.transaction.moneytransaction == '') {
this.toastr.warning('Vui lòng nhập số tiền vào ! ', 'Cảnh báo ! ');
} else if (isNaN(Number.parseInt(this.transaction.moneytransaction.toString()))) {
this.toastr.warning('Số tiền phải là 1 số ! ', 'Waring ! ');
} else {
let checkMoney = true;
if (this.transaction.groupcategory == "expense") {
this.dataWallets.forEach((wallet) => {
if (wallet._id == this.transaction.idwallet) {
if ((Number.parseInt(this.transaction.moneytransaction.toString())) > wallet.money) {
this.infoCheckMoney['moneywallet'] = wallet.money;
this.infoCheckMoney['moneytrnasction'] = this.transaction.moneytransaction;
checkMoney = false;
}
}
})
}
if (checkMoney == true) {
// thay đổi dấu
if (this.transaction.groupcategory == "income" || this.transaction.groupcategory == "debt") {
if (Number(this.transaction.moneytransaction) < 0) {
this.transaction.moneytransaction = (Number(this.transaction.moneytransaction) * -1).toString();
}
}
if (this.transaction.groupcategory == "expense" || this.transaction.groupcategory == "loan") {
if (Number(this.transaction.moneytransaction) > 0) {
this.transaction.moneytransaction = (Number(this.transaction.moneytransaction) * -1).toString();
}
}
// tạo một giao dịch
this.TransactionService.createTransaction(this.transaction)
.then((result) => {
// upload hình ảnh
if (this.fileToUpload != null) {
this.TransactionService.uploadImage(result._id, this.fileToUpload)
.then((data) => {
this.toastr.success('Thêm giao dịch thành công ! ', 'Thành công ! ');
this.reloadData();
this.resetData();
})
} else {
this.toastr.success('Thêm giao dịch thành công ! ', 'Thành công ! ');
this.reloadData();
this.resetData();
}
})
.catch((err) => {
this.toastr.error(err, 'Thất bại ! ');
})
} else {
this.modalCheckMoney = this.modalService.open(modalCheckMoney, { windowClass: 'modalCheckMoney' });
}
}
}
// CHỌN THU NHẬP, CHI TIÊU, HAY NỢ
chooseCategory(event) {
this.transaction.groupcategory = event.detect;
this.transaction.imagecategory = event.image;
this.transaction.categorytransaction = event.name;
this.transaction.idcategory = event._id;
if (this.transaction.groupcategory == 'income') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Thu Nhập';
} else if (this.transaction.groupcategory == 'expense') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Chi Tiêu';
} else if (this.transaction.groupcategory == 'debt-loan') {
this.titleTransaction = this.nameButtonTransaction = 'Thêm Nợ/Vay';
}
}
// XOÁ HÌNH ẢNH
deleteImage() {
this.url = null;
this.fileToUpload = null;
}
// KHI USER CHỌN NGÀY
changeDate(event) {
this.dateCurrent = new Date(event.value.toDateString());
this.transaction.datecreatetransaction = new Date(event.value.toDateString()).toString();
}
// LẤY 1 VÍ CÓ ID LÀ
paramIdWalletURL() {
//LẤY ID WALLET TỪ URL
this.ActivatedRoute.paramMap
.subscribe((params) => {
if (params['params'].idwallet != undefined) {
this.WalletService.getDataWalletId(params['params'].idwallet).then((data) => {
this.nameWallet = data.namewallet;
this.infoCheckMoney['namewallet'] = data.namewallet;
this.transaction.idwallet = data._id;
})
.catch((err) => { })
}
})
}
// LẤY DỮ LIỆU KHI NGƯỜI DÙNG CHỌN VÍ NÀO
outputIdWallet(event) {
this.nameWallet = event.namewallet;
this.infoCheckMoney['namewallet'] = event.namewallet;
this.transaction.idwallet = event._id;
}
// LOAD LẠI DATA
reloadData() {
let urlIdWallet | atedRoute.snapshot.params.idwallet == undefined) ? '' : this.ActivatedRoute.snapshot.params.idwallet;
// LOAD LẠI CẬP NHẬT BÁO CÁO
this.TransactionService.getTransactions(urlIdWallet);
// LOAD CẬP NHẬT LẠI TẤT CẢ CÁC VÍ
this.WalletService.getDataWallets();
}
// RESET DATA
resetData() {
this.titleTransaction = "Thêm Giao Dịch";
this.nameButtonTransaction = "Thêm Giao Dịch";
this.transaction = {
idcategory: '',
groupcategory: '',
notetransaction: '',
datecreatetransaction: new Date().toDateString(),
moneytransaction: '',
imagecategory: 'default',
categorytransaction: 'Chọn Danh Mục',
idwallet: '',
}
// RESET TẤT CẢ CÁC TAGS
if(this.transaction.taguser != null){
let tags = $('#taguser').tagEditor('getTags')[0].tags;
for (let i = 0; i < tags.length; i++) {
$('#taguser').tagEditor('removeTag', tags[i]);
}
}
this.url = null;
this.fileToUpload = null;
delete this.transaction.location;
this.objLocation.name = "Đặt vị trí";
// RESET WALLET
this.paramIdWalletURL();
// RESET IMAGE
this.url = null;
this.fileToUpload = null;
}
private setCurrentPosition() {
if ("geolocation" in navigator) {
navigator.geolocation.getCurrentPosition((position) => {
this.lat = position.coords.latitude;
this.lng = position.coords.longitude;
this.zoom = 14;
});
}
}
// MỞ MODAL CHỌN ĐỊA ĐIỂM GOOGLE MAP
open(content) {
this.GooleMapsService.getPlaceNear(this.lat, this.lng).then((data) => {
this.allPlace = data.results;
})
this.modalService.open(content);
}
// SUBMIT ĐỊA ĐIỂM
submitLocation(place) {
this.objLocation = {
lat: place.geometry.location.lat,
lng: place.geometry.location.lng,
name: place.name
}
this.transaction.location = this.objLocation;
}
// XOÁ ĐI VỊ CHÍ ĐÃ CHỌN
deleteLocation() {
delete this.transaction.location;
this.objLocation.name = "Đặt vị trí";
}
} | = (this.Activ | identifier_name |
MIDRAN.py | import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.optim as optim
import sys
import glob
import time
import colorama
from colorama import Fore, Style
from etaprogress.progress import ProgressBar
from torchsummary import summary
from ptflops import get_model_complexity_info
from utilities.torchUtils import *
from dataTools.customDataloader import *
from utilities.inferenceUtils import *
from utilities.aestheticUtils import *
from modelDefinitions.DRAN import *
from torchvision.utils import save_image
class MIDRAN:
def __init__(self, config):
# Model Configration
self.trainingImagePath = config['trainingImagePath']
#self.trainingImagePath = config['targetPath']
self.checkpointPath = config['checkpointPath']
self.logPath = config['logPath']
self.testImagesPath = config['testImagePath']
self.resultDir = config['resultDir']
self.modelName = config['modelName']
self.dataSamples = config['dataSamples']
self.batchSize = int(config['batchSize'])
self.imageH = int(config['imageH'])
self.imageW = int(config['imageW'])
self.inputC = int(config['inputC'])
self.outputC = int(config['outputC'])
self.totalEpoch = int(config['epoch'])
self.interval = int(config['interval'])
self.learningRate = float(config['learningRate'])
self.adamBeta1 = float(config['adamBeta1'])
self.adamBeta2 = float(config['adamBeta2'])
self.barLen = int(config['barLen'])
# Initiating Training Parameters(for step)
self.currentEpoch = 0
self.startSteps = 0
self.totalSteps = 0
self.adversarialMean = 0
self.PR = 0.0
# Normalization
self.unNorm = UnNormalize()
# Noise Level for inferencing
self.noiseSet = [25,50]
# Preapring model(s) for GPU acceleration
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.net = DynamicResAttNet(3).to(self.device)
# Optimizers
self.optimizerEG = torch.optim.Adam(self.net.parameters(), lr=self.learningRate, betas=(self.adamBeta1, self.adamBeta2))
# Scheduler for Super Convergance
self.scheduleLR = None
def customTrainLoader(self, overFitTest = False):
targetImageList = imageList(self.trainingImagePath)
print ("Trining Samples (Input):", self.trainingImagePath, len(targetImageList))
if overFitTest == True:
targetImageList = targetImageList[-1:]
if self.dataSamples:
targetImageList = targetImageList[:self.dataSamples]
datasetReadder = customDatasetReader(
image_list=targetImageList,
imagePath=self.trainingImagePath,
height = self.imageH,
width = self.imageW,
)
self.trainLoader = torch.utils.data.DataLoader( dataset=datasetReadder,
batch_size=self.batchSize,
shuffle=True
)
return self.trainLoader
def modelTraining(self, resumeTraning=False, overFitTest=False, dataSamples = None):
if dataSamples:
self.dataSamples = dataSamples
# Losses
reconstructionLoss = torch.nn.L1Loss().to(self.device)
# Overfitting Testing
if overFitTest == True:
customPrint(Fore.RED + "Over Fitting Testing with an arbitary image!", self.barLen)
trainingImageLoader = self.customTrainLoader(overFitTest=True)
self.interval = 1
self.totalEpoch = 100000
else:
trainingImageLoader = self.customTrainLoader()
# Resuming Training
if resumeTraning == True:
#self.modelLoad()
try:
self.modelLoad()
except:
#print()
customPrint(Fore.RED + "Would you like to start training from sketch (default: Y): ", textWidth=self.barLen)
userInput = input() or "Y"
if not (userInput == "Y" or userInput == "y"):
exit()
# Starting Training
customPrint('Training is about to begin using:' + Fore.YELLOW + '[{}]'.format(self.device).upper(), textWidth=self.barLen)
# Initiating steps
self.totalSteps = int(len(trainingImageLoader)*self.totalEpoch)
startTime = time.time()
# Initiating progress bar
bar = ProgressBar(self.totalSteps, max_width=int(self.barLen/2))
currentStep = self.startSteps
while currentStep < self.totalSteps:
# Time tracker
iterTime = time.time()
for LRImages, HRGTImages in trainingImageLoader:
##############################
#### Initiating Variables ####
##############################
# Updating Steps
if currentStep > self.totalSteps:
self.savingWeights(currentStep)
customPrint(Fore.YELLOW + "Training Completed Successfully!", textWidth=self.barLen)
exit()
currentStep += 1
# Images
rawInput = LRImages.to(self.device)
highResReal = HRGTImages.to(self.device)
##############################
####### Training Phase #######
##############################
# Image Generation
residualNoise = self.net(rawInput)
# Optimization of generator
self.optimizerEG.zero_grad()
generatorContentLoss = reconstructionLoss(residualNoise, highResReal)
lossEG = generatorContentLoss
lossEG.backward()
self.optimizerEG.step()
##########################
###### Model Logger ######
##########################
# Progress Bar
if (currentStep + 1) % 25 == 0:
bar.numerator = currentStep + 1
print(Fore.YELLOW + "Steps |",bar,Fore.YELLOW + "| LossEG: {:.4f}".format(lossEG),end='\r')
# Updating training log
if (currentStep + 1) % self.interval == 0:
# Updating Tensorboard
summaryInfo = {
'Input Images' : self.unNorm(rawInput),
'Residual Images' : self.unNorm(residualNoise),
'Denoised Images' : self.unNorm(rawInput-residualNoise),
'GTNoise' : self.unNorm(highResReal),
'Step' : currentStep + 1,
'Epoch' : self.currentEpoch,
'LossEG' : lossEG.item(),
'Path' : self.logPath,
'Atttention Net' : self.net,
}
tbLogWritter(summaryInfo)
save_image(self.unNorm(rawInput-residualNoise[0]), 'modelOutput.png')
# Saving Weights and state of the model for resume training
self.savingWeights(currentStep)
if (currentStep + 1) % (10000) == 0 :
print("\n")
self.savingWeights(currentStep + 1, True)
self.modelInference(validation=True, steps = currentStep + 1)
eHours, eMinutes, eSeconds = timer(iterTime, time.time())
print (Fore.CYAN +'Steps [{}/{}] | Time elapsed [{:0>2}:{:0>2}:{:0>2}] | Loss: {:.2f}'
.format(currentStep + 1, self.totalSteps, eHours, eMinutes, eSeconds, lossEG))
def modelInference(self, testImagesPath = None, outputDir = None, resize = None, validation = None, noiseSet = None, steps = None):
if not validation:
self.modelLoad()
print("\nInferencing on pretrained weights.")
else:
print("Validation about to begin.")
if not noiseSet:
noiseSet = self.noiseSet
if testImagesPath:
self.testImagesPath = testImagesPath
if outputDir:
self.resultDir = outputDir
modelInference = inference(inputRootDir=self.testImagesPath, outputRootDir=self.resultDir, modelName=self.modelName, validation=validation)
testImageList = modelInference.testingSetProcessor()
barVal = ProgressBar(len(testImageList) * len(noiseSet), max_width=int(50))
imageCounter = 0
with torch.no_grad():
for noise in noiseSet:
for imgPath in testImageList:
img = modelInference.inputForInference(imgPath, noiseLevel=noise).to(self.device)
output = self.net(img)
modelInference.saveModelOutput(img-output, imgPath, noise, steps)
imageCounter += 1
if imageCounter % 2 == 0:
barVal.numerator = imageCounter
print(Fore.CYAN + "Image Processd |", barVal,Fore.CYAN, end='\r')
print("\n")
def modelSummary(self,input_size = None):
if not input_size:
input_size = (3, self.imageH, self.imageW)
customPrint(Fore.YELLOW + "Model Summary:Dynamic Residual Attention Network", textWidth=self.barLen)
summary(self.net, input_size =input_size)
print ("*" * self.barLen)
print()
flops, params = get_model_complexity_info(self.net, input_size, as_strings=True, print_per_layer_stat=False)
customPrint('Computational complexity (Dynamic Residual Attention Network):{}'.format(flops), self.barLen, '-')
#customPrint('Number of parameters (Enhace-Gen):{}'.format(params), self.barLen, '-')
configShower()
print ("*" * self.barLen)
| checkpoint = {
'step' : currentStep + 1,
'stateDictEG': self.net.state_dict(),
'optimizerEG': self.optimizerEG.state_dict(),
'schedulerLR': self.scheduleLR
}
saveCheckpoint(modelStates = checkpoint, path = self.checkpointPath, modelName = self.modelName)
if duplicate:
saveCheckpoint(modelStates = checkpoint, path = self.checkpointPath + str(currentStep) + "/", modelName = self.modelName, backup=None)
def modelLoad(self):
customPrint(Fore.RED + "Loading pretrained weight", textWidth=self.barLen)
previousWeight = loadCheckpoints(self.checkpointPath, self.modelName)
self.net.load_state_dict(previousWeight['stateDictEG'])
self.optimizerEG.load_state_dict(previousWeight['optimizerEG'])
self.scheduleLR = previousWeight['schedulerLR']
self.startSteps = int(previousWeight['step'])
customPrint(Fore.YELLOW + "Weight loaded successfully", textWidth=self.barLen) | def savingWeights(self, currentStep, duplicate=None):
# Saving weights | random_line_split |
MIDRAN.py | import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.optim as optim
import sys
import glob
import time
import colorama
from colorama import Fore, Style
from etaprogress.progress import ProgressBar
from torchsummary import summary
from ptflops import get_model_complexity_info
from utilities.torchUtils import *
from dataTools.customDataloader import *
from utilities.inferenceUtils import *
from utilities.aestheticUtils import *
from modelDefinitions.DRAN import *
from torchvision.utils import save_image
class MIDRAN:
def __init__(self, config):
# Model Configration
self.trainingImagePath = config['trainingImagePath']
#self.trainingImagePath = config['targetPath']
self.checkpointPath = config['checkpointPath']
self.logPath = config['logPath']
self.testImagesPath = config['testImagePath']
self.resultDir = config['resultDir']
self.modelName = config['modelName']
self.dataSamples = config['dataSamples']
self.batchSize = int(config['batchSize'])
self.imageH = int(config['imageH'])
self.imageW = int(config['imageW'])
self.inputC = int(config['inputC'])
self.outputC = int(config['outputC'])
self.totalEpoch = int(config['epoch'])
self.interval = int(config['interval'])
self.learningRate = float(config['learningRate'])
self.adamBeta1 = float(config['adamBeta1'])
self.adamBeta2 = float(config['adamBeta2'])
self.barLen = int(config['barLen'])
# Initiating Training Parameters(for step)
self.currentEpoch = 0
self.startSteps = 0
self.totalSteps = 0
self.adversarialMean = 0
self.PR = 0.0
# Normalization
self.unNorm = UnNormalize()
# Noise Level for inferencing
self.noiseSet = [25,50]
# Preapring model(s) for GPU acceleration
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.net = DynamicResAttNet(3).to(self.device)
# Optimizers
self.optimizerEG = torch.optim.Adam(self.net.parameters(), lr=self.learningRate, betas=(self.adamBeta1, self.adamBeta2))
# Scheduler for Super Convergance
self.scheduleLR = None
def customTrainLoader(self, overFitTest = False):
targetImageList = imageList(self.trainingImagePath)
print ("Trining Samples (Input):", self.trainingImagePath, len(targetImageList))
if overFitTest == True:
targetImageList = targetImageList[-1:]
if self.dataSamples:
targetImageList = targetImageList[:self.dataSamples]
datasetReadder = customDatasetReader(
image_list=targetImageList,
imagePath=self.trainingImagePath,
height = self.imageH,
width = self.imageW,
)
self.trainLoader = torch.utils.data.DataLoader( dataset=datasetReadder,
batch_size=self.batchSize,
shuffle=True
)
return self.trainLoader
def modelTraining(self, resumeTraning=False, overFitTest=False, dataSamples = None):
if dataSamples:
self.dataSamples = dataSamples
# Losses
reconstructionLoss = torch.nn.L1Loss().to(self.device)
# Overfitting Testing
if overFitTest == True:
customPrint(Fore.RED + "Over Fitting Testing with an arbitary image!", self.barLen)
trainingImageLoader = self.customTrainLoader(overFitTest=True)
self.interval = 1
self.totalEpoch = 100000
else:
trainingImageLoader = self.customTrainLoader()
# Resuming Training
if resumeTraning == True:
#self.modelLoad()
try:
self.modelLoad()
except:
#print()
customPrint(Fore.RED + "Would you like to start training from sketch (default: Y): ", textWidth=self.barLen)
userInput = input() or "Y"
if not (userInput == "Y" or userInput == "y"):
exit()
# Starting Training
customPrint('Training is about to begin using:' + Fore.YELLOW + '[{}]'.format(self.device).upper(), textWidth=self.barLen)
# Initiating steps
self.totalSteps = int(len(trainingImageLoader)*self.totalEpoch)
startTime = time.time()
# Initiating progress bar
bar = ProgressBar(self.totalSteps, max_width=int(self.barLen/2))
currentStep = self.startSteps
while currentStep < self.totalSteps:
# Time tracker
iterTime = time.time()
for LRImages, HRGTImages in trainingImageLoader:
##############################
#### Initiating Variables ####
##############################
# Updating Steps
if currentStep > self.totalSteps:
self.savingWeights(currentStep)
customPrint(Fore.YELLOW + "Training Completed Successfully!", textWidth=self.barLen)
exit()
currentStep += 1
# Images
rawInput = LRImages.to(self.device)
highResReal = HRGTImages.to(self.device)
##############################
####### Training Phase #######
##############################
# Image Generation
residualNoise = self.net(rawInput)
# Optimization of generator
self.optimizerEG.zero_grad()
generatorContentLoss = reconstructionLoss(residualNoise, highResReal)
lossEG = generatorContentLoss
lossEG.backward()
self.optimizerEG.step()
##########################
###### Model Logger ######
##########################
# Progress Bar
if (currentStep + 1) % 25 == 0:
bar.numerator = currentStep + 1
print(Fore.YELLOW + "Steps |",bar,Fore.YELLOW + "| LossEG: {:.4f}".format(lossEG),end='\r')
# Updating training log
if (currentStep + 1) % self.interval == 0:
# Updating Tensorboard
summaryInfo = {
'Input Images' : self.unNorm(rawInput),
'Residual Images' : self.unNorm(residualNoise),
'Denoised Images' : self.unNorm(rawInput-residualNoise),
'GTNoise' : self.unNorm(highResReal),
'Step' : currentStep + 1,
'Epoch' : self.currentEpoch,
'LossEG' : lossEG.item(),
'Path' : self.logPath,
'Atttention Net' : self.net,
}
tbLogWritter(summaryInfo)
save_image(self.unNorm(rawInput-residualNoise[0]), 'modelOutput.png')
# Saving Weights and state of the model for resume training
self.savingWeights(currentStep)
if (currentStep + 1) % (10000) == 0 :
print("\n")
self.savingWeights(currentStep + 1, True)
self.modelInference(validation=True, steps = currentStep + 1)
eHours, eMinutes, eSeconds = timer(iterTime, time.time())
print (Fore.CYAN +'Steps [{}/{}] | Time elapsed [{:0>2}:{:0>2}:{:0>2}] | Loss: {:.2f}'
.format(currentStep + 1, self.totalSteps, eHours, eMinutes, eSeconds, lossEG))
def modelInference(self, testImagesPath = None, outputDir = None, resize = None, validation = None, noiseSet = None, steps = None):
if not validation:
self.modelLoad()
print("\nInferencing on pretrained weights.")
else:
print("Validation about to begin.")
if not noiseSet:
noiseSet = self.noiseSet
if testImagesPath:
self.testImagesPath = testImagesPath
if outputDir:
self.resultDir = outputDir
modelInference = inference(inputRootDir=self.testImagesPath, outputRootDir=self.resultDir, modelName=self.modelName, validation=validation)
testImageList = modelInference.testingSetProcessor()
barVal = ProgressBar(len(testImageList) * len(noiseSet), max_width=int(50))
imageCounter = 0
with torch.no_grad():
for noise in noiseSet:
for imgPath in testImageList:
img = modelInference.inputForInference(imgPath, noiseLevel=noise).to(self.device)
output = self.net(img)
modelInference.saveModelOutput(img-output, imgPath, noise, steps)
imageCounter += 1
if imageCounter % 2 == 0:
barVal.numerator = imageCounter
print(Fore.CYAN + "Image Processd |", barVal,Fore.CYAN, end='\r')
print("\n")
def modelSummary(self,input_size = None):
if not input_size:
input_size = (3, self.imageH, self.imageW)
customPrint(Fore.YELLOW + "Model Summary:Dynamic Residual Attention Network", textWidth=self.barLen)
summary(self.net, input_size =input_size)
print ("*" * self.barLen)
print()
flops, params = get_model_complexity_info(self.net, input_size, as_strings=True, print_per_layer_stat=False)
customPrint('Computational complexity (Dynamic Residual Attention Network):{}'.format(flops), self.barLen, '-')
#customPrint('Number of parameters (Enhace-Gen):{}'.format(params), self.barLen, '-')
configShower()
print ("*" * self.barLen)
def savingWeights(self, currentStep, duplicate=None):
# Saving weights
checkpoint = {
'step' : currentStep + 1,
'stateDictEG': self.net.state_dict(),
'optimizerEG': self.optimizerEG.state_dict(),
'schedulerLR': self.scheduleLR
}
saveCheckpoint(modelStates = checkpoint, path = self.checkpointPath, modelName = self.modelName)
if duplicate:
saveCheckpoint(modelStates = checkpoint, path = self.checkpointPath + str(currentStep) + "/", modelName = self.modelName, backup=None)
def modelLoad(self):
| customPrint(Fore.RED + "Loading pretrained weight", textWidth=self.barLen)
previousWeight = loadCheckpoints(self.checkpointPath, self.modelName)
self.net.load_state_dict(previousWeight['stateDictEG'])
self.optimizerEG.load_state_dict(previousWeight['optimizerEG'])
self.scheduleLR = previousWeight['schedulerLR']
self.startSteps = int(previousWeight['step'])
customPrint(Fore.YELLOW + "Weight loaded successfully", textWidth=self.barLen) | identifier_body | |
MIDRAN.py | import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.optim as optim
import sys
import glob
import time
import colorama
from colorama import Fore, Style
from etaprogress.progress import ProgressBar
from torchsummary import summary
from ptflops import get_model_complexity_info
from utilities.torchUtils import *
from dataTools.customDataloader import *
from utilities.inferenceUtils import *
from utilities.aestheticUtils import *
from modelDefinitions.DRAN import *
from torchvision.utils import save_image
class MIDRAN:
def __init__(self, config):
# Model Configration
self.trainingImagePath = config['trainingImagePath']
#self.trainingImagePath = config['targetPath']
self.checkpointPath = config['checkpointPath']
self.logPath = config['logPath']
self.testImagesPath = config['testImagePath']
self.resultDir = config['resultDir']
self.modelName = config['modelName']
self.dataSamples = config['dataSamples']
self.batchSize = int(config['batchSize'])
self.imageH = int(config['imageH'])
self.imageW = int(config['imageW'])
self.inputC = int(config['inputC'])
self.outputC = int(config['outputC'])
self.totalEpoch = int(config['epoch'])
self.interval = int(config['interval'])
self.learningRate = float(config['learningRate'])
self.adamBeta1 = float(config['adamBeta1'])
self.adamBeta2 = float(config['adamBeta2'])
self.barLen = int(config['barLen'])
# Initiating Training Parameters(for step)
self.currentEpoch = 0
self.startSteps = 0
self.totalSteps = 0
self.adversarialMean = 0
self.PR = 0.0
# Normalization
self.unNorm = UnNormalize()
# Noise Level for inferencing
self.noiseSet = [25,50]
# Preapring model(s) for GPU acceleration
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.net = DynamicResAttNet(3).to(self.device)
# Optimizers
self.optimizerEG = torch.optim.Adam(self.net.parameters(), lr=self.learningRate, betas=(self.adamBeta1, self.adamBeta2))
# Scheduler for Super Convergance
self.scheduleLR = None
def customTrainLoader(self, overFitTest = False):
targetImageList = imageList(self.trainingImagePath)
print ("Trining Samples (Input):", self.trainingImagePath, len(targetImageList))
if overFitTest == True:
targetImageList = targetImageList[-1:]
if self.dataSamples:
|
datasetReadder = customDatasetReader(
image_list=targetImageList,
imagePath=self.trainingImagePath,
height = self.imageH,
width = self.imageW,
)
self.trainLoader = torch.utils.data.DataLoader( dataset=datasetReadder,
batch_size=self.batchSize,
shuffle=True
)
return self.trainLoader
def modelTraining(self, resumeTraning=False, overFitTest=False, dataSamples = None):
if dataSamples:
self.dataSamples = dataSamples
# Losses
reconstructionLoss = torch.nn.L1Loss().to(self.device)
# Overfitting Testing
if overFitTest == True:
customPrint(Fore.RED + "Over Fitting Testing with an arbitary image!", self.barLen)
trainingImageLoader = self.customTrainLoader(overFitTest=True)
self.interval = 1
self.totalEpoch = 100000
else:
trainingImageLoader = self.customTrainLoader()
# Resuming Training
if resumeTraning == True:
#self.modelLoad()
try:
self.modelLoad()
except:
#print()
customPrint(Fore.RED + "Would you like to start training from sketch (default: Y): ", textWidth=self.barLen)
userInput = input() or "Y"
if not (userInput == "Y" or userInput == "y"):
exit()
# Starting Training
customPrint('Training is about to begin using:' + Fore.YELLOW + '[{}]'.format(self.device).upper(), textWidth=self.barLen)
# Initiating steps
self.totalSteps = int(len(trainingImageLoader)*self.totalEpoch)
startTime = time.time()
# Initiating progress bar
bar = ProgressBar(self.totalSteps, max_width=int(self.barLen/2))
currentStep = self.startSteps
while currentStep < self.totalSteps:
# Time tracker
iterTime = time.time()
for LRImages, HRGTImages in trainingImageLoader:
##############################
#### Initiating Variables ####
##############################
# Updating Steps
if currentStep > self.totalSteps:
self.savingWeights(currentStep)
customPrint(Fore.YELLOW + "Training Completed Successfully!", textWidth=self.barLen)
exit()
currentStep += 1
# Images
rawInput = LRImages.to(self.device)
highResReal = HRGTImages.to(self.device)
##############################
####### Training Phase #######
##############################
# Image Generation
residualNoise = self.net(rawInput)
# Optimization of generator
self.optimizerEG.zero_grad()
generatorContentLoss = reconstructionLoss(residualNoise, highResReal)
lossEG = generatorContentLoss
lossEG.backward()
self.optimizerEG.step()
##########################
###### Model Logger ######
##########################
# Progress Bar
if (currentStep + 1) % 25 == 0:
bar.numerator = currentStep + 1
print(Fore.YELLOW + "Steps |",bar,Fore.YELLOW + "| LossEG: {:.4f}".format(lossEG),end='\r')
# Updating training log
if (currentStep + 1) % self.interval == 0:
# Updating Tensorboard
summaryInfo = {
'Input Images' : self.unNorm(rawInput),
'Residual Images' : self.unNorm(residualNoise),
'Denoised Images' : self.unNorm(rawInput-residualNoise),
'GTNoise' : self.unNorm(highResReal),
'Step' : currentStep + 1,
'Epoch' : self.currentEpoch,
'LossEG' : lossEG.item(),
'Path' : self.logPath,
'Atttention Net' : self.net,
}
tbLogWritter(summaryInfo)
save_image(self.unNorm(rawInput-residualNoise[0]), 'modelOutput.png')
# Saving Weights and state of the model for resume training
self.savingWeights(currentStep)
if (currentStep + 1) % (10000) == 0 :
print("\n")
self.savingWeights(currentStep + 1, True)
self.modelInference(validation=True, steps = currentStep + 1)
eHours, eMinutes, eSeconds = timer(iterTime, time.time())
print (Fore.CYAN +'Steps [{}/{}] | Time elapsed [{:0>2}:{:0>2}:{:0>2}] | Loss: {:.2f}'
.format(currentStep + 1, self.totalSteps, eHours, eMinutes, eSeconds, lossEG))
def modelInference(self, testImagesPath = None, outputDir = None, resize = None, validation = None, noiseSet = None, steps = None):
if not validation:
self.modelLoad()
print("\nInferencing on pretrained weights.")
else:
print("Validation about to begin.")
if not noiseSet:
noiseSet = self.noiseSet
if testImagesPath:
self.testImagesPath = testImagesPath
if outputDir:
self.resultDir = outputDir
modelInference = inference(inputRootDir=self.testImagesPath, outputRootDir=self.resultDir, modelName=self.modelName, validation=validation)
testImageList = modelInference.testingSetProcessor()
barVal = ProgressBar(len(testImageList) * len(noiseSet), max_width=int(50))
imageCounter = 0
with torch.no_grad():
for noise in noiseSet:
for imgPath in testImageList:
img = modelInference.inputForInference(imgPath, noiseLevel=noise).to(self.device)
output = self.net(img)
modelInference.saveModelOutput(img-output, imgPath, noise, steps)
imageCounter += 1
if imageCounter % 2 == 0:
barVal.numerator = imageCounter
print(Fore.CYAN + "Image Processd |", barVal,Fore.CYAN, end='\r')
print("\n")
def modelSummary(self,input_size = None):
if not input_size:
input_size = (3, self.imageH, self.imageW)
customPrint(Fore.YELLOW + "Model Summary:Dynamic Residual Attention Network", textWidth=self.barLen)
summary(self.net, input_size =input_size)
print ("*" * self.barLen)
print()
flops, params = get_model_complexity_info(self.net, input_size, as_strings=True, print_per_layer_stat=False)
customPrint('Computational complexity (Dynamic Residual Attention Network):{}'.format(flops), self.barLen, '-')
#customPrint('Number of parameters (Enhace-Gen):{}'.format(params), self.barLen, '-')
configShower()
print ("*" * self.barLen)
def savingWeights(self, currentStep, duplicate=None):
# Saving weights
checkpoint = {
'step' : currentStep + 1,
'stateDictEG': self.net.state_dict(),
'optimizerEG': self.optimizerEG.state_dict(),
'schedulerLR': self.scheduleLR
}
saveCheckpoint(modelStates = checkpoint, path = self.checkpointPath, modelName = self.modelName)
if duplicate:
saveCheckpoint(modelStates = checkpoint, path = self.checkpointPath + str(currentStep) + "/", modelName = self.modelName, backup=None)
def modelLoad(self):
customPrint(Fore.RED + "Loading pretrained weight", textWidth=self.barLen)
previousWeight = loadCheckpoints(self.checkpointPath, self.modelName)
self.net.load_state_dict(previousWeight['stateDictEG'])
self.optimizerEG.load_state_dict(previousWeight['optimizerEG'])
self.scheduleLR = previousWeight['schedulerLR']
self.startSteps = int(previousWeight['step'])
customPrint(Fore.YELLOW + "Weight loaded successfully", textWidth=self.barLen)
| targetImageList = targetImageList[:self.dataSamples] | conditional_block |
MIDRAN.py | import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.optim as optim
import sys
import glob
import time
import colorama
from colorama import Fore, Style
from etaprogress.progress import ProgressBar
from torchsummary import summary
from ptflops import get_model_complexity_info
from utilities.torchUtils import *
from dataTools.customDataloader import *
from utilities.inferenceUtils import *
from utilities.aestheticUtils import *
from modelDefinitions.DRAN import *
from torchvision.utils import save_image
class MIDRAN:
def __init__(self, config):
# Model Configration
self.trainingImagePath = config['trainingImagePath']
#self.trainingImagePath = config['targetPath']
self.checkpointPath = config['checkpointPath']
self.logPath = config['logPath']
self.testImagesPath = config['testImagePath']
self.resultDir = config['resultDir']
self.modelName = config['modelName']
self.dataSamples = config['dataSamples']
self.batchSize = int(config['batchSize'])
self.imageH = int(config['imageH'])
self.imageW = int(config['imageW'])
self.inputC = int(config['inputC'])
self.outputC = int(config['outputC'])
self.totalEpoch = int(config['epoch'])
self.interval = int(config['interval'])
self.learningRate = float(config['learningRate'])
self.adamBeta1 = float(config['adamBeta1'])
self.adamBeta2 = float(config['adamBeta2'])
self.barLen = int(config['barLen'])
# Initiating Training Parameters(for step)
self.currentEpoch = 0
self.startSteps = 0
self.totalSteps = 0
self.adversarialMean = 0
self.PR = 0.0
# Normalization
self.unNorm = UnNormalize()
# Noise Level for inferencing
self.noiseSet = [25,50]
# Preapring model(s) for GPU acceleration
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.net = DynamicResAttNet(3).to(self.device)
# Optimizers
self.optimizerEG = torch.optim.Adam(self.net.parameters(), lr=self.learningRate, betas=(self.adamBeta1, self.adamBeta2))
# Scheduler for Super Convergance
self.scheduleLR = None
def customTrainLoader(self, overFitTest = False):
targetImageList = imageList(self.trainingImagePath)
print ("Trining Samples (Input):", self.trainingImagePath, len(targetImageList))
if overFitTest == True:
targetImageList = targetImageList[-1:]
if self.dataSamples:
targetImageList = targetImageList[:self.dataSamples]
datasetReadder = customDatasetReader(
image_list=targetImageList,
imagePath=self.trainingImagePath,
height = self.imageH,
width = self.imageW,
)
self.trainLoader = torch.utils.data.DataLoader( dataset=datasetReadder,
batch_size=self.batchSize,
shuffle=True
)
return self.trainLoader
def modelTraining(self, resumeTraning=False, overFitTest=False, dataSamples = None):
if dataSamples:
self.dataSamples = dataSamples
# Losses
reconstructionLoss = torch.nn.L1Loss().to(self.device)
# Overfitting Testing
if overFitTest == True:
customPrint(Fore.RED + "Over Fitting Testing with an arbitary image!", self.barLen)
trainingImageLoader = self.customTrainLoader(overFitTest=True)
self.interval = 1
self.totalEpoch = 100000
else:
trainingImageLoader = self.customTrainLoader()
# Resuming Training
if resumeTraning == True:
#self.modelLoad()
try:
self.modelLoad()
except:
#print()
customPrint(Fore.RED + "Would you like to start training from sketch (default: Y): ", textWidth=self.barLen)
userInput = input() or "Y"
if not (userInput == "Y" or userInput == "y"):
exit()
# Starting Training
customPrint('Training is about to begin using:' + Fore.YELLOW + '[{}]'.format(self.device).upper(), textWidth=self.barLen)
# Initiating steps
self.totalSteps = int(len(trainingImageLoader)*self.totalEpoch)
startTime = time.time()
# Initiating progress bar
bar = ProgressBar(self.totalSteps, max_width=int(self.barLen/2))
currentStep = self.startSteps
while currentStep < self.totalSteps:
# Time tracker
iterTime = time.time()
for LRImages, HRGTImages in trainingImageLoader:
##############################
#### Initiating Variables ####
##############################
# Updating Steps
if currentStep > self.totalSteps:
self.savingWeights(currentStep)
customPrint(Fore.YELLOW + "Training Completed Successfully!", textWidth=self.barLen)
exit()
currentStep += 1
# Images
rawInput = LRImages.to(self.device)
highResReal = HRGTImages.to(self.device)
##############################
####### Training Phase #######
##############################
# Image Generation
residualNoise = self.net(rawInput)
# Optimization of generator
self.optimizerEG.zero_grad()
generatorContentLoss = reconstructionLoss(residualNoise, highResReal)
lossEG = generatorContentLoss
lossEG.backward()
self.optimizerEG.step()
##########################
###### Model Logger ######
##########################
# Progress Bar
if (currentStep + 1) % 25 == 0:
bar.numerator = currentStep + 1
print(Fore.YELLOW + "Steps |",bar,Fore.YELLOW + "| LossEG: {:.4f}".format(lossEG),end='\r')
# Updating training log
if (currentStep + 1) % self.interval == 0:
# Updating Tensorboard
summaryInfo = {
'Input Images' : self.unNorm(rawInput),
'Residual Images' : self.unNorm(residualNoise),
'Denoised Images' : self.unNorm(rawInput-residualNoise),
'GTNoise' : self.unNorm(highResReal),
'Step' : currentStep + 1,
'Epoch' : self.currentEpoch,
'LossEG' : lossEG.item(),
'Path' : self.logPath,
'Atttention Net' : self.net,
}
tbLogWritter(summaryInfo)
save_image(self.unNorm(rawInput-residualNoise[0]), 'modelOutput.png')
# Saving Weights and state of the model for resume training
self.savingWeights(currentStep)
if (currentStep + 1) % (10000) == 0 :
print("\n")
self.savingWeights(currentStep + 1, True)
self.modelInference(validation=True, steps = currentStep + 1)
eHours, eMinutes, eSeconds = timer(iterTime, time.time())
print (Fore.CYAN +'Steps [{}/{}] | Time elapsed [{:0>2}:{:0>2}:{:0>2}] | Loss: {:.2f}'
.format(currentStep + 1, self.totalSteps, eHours, eMinutes, eSeconds, lossEG))
def | (self, testImagesPath = None, outputDir = None, resize = None, validation = None, noiseSet = None, steps = None):
if not validation:
self.modelLoad()
print("\nInferencing on pretrained weights.")
else:
print("Validation about to begin.")
if not noiseSet:
noiseSet = self.noiseSet
if testImagesPath:
self.testImagesPath = testImagesPath
if outputDir:
self.resultDir = outputDir
modelInference = inference(inputRootDir=self.testImagesPath, outputRootDir=self.resultDir, modelName=self.modelName, validation=validation)
testImageList = modelInference.testingSetProcessor()
barVal = ProgressBar(len(testImageList) * len(noiseSet), max_width=int(50))
imageCounter = 0
with torch.no_grad():
for noise in noiseSet:
for imgPath in testImageList:
img = modelInference.inputForInference(imgPath, noiseLevel=noise).to(self.device)
output = self.net(img)
modelInference.saveModelOutput(img-output, imgPath, noise, steps)
imageCounter += 1
if imageCounter % 2 == 0:
barVal.numerator = imageCounter
print(Fore.CYAN + "Image Processd |", barVal,Fore.CYAN, end='\r')
print("\n")
def modelSummary(self,input_size = None):
if not input_size:
input_size = (3, self.imageH, self.imageW)
customPrint(Fore.YELLOW + "Model Summary:Dynamic Residual Attention Network", textWidth=self.barLen)
summary(self.net, input_size =input_size)
print ("*" * self.barLen)
print()
flops, params = get_model_complexity_info(self.net, input_size, as_strings=True, print_per_layer_stat=False)
customPrint('Computational complexity (Dynamic Residual Attention Network):{}'.format(flops), self.barLen, '-')
#customPrint('Number of parameters (Enhace-Gen):{}'.format(params), self.barLen, '-')
configShower()
print ("*" * self.barLen)
def savingWeights(self, currentStep, duplicate=None):
# Saving weights
checkpoint = {
'step' : currentStep + 1,
'stateDictEG': self.net.state_dict(),
'optimizerEG': self.optimizerEG.state_dict(),
'schedulerLR': self.scheduleLR
}
saveCheckpoint(modelStates = checkpoint, path = self.checkpointPath, modelName = self.modelName)
if duplicate:
saveCheckpoint(modelStates = checkpoint, path = self.checkpointPath + str(currentStep) + "/", modelName = self.modelName, backup=None)
def modelLoad(self):
customPrint(Fore.RED + "Loading pretrained weight", textWidth=self.barLen)
previousWeight = loadCheckpoints(self.checkpointPath, self.modelName)
self.net.load_state_dict(previousWeight['stateDictEG'])
self.optimizerEG.load_state_dict(previousWeight['optimizerEG'])
self.scheduleLR = previousWeight['schedulerLR']
self.startSteps = int(previousWeight['step'])
customPrint(Fore.YELLOW + "Weight loaded successfully", textWidth=self.barLen)
| modelInference | identifier_name |
validator.go | package hms
import (
"bytes"
"context"
"crypto"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
)
// VerifySignature validate inapp order or subscription data signature. Returns nil if pass.
//
// Document: https://developer.huawei.com/consumer/en/doc/development/HMSCore-Guides-V5/verifying-signature-returned-result-0000001050033088-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/demo.go#L60
func VerifySignature(base64EncodedPublicKey string, data string, signature string) (err error) {
publicKeyByte, err := base64.StdEncoding.DecodeString(base64EncodedPublicKey)
if err != nil {
return err
}
pub, err := x509.ParsePKIXPublicKey(publicKeyByte)
if err != nil {
return err
}
hashed := sha256.Sum256([]byte(data))
signatureByte, err := base64.StdEncoding.DecodeString(signature)
if err != nil {
return err
}
return rsa.VerifyPKCS1v15(pub.(*rsa.PublicKey), crypto.SHA256, hashed[:], signatureByte)
}
// SubscriptionVerifyResponse JSON response after requested {rootUrl}/sub/applications/v2/purchases/get
type SubscriptionVerifyResponse struct {
ResponseCode string `json:"responseCode"` // Response code, if = "0" means succeed, for others see https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/server-error-code-0000001050166248-V5
ResponseMessage string `json:"responseMessage,omitempty"` // Response descriptions, especially when error
InappPurchaseData string `json:"inappPurchaseData,omitempty"` // InappPurchaseData JSON string
}
// VerifySubscription gets subscriptions info with subscriptionId and purchaseToken.
//
// Document: https://developer.huawei.com/consumer/en/doc/development/HMSCore-References-V5/api-subscription-verify-purchase-token-0000001050706080-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/subscription.go#L40
func (c *Client) VerifySubscription(ctx context.Context, purchaseToken, subscriptionID string, accountFlag int64) (InAppPurchaseData, error) {
var iap InAppPurchaseData
dataString, err := c.GetSubscriptionDataString(ctx, purchaseToken, subscriptionID, accountFlag)
if err != nil {
return iap, err
}
if err := json.Unmarshal([]byte(dataString), &iap); err != nil {
return iap, err
}
return iap, nil
}
// GetSubscriptionDataString gets subscriptions response data string.
//
// Document: https://developer.huawei.com/consumer/en/doc/development/HMSCore-References-V5/api-subscription-verify-purchase-token-0000001050706080-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/subscription.go#L40
func (c *Client) GetSubscriptionDataString(ctx context.Context, purchaseToken, subscriptionID string, accountFlag int64) (string, error) {
bodyMap := map[string]string{
"subscriptionId": subscriptionID,
"purchaseToken": purchaseToken,
}
url := c.getRootSubscriptionURLByFlag(accountFlag) + "/sub/applications/v2/purchases/get"
bodyBytes, err := c.sendJSONRequest(ctx, url, bodyMap)
if err != nil {
// log.Printf("GetSubscriptionDataString(): Encounter error: %s", err)
return "", err
}
var resp SubscriptionVerifyResponse
if err := json.Unmarshal(bodyBytes, &resp); err != nil {
return "", err
}
if err := c.getResponseErrorByCode(resp.ResponseCode); err != nil {
return "", err
}
return resp.InappPurchaseData, nil
}
// OrderVerifyResponse JSON response from {rootUrl}/applications/purchases/tokens/verify
type OrderVerifyResponse struct {
ResponseCode string `json:"responseCode"` // Response code, if = "0" means succeed, for others see https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/server-error-code-0000001050166248-V5
ResponseMessage string `json:"responseMessage,omitempty"` // Response descriptions, especially when error
PurchaseTokenData string `json:"purchaseTokenData,omitempty"` // InappPurchaseData JSON string
DataSignature string `json:"dataSignature,omitempty"` // Signature to verify PurchaseTokenData string
}
// VerifyOrder gets order (single item purchase) info with productId and purchaseToken.
//
// Note that this method does not verify the DataSignature, thus security is relied on HTTPS solely.
//
// Document: https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/api-order-verify-purchase-token-0000001050746113-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/order.go#L41
func (c *Client) VerifyOrder(ctx context.Context, purchaseToken, productID string, accountFlag int64) (InAppPurchaseData, error) {
var iap InAppPurchaseData
dataString, _, err := c.GetOrderDataString(ctx, purchaseToken, productID, accountFlag)
if err != nil {
return iap, err
}
if err := json.Unmarshal([]byte(dataString), &iap); err != nil {
return iap, err
}
return iap, nil
}
// GetOrderDataString gets order (single item purchase) response data as json string and dataSignature
//
// Document: https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/api-order-verify-purchase-token-0000001050746113-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/order.go#L41
func (c *Client) GetOrderDataString(ctx context.Context, purchaseToken, productID string, accountFlag int64) (purchaseTokenData, dataSignature string, err error) {
bodyMap := map[string]string{
"purchaseToken": purchaseToken,
"productId": productID,
}
url := c.getRootOrderURLByFlag(accountFlag) + "/applications/purchases/tokens/verify"
bodyBytes, err := c.sendJSONRequest(ctx, url, bodyMap)
if err != nil {
// log.Printf("GetOrderDataString(): Encounter error: %s", err)
return "", "", err
}
var resp OrderVerifyResponse
if err := json.Unmarshal(bodyBytes, &resp); err != nil {
return "", "", err
}
if err := c.getResponseErrorByCode(resp.ResponseCode); err != nil {
return "", "", err
}
return resp.PurchaseTokenData, resp.DataSignature, nil
}
// Helper function to send http json request and get response bodyBytes.
//
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/demo.go#L33
func (c *Client) sendJSONRequest(ctx context.Context, url string, bodyMap map[string]string) (bodyBytes []byte, err error) {
bodyString, err := json.Marshal(bodyMap)
if err != nil {
return
}
req, err := http.NewRequest("POST", url, bytes.NewReader(bodyString))
if err != nil {
return
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
atHeader, err := c.GetApplicationAccessTokenHeader()
if err == nil {
req.Header.Set("Authorization", atHeader)
} else {
return
}
resp, err := c.httpCli.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
bodyBytes, err = ioutil.ReadAll(resp.Body)
if err != nil {
return
}
return
}
// GetCanceledOrRefundedPurchases gets all revoked purchases in CanceledPurchaseList{}.
// This method allow fetch over 1000 results regardles the cap implied by HMS API. Though you should still limit maxRows to a certain number to increate preformance.
//
// In case of an error, this method might return some fetch results if maxRows greater than 1000 or equals 0.
//
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/order.go#L52
// Document: https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/api-cancel-or-refund-record-0000001050746117-V5
func (c *Client) GetCanceledOrRefundedPurchases(
// context of request
ctx context.Context,
// start time timestamp in milliseconds, if =0, will default to 1 month ago.
startAt int64,
// end time timestamp in milliseconds, if =0, will default to now.
endAt int64,
// rows to return. default to 1000 if maxRows>1000 or equals to 0.
maxRows int,
// Token returned in the last query to query the data on the next page.
continuationToken string,
// Query type. Ignore this parameter when continuationToken is passed. The options are as follows:
// 0: Queries purchase information about consumables and non-consumables. This is the default value.
// 1: Queries all purchase information about consumables, non-consumables, and subscriptions.
productType int64,
// Account flag to determine which API URL to use.
accountFlag int64,
) (canceledPurchases []CanceledPurchase, newContinuationToken string, responseCode string, responseMessage string, err error) | {
// default values
if maxRows > 1000 || maxRows < 1 {
maxRows = 1000
}
switch endAt {
case 0:
endAt = time.Now().UnixNano() / 1000000
case startAt:
endAt++
}
bodyMap := map[string]string{
"startAt": fmt.Sprintf("%v", startAt),
"endAt": fmt.Sprintf("%v", endAt),
"maxRows": fmt.Sprintf("%v", maxRows),
"continuationToken": continuationToken,
"type": fmt.Sprintf("%v", productType),
}
url := c.getRootOrderURLByFlag(accountFlag) + "/applications/v2/purchases/cancelledList"
var bodyBytes []byte
bodyBytes, err = c.sendJSONRequest(ctx, url, bodyMap)
if err != nil {
// log.Printf("GetCanceledOrRefundedPurchases(): Encounter error: %s", err)
return
}
var cpl CanceledPurchaseList // temporary variable to store api query result
err = json.Unmarshal(bodyBytes, &cpl)
if err != nil {
return canceledPurchases, continuationToken, cpl.ResponseCode, cpl.ResponseMessage, err
}
if cpl.ResponseCode != "0" {
return canceledPurchases, continuationToken, cpl.ResponseCode, cpl.ResponseMessage, c.getResponseErrorByCode(cpl.ResponseCode)
}
err = json.Unmarshal([]byte(cpl.CancelledPurchaseList), &canceledPurchases)
if err != nil {
return canceledPurchases, continuationToken, cpl.ResponseCode, cpl.ResponseMessage, err
}
return canceledPurchases, cpl.ContinuationToken, cpl.ResponseCode, cpl.ResponseMessage, nil
} | identifier_body | |
validator.go | package hms
import (
"bytes"
"context"
"crypto"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
)
// VerifySignature validate inapp order or subscription data signature. Returns nil if pass.
//
// Document: https://developer.huawei.com/consumer/en/doc/development/HMSCore-Guides-V5/verifying-signature-returned-result-0000001050033088-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/demo.go#L60
func VerifySignature(base64EncodedPublicKey string, data string, signature string) (err error) {
publicKeyByte, err := base64.StdEncoding.DecodeString(base64EncodedPublicKey)
if err != nil {
return err
}
pub, err := x509.ParsePKIXPublicKey(publicKeyByte)
if err != nil {
return err
}
hashed := sha256.Sum256([]byte(data))
signatureByte, err := base64.StdEncoding.DecodeString(signature)
if err != nil {
return err
}
return rsa.VerifyPKCS1v15(pub.(*rsa.PublicKey), crypto.SHA256, hashed[:], signatureByte)
}
// SubscriptionVerifyResponse JSON response after requested {rootUrl}/sub/applications/v2/purchases/get
type SubscriptionVerifyResponse struct {
ResponseCode string `json:"responseCode"` // Response code, if = "0" means succeed, for others see https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/server-error-code-0000001050166248-V5
ResponseMessage string `json:"responseMessage,omitempty"` // Response descriptions, especially when error
InappPurchaseData string `json:"inappPurchaseData,omitempty"` // InappPurchaseData JSON string
}
// VerifySubscription gets subscriptions info with subscriptionId and purchaseToken.
//
// Document: https://developer.huawei.com/consumer/en/doc/development/HMSCore-References-V5/api-subscription-verify-purchase-token-0000001050706080-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/subscription.go#L40
func (c *Client) VerifySubscription(ctx context.Context, purchaseToken, subscriptionID string, accountFlag int64) (InAppPurchaseData, error) {
var iap InAppPurchaseData
dataString, err := c.GetSubscriptionDataString(ctx, purchaseToken, subscriptionID, accountFlag)
if err != nil {
return iap, err
}
if err := json.Unmarshal([]byte(dataString), &iap); err != nil {
return iap, err
}
return iap, nil
}
// GetSubscriptionDataString gets subscriptions response data string.
//
// Document: https://developer.huawei.com/consumer/en/doc/development/HMSCore-References-V5/api-subscription-verify-purchase-token-0000001050706080-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/subscription.go#L40
func (c *Client) GetSubscriptionDataString(ctx context.Context, purchaseToken, subscriptionID string, accountFlag int64) (string, error) {
bodyMap := map[string]string{
"subscriptionId": subscriptionID,
"purchaseToken": purchaseToken,
}
url := c.getRootSubscriptionURLByFlag(accountFlag) + "/sub/applications/v2/purchases/get"
bodyBytes, err := c.sendJSONRequest(ctx, url, bodyMap)
if err != nil {
// log.Printf("GetSubscriptionDataString(): Encounter error: %s", err)
return "", err
}
var resp SubscriptionVerifyResponse
if err := json.Unmarshal(bodyBytes, &resp); err != nil { | }
return resp.InappPurchaseData, nil
}
// OrderVerifyResponse JSON response from {rootUrl}/applications/purchases/tokens/verify
type OrderVerifyResponse struct {
ResponseCode string `json:"responseCode"` // Response code, if = "0" means succeed, for others see https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/server-error-code-0000001050166248-V5
ResponseMessage string `json:"responseMessage,omitempty"` // Response descriptions, especially when error
PurchaseTokenData string `json:"purchaseTokenData,omitempty"` // InappPurchaseData JSON string
DataSignature string `json:"dataSignature,omitempty"` // Signature to verify PurchaseTokenData string
}
// VerifyOrder gets order (single item purchase) info with productId and purchaseToken.
//
// Note that this method does not verify the DataSignature, thus security is relied on HTTPS solely.
//
// Document: https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/api-order-verify-purchase-token-0000001050746113-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/order.go#L41
func (c *Client) VerifyOrder(ctx context.Context, purchaseToken, productID string, accountFlag int64) (InAppPurchaseData, error) {
var iap InAppPurchaseData
dataString, _, err := c.GetOrderDataString(ctx, purchaseToken, productID, accountFlag)
if err != nil {
return iap, err
}
if err := json.Unmarshal([]byte(dataString), &iap); err != nil {
return iap, err
}
return iap, nil
}
// GetOrderDataString gets order (single item purchase) response data as json string and dataSignature
//
// Document: https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/api-order-verify-purchase-token-0000001050746113-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/order.go#L41
func (c *Client) GetOrderDataString(ctx context.Context, purchaseToken, productID string, accountFlag int64) (purchaseTokenData, dataSignature string, err error) {
bodyMap := map[string]string{
"purchaseToken": purchaseToken,
"productId": productID,
}
url := c.getRootOrderURLByFlag(accountFlag) + "/applications/purchases/tokens/verify"
bodyBytes, err := c.sendJSONRequest(ctx, url, bodyMap)
if err != nil {
// log.Printf("GetOrderDataString(): Encounter error: %s", err)
return "", "", err
}
var resp OrderVerifyResponse
if err := json.Unmarshal(bodyBytes, &resp); err != nil {
return "", "", err
}
if err := c.getResponseErrorByCode(resp.ResponseCode); err != nil {
return "", "", err
}
return resp.PurchaseTokenData, resp.DataSignature, nil
}
// Helper function to send http json request and get response bodyBytes.
//
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/demo.go#L33
func (c *Client) sendJSONRequest(ctx context.Context, url string, bodyMap map[string]string) (bodyBytes []byte, err error) {
bodyString, err := json.Marshal(bodyMap)
if err != nil {
return
}
req, err := http.NewRequest("POST", url, bytes.NewReader(bodyString))
if err != nil {
return
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
atHeader, err := c.GetApplicationAccessTokenHeader()
if err == nil {
req.Header.Set("Authorization", atHeader)
} else {
return
}
resp, err := c.httpCli.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
bodyBytes, err = ioutil.ReadAll(resp.Body)
if err != nil {
return
}
return
}
// GetCanceledOrRefundedPurchases gets all revoked purchases in CanceledPurchaseList{}.
// This method allow fetch over 1000 results regardles the cap implied by HMS API. Though you should still limit maxRows to a certain number to increate preformance.
//
// In case of an error, this method might return some fetch results if maxRows greater than 1000 or equals 0.
//
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/order.go#L52
// Document: https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/api-cancel-or-refund-record-0000001050746117-V5
func (c *Client) GetCanceledOrRefundedPurchases(
// context of request
ctx context.Context,
// start time timestamp in milliseconds, if =0, will default to 1 month ago.
startAt int64,
// end time timestamp in milliseconds, if =0, will default to now.
endAt int64,
// rows to return. default to 1000 if maxRows>1000 or equals to 0.
maxRows int,
// Token returned in the last query to query the data on the next page.
continuationToken string,
// Query type. Ignore this parameter when continuationToken is passed. The options are as follows:
// 0: Queries purchase information about consumables and non-consumables. This is the default value.
// 1: Queries all purchase information about consumables, non-consumables, and subscriptions.
productType int64,
// Account flag to determine which API URL to use.
accountFlag int64,
) (canceledPurchases []CanceledPurchase, newContinuationToken string, responseCode string, responseMessage string, err error) {
// default values
if maxRows > 1000 || maxRows < 1 {
maxRows = 1000
}
switch endAt {
case 0:
endAt = time.Now().UnixNano() / 1000000
case startAt:
endAt++
}
bodyMap := map[string]string{
"startAt": fmt.Sprintf("%v", startAt),
"endAt": fmt.Sprintf("%v", endAt),
"maxRows": fmt.Sprintf("%v", maxRows),
"continuationToken": continuationToken,
"type": fmt.Sprintf("%v", productType),
}
url := c.getRootOrderURLByFlag(accountFlag) + "/applications/v2/purchases/cancelledList"
var bodyBytes []byte
bodyBytes, err = c.sendJSONRequest(ctx, url, bodyMap)
if err != nil {
// log.Printf("GetCanceledOrRefundedPurchases(): Encounter error: %s", err)
return
}
var cpl CanceledPurchaseList // temporary variable to store api query result
err = json.Unmarshal(bodyBytes, &cpl)
if err != nil {
return canceledPurchases, continuationToken, cpl.ResponseCode, cpl.ResponseMessage, err
}
if cpl.ResponseCode != "0" {
return canceledPurchases, continuationToken, cpl.ResponseCode, cpl.ResponseMessage, c.getResponseErrorByCode(cpl.ResponseCode)
}
err = json.Unmarshal([]byte(cpl.CancelledPurchaseList), &canceledPurchases)
if err != nil {
return canceledPurchases, continuationToken, cpl.ResponseCode, cpl.ResponseMessage, err
}
return canceledPurchases, cpl.ContinuationToken, cpl.ResponseCode, cpl.ResponseMessage, nil
} | return "", err
}
if err := c.getResponseErrorByCode(resp.ResponseCode); err != nil {
return "", err | random_line_split |
validator.go | package hms
import (
"bytes"
"context"
"crypto"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
)
// VerifySignature validate inapp order or subscription data signature. Returns nil if pass.
//
// Document: https://developer.huawei.com/consumer/en/doc/development/HMSCore-Guides-V5/verifying-signature-returned-result-0000001050033088-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/demo.go#L60
func VerifySignature(base64EncodedPublicKey string, data string, signature string) (err error) {
publicKeyByte, err := base64.StdEncoding.DecodeString(base64EncodedPublicKey)
if err != nil {
return err
}
pub, err := x509.ParsePKIXPublicKey(publicKeyByte)
if err != nil {
return err
}
hashed := sha256.Sum256([]byte(data))
signatureByte, err := base64.StdEncoding.DecodeString(signature)
if err != nil {
return err
}
return rsa.VerifyPKCS1v15(pub.(*rsa.PublicKey), crypto.SHA256, hashed[:], signatureByte)
}
// SubscriptionVerifyResponse JSON response after requested {rootUrl}/sub/applications/v2/purchases/get
type SubscriptionVerifyResponse struct {
ResponseCode string `json:"responseCode"` // Response code, if = "0" means succeed, for others see https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/server-error-code-0000001050166248-V5
ResponseMessage string `json:"responseMessage,omitempty"` // Response descriptions, especially when error
InappPurchaseData string `json:"inappPurchaseData,omitempty"` // InappPurchaseData JSON string
}
// VerifySubscription gets subscriptions info with subscriptionId and purchaseToken.
//
// Document: https://developer.huawei.com/consumer/en/doc/development/HMSCore-References-V5/api-subscription-verify-purchase-token-0000001050706080-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/subscription.go#L40
func (c *Client) VerifySubscription(ctx context.Context, purchaseToken, subscriptionID string, accountFlag int64) (InAppPurchaseData, error) {
var iap InAppPurchaseData
dataString, err := c.GetSubscriptionDataString(ctx, purchaseToken, subscriptionID, accountFlag)
if err != nil {
return iap, err
}
if err := json.Unmarshal([]byte(dataString), &iap); err != nil {
return iap, err
}
return iap, nil
}
// GetSubscriptionDataString gets subscriptions response data string.
//
// Document: https://developer.huawei.com/consumer/en/doc/development/HMSCore-References-V5/api-subscription-verify-purchase-token-0000001050706080-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/subscription.go#L40
func (c *Client) GetSubscriptionDataString(ctx context.Context, purchaseToken, subscriptionID string, accountFlag int64) (string, error) {
bodyMap := map[string]string{
"subscriptionId": subscriptionID,
"purchaseToken": purchaseToken,
}
url := c.getRootSubscriptionURLByFlag(accountFlag) + "/sub/applications/v2/purchases/get"
bodyBytes, err := c.sendJSONRequest(ctx, url, bodyMap)
if err != nil {
// log.Printf("GetSubscriptionDataString(): Encounter error: %s", err)
return "", err
}
var resp SubscriptionVerifyResponse
if err := json.Unmarshal(bodyBytes, &resp); err != nil {
return "", err
}
if err := c.getResponseErrorByCode(resp.ResponseCode); err != nil {
return "", err
}
return resp.InappPurchaseData, nil
}
// OrderVerifyResponse JSON response from {rootUrl}/applications/purchases/tokens/verify
type OrderVerifyResponse struct {
ResponseCode string `json:"responseCode"` // Response code, if = "0" means succeed, for others see https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/server-error-code-0000001050166248-V5
ResponseMessage string `json:"responseMessage,omitempty"` // Response descriptions, especially when error
PurchaseTokenData string `json:"purchaseTokenData,omitempty"` // InappPurchaseData JSON string
DataSignature string `json:"dataSignature,omitempty"` // Signature to verify PurchaseTokenData string
}
// VerifyOrder gets order (single item purchase) info with productId and purchaseToken.
//
// Note that this method does not verify the DataSignature, thus security is relied on HTTPS solely.
//
// Document: https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/api-order-verify-purchase-token-0000001050746113-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/order.go#L41
func (c *Client) VerifyOrder(ctx context.Context, purchaseToken, productID string, accountFlag int64) (InAppPurchaseData, error) {
var iap InAppPurchaseData
dataString, _, err := c.GetOrderDataString(ctx, purchaseToken, productID, accountFlag)
if err != nil {
return iap, err
}
if err := json.Unmarshal([]byte(dataString), &iap); err != nil {
return iap, err
}
return iap, nil
}
// GetOrderDataString gets order (single item purchase) response data as json string and dataSignature
//
// Document: https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/api-order-verify-purchase-token-0000001050746113-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/order.go#L41
func (c *Client) GetOrderDataString(ctx context.Context, purchaseToken, productID string, accountFlag int64) (purchaseTokenData, dataSignature string, err error) {
bodyMap := map[string]string{
"purchaseToken": purchaseToken,
"productId": productID,
}
url := c.getRootOrderURLByFlag(accountFlag) + "/applications/purchases/tokens/verify"
bodyBytes, err := c.sendJSONRequest(ctx, url, bodyMap)
if err != nil {
// log.Printf("GetOrderDataString(): Encounter error: %s", err)
return "", "", err
}
var resp OrderVerifyResponse
if err := json.Unmarshal(bodyBytes, &resp); err != nil {
return "", "", err
}
if err := c.getResponseErrorByCode(resp.ResponseCode); err != nil {
return "", "", err
}
return resp.PurchaseTokenData, resp.DataSignature, nil
}
// Helper function to send http json request and get response bodyBytes.
//
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/demo.go#L33
func (c *Client) sendJSONRequest(ctx context.Context, url string, bodyMap map[string]string) (bodyBytes []byte, err error) {
bodyString, err := json.Marshal(bodyMap)
if err != nil {
return
}
req, err := http.NewRequest("POST", url, bytes.NewReader(bodyString))
if err != nil {
return
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
atHeader, err := c.GetApplicationAccessTokenHeader()
if err == nil {
req.Header.Set("Authorization", atHeader)
} else {
return
}
resp, err := c.httpCli.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
bodyBytes, err = ioutil.ReadAll(resp.Body)
if err != nil {
return
}
return
}
// GetCanceledOrRefundedPurchases gets all revoked purchases in CanceledPurchaseList{}.
// This method allow fetch over 1000 results regardles the cap implied by HMS API. Though you should still limit maxRows to a certain number to increate preformance.
//
// In case of an error, this method might return some fetch results if maxRows greater than 1000 or equals 0.
//
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/order.go#L52
// Document: https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/api-cancel-or-refund-record-0000001050746117-V5
func (c *Client) GetCanceledOrRefundedPurchases(
// context of request
ctx context.Context,
// start time timestamp in milliseconds, if =0, will default to 1 month ago.
startAt int64,
// end time timestamp in milliseconds, if =0, will default to now.
endAt int64,
// rows to return. default to 1000 if maxRows>1000 or equals to 0.
maxRows int,
// Token returned in the last query to query the data on the next page.
continuationToken string,
// Query type. Ignore this parameter when continuationToken is passed. The options are as follows:
// 0: Queries purchase information about consumables and non-consumables. This is the default value.
// 1: Queries all purchase information about consumables, non-consumables, and subscriptions.
productType int64,
// Account flag to determine which API URL to use.
accountFlag int64,
) (canceledPurchases []CanceledPurchase, newContinuationToken string, responseCode string, responseMessage string, err error) {
// default values
if maxRows > 1000 || maxRows < 1 {
maxRows = 1000
}
switch endAt {
case 0:
endAt = time.Now().UnixNano() / 1000000
case startAt:
endAt++
}
bodyMap := map[string]string{
"startAt": fmt.Sprintf("%v", startAt),
"endAt": fmt.Sprintf("%v", endAt),
"maxRows": fmt.Sprintf("%v", maxRows),
"continuationToken": continuationToken,
"type": fmt.Sprintf("%v", productType),
}
url := c.getRootOrderURLByFlag(accountFlag) + "/applications/v2/purchases/cancelledList"
var bodyBytes []byte
bodyBytes, err = c.sendJSONRequest(ctx, url, bodyMap)
if err != nil |
var cpl CanceledPurchaseList // temporary variable to store api query result
err = json.Unmarshal(bodyBytes, &cpl)
if err != nil {
return canceledPurchases, continuationToken, cpl.ResponseCode, cpl.ResponseMessage, err
}
if cpl.ResponseCode != "0" {
return canceledPurchases, continuationToken, cpl.ResponseCode, cpl.ResponseMessage, c.getResponseErrorByCode(cpl.ResponseCode)
}
err = json.Unmarshal([]byte(cpl.CancelledPurchaseList), &canceledPurchases)
if err != nil {
return canceledPurchases, continuationToken, cpl.ResponseCode, cpl.ResponseMessage, err
}
return canceledPurchases, cpl.ContinuationToken, cpl.ResponseCode, cpl.ResponseMessage, nil
}
| {
// log.Printf("GetCanceledOrRefundedPurchases(): Encounter error: %s", err)
return
} | conditional_block |
validator.go | package hms
import (
"bytes"
"context"
"crypto"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
)
// VerifySignature validate inapp order or subscription data signature. Returns nil if pass.
//
// Document: https://developer.huawei.com/consumer/en/doc/development/HMSCore-Guides-V5/verifying-signature-returned-result-0000001050033088-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/demo.go#L60
func VerifySignature(base64EncodedPublicKey string, data string, signature string) (err error) {
publicKeyByte, err := base64.StdEncoding.DecodeString(base64EncodedPublicKey)
if err != nil {
return err
}
pub, err := x509.ParsePKIXPublicKey(publicKeyByte)
if err != nil {
return err
}
hashed := sha256.Sum256([]byte(data))
signatureByte, err := base64.StdEncoding.DecodeString(signature)
if err != nil {
return err
}
return rsa.VerifyPKCS1v15(pub.(*rsa.PublicKey), crypto.SHA256, hashed[:], signatureByte)
}
// SubscriptionVerifyResponse JSON response after requested {rootUrl}/sub/applications/v2/purchases/get
type SubscriptionVerifyResponse struct {
ResponseCode string `json:"responseCode"` // Response code, if = "0" means succeed, for others see https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/server-error-code-0000001050166248-V5
ResponseMessage string `json:"responseMessage,omitempty"` // Response descriptions, especially when error
InappPurchaseData string `json:"inappPurchaseData,omitempty"` // InappPurchaseData JSON string
}
// VerifySubscription gets subscriptions info with subscriptionId and purchaseToken.
//
// Document: https://developer.huawei.com/consumer/en/doc/development/HMSCore-References-V5/api-subscription-verify-purchase-token-0000001050706080-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/subscription.go#L40
func (c *Client) VerifySubscription(ctx context.Context, purchaseToken, subscriptionID string, accountFlag int64) (InAppPurchaseData, error) {
var iap InAppPurchaseData
dataString, err := c.GetSubscriptionDataString(ctx, purchaseToken, subscriptionID, accountFlag)
if err != nil {
return iap, err
}
if err := json.Unmarshal([]byte(dataString), &iap); err != nil {
return iap, err
}
return iap, nil
}
// GetSubscriptionDataString gets subscriptions response data string.
//
// Document: https://developer.huawei.com/consumer/en/doc/development/HMSCore-References-V5/api-subscription-verify-purchase-token-0000001050706080-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/subscription.go#L40
func (c *Client) GetSubscriptionDataString(ctx context.Context, purchaseToken, subscriptionID string, accountFlag int64) (string, error) {
bodyMap := map[string]string{
"subscriptionId": subscriptionID,
"purchaseToken": purchaseToken,
}
url := c.getRootSubscriptionURLByFlag(accountFlag) + "/sub/applications/v2/purchases/get"
bodyBytes, err := c.sendJSONRequest(ctx, url, bodyMap)
if err != nil {
// log.Printf("GetSubscriptionDataString(): Encounter error: %s", err)
return "", err
}
var resp SubscriptionVerifyResponse
if err := json.Unmarshal(bodyBytes, &resp); err != nil {
return "", err
}
if err := c.getResponseErrorByCode(resp.ResponseCode); err != nil {
return "", err
}
return resp.InappPurchaseData, nil
}
// OrderVerifyResponse JSON response from {rootUrl}/applications/purchases/tokens/verify
type OrderVerifyResponse struct {
ResponseCode string `json:"responseCode"` // Response code, if = "0" means succeed, for others see https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/server-error-code-0000001050166248-V5
ResponseMessage string `json:"responseMessage,omitempty"` // Response descriptions, especially when error
PurchaseTokenData string `json:"purchaseTokenData,omitempty"` // InappPurchaseData JSON string
DataSignature string `json:"dataSignature,omitempty"` // Signature to verify PurchaseTokenData string
}
// VerifyOrder gets order (single item purchase) info with productId and purchaseToken.
//
// Note that this method does not verify the DataSignature, thus security is relied on HTTPS solely.
//
// Document: https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/api-order-verify-purchase-token-0000001050746113-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/order.go#L41
func (c *Client) VerifyOrder(ctx context.Context, purchaseToken, productID string, accountFlag int64) (InAppPurchaseData, error) {
var iap InAppPurchaseData
dataString, _, err := c.GetOrderDataString(ctx, purchaseToken, productID, accountFlag)
if err != nil {
return iap, err
}
if err := json.Unmarshal([]byte(dataString), &iap); err != nil {
return iap, err
}
return iap, nil
}
// GetOrderDataString gets order (single item purchase) response data as json string and dataSignature
//
// Document: https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/api-order-verify-purchase-token-0000001050746113-V5
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/order.go#L41
func (c *Client) GetOrderDataString(ctx context.Context, purchaseToken, productID string, accountFlag int64) (purchaseTokenData, dataSignature string, err error) {
bodyMap := map[string]string{
"purchaseToken": purchaseToken,
"productId": productID,
}
url := c.getRootOrderURLByFlag(accountFlag) + "/applications/purchases/tokens/verify"
bodyBytes, err := c.sendJSONRequest(ctx, url, bodyMap)
if err != nil {
// log.Printf("GetOrderDataString(): Encounter error: %s", err)
return "", "", err
}
var resp OrderVerifyResponse
if err := json.Unmarshal(bodyBytes, &resp); err != nil {
return "", "", err
}
if err := c.getResponseErrorByCode(resp.ResponseCode); err != nil {
return "", "", err
}
return resp.PurchaseTokenData, resp.DataSignature, nil
}
// Helper function to send http json request and get response bodyBytes.
//
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/demo.go#L33
func (c *Client) sendJSONRequest(ctx context.Context, url string, bodyMap map[string]string) (bodyBytes []byte, err error) {
bodyString, err := json.Marshal(bodyMap)
if err != nil {
return
}
req, err := http.NewRequest("POST", url, bytes.NewReader(bodyString))
if err != nil {
return
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/json; charset=UTF-8")
atHeader, err := c.GetApplicationAccessTokenHeader()
if err == nil {
req.Header.Set("Authorization", atHeader)
} else {
return
}
resp, err := c.httpCli.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
bodyBytes, err = ioutil.ReadAll(resp.Body)
if err != nil {
return
}
return
}
// GetCanceledOrRefundedPurchases gets all revoked purchases in CanceledPurchaseList{}.
// This method allow fetch over 1000 results regardles the cap implied by HMS API. Though you should still limit maxRows to a certain number to increate preformance.
//
// In case of an error, this method might return some fetch results if maxRows greater than 1000 or equals 0.
//
// Source code originated from https://github.com/HMS-Core/hms-iap-serverdemo/blob/92241f97fed1b68ddeb7cb37ea4ca6e6d33d2a87/demo/order.go#L52
// Document: https://developer.huawei.com/consumer/en/doc/HMSCore-References-V5/api-cancel-or-refund-record-0000001050746117-V5
func (c *Client) | (
// context of request
ctx context.Context,
// start time timestamp in milliseconds, if =0, will default to 1 month ago.
startAt int64,
// end time timestamp in milliseconds, if =0, will default to now.
endAt int64,
// rows to return. default to 1000 if maxRows>1000 or equals to 0.
maxRows int,
// Token returned in the last query to query the data on the next page.
continuationToken string,
// Query type. Ignore this parameter when continuationToken is passed. The options are as follows:
// 0: Queries purchase information about consumables and non-consumables. This is the default value.
// 1: Queries all purchase information about consumables, non-consumables, and subscriptions.
productType int64,
// Account flag to determine which API URL to use.
accountFlag int64,
) (canceledPurchases []CanceledPurchase, newContinuationToken string, responseCode string, responseMessage string, err error) {
// default values
if maxRows > 1000 || maxRows < 1 {
maxRows = 1000
}
switch endAt {
case 0:
endAt = time.Now().UnixNano() / 1000000
case startAt:
endAt++
}
bodyMap := map[string]string{
"startAt": fmt.Sprintf("%v", startAt),
"endAt": fmt.Sprintf("%v", endAt),
"maxRows": fmt.Sprintf("%v", maxRows),
"continuationToken": continuationToken,
"type": fmt.Sprintf("%v", productType),
}
url := c.getRootOrderURLByFlag(accountFlag) + "/applications/v2/purchases/cancelledList"
var bodyBytes []byte
bodyBytes, err = c.sendJSONRequest(ctx, url, bodyMap)
if err != nil {
// log.Printf("GetCanceledOrRefundedPurchases(): Encounter error: %s", err)
return
}
var cpl CanceledPurchaseList // temporary variable to store api query result
err = json.Unmarshal(bodyBytes, &cpl)
if err != nil {
return canceledPurchases, continuationToken, cpl.ResponseCode, cpl.ResponseMessage, err
}
if cpl.ResponseCode != "0" {
return canceledPurchases, continuationToken, cpl.ResponseCode, cpl.ResponseMessage, c.getResponseErrorByCode(cpl.ResponseCode)
}
err = json.Unmarshal([]byte(cpl.CancelledPurchaseList), &canceledPurchases)
if err != nil {
return canceledPurchases, continuationToken, cpl.ResponseCode, cpl.ResponseMessage, err
}
return canceledPurchases, cpl.ContinuationToken, cpl.ResponseCode, cpl.ResponseMessage, nil
}
| GetCanceledOrRefundedPurchases | identifier_name |
model.rs | use super::{
constants::*, empty_named_tuple, rewrite::Rewrite, sequent::RelSequent, symbol::Symbol, Error,
NamedTuple, Tuple,
};
use crate::chase::{r#impl::basic::BasicWitnessTerm, Model, Observation, E};
use codd::expression as rel_exp;
use itertools::Itertools;
use razor_fol::syntax::Sig;
use std::{collections::HashMap, fmt};
/// Implements an instance of [`Model`] with an underlying database.
/// It uses [`BasicWitnessTerm`] from the basic implementation to represent observations.
///
/// [`Model`]: crate::chase::Model
/// [`WitnessTerm`]: crate::chase::impl::basic::BasicWitnessTerm
pub struct RelModel {
/// Is a unique identifier for this model.
id: u64,
/// Keeps track of the next index to assign to a new element of this model.
element_index: i32,
/// Maps *flat* witness terms to elements of this model.
///
/// **Hint**: Flat (witness) terms are terms that do not contain any complex sub-terms
/// that consist of functions applications.
rewrites: HashMap<BasicWitnessTerm, E>,
/// Stores the information contained in this model.
database: codd::Database,
/// Maps each symbol to their corresponding relational expression.
relations: HashMap<Symbol, rel_exp::Relation<Tuple>>,
}
impl RelModel {
/// Creates a new model over the given `signature`.
pub fn new(signature: &Sig) -> Self {
let mut database = codd::Database::new();
let relations = relations_map(signature, &mut database).unwrap();
Self {
id: rand::random(),
element_index: 0,
rewrites: HashMap::new(),
database,
relations,
}
}
/// Creates a new element for the given `witness` and records that `witness`
/// denotes the new element.
fn new_element(&mut self, witness: BasicWitnessTerm) -> E {
let element = E(self.element_index);
self.element_index += 1;
self.rewrites.insert(witness, element);
element
}
// assumes that the witness term is flat
pub(super) fn record(&mut self, witness: BasicWitnessTerm) -> E {
match witness {
BasicWitnessTerm::Elem(e) => e,
_ => self
.rewrites
.get(&witness)
.copied()
.unwrap_or_else(|| self.new_element(witness)),
}
}
/// Evaluates a sequent in the model.
pub(super) fn evaluate<'a>(&self, sequent: &'a RelSequent) -> Vec<NamedTuple<'a>> {
let tuples = self.database.evaluate(sequent.expression()).unwrap();
tuples
.into_tuples()
.into_iter()
.map(|tuple| {
let mut elements = empty_named_tuple();
for (i, attr) in sequent.attributes().iter().enumerate() {
elements.insert(attr, tuple[i]);
}
elements
})
.collect()
}
pub(super) fn insert(
&mut self,
symbol: &Symbol,
mut tuples: codd::Tuples<Tuple>,
) -> Result<(), Error> {
// record result of function applications as a witness term to minimize
// creating new elements later on:
match symbol {
Symbol::Const(_) => {
for t in tuples.iter() {
self.rewrites.entry(symbol.witness(&[])?).or_insert(t[0]);
}
}
Symbol::Func { arity, .. } => {
for t in tuples.iter() {
self.rewrites
.entry(symbol.witness(&t[0..(*arity as usize)])?)
.or_insert(t[*arity as usize]);
}
} |
if let Some(relation) = self.relations.get(symbol) {
if let Symbol::Equality = symbol {
let to_add = tuples.iter().map(|t| vec![t[1], t[0]]).collect_vec();
tuples.extend(to_add);
};
self.database.insert(relation, tuples).map_err(Error::from)
} else {
Err(Error::MissingSymbol {
symbol: symbol.to_string(),
})
}
}
/// Returns a mutable reference to the underlying database of this model.
pub(super) fn database_mut(&mut self) -> &mut codd::Database {
&mut self.database
}
fn equation_rewrites(&self) -> Result<Rewrite<E>, Error> {
let mut rewrite = Rewrite::new();
let eq_relation = self
.relations
.get(&Symbol::Equality)
.ok_or(Error::MissingSymbol {
symbol: EQUALITY.into(),
})?;
let equations = self.database.evaluate(&eq_relation)?;
for eq in equations.iter() {
rewrite.rewrite(&eq[0], &eq[1])
}
Ok(rewrite)
}
fn rewrite_model(&mut self, rewrite: &Rewrite<E>) {
let mut conversion_map = HashMap::new();
let normal_forms = rewrite.normal_forms().into_iter().sorted();
for (count, item) in normal_forms.into_iter().enumerate() {
conversion_map.insert(item, E(count as i32));
}
let domain = self.domain();
for element in domain.iter() {
let canonical = rewrite.normalize(element).unwrap();
if conversion_map.contains_key(element) {
continue;
}
let convert = *conversion_map
.get(rewrite.normalize(canonical).unwrap())
.unwrap();
conversion_map.insert(element, convert);
}
let mut rewrites = HashMap::new();
for (term, element) in &self.rewrites {
let new_term = match &term {
BasicWitnessTerm::Elem(e) => {
BasicWitnessTerm::Elem(*conversion_map.get(e).unwrap())
}
BasicWitnessTerm::Const(_) => term.clone(),
BasicWitnessTerm::App { function, terms } => BasicWitnessTerm::App {
function: function.clone(),
terms: terms
.iter()
.map(|e| {
let e = match e {
BasicWitnessTerm::Elem(e) => e,
_ => unreachable!(),
};
BasicWitnessTerm::Elem(*conversion_map.get(e).unwrap())
})
.collect(),
},
};
let new_element = *conversion_map.get(element).unwrap();
rewrites.insert(new_term, new_element);
}
let mut database = codd::Database::new();
for relation in self.relations.values() {
let new_relation = database.add_relation(relation.name()).unwrap();
let tuples = self.database.evaluate(relation).unwrap();
let new_tuples: codd::Tuples<_> = tuples
.into_tuples()
.into_iter()
.map(|tuple| {
tuple
.into_iter()
.map(|e| *conversion_map.get(&e).unwrap())
.collect_vec()
})
.collect_vec()
.into();
database.insert(&new_relation, new_tuples).unwrap();
}
self.rewrites = rewrites;
self.database = database;
}
}
impl Model for RelModel {
type TermType = BasicWitnessTerm;
fn get_id(&self) -> u64 {
self.id
}
fn domain(&self) -> Vec<E> {
self.database
.evaluate(self.relations.get(&Symbol::Domain).unwrap())
.unwrap()
.iter()
.map(|e| e[0])
.collect()
}
fn facts(&self) -> Vec<Observation<Self::TermType>> {
let mut result = Vec::new();
for (symbol, relation) in &self.relations {
match symbol {
Symbol::Domain | Symbol::Equality => {}
_ => {
let observations = Vec::new();
let tuples = self.database.evaluate(relation).unwrap();
for t in tuples.into_tuples() {
result.push(symbol.observation(&t).unwrap());
}
result.extend(observations);
}
}
}
result
}
fn witness(&self, element: &E) -> Vec<BasicWitnessTerm> {
self.rewrites
.iter()
.filter(|(_, e)| *e == element)
.map(|(t, _)| t)
.cloned()
.collect()
}
fn element(&self, witness: &BasicWitnessTerm) -> Option<E> {
match witness {
BasicWitnessTerm::Elem(element) => self.domain().into_iter().find(|e| e == element),
BasicWitnessTerm::Const(_) => self.rewrites.get(witness).cloned(),
BasicWitnessTerm::App { function, terms } => {
let terms: Vec<Option<E>> = terms.iter().map(|t| self.element(t)).collect();
if terms.iter().any(|e| e.is_none()) {
None
} else {
let terms: Vec<BasicWitnessTerm> =
terms.into_iter().map(|e| e.unwrap().into()).collect();
self.rewrites
.get(&BasicWitnessTerm::App {
function: (*function).clone(),
terms,
})
.cloned()
}
}
}
}
fn finalize(mut self) -> Self {
let rewrites = self.equation_rewrites().unwrap();
self.rewrite_model(&rewrites);
self
}
}
impl Clone for RelModel {
fn clone(&self) -> Self {
Self {
id: rand::random(),
element_index: self.element_index,
rewrites: self.rewrites.clone(),
database: self.database.clone(),
relations: self.relations.clone(),
}
}
}
impl fmt::Debug for RelModel {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let domain: Vec<String> = self.domain().into_iter().map(|e| e.to_string()).collect();
let elements: Vec<String> = self
.domain()
.iter()
.sorted()
.iter()
.map(|e| {
let witnesses: Vec<String> =
self.witness(e).iter().map(|w| w.to_string()).collect();
let witnesses = witnesses.into_iter().sorted();
format!("{} -> {}", witnesses.into_iter().sorted().join(", "), e)
})
.collect();
let facts: Vec<String> = self.facts().into_iter().map(|e| e.to_string()).collect();
write!(
f,
"Domain: {{{}}}\nElements:{}\nFacts: {}\n",
domain.join(", "),
elements.join(", "),
facts.join(", ")
)
}
}
// Creates a dictionary of signatures and their corresponding relations to
// access their instances in the database.
fn relations_map(
sig: &Sig,
db: &mut codd::Database,
) -> Result<HashMap<Symbol, rel_exp::Relation<Tuple>>, Error> {
let mut relations = HashMap::new();
for c in sig.constants().iter() {
let name = constant_instance_name(c);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(Symbol::Const(c.clone()), relation);
}
for f in sig.functions().values() {
let name = function_instance_name(&f.symbol);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(
Symbol::Func {
symbol: f.symbol.clone(),
arity: f.arity,
},
relation,
);
}
for p in sig.predicates().values() {
if p.symbol.name() == EQUALITY {
continue; // Equality is a special case (below)
}
let name = predicate_instance_name(&p.symbol);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(
Symbol::Pred {
symbol: p.symbol.clone(),
arity: p.arity,
},
relation,
);
}
relations.insert(Symbol::Domain, db.add_relation::<Tuple>(DOMAIN)?);
relations
.entry(Symbol::Equality)
.or_insert(db.add_relation::<Tuple>(EQUALITY)?);
Ok(relations)
} | _ => {}
} | random_line_split |
model.rs | use super::{
constants::*, empty_named_tuple, rewrite::Rewrite, sequent::RelSequent, symbol::Symbol, Error,
NamedTuple, Tuple,
};
use crate::chase::{r#impl::basic::BasicWitnessTerm, Model, Observation, E};
use codd::expression as rel_exp;
use itertools::Itertools;
use razor_fol::syntax::Sig;
use std::{collections::HashMap, fmt};
/// Implements an instance of [`Model`] with an underlying database.
/// It uses [`BasicWitnessTerm`] from the basic implementation to represent observations.
///
/// [`Model`]: crate::chase::Model
/// [`WitnessTerm`]: crate::chase::impl::basic::BasicWitnessTerm
pub struct RelModel {
/// Is a unique identifier for this model.
id: u64,
/// Keeps track of the next index to assign to a new element of this model.
element_index: i32,
/// Maps *flat* witness terms to elements of this model.
///
/// **Hint**: Flat (witness) terms are terms that do not contain any complex sub-terms
/// that consist of functions applications.
rewrites: HashMap<BasicWitnessTerm, E>,
/// Stores the information contained in this model.
database: codd::Database,
/// Maps each symbol to their corresponding relational expression.
relations: HashMap<Symbol, rel_exp::Relation<Tuple>>,
}
impl RelModel {
/// Creates a new model over the given `signature`.
pub fn new(signature: &Sig) -> Self {
let mut database = codd::Database::new();
let relations = relations_map(signature, &mut database).unwrap();
Self {
id: rand::random(),
element_index: 0,
rewrites: HashMap::new(),
database,
relations,
}
}
/// Creates a new element for the given `witness` and records that `witness`
/// denotes the new element.
fn new_element(&mut self, witness: BasicWitnessTerm) -> E |
// assumes that the witness term is flat
pub(super) fn record(&mut self, witness: BasicWitnessTerm) -> E {
match witness {
BasicWitnessTerm::Elem(e) => e,
_ => self
.rewrites
.get(&witness)
.copied()
.unwrap_or_else(|| self.new_element(witness)),
}
}
/// Evaluates a sequent in the model.
pub(super) fn evaluate<'a>(&self, sequent: &'a RelSequent) -> Vec<NamedTuple<'a>> {
let tuples = self.database.evaluate(sequent.expression()).unwrap();
tuples
.into_tuples()
.into_iter()
.map(|tuple| {
let mut elements = empty_named_tuple();
for (i, attr) in sequent.attributes().iter().enumerate() {
elements.insert(attr, tuple[i]);
}
elements
})
.collect()
}
pub(super) fn insert(
&mut self,
symbol: &Symbol,
mut tuples: codd::Tuples<Tuple>,
) -> Result<(), Error> {
// record result of function applications as a witness term to minimize
// creating new elements later on:
match symbol {
Symbol::Const(_) => {
for t in tuples.iter() {
self.rewrites.entry(symbol.witness(&[])?).or_insert(t[0]);
}
}
Symbol::Func { arity, .. } => {
for t in tuples.iter() {
self.rewrites
.entry(symbol.witness(&t[0..(*arity as usize)])?)
.or_insert(t[*arity as usize]);
}
}
_ => {}
}
if let Some(relation) = self.relations.get(symbol) {
if let Symbol::Equality = symbol {
let to_add = tuples.iter().map(|t| vec![t[1], t[0]]).collect_vec();
tuples.extend(to_add);
};
self.database.insert(relation, tuples).map_err(Error::from)
} else {
Err(Error::MissingSymbol {
symbol: symbol.to_string(),
})
}
}
/// Returns a mutable reference to the underlying database of this model.
pub(super) fn database_mut(&mut self) -> &mut codd::Database {
&mut self.database
}
fn equation_rewrites(&self) -> Result<Rewrite<E>, Error> {
let mut rewrite = Rewrite::new();
let eq_relation = self
.relations
.get(&Symbol::Equality)
.ok_or(Error::MissingSymbol {
symbol: EQUALITY.into(),
})?;
let equations = self.database.evaluate(&eq_relation)?;
for eq in equations.iter() {
rewrite.rewrite(&eq[0], &eq[1])
}
Ok(rewrite)
}
fn rewrite_model(&mut self, rewrite: &Rewrite<E>) {
let mut conversion_map = HashMap::new();
let normal_forms = rewrite.normal_forms().into_iter().sorted();
for (count, item) in normal_forms.into_iter().enumerate() {
conversion_map.insert(item, E(count as i32));
}
let domain = self.domain();
for element in domain.iter() {
let canonical = rewrite.normalize(element).unwrap();
if conversion_map.contains_key(element) {
continue;
}
let convert = *conversion_map
.get(rewrite.normalize(canonical).unwrap())
.unwrap();
conversion_map.insert(element, convert);
}
let mut rewrites = HashMap::new();
for (term, element) in &self.rewrites {
let new_term = match &term {
BasicWitnessTerm::Elem(e) => {
BasicWitnessTerm::Elem(*conversion_map.get(e).unwrap())
}
BasicWitnessTerm::Const(_) => term.clone(),
BasicWitnessTerm::App { function, terms } => BasicWitnessTerm::App {
function: function.clone(),
terms: terms
.iter()
.map(|e| {
let e = match e {
BasicWitnessTerm::Elem(e) => e,
_ => unreachable!(),
};
BasicWitnessTerm::Elem(*conversion_map.get(e).unwrap())
})
.collect(),
},
};
let new_element = *conversion_map.get(element).unwrap();
rewrites.insert(new_term, new_element);
}
let mut database = codd::Database::new();
for relation in self.relations.values() {
let new_relation = database.add_relation(relation.name()).unwrap();
let tuples = self.database.evaluate(relation).unwrap();
let new_tuples: codd::Tuples<_> = tuples
.into_tuples()
.into_iter()
.map(|tuple| {
tuple
.into_iter()
.map(|e| *conversion_map.get(&e).unwrap())
.collect_vec()
})
.collect_vec()
.into();
database.insert(&new_relation, new_tuples).unwrap();
}
self.rewrites = rewrites;
self.database = database;
}
}
impl Model for RelModel {
type TermType = BasicWitnessTerm;
fn get_id(&self) -> u64 {
self.id
}
fn domain(&self) -> Vec<E> {
self.database
.evaluate(self.relations.get(&Symbol::Domain).unwrap())
.unwrap()
.iter()
.map(|e| e[0])
.collect()
}
fn facts(&self) -> Vec<Observation<Self::TermType>> {
let mut result = Vec::new();
for (symbol, relation) in &self.relations {
match symbol {
Symbol::Domain | Symbol::Equality => {}
_ => {
let observations = Vec::new();
let tuples = self.database.evaluate(relation).unwrap();
for t in tuples.into_tuples() {
result.push(symbol.observation(&t).unwrap());
}
result.extend(observations);
}
}
}
result
}
fn witness(&self, element: &E) -> Vec<BasicWitnessTerm> {
self.rewrites
.iter()
.filter(|(_, e)| *e == element)
.map(|(t, _)| t)
.cloned()
.collect()
}
fn element(&self, witness: &BasicWitnessTerm) -> Option<E> {
match witness {
BasicWitnessTerm::Elem(element) => self.domain().into_iter().find(|e| e == element),
BasicWitnessTerm::Const(_) => self.rewrites.get(witness).cloned(),
BasicWitnessTerm::App { function, terms } => {
let terms: Vec<Option<E>> = terms.iter().map(|t| self.element(t)).collect();
if terms.iter().any(|e| e.is_none()) {
None
} else {
let terms: Vec<BasicWitnessTerm> =
terms.into_iter().map(|e| e.unwrap().into()).collect();
self.rewrites
.get(&BasicWitnessTerm::App {
function: (*function).clone(),
terms,
})
.cloned()
}
}
}
}
fn finalize(mut self) -> Self {
let rewrites = self.equation_rewrites().unwrap();
self.rewrite_model(&rewrites);
self
}
}
impl Clone for RelModel {
fn clone(&self) -> Self {
Self {
id: rand::random(),
element_index: self.element_index,
rewrites: self.rewrites.clone(),
database: self.database.clone(),
relations: self.relations.clone(),
}
}
}
impl fmt::Debug for RelModel {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let domain: Vec<String> = self.domain().into_iter().map(|e| e.to_string()).collect();
let elements: Vec<String> = self
.domain()
.iter()
.sorted()
.iter()
.map(|e| {
let witnesses: Vec<String> =
self.witness(e).iter().map(|w| w.to_string()).collect();
let witnesses = witnesses.into_iter().sorted();
format!("{} -> {}", witnesses.into_iter().sorted().join(", "), e)
})
.collect();
let facts: Vec<String> = self.facts().into_iter().map(|e| e.to_string()).collect();
write!(
f,
"Domain: {{{}}}\nElements:{}\nFacts: {}\n",
domain.join(", "),
elements.join(", "),
facts.join(", ")
)
}
}
// Creates a dictionary of signatures and their corresponding relations to
// access their instances in the database.
fn relations_map(
sig: &Sig,
db: &mut codd::Database,
) -> Result<HashMap<Symbol, rel_exp::Relation<Tuple>>, Error> {
let mut relations = HashMap::new();
for c in sig.constants().iter() {
let name = constant_instance_name(c);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(Symbol::Const(c.clone()), relation);
}
for f in sig.functions().values() {
let name = function_instance_name(&f.symbol);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(
Symbol::Func {
symbol: f.symbol.clone(),
arity: f.arity,
},
relation,
);
}
for p in sig.predicates().values() {
if p.symbol.name() == EQUALITY {
continue; // Equality is a special case (below)
}
let name = predicate_instance_name(&p.symbol);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(
Symbol::Pred {
symbol: p.symbol.clone(),
arity: p.arity,
},
relation,
);
}
relations.insert(Symbol::Domain, db.add_relation::<Tuple>(DOMAIN)?);
relations
.entry(Symbol::Equality)
.or_insert(db.add_relation::<Tuple>(EQUALITY)?);
Ok(relations)
}
| {
let element = E(self.element_index);
self.element_index += 1;
self.rewrites.insert(witness, element);
element
} | identifier_body |
model.rs | use super::{
constants::*, empty_named_tuple, rewrite::Rewrite, sequent::RelSequent, symbol::Symbol, Error,
NamedTuple, Tuple,
};
use crate::chase::{r#impl::basic::BasicWitnessTerm, Model, Observation, E};
use codd::expression as rel_exp;
use itertools::Itertools;
use razor_fol::syntax::Sig;
use std::{collections::HashMap, fmt};
/// Implements an instance of [`Model`] with an underlying database.
/// It uses [`BasicWitnessTerm`] from the basic implementation to represent observations.
///
/// [`Model`]: crate::chase::Model
/// [`WitnessTerm`]: crate::chase::impl::basic::BasicWitnessTerm
pub struct RelModel {
/// Is a unique identifier for this model.
id: u64,
/// Keeps track of the next index to assign to a new element of this model.
element_index: i32,
/// Maps *flat* witness terms to elements of this model.
///
/// **Hint**: Flat (witness) terms are terms that do not contain any complex sub-terms
/// that consist of functions applications.
rewrites: HashMap<BasicWitnessTerm, E>,
/// Stores the information contained in this model.
database: codd::Database,
/// Maps each symbol to their corresponding relational expression.
relations: HashMap<Symbol, rel_exp::Relation<Tuple>>,
}
impl RelModel {
/// Creates a new model over the given `signature`.
pub fn new(signature: &Sig) -> Self {
let mut database = codd::Database::new();
let relations = relations_map(signature, &mut database).unwrap();
Self {
id: rand::random(),
element_index: 0,
rewrites: HashMap::new(),
database,
relations,
}
}
/// Creates a new element for the given `witness` and records that `witness`
/// denotes the new element.
fn new_element(&mut self, witness: BasicWitnessTerm) -> E {
let element = E(self.element_index);
self.element_index += 1;
self.rewrites.insert(witness, element);
element
}
// assumes that the witness term is flat
pub(super) fn record(&mut self, witness: BasicWitnessTerm) -> E {
match witness {
BasicWitnessTerm::Elem(e) => e,
_ => self
.rewrites
.get(&witness)
.copied()
.unwrap_or_else(|| self.new_element(witness)),
}
}
/// Evaluates a sequent in the model.
pub(super) fn evaluate<'a>(&self, sequent: &'a RelSequent) -> Vec<NamedTuple<'a>> {
let tuples = self.database.evaluate(sequent.expression()).unwrap();
tuples
.into_tuples()
.into_iter()
.map(|tuple| {
let mut elements = empty_named_tuple();
for (i, attr) in sequent.attributes().iter().enumerate() {
elements.insert(attr, tuple[i]);
}
elements
})
.collect()
}
pub(super) fn insert(
&mut self,
symbol: &Symbol,
mut tuples: codd::Tuples<Tuple>,
) -> Result<(), Error> {
// record result of function applications as a witness term to minimize
// creating new elements later on:
match symbol {
Symbol::Const(_) => {
for t in tuples.iter() {
self.rewrites.entry(symbol.witness(&[])?).or_insert(t[0]);
}
}
Symbol::Func { arity, .. } => {
for t in tuples.iter() {
self.rewrites
.entry(symbol.witness(&t[0..(*arity as usize)])?)
.or_insert(t[*arity as usize]);
}
}
_ => {}
}
if let Some(relation) = self.relations.get(symbol) {
if let Symbol::Equality = symbol {
let to_add = tuples.iter().map(|t| vec![t[1], t[0]]).collect_vec();
tuples.extend(to_add);
};
self.database.insert(relation, tuples).map_err(Error::from)
} else {
Err(Error::MissingSymbol {
symbol: symbol.to_string(),
})
}
}
/// Returns a mutable reference to the underlying database of this model.
pub(super) fn database_mut(&mut self) -> &mut codd::Database {
&mut self.database
}
fn equation_rewrites(&self) -> Result<Rewrite<E>, Error> {
let mut rewrite = Rewrite::new();
let eq_relation = self
.relations
.get(&Symbol::Equality)
.ok_or(Error::MissingSymbol {
symbol: EQUALITY.into(),
})?;
let equations = self.database.evaluate(&eq_relation)?;
for eq in equations.iter() {
rewrite.rewrite(&eq[0], &eq[1])
}
Ok(rewrite)
}
fn rewrite_model(&mut self, rewrite: &Rewrite<E>) {
let mut conversion_map = HashMap::new();
let normal_forms = rewrite.normal_forms().into_iter().sorted();
for (count, item) in normal_forms.into_iter().enumerate() {
conversion_map.insert(item, E(count as i32));
}
let domain = self.domain();
for element in domain.iter() {
let canonical = rewrite.normalize(element).unwrap();
if conversion_map.contains_key(element) {
continue;
}
let convert = *conversion_map
.get(rewrite.normalize(canonical).unwrap())
.unwrap();
conversion_map.insert(element, convert);
}
let mut rewrites = HashMap::new();
for (term, element) in &self.rewrites {
let new_term = match &term {
BasicWitnessTerm::Elem(e) => {
BasicWitnessTerm::Elem(*conversion_map.get(e).unwrap())
}
BasicWitnessTerm::Const(_) => term.clone(),
BasicWitnessTerm::App { function, terms } => BasicWitnessTerm::App {
function: function.clone(),
terms: terms
.iter()
.map(|e| {
let e = match e {
BasicWitnessTerm::Elem(e) => e,
_ => unreachable!(),
};
BasicWitnessTerm::Elem(*conversion_map.get(e).unwrap())
})
.collect(),
},
};
let new_element = *conversion_map.get(element).unwrap();
rewrites.insert(new_term, new_element);
}
let mut database = codd::Database::new();
for relation in self.relations.values() {
let new_relation = database.add_relation(relation.name()).unwrap();
let tuples = self.database.evaluate(relation).unwrap();
let new_tuples: codd::Tuples<_> = tuples
.into_tuples()
.into_iter()
.map(|tuple| {
tuple
.into_iter()
.map(|e| *conversion_map.get(&e).unwrap())
.collect_vec()
})
.collect_vec()
.into();
database.insert(&new_relation, new_tuples).unwrap();
}
self.rewrites = rewrites;
self.database = database;
}
}
impl Model for RelModel {
type TermType = BasicWitnessTerm;
fn get_id(&self) -> u64 {
self.id
}
fn domain(&self) -> Vec<E> {
self.database
.evaluate(self.relations.get(&Symbol::Domain).unwrap())
.unwrap()
.iter()
.map(|e| e[0])
.collect()
}
fn facts(&self) -> Vec<Observation<Self::TermType>> {
let mut result = Vec::new();
for (symbol, relation) in &self.relations {
match symbol {
Symbol::Domain | Symbol::Equality => {}
_ => {
let observations = Vec::new();
let tuples = self.database.evaluate(relation).unwrap();
for t in tuples.into_tuples() {
result.push(symbol.observation(&t).unwrap());
}
result.extend(observations);
}
}
}
result
}
fn | (&self, element: &E) -> Vec<BasicWitnessTerm> {
self.rewrites
.iter()
.filter(|(_, e)| *e == element)
.map(|(t, _)| t)
.cloned()
.collect()
}
fn element(&self, witness: &BasicWitnessTerm) -> Option<E> {
match witness {
BasicWitnessTerm::Elem(element) => self.domain().into_iter().find(|e| e == element),
BasicWitnessTerm::Const(_) => self.rewrites.get(witness).cloned(),
BasicWitnessTerm::App { function, terms } => {
let terms: Vec<Option<E>> = terms.iter().map(|t| self.element(t)).collect();
if terms.iter().any(|e| e.is_none()) {
None
} else {
let terms: Vec<BasicWitnessTerm> =
terms.into_iter().map(|e| e.unwrap().into()).collect();
self.rewrites
.get(&BasicWitnessTerm::App {
function: (*function).clone(),
terms,
})
.cloned()
}
}
}
}
fn finalize(mut self) -> Self {
let rewrites = self.equation_rewrites().unwrap();
self.rewrite_model(&rewrites);
self
}
}
impl Clone for RelModel {
fn clone(&self) -> Self {
Self {
id: rand::random(),
element_index: self.element_index,
rewrites: self.rewrites.clone(),
database: self.database.clone(),
relations: self.relations.clone(),
}
}
}
impl fmt::Debug for RelModel {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
let domain: Vec<String> = self.domain().into_iter().map(|e| e.to_string()).collect();
let elements: Vec<String> = self
.domain()
.iter()
.sorted()
.iter()
.map(|e| {
let witnesses: Vec<String> =
self.witness(e).iter().map(|w| w.to_string()).collect();
let witnesses = witnesses.into_iter().sorted();
format!("{} -> {}", witnesses.into_iter().sorted().join(", "), e)
})
.collect();
let facts: Vec<String> = self.facts().into_iter().map(|e| e.to_string()).collect();
write!(
f,
"Domain: {{{}}}\nElements:{}\nFacts: {}\n",
domain.join(", "),
elements.join(", "),
facts.join(", ")
)
}
}
// Creates a dictionary of signatures and their corresponding relations to
// access their instances in the database.
fn relations_map(
sig: &Sig,
db: &mut codd::Database,
) -> Result<HashMap<Symbol, rel_exp::Relation<Tuple>>, Error> {
let mut relations = HashMap::new();
for c in sig.constants().iter() {
let name = constant_instance_name(c);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(Symbol::Const(c.clone()), relation);
}
for f in sig.functions().values() {
let name = function_instance_name(&f.symbol);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(
Symbol::Func {
symbol: f.symbol.clone(),
arity: f.arity,
},
relation,
);
}
for p in sig.predicates().values() {
if p.symbol.name() == EQUALITY {
continue; // Equality is a special case (below)
}
let name = predicate_instance_name(&p.symbol);
let relation = db.add_relation::<Tuple>(&name)?;
relations.insert(
Symbol::Pred {
symbol: p.symbol.clone(),
arity: p.arity,
},
relation,
);
}
relations.insert(Symbol::Domain, db.add_relation::<Tuple>(DOMAIN)?);
relations
.entry(Symbol::Equality)
.or_insert(db.add_relation::<Tuple>(EQUALITY)?);
Ok(relations)
}
| witness | identifier_name |
mod.rs | mod graphviz;
pub mod locals;
mod source;
pub use source::initialise_statics;
use rustc_hir::definitions::DefPathData;
use rustc_mir::interpret::{AllocId, Machine, Pointer};
use rustc_target::abi::Size;
use horrorshow::{Raw, Template};
use rocket::response::content::Html;
use crate::step::Breakpoint;
use crate::PrirodaContext;
pub fn template(pcx: &PrirodaContext<'_, '_>, title: String, t: impl Template) -> Html<String> {
let mut buf = String::new();
(horrorshow::html! {
html {
head {
title { : title }
meta(charset = "UTF-8") {}
script(src="/resources/svg-pan-zoom.js") {}
script(src="/resources/zoom_mir.js") {}
: Raw(refresh_script(pcx))
}
body(onload="enable_mir_mousewheel()") {
link(rel="stylesheet", href="/resources/positioning.css");
link(rel="stylesheet", href=format!("/resources/style-{}.css", pcx.config.theme));
: t
}
}
})
.write_to_string(&mut buf)
.unwrap();
Html(buf)
}
pub fn refresh_script(pcx: &PrirodaContext<'_, '_>) -> String {
if pcx.config.auto_refresh | else {
String::new()
}
}
pub fn render_main_window(
pcx: &PrirodaContext<'_, '_>,
display_frame: Option<usize>,
message: String,
) -> Html<String> {
let is_active_stack_frame = match display_frame {
Some(n) => n == Machine::stack(&pcx.ecx).len() - 1,
None => true,
};
let frame = display_frame
.and_then(|frame| Machine::stack(&pcx.ecx).get(frame))
.or_else(|| Machine::stack(&pcx.ecx).last());
let stack: Vec<(String, String, String)> = Machine::stack(&pcx.ecx)
.iter()
.map(|frame| {
let instance = &frame.instance;
let span = frame.current_source_info().unwrap().span;
let name = if pcx
.ecx
.tcx
.def_key(instance.def_id())
.disambiguated_data
.data
== DefPathData::ClosureExpr
{
"inside call to closure".to_string()
} else {
instance.to_string()
};
let span = self::source::pretty_src_path(span);
(name, span, format!("{:?}", instance.def_id()))
})
.collect();
let rendered_breakpoints: Vec<String> = pcx
.config
.bptree
.iter()
.map(|&Breakpoint(def_id, bb, stmt)| format!("{:?}@{}:{}", def_id, bb.index(), stmt))
.collect();
let rendered_locals = frame
.map(|frame| locals::render_locals(&pcx.ecx, frame))
.unwrap_or_else(String::new);
let rendered_source = source::render_source(pcx.ecx.tcx.tcx, frame);
let mir_graph = frame.map(|frame| {
graphviz::render_html(frame, pcx.config.bptree.for_def_id(frame.instance.def_id()))
});
let filename = pcx
.ecx
.tcx
.sess
.local_crate_source_file
.as_ref()
.map(|f| f.display().to_string())
.unwrap_or_else(|| "no file name".to_string());
template(
pcx,
filename,
horrorshow::html! {
div(id="left") {
div(id="commands") {
@ if is_active_stack_frame {
a(href="/step/single") { div(title="Execute next MIR statement/terminator") { : "Step" } }
a(href="/step/next") { div(title="Run until after the next MIR statement/terminator") { : "Next" } }
a(href="/step/return") { div(title="Run until the function returns") { : "Return" } }
a(href="/step/single_back") { div(title="Execute previous MIR statement/terminator (restarts and steps till one stmt before the current stmt)") { : "Step back (slow)" } }
a(href="/step/continue") { div(title="Run until termination or breakpoint") { : "Continue" } }
a(href="/step/restart") { div(title="Abort execution and restart") { : "Restart" } }
a(href="/breakpoints/add_here") { div(title="Add breakpoint at current location") { : "Add breakpoint here"} }
a(href="/breakpoints/remove_all") { div(title="Remove all breakpoints") { : "Remove all breakpoints"} }
} else {
a(href="/") { div(title="Go to active stack frame") { : "Go back to active stack frame" } }
}
}
div(id="messages") {
p { : message }
}
div(id="mir") {
: Raw(mir_graph.unwrap_or_else(|| "no current function".to_string()))
}
}
div(id="right") {
div {
: format!("Step count: {}", pcx.step_count);
}
div(id="stack") {
table(border="1") {
@ for (i, &(ref s, ref span, ref def_id)) in stack.iter().enumerate().rev() {
tr {
@ if i == display_frame.unwrap_or(stack.len() - 1) { td { : Raw("→") } } else { td; }
td { : s }
td { : span }
td { : def_id }
@ if i == display_frame.unwrap_or(stack.len() - 1) { td; } else { td { a(href=format!("/frame/{}", i)) { : "View" } } }
}
}
}
}
div(id="breakpoints") {
: "Breakpoints: "; br;
table(border="1") {
@ for bp in rendered_breakpoints {
tr {
td { : &bp }
td { a(href=format!("/breakpoints/remove/{}", bp)) { : "remove" } }
}
}
}
}
div(id="locals") {
: Raw(rendered_locals)
}
div(id="source") {
: rendered_source
}
}
},
)
}
pub fn render_reverse_ptr(pcx: &PrirodaContext<'_, '_>, alloc_id: u64) -> Html<String> {
let allocs: Vec<_> = pcx.ecx.memory.alloc_map().iter(|values| {
values
.filter_map(|(&id, (_kind, alloc))| {
alloc
.relocations()
.values()
.find(|&&(_tag, reloc)| reloc == id)
.map(|_| id)
})
.collect()
});
template(
pcx,
format!("Allocations with pointers to Allocation {}", alloc_id),
horrorshow::html! {
@for id in allocs {
a(href=format!("/ptr/{}", id)) { : format!("Allocation {}", id) }
br;
}
},
)
}
pub fn render_ptr_memory(
pcx: &PrirodaContext<'_, '_>,
alloc_id: AllocId,
offset: u64,
) -> Html<String> {
let (mem, offset, rest) = if let Ok((_, mem, bytes)) = locals::print_ptr(
&pcx.ecx,
Pointer::new(alloc_id, Size::from_bytes(offset))
.with_tag(miri::Tag::Untagged)
.into(),
None,
) {
if bytes * 2 > offset {
(mem, offset, (bytes * 2 - offset - 1) as usize)
} else if bytes * 2 == 0 && offset == 0 {
(mem, 0, 0)
} else {
("out of bounds offset".to_string(), 0, 0)
}
} else {
("unknown memory".to_string(), 0, 0)
};
template(
pcx,
format!("Allocation {}", alloc_id),
horrorshow::html! {
span(style="font-family: monospace") {
: format!("{nil:.<offset$}┌{nil:─<rest$}", nil = "", offset = offset as usize, rest = rest)
}
br;
span(style="font-family: monospace") { : Raw(mem) }
br;
a(href=format!("/reverse_ptr/{}", alloc_id)) { : "List allocations with pointers into this allocation" }
},
)
}
pub struct FlashString(String);
impl<'a, 'r> ::rocket::request::FromRequest<'a, 'r> for FlashString {
type Error = !;
fn from_request(request: &'a rocket::Request<'r>) -> rocket::request::Outcome<Self, !> {
rocket::Outcome::Success(FlashString(
Option::<rocket::request::FlashMessage<'_, '_>>::from_request(request)?
.map(|flash| flash.msg().to_string())
.unwrap_or_else(String::new),
))
}
}
pub mod routes {
use super::*;
use crate::*;
pub fn routes() -> Vec<::rocket::Route> {
routes![index, frame, frame_invalid, ptr, reverse_ptr]
}
#[get("/")]
pub fn index(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, None, flash.0))
}
#[get("/frame/<frame>")]
pub fn frame(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
frame: usize,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, Some(frame), flash.0))
}
#[get("/frame/<frame>", rank = 42)] // Error handler
fn frame_invalid(frame: String) -> BadRequest<String> {
BadRequest(Some(format!(
"not a number: {:?}",
frame.parse::<usize>().unwrap_err()
)))
}
#[get("/ptr/<alloc_id>/<offset>")]
pub fn ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
alloc_id: u64,
offset: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_ptr_memory(pcx, AllocId(alloc_id), offset))
}
#[get("/reverse_ptr/<ptr>")]
fn reverse_ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
ptr: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_reverse_ptr(pcx, ptr))
}
}
| {
r#"<script>
setInterval(() => {
fetch("/step_count").then((res) => {
if(res.status == 200) {
return res.text();
} else {
throw "";
}
}).then((res) => {
if(res != #step_count#) {
window.location.reload();
}
}).catch(()=>{});
}, 1000);
</script>"#
.replace("#step_count#", &format!("{}", pcx.step_count))
} | conditional_block |
mod.rs | mod graphviz;
pub mod locals;
mod source;
pub use source::initialise_statics;
use rustc_hir::definitions::DefPathData;
use rustc_mir::interpret::{AllocId, Machine, Pointer};
use rustc_target::abi::Size;
use horrorshow::{Raw, Template};
use rocket::response::content::Html;
use crate::step::Breakpoint;
use crate::PrirodaContext;
pub fn template(pcx: &PrirodaContext<'_, '_>, title: String, t: impl Template) -> Html<String> {
let mut buf = String::new();
(horrorshow::html! {
html {
head {
title { : title }
meta(charset = "UTF-8") {}
script(src="/resources/svg-pan-zoom.js") {}
script(src="/resources/zoom_mir.js") {}
: Raw(refresh_script(pcx))
}
body(onload="enable_mir_mousewheel()") {
link(rel="stylesheet", href="/resources/positioning.css");
link(rel="stylesheet", href=format!("/resources/style-{}.css", pcx.config.theme));
: t
}
}
})
.write_to_string(&mut buf)
.unwrap();
Html(buf)
}
pub fn refresh_script(pcx: &PrirodaContext<'_, '_>) -> String {
if pcx.config.auto_refresh {
r#"<script>
setInterval(() => {
fetch("/step_count").then((res) => {
if(res.status == 200) {
return res.text();
} else {
throw "";
}
}).then((res) => {
if(res != #step_count#) {
window.location.reload();
}
}).catch(()=>{});
}, 1000);
</script>"#
.replace("#step_count#", &format!("{}", pcx.step_count))
} else {
String::new()
}
}
pub fn render_main_window(
pcx: &PrirodaContext<'_, '_>,
display_frame: Option<usize>,
message: String,
) -> Html<String> {
let is_active_stack_frame = match display_frame {
Some(n) => n == Machine::stack(&pcx.ecx).len() - 1,
None => true,
};
let frame = display_frame
.and_then(|frame| Machine::stack(&pcx.ecx).get(frame))
.or_else(|| Machine::stack(&pcx.ecx).last());
let stack: Vec<(String, String, String)> = Machine::stack(&pcx.ecx)
.iter()
.map(|frame| {
let instance = &frame.instance;
let span = frame.current_source_info().unwrap().span;
let name = if pcx
.ecx
.tcx
.def_key(instance.def_id())
.disambiguated_data
.data
== DefPathData::ClosureExpr
{
"inside call to closure".to_string()
} else {
instance.to_string()
};
let span = self::source::pretty_src_path(span);
(name, span, format!("{:?}", instance.def_id()))
})
.collect();
let rendered_breakpoints: Vec<String> = pcx
.config
.bptree
.iter()
.map(|&Breakpoint(def_id, bb, stmt)| format!("{:?}@{}:{}", def_id, bb.index(), stmt))
.collect();
let rendered_locals = frame
.map(|frame| locals::render_locals(&pcx.ecx, frame))
.unwrap_or_else(String::new);
let rendered_source = source::render_source(pcx.ecx.tcx.tcx, frame);
let mir_graph = frame.map(|frame| {
graphviz::render_html(frame, pcx.config.bptree.for_def_id(frame.instance.def_id()))
});
let filename = pcx
.ecx
.tcx
.sess
.local_crate_source_file
.as_ref()
.map(|f| f.display().to_string())
.unwrap_or_else(|| "no file name".to_string());
template(
pcx,
filename,
horrorshow::html! {
div(id="left") {
div(id="commands") {
@ if is_active_stack_frame {
a(href="/step/single") { div(title="Execute next MIR statement/terminator") { : "Step" } }
a(href="/step/next") { div(title="Run until after the next MIR statement/terminator") { : "Next" } }
a(href="/step/return") { div(title="Run until the function returns") { : "Return" } }
a(href="/step/single_back") { div(title="Execute previous MIR statement/terminator (restarts and steps till one stmt before the current stmt)") { : "Step back (slow)" } }
a(href="/step/continue") { div(title="Run until termination or breakpoint") { : "Continue" } }
a(href="/step/restart") { div(title="Abort execution and restart") { : "Restart" } }
a(href="/breakpoints/add_here") { div(title="Add breakpoint at current location") { : "Add breakpoint here"} }
a(href="/breakpoints/remove_all") { div(title="Remove all breakpoints") { : "Remove all breakpoints"} }
} else {
a(href="/") { div(title="Go to active stack frame") { : "Go back to active stack frame" } }
}
}
div(id="messages") {
p { : message }
}
div(id="mir") {
: Raw(mir_graph.unwrap_or_else(|| "no current function".to_string()))
} | div(id="stack") {
table(border="1") {
@ for (i, &(ref s, ref span, ref def_id)) in stack.iter().enumerate().rev() {
tr {
@ if i == display_frame.unwrap_or(stack.len() - 1) { td { : Raw("→") } } else { td; }
td { : s }
td { : span }
td { : def_id }
@ if i == display_frame.unwrap_or(stack.len() - 1) { td; } else { td { a(href=format!("/frame/{}", i)) { : "View" } } }
}
}
}
}
div(id="breakpoints") {
: "Breakpoints: "; br;
table(border="1") {
@ for bp in rendered_breakpoints {
tr {
td { : &bp }
td { a(href=format!("/breakpoints/remove/{}", bp)) { : "remove" } }
}
}
}
}
div(id="locals") {
: Raw(rendered_locals)
}
div(id="source") {
: rendered_source
}
}
},
)
}
pub fn render_reverse_ptr(pcx: &PrirodaContext<'_, '_>, alloc_id: u64) -> Html<String> {
let allocs: Vec<_> = pcx.ecx.memory.alloc_map().iter(|values| {
values
.filter_map(|(&id, (_kind, alloc))| {
alloc
.relocations()
.values()
.find(|&&(_tag, reloc)| reloc == id)
.map(|_| id)
})
.collect()
});
template(
pcx,
format!("Allocations with pointers to Allocation {}", alloc_id),
horrorshow::html! {
@for id in allocs {
a(href=format!("/ptr/{}", id)) { : format!("Allocation {}", id) }
br;
}
},
)
}
pub fn render_ptr_memory(
pcx: &PrirodaContext<'_, '_>,
alloc_id: AllocId,
offset: u64,
) -> Html<String> {
let (mem, offset, rest) = if let Ok((_, mem, bytes)) = locals::print_ptr(
&pcx.ecx,
Pointer::new(alloc_id, Size::from_bytes(offset))
.with_tag(miri::Tag::Untagged)
.into(),
None,
) {
if bytes * 2 > offset {
(mem, offset, (bytes * 2 - offset - 1) as usize)
} else if bytes * 2 == 0 && offset == 0 {
(mem, 0, 0)
} else {
("out of bounds offset".to_string(), 0, 0)
}
} else {
("unknown memory".to_string(), 0, 0)
};
template(
pcx,
format!("Allocation {}", alloc_id),
horrorshow::html! {
span(style="font-family: monospace") {
: format!("{nil:.<offset$}┌{nil:─<rest$}", nil = "", offset = offset as usize, rest = rest)
}
br;
span(style="font-family: monospace") { : Raw(mem) }
br;
a(href=format!("/reverse_ptr/{}", alloc_id)) { : "List allocations with pointers into this allocation" }
},
)
}
pub struct FlashString(String);
impl<'a, 'r> ::rocket::request::FromRequest<'a, 'r> for FlashString {
type Error = !;
fn from_request(request: &'a rocket::Request<'r>) -> rocket::request::Outcome<Self, !> {
rocket::Outcome::Success(FlashString(
Option::<rocket::request::FlashMessage<'_, '_>>::from_request(request)?
.map(|flash| flash.msg().to_string())
.unwrap_or_else(String::new),
))
}
}
pub mod routes {
use super::*;
use crate::*;
pub fn routes() -> Vec<::rocket::Route> {
routes![index, frame, frame_invalid, ptr, reverse_ptr]
}
#[get("/")]
pub fn index(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, None, flash.0))
}
#[get("/frame/<frame>")]
pub fn frame(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
frame: usize,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, Some(frame), flash.0))
}
#[get("/frame/<frame>", rank = 42)] // Error handler
fn frame_invalid(frame: String) -> BadRequest<String> {
BadRequest(Some(format!(
"not a number: {:?}",
frame.parse::<usize>().unwrap_err()
)))
}
#[get("/ptr/<alloc_id>/<offset>")]
pub fn ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
alloc_id: u64,
offset: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_ptr_memory(pcx, AllocId(alloc_id), offset))
}
#[get("/reverse_ptr/<ptr>")]
fn reverse_ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
ptr: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_reverse_ptr(pcx, ptr))
}
} | }
div(id="right") {
div {
: format!("Step count: {}", pcx.step_count);
} | random_line_split |
mod.rs | mod graphviz;
pub mod locals;
mod source;
pub use source::initialise_statics;
use rustc_hir::definitions::DefPathData;
use rustc_mir::interpret::{AllocId, Machine, Pointer};
use rustc_target::abi::Size;
use horrorshow::{Raw, Template};
use rocket::response::content::Html;
use crate::step::Breakpoint;
use crate::PrirodaContext;
pub fn template(pcx: &PrirodaContext<'_, '_>, title: String, t: impl Template) -> Html<String> {
let mut buf = String::new();
(horrorshow::html! {
html {
head {
title { : title }
meta(charset = "UTF-8") {}
script(src="/resources/svg-pan-zoom.js") {}
script(src="/resources/zoom_mir.js") {}
: Raw(refresh_script(pcx))
}
body(onload="enable_mir_mousewheel()") {
link(rel="stylesheet", href="/resources/positioning.css");
link(rel="stylesheet", href=format!("/resources/style-{}.css", pcx.config.theme));
: t
}
}
})
.write_to_string(&mut buf)
.unwrap();
Html(buf)
}
pub fn refresh_script(pcx: &PrirodaContext<'_, '_>) -> String {
if pcx.config.auto_refresh {
r#"<script>
setInterval(() => {
fetch("/step_count").then((res) => {
if(res.status == 200) {
return res.text();
} else {
throw "";
}
}).then((res) => {
if(res != #step_count#) {
window.location.reload();
}
}).catch(()=>{});
}, 1000);
</script>"#
.replace("#step_count#", &format!("{}", pcx.step_count))
} else {
String::new()
}
}
pub fn render_main_window(
pcx: &PrirodaContext<'_, '_>,
display_frame: Option<usize>,
message: String,
) -> Html<String> {
let is_active_stack_frame = match display_frame {
Some(n) => n == Machine::stack(&pcx.ecx).len() - 1,
None => true,
};
let frame = display_frame
.and_then(|frame| Machine::stack(&pcx.ecx).get(frame))
.or_else(|| Machine::stack(&pcx.ecx).last());
let stack: Vec<(String, String, String)> = Machine::stack(&pcx.ecx)
.iter()
.map(|frame| {
let instance = &frame.instance;
let span = frame.current_source_info().unwrap().span;
let name = if pcx
.ecx
.tcx
.def_key(instance.def_id())
.disambiguated_data
.data
== DefPathData::ClosureExpr
{
"inside call to closure".to_string()
} else {
instance.to_string()
};
let span = self::source::pretty_src_path(span);
(name, span, format!("{:?}", instance.def_id()))
})
.collect();
let rendered_breakpoints: Vec<String> = pcx
.config
.bptree
.iter()
.map(|&Breakpoint(def_id, bb, stmt)| format!("{:?}@{}:{}", def_id, bb.index(), stmt))
.collect();
let rendered_locals = frame
.map(|frame| locals::render_locals(&pcx.ecx, frame))
.unwrap_or_else(String::new);
let rendered_source = source::render_source(pcx.ecx.tcx.tcx, frame);
let mir_graph = frame.map(|frame| {
graphviz::render_html(frame, pcx.config.bptree.for_def_id(frame.instance.def_id()))
});
let filename = pcx
.ecx
.tcx
.sess
.local_crate_source_file
.as_ref()
.map(|f| f.display().to_string())
.unwrap_or_else(|| "no file name".to_string());
template(
pcx,
filename,
horrorshow::html! {
div(id="left") {
div(id="commands") {
@ if is_active_stack_frame {
a(href="/step/single") { div(title="Execute next MIR statement/terminator") { : "Step" } }
a(href="/step/next") { div(title="Run until after the next MIR statement/terminator") { : "Next" } }
a(href="/step/return") { div(title="Run until the function returns") { : "Return" } }
a(href="/step/single_back") { div(title="Execute previous MIR statement/terminator (restarts and steps till one stmt before the current stmt)") { : "Step back (slow)" } }
a(href="/step/continue") { div(title="Run until termination or breakpoint") { : "Continue" } }
a(href="/step/restart") { div(title="Abort execution and restart") { : "Restart" } }
a(href="/breakpoints/add_here") { div(title="Add breakpoint at current location") { : "Add breakpoint here"} }
a(href="/breakpoints/remove_all") { div(title="Remove all breakpoints") { : "Remove all breakpoints"} }
} else {
a(href="/") { div(title="Go to active stack frame") { : "Go back to active stack frame" } }
}
}
div(id="messages") {
p { : message }
}
div(id="mir") {
: Raw(mir_graph.unwrap_or_else(|| "no current function".to_string()))
}
}
div(id="right") {
div {
: format!("Step count: {}", pcx.step_count);
}
div(id="stack") {
table(border="1") {
@ for (i, &(ref s, ref span, ref def_id)) in stack.iter().enumerate().rev() {
tr {
@ if i == display_frame.unwrap_or(stack.len() - 1) { td { : Raw("→") } } else { td; }
td { : s }
td { : span }
td { : def_id }
@ if i == display_frame.unwrap_or(stack.len() - 1) { td; } else { td { a(href=format!("/frame/{}", i)) { : "View" } } }
}
}
}
}
div(id="breakpoints") {
: "Breakpoints: "; br;
table(border="1") {
@ for bp in rendered_breakpoints {
tr {
td { : &bp }
td { a(href=format!("/breakpoints/remove/{}", bp)) { : "remove" } }
}
}
}
}
div(id="locals") {
: Raw(rendered_locals)
}
div(id="source") {
: rendered_source
}
}
},
)
}
pub fn render_reverse_ptr(pcx: &PrirodaContext<'_, '_>, alloc_id: u64) -> Html<String> {
let allocs: Vec<_> = pcx.ecx.memory.alloc_map().iter(|values| {
values
.filter_map(|(&id, (_kind, alloc))| {
alloc
.relocations()
.values()
.find(|&&(_tag, reloc)| reloc == id)
.map(|_| id)
})
.collect()
});
template(
pcx,
format!("Allocations with pointers to Allocation {}", alloc_id),
horrorshow::html! {
@for id in allocs {
a(href=format!("/ptr/{}", id)) { : format!("Allocation {}", id) }
br;
}
},
)
}
pub fn render_ptr_memory(
pcx: &PrirodaContext<'_, '_>,
alloc_id: AllocId,
offset: u64,
) -> Html<String> {
let (mem, offset, rest) = if let Ok((_, mem, bytes)) = locals::print_ptr(
&pcx.ecx,
Pointer::new(alloc_id, Size::from_bytes(offset))
.with_tag(miri::Tag::Untagged)
.into(),
None,
) {
if bytes * 2 > offset {
(mem, offset, (bytes * 2 - offset - 1) as usize)
} else if bytes * 2 == 0 && offset == 0 {
(mem, 0, 0)
} else {
("out of bounds offset".to_string(), 0, 0)
}
} else {
("unknown memory".to_string(), 0, 0)
};
template(
pcx,
format!("Allocation {}", alloc_id),
horrorshow::html! {
span(style="font-family: monospace") {
: format!("{nil:.<offset$}┌{nil:─<rest$}", nil = "", offset = offset as usize, rest = rest)
}
br;
span(style="font-family: monospace") { : Raw(mem) }
br;
a(href=format!("/reverse_ptr/{}", alloc_id)) { : "List allocations with pointers into this allocation" }
},
)
}
pub struct FlashString(String);
impl<'a, 'r> ::rocket::request::FromRequest<'a, 'r> for FlashString {
type Error = !;
fn from_request(request: &'a rocket::Request<'r>) -> rocket::request::Outcome<Self, !> {
rocket::Outcome::Success(FlashString(
Option::<rocket::request::FlashMessage<'_, '_>>::from_request(request)?
.map(|flash| flash.msg().to_string())
.unwrap_or_else(String::new),
))
}
}
pub mod routes {
use super::*;
use crate::*;
pub fn routes() -> Vec<::rocket::Route> {
routes![index, frame, frame_invalid, ptr, reverse_ptr]
}
#[get("/")]
pub fn index(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, None, flash.0))
}
#[get("/frame/<frame>")]
pub fn frame(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
frame: usize,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, Some(frame), flash.0))
}
#[get("/frame/<frame>", rank = 42)] // Error handler
fn fram | me: String) -> BadRequest<String> {
BadRequest(Some(format!(
"not a number: {:?}",
frame.parse::<usize>().unwrap_err()
)))
}
#[get("/ptr/<alloc_id>/<offset>")]
pub fn ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
alloc_id: u64,
offset: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_ptr_memory(pcx, AllocId(alloc_id), offset))
}
#[get("/reverse_ptr/<ptr>")]
fn reverse_ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
ptr: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_reverse_ptr(pcx, ptr))
}
}
| e_invalid(fra | identifier_name |
mod.rs | mod graphviz;
pub mod locals;
mod source;
pub use source::initialise_statics;
use rustc_hir::definitions::DefPathData;
use rustc_mir::interpret::{AllocId, Machine, Pointer};
use rustc_target::abi::Size;
use horrorshow::{Raw, Template};
use rocket::response::content::Html;
use crate::step::Breakpoint;
use crate::PrirodaContext;
pub fn template(pcx: &PrirodaContext<'_, '_>, title: String, t: impl Template) -> Html<String> |
pub fn refresh_script(pcx: &PrirodaContext<'_, '_>) -> String {
if pcx.config.auto_refresh {
r#"<script>
setInterval(() => {
fetch("/step_count").then((res) => {
if(res.status == 200) {
return res.text();
} else {
throw "";
}
}).then((res) => {
if(res != #step_count#) {
window.location.reload();
}
}).catch(()=>{});
}, 1000);
</script>"#
.replace("#step_count#", &format!("{}", pcx.step_count))
} else {
String::new()
}
}
pub fn render_main_window(
pcx: &PrirodaContext<'_, '_>,
display_frame: Option<usize>,
message: String,
) -> Html<String> {
let is_active_stack_frame = match display_frame {
Some(n) => n == Machine::stack(&pcx.ecx).len() - 1,
None => true,
};
let frame = display_frame
.and_then(|frame| Machine::stack(&pcx.ecx).get(frame))
.or_else(|| Machine::stack(&pcx.ecx).last());
let stack: Vec<(String, String, String)> = Machine::stack(&pcx.ecx)
.iter()
.map(|frame| {
let instance = &frame.instance;
let span = frame.current_source_info().unwrap().span;
let name = if pcx
.ecx
.tcx
.def_key(instance.def_id())
.disambiguated_data
.data
== DefPathData::ClosureExpr
{
"inside call to closure".to_string()
} else {
instance.to_string()
};
let span = self::source::pretty_src_path(span);
(name, span, format!("{:?}", instance.def_id()))
})
.collect();
let rendered_breakpoints: Vec<String> = pcx
.config
.bptree
.iter()
.map(|&Breakpoint(def_id, bb, stmt)| format!("{:?}@{}:{}", def_id, bb.index(), stmt))
.collect();
let rendered_locals = frame
.map(|frame| locals::render_locals(&pcx.ecx, frame))
.unwrap_or_else(String::new);
let rendered_source = source::render_source(pcx.ecx.tcx.tcx, frame);
let mir_graph = frame.map(|frame| {
graphviz::render_html(frame, pcx.config.bptree.for_def_id(frame.instance.def_id()))
});
let filename = pcx
.ecx
.tcx
.sess
.local_crate_source_file
.as_ref()
.map(|f| f.display().to_string())
.unwrap_or_else(|| "no file name".to_string());
template(
pcx,
filename,
horrorshow::html! {
div(id="left") {
div(id="commands") {
@ if is_active_stack_frame {
a(href="/step/single") { div(title="Execute next MIR statement/terminator") { : "Step" } }
a(href="/step/next") { div(title="Run until after the next MIR statement/terminator") { : "Next" } }
a(href="/step/return") { div(title="Run until the function returns") { : "Return" } }
a(href="/step/single_back") { div(title="Execute previous MIR statement/terminator (restarts and steps till one stmt before the current stmt)") { : "Step back (slow)" } }
a(href="/step/continue") { div(title="Run until termination or breakpoint") { : "Continue" } }
a(href="/step/restart") { div(title="Abort execution and restart") { : "Restart" } }
a(href="/breakpoints/add_here") { div(title="Add breakpoint at current location") { : "Add breakpoint here"} }
a(href="/breakpoints/remove_all") { div(title="Remove all breakpoints") { : "Remove all breakpoints"} }
} else {
a(href="/") { div(title="Go to active stack frame") { : "Go back to active stack frame" } }
}
}
div(id="messages") {
p { : message }
}
div(id="mir") {
: Raw(mir_graph.unwrap_or_else(|| "no current function".to_string()))
}
}
div(id="right") {
div {
: format!("Step count: {}", pcx.step_count);
}
div(id="stack") {
table(border="1") {
@ for (i, &(ref s, ref span, ref def_id)) in stack.iter().enumerate().rev() {
tr {
@ if i == display_frame.unwrap_or(stack.len() - 1) { td { : Raw("→") } } else { td; }
td { : s }
td { : span }
td { : def_id }
@ if i == display_frame.unwrap_or(stack.len() - 1) { td; } else { td { a(href=format!("/frame/{}", i)) { : "View" } } }
}
}
}
}
div(id="breakpoints") {
: "Breakpoints: "; br;
table(border="1") {
@ for bp in rendered_breakpoints {
tr {
td { : &bp }
td { a(href=format!("/breakpoints/remove/{}", bp)) { : "remove" } }
}
}
}
}
div(id="locals") {
: Raw(rendered_locals)
}
div(id="source") {
: rendered_source
}
}
},
)
}
pub fn render_reverse_ptr(pcx: &PrirodaContext<'_, '_>, alloc_id: u64) -> Html<String> {
let allocs: Vec<_> = pcx.ecx.memory.alloc_map().iter(|values| {
values
.filter_map(|(&id, (_kind, alloc))| {
alloc
.relocations()
.values()
.find(|&&(_tag, reloc)| reloc == id)
.map(|_| id)
})
.collect()
});
template(
pcx,
format!("Allocations with pointers to Allocation {}", alloc_id),
horrorshow::html! {
@for id in allocs {
a(href=format!("/ptr/{}", id)) { : format!("Allocation {}", id) }
br;
}
},
)
}
pub fn render_ptr_memory(
pcx: &PrirodaContext<'_, '_>,
alloc_id: AllocId,
offset: u64,
) -> Html<String> {
let (mem, offset, rest) = if let Ok((_, mem, bytes)) = locals::print_ptr(
&pcx.ecx,
Pointer::new(alloc_id, Size::from_bytes(offset))
.with_tag(miri::Tag::Untagged)
.into(),
None,
) {
if bytes * 2 > offset {
(mem, offset, (bytes * 2 - offset - 1) as usize)
} else if bytes * 2 == 0 && offset == 0 {
(mem, 0, 0)
} else {
("out of bounds offset".to_string(), 0, 0)
}
} else {
("unknown memory".to_string(), 0, 0)
};
template(
pcx,
format!("Allocation {}", alloc_id),
horrorshow::html! {
span(style="font-family: monospace") {
: format!("{nil:.<offset$}┌{nil:─<rest$}", nil = "", offset = offset as usize, rest = rest)
}
br;
span(style="font-family: monospace") { : Raw(mem) }
br;
a(href=format!("/reverse_ptr/{}", alloc_id)) { : "List allocations with pointers into this allocation" }
},
)
}
pub struct FlashString(String);
impl<'a, 'r> ::rocket::request::FromRequest<'a, 'r> for FlashString {
type Error = !;
fn from_request(request: &'a rocket::Request<'r>) -> rocket::request::Outcome<Self, !> {
rocket::Outcome::Success(FlashString(
Option::<rocket::request::FlashMessage<'_, '_>>::from_request(request)?
.map(|flash| flash.msg().to_string())
.unwrap_or_else(String::new),
))
}
}
pub mod routes {
use super::*;
use crate::*;
pub fn routes() -> Vec<::rocket::Route> {
routes![index, frame, frame_invalid, ptr, reverse_ptr]
}
#[get("/")]
pub fn index(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, None, flash.0))
}
#[get("/frame/<frame>")]
pub fn frame(
sender: rocket::State<'_, crate::PrirodaSender>,
flash: FlashString,
frame: usize,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_main_window(pcx, Some(frame), flash.0))
}
#[get("/frame/<frame>", rank = 42)] // Error handler
fn frame_invalid(frame: String) -> BadRequest<String> {
BadRequest(Some(format!(
"not a number: {:?}",
frame.parse::<usize>().unwrap_err()
)))
}
#[get("/ptr/<alloc_id>/<offset>")]
pub fn ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
alloc_id: u64,
offset: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_ptr_memory(pcx, AllocId(alloc_id), offset))
}
#[get("/reverse_ptr/<ptr>")]
fn reverse_ptr(
sender: rocket::State<'_, crate::PrirodaSender>,
ptr: u64,
) -> crate::RResult<Html<String>> {
sender.do_work(move |pcx| render::render_reverse_ptr(pcx, ptr))
}
}
| {
let mut buf = String::new();
(horrorshow::html! {
html {
head {
title { : title }
meta(charset = "UTF-8") {}
script(src="/resources/svg-pan-zoom.js") {}
script(src="/resources/zoom_mir.js") {}
: Raw(refresh_script(pcx))
}
body(onload="enable_mir_mousewheel()") {
link(rel="stylesheet", href="/resources/positioning.css");
link(rel="stylesheet", href=format!("/resources/style-{}.css", pcx.config.theme));
: t
}
}
})
.write_to_string(&mut buf)
.unwrap();
Html(buf)
} | identifier_body |
window_manager.rs | use crate::geometry::{Displacement, Point};
use crate::surface::{Surface, SurfaceExt};
use crate::{
event::{Event, EventOnce},
input::seat::SeatManager,
output_manager::OutputManager,
window::Window,
window_management_policy::WmPolicyManager,
};
use log::{trace, warn};
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::rc::{Rc, Weak};
use wlroots_sys::*;
#[derive(Debug, Copy, Clone)]
pub enum WindowLayer {
Background,
Bottom,
Normal,
Top,
Overlay,
}
#[derive(Default)]
struct WindowLayers {
background: Vec<Rc<Window>>,
bottom: Vec<Rc<Window>>,
normal: Vec<Rc<Window>>,
top: Vec<Rc<Window>>,
overlay: Vec<Rc<Window>>,
}
impl WindowLayers {
fn all_windows(&self) -> impl '_ + DoubleEndedIterator<Item = Rc<Window>> {
self
.background
.iter()
.chain(self.bottom.iter())
.chain(self.normal.iter())
.chain(self.top.iter())
.chain(self.overlay.iter())
.cloned()
}
fn update<F>(&mut self, layer: WindowLayer, mut f: F)
where
F: FnMut(&mut Vec<Rc<Window>>),
{
match layer {
WindowLayer::Background => f(&mut self.background),
WindowLayer::Bottom => f(&mut self.bottom),
WindowLayer::Normal => f(&mut self.normal),
WindowLayer::Top => f(&mut self.top),
WindowLayer::Overlay => f(&mut self.overlay),
}
}
}
pub struct WindowManager {
wm_policy_manager: Rc<WmPolicyManager>,
seat_manager: Rc<SeatManager>,
output_manager: RefCell<Weak<OutputManager>>,
layers: RefCell<WindowLayers>,
foreign_toplevel_manager: *mut wlr_foreign_toplevel_manager_v1,
}
impl std::fmt::Debug for WindowManager {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
write!(
fmt,
"WindowManager {{windows: {0}}}",
self.layers.borrow().normal.len()
)
}
}
impl WindowManager {
pub(crate) fn init(
wm_policy_manager: Rc<WmPolicyManager>,
seat_manager: Rc<SeatManager>,
display: *mut wl_display,
) -> WindowManager {
let foreign_toplevel_manager = unsafe { wlr_foreign_toplevel_manager_v1_create(display) };
WindowManager {
wm_policy_manager,
seat_manager,
output_manager: RefCell::new(Weak::<OutputManager>::new()),
layers: RefCell::new(WindowLayers::default()),
foreign_toplevel_manager,
}
}
pub fn raw_foreign_toplevel_manager(&self) -> *mut wlr_foreign_toplevel_manager_v1 {
self.foreign_toplevel_manager
}
pub fn windows_to_render(&self) -> impl '_ + Iterator<Item = Rc<Window>> {
self.windows().filter(|window| *window.mapped.borrow())
}
pub fn window_at(&self, point: &Point) -> Option<Rc<Window>> {
self
.layers
.borrow()
.all_windows()
// Reverse as windows is from back to front
.rev()
.find(|window| window.extents().contains(point))
}
pub(crate) fn window_buffer_at(&self, point: &Point) -> Option<Rc<Window>> {
self
.layers
.borrow()
.all_windows()
// Reverse as windows is from back to front
.rev()
.find(|window| window.buffer_extents().contains(point))
}
pub(crate) fn destroy_window(&self, destroyed_window: Rc<Window>) {
self
.layers
.borrow_mut()
.update(destroyed_window.layer, |windows| {
windows.retain(|window| *window != destroyed_window)
});
}
pub fn windows(&self) -> impl '_ + DoubleEndedIterator<Item = Rc<Window>> {
let windows = self.layers.borrow().all_windows().collect::<Vec<_>>();
windows.into_iter()
}
/// Returns the window that holds keyboard focus
pub fn focused_window(&self) -> Option<Rc<Window>> {
let focused_surface = unsafe {
(*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface
};
self
.layers
.borrow()
.all_windows()
.find(|w| w.wlr_surface() == focused_surface)
}
/// If the window have keyboard focus
pub fn window_has_focus(&self, window: &Window) -> bool {
let wlr_surface = window.wlr_surface();
let focused_surface = unsafe {
(*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface
};
wlr_surface == focused_surface
}
/// Gives keyboard focus to the window
pub fn focus_window(&self, window: Rc<Window>) {
if !window.can_receive_focus() {
warn!("Window can not receive focus");
return;
}
if !self.seat_manager.is_input_allowed(&window) {
warn!("Refusing to set focus, input is inhibited");
return;
}
let wlr_surface = window.wlr_surface();
unsafe {
let old_wlr_surface = (*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface;
if wlr_surface == old_wlr_surface {
return;
}
trace!("Focusing window \"{:?}\"", window.title());
if !old_wlr_surface.is_null() {
// Deactivate the previously focused window. This lets the client know
// it no longer has focus and the client will repaint accordingly, e.g.
// stop displaying a caret.
let surface = Surface::from_wlr_surface(old_wlr_surface);
surface.set_activated(false);
}
// Move the view to the front
self.layers.borrow_mut().update(window.layer, |windows| {
windows.retain(|s| *s != window);
windows.push(window.clone());
});
// Activate the new window
window.surface().set_activated(true);
// Tell the seat to have the keyboard enter this window. wlroots will keep
// track of this and automatically send key events to the appropriate
// clients without additional work on your part.
let keyboard = wlr_seat_get_keyboard(self.seat_manager.raw_seat());
wlr_seat_keyboard_notify_enter(
self.seat_manager.raw_seat(),
wlr_surface,
(*keyboard).keycodes.as_mut_ptr(),
(*keyboard).num_keycodes,
&mut (*keyboard).modifiers,
);
}
self.wm_policy_manager.advise_focused_window(window);
}
/// Blurs the currently focused window without focusing another one
pub fn blur(&self) {
unsafe {
let old_wlr_surface = (*self.seat_manager.raw_seat()) | // it no longer has focus and the client will repaint accordingly, e.g.
// stop displaying a caret.
let surface = Surface::from_wlr_surface(old_wlr_surface);
surface.set_activated(false);
}
wlr_seat_keyboard_clear_focus(self.seat_manager.raw_seat());
}
}
}
pub(crate) trait WindowManagerExt {
fn set_output_manager(&self, output_manager: Rc<OutputManager>);
fn new_window(&self, layer: WindowLayer, surface: Surface) -> Rc<Window>;
}
impl WindowManagerExt for Rc<WindowManager> {
fn set_output_manager(&self, output_manager: Rc<OutputManager>) {
*self.output_manager.borrow_mut() = Rc::downgrade(&output_manager);
let window_manager = self.clone();
output_manager
.on_output_layout_change()
.subscribe(Box::new(move |_| {
for window in window_manager.layers.borrow().all_windows() {
window.update_outputs();
}
}));
}
fn new_window(&self, layer: WindowLayer, surface: Surface) -> Rc<Window> {
let window = Rc::new(Window {
output_manager: self.output_manager.borrow().upgrade().expect("window_manager should be initialized with and output_manager before windows can be created"),
window_manager: self.clone(),
layer,
surface,
mapped: RefCell::new(false),
top_left: RefCell::new(Point::ZERO),
translate: RefCell::new(Displacement::ZERO),
outputs: RefCell::new(vec![]),
minimize_targets: RefCell::new(vec![]),
pending_updates: RefCell::new(BTreeMap::new()),
on_entered_output: Event::default(),
on_left_output: Event::default(),
on_destroy: EventOnce::default(),
event_manager: RefCell::new(None),
});
// If the window can receive focus, add it to the back so that
// the window management policy can choose if it want to focus the
// window
if window.can_receive_focus() {
self.layers.borrow_mut().update(layer, |windows| {
windows.insert(0, window.clone());
})
} else {
self.layers.borrow_mut().update(layer, |windows| {
windows.push(window.clone());
})
}
window
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::input::{cursor::CursorManager, event_filter::EventFilterManager};
use crate::output_manager::OutputManager;
use crate::window::WindowEventHandler;
use crate::{config::ConfigManager, window_management_policy::WmPolicyManager};
use std::ptr;
use std::rc::Rc;
#[test]
fn it_drops_and_cleans_up_on_destroy() {
let config_manager = Rc::new(ConfigManager::default());
let wm_policy_manager = Rc::new(WmPolicyManager::new());
let seat_manager = SeatManager::mock(ptr::null_mut(), ptr::null_mut());
let window_manager = Rc::new(WindowManager::init(
wm_policy_manager.clone(),
seat_manager.clone(),
ptr::null_mut(),
));
let output_manager = OutputManager::mock(
config_manager,
wm_policy_manager.clone(),
window_manager.clone(),
);
let cursor_manager = CursorManager::mock(
output_manager.clone(),
window_manager.clone(),
seat_manager.clone(),
Rc::new(EventFilterManager::new()),
ptr::null_mut(),
ptr::null_mut(),
);
window_manager.set_output_manager(output_manager.clone());
let window = window_manager.new_window(WindowLayer::Normal, Surface::Null);
let mut event_handler = WindowEventHandler {
wm_policy_manager,
output_manager: output_manager.clone(),
window_manager: window_manager.clone(),
cursor_manager: cursor_manager.clone(),
window: Rc::downgrade(&window),
foreign_toplevel_handle: None,
foreign_toplevel_event_manager: None,
};
let weak_window = Rc::downgrade(&window);
drop(window);
assert!(window_manager.windows().count() == 1);
assert!(weak_window.upgrade().is_some());
event_handler.destroy();
assert!(window_manager.windows().count() == 0);
assert!(weak_window.upgrade().is_none());
}
}
#[cfg(test)]
unsafe fn wlr_foreign_toplevel_manager_v1_create(
_display: *mut wl_display,
) -> *mut wlr_foreign_toplevel_manager_v1 {
std::ptr::null_mut()
} | .keyboard_state
.focused_surface;
if !old_wlr_surface.is_null() {
// Deactivate the previously focused window. This lets the client know | random_line_split |
window_manager.rs | use crate::geometry::{Displacement, Point};
use crate::surface::{Surface, SurfaceExt};
use crate::{
event::{Event, EventOnce},
input::seat::SeatManager,
output_manager::OutputManager,
window::Window,
window_management_policy::WmPolicyManager,
};
use log::{trace, warn};
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::rc::{Rc, Weak};
use wlroots_sys::*;
#[derive(Debug, Copy, Clone)]
pub enum WindowLayer {
Background,
Bottom,
Normal,
Top,
Overlay,
}
#[derive(Default)]
struct WindowLayers {
background: Vec<Rc<Window>>,
bottom: Vec<Rc<Window>>,
normal: Vec<Rc<Window>>,
top: Vec<Rc<Window>>,
overlay: Vec<Rc<Window>>,
}
impl WindowLayers {
fn all_windows(&self) -> impl '_ + DoubleEndedIterator<Item = Rc<Window>> {
self
.background
.iter()
.chain(self.bottom.iter())
.chain(self.normal.iter())
.chain(self.top.iter())
.chain(self.overlay.iter())
.cloned()
}
fn update<F>(&mut self, layer: WindowLayer, mut f: F)
where
F: FnMut(&mut Vec<Rc<Window>>),
{
match layer {
WindowLayer::Background => f(&mut self.background),
WindowLayer::Bottom => f(&mut self.bottom),
WindowLayer::Normal => f(&mut self.normal),
WindowLayer::Top => f(&mut self.top),
WindowLayer::Overlay => f(&mut self.overlay),
}
}
}
pub struct WindowManager {
wm_policy_manager: Rc<WmPolicyManager>,
seat_manager: Rc<SeatManager>,
output_manager: RefCell<Weak<OutputManager>>,
layers: RefCell<WindowLayers>,
foreign_toplevel_manager: *mut wlr_foreign_toplevel_manager_v1,
}
impl std::fmt::Debug for WindowManager {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
write!(
fmt,
"WindowManager {{windows: {0}}}",
self.layers.borrow().normal.len()
)
}
}
impl WindowManager {
pub(crate) fn init(
wm_policy_manager: Rc<WmPolicyManager>,
seat_manager: Rc<SeatManager>,
display: *mut wl_display,
) -> WindowManager {
let foreign_toplevel_manager = unsafe { wlr_foreign_toplevel_manager_v1_create(display) };
WindowManager {
wm_policy_manager,
seat_manager,
output_manager: RefCell::new(Weak::<OutputManager>::new()),
layers: RefCell::new(WindowLayers::default()),
foreign_toplevel_manager,
}
}
pub fn raw_foreign_toplevel_manager(&self) -> *mut wlr_foreign_toplevel_manager_v1 {
self.foreign_toplevel_manager
}
pub fn windows_to_render(&self) -> impl '_ + Iterator<Item = Rc<Window>> {
self.windows().filter(|window| *window.mapped.borrow())
}
pub fn window_at(&self, point: &Point) -> Option<Rc<Window>> {
self
.layers
.borrow()
.all_windows()
// Reverse as windows is from back to front
.rev()
.find(|window| window.extents().contains(point))
}
pub(crate) fn window_buffer_at(&self, point: &Point) -> Option<Rc<Window>> {
self
.layers
.borrow()
.all_windows()
// Reverse as windows is from back to front
.rev()
.find(|window| window.buffer_extents().contains(point))
}
pub(crate) fn destroy_window(&self, destroyed_window: Rc<Window>) {
self
.layers
.borrow_mut()
.update(destroyed_window.layer, |windows| {
windows.retain(|window| *window != destroyed_window)
});
}
pub fn windows(&self) -> impl '_ + DoubleEndedIterator<Item = Rc<Window>> {
let windows = self.layers.borrow().all_windows().collect::<Vec<_>>();
windows.into_iter()
}
/// Returns the window that holds keyboard focus
pub fn focused_window(&self) -> Option<Rc<Window>> {
let focused_surface = unsafe {
(*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface
};
self
.layers
.borrow()
.all_windows()
.find(|w| w.wlr_surface() == focused_surface)
}
/// If the window have keyboard focus
pub fn window_has_focus(&self, window: &Window) -> bool {
let wlr_surface = window.wlr_surface();
let focused_surface = unsafe {
(*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface
};
wlr_surface == focused_surface
}
/// Gives keyboard focus to the window
pub fn focus_window(&self, window: Rc<Window>) {
if !window.can_receive_focus() {
warn!("Window can not receive focus");
return;
}
if !self.seat_manager.is_input_allowed(&window) {
warn!("Refusing to set focus, input is inhibited");
return;
}
let wlr_surface = window.wlr_surface();
unsafe {
let old_wlr_surface = (*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface;
if wlr_surface == old_wlr_surface {
return;
}
trace!("Focusing window \"{:?}\"", window.title());
if !old_wlr_surface.is_null() {
// Deactivate the previously focused window. This lets the client know
// it no longer has focus and the client will repaint accordingly, e.g.
// stop displaying a caret.
let surface = Surface::from_wlr_surface(old_wlr_surface);
surface.set_activated(false);
}
// Move the view to the front
self.layers.borrow_mut().update(window.layer, |windows| {
windows.retain(|s| *s != window);
windows.push(window.clone());
});
// Activate the new window
window.surface().set_activated(true);
// Tell the seat to have the keyboard enter this window. wlroots will keep
// track of this and automatically send key events to the appropriate
// clients without additional work on your part.
let keyboard = wlr_seat_get_keyboard(self.seat_manager.raw_seat());
wlr_seat_keyboard_notify_enter(
self.seat_manager.raw_seat(),
wlr_surface,
(*keyboard).keycodes.as_mut_ptr(),
(*keyboard).num_keycodes,
&mut (*keyboard).modifiers,
);
}
self.wm_policy_manager.advise_focused_window(window);
}
/// Blurs the currently focused window without focusing another one
pub fn blur(&self) {
unsafe {
let old_wlr_surface = (*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface;
if !old_wlr_surface.is_null() {
// Deactivate the previously focused window. This lets the client know
// it no longer has focus and the client will repaint accordingly, e.g.
// stop displaying a caret.
let surface = Surface::from_wlr_surface(old_wlr_surface);
surface.set_activated(false);
}
wlr_seat_keyboard_clear_focus(self.seat_manager.raw_seat());
}
}
}
pub(crate) trait WindowManagerExt {
fn set_output_manager(&self, output_manager: Rc<OutputManager>);
fn new_window(&self, layer: WindowLayer, surface: Surface) -> Rc<Window>;
}
impl WindowManagerExt for Rc<WindowManager> {
fn set_output_manager(&self, output_manager: Rc<OutputManager>) {
*self.output_manager.borrow_mut() = Rc::downgrade(&output_manager);
let window_manager = self.clone();
output_manager
.on_output_layout_change()
.subscribe(Box::new(move |_| {
for window in window_manager.layers.borrow().all_windows() {
window.update_outputs();
}
}));
}
fn new_window(&self, layer: WindowLayer, surface: Surface) -> Rc<Window> {
let window = Rc::new(Window {
output_manager: self.output_manager.borrow().upgrade().expect("window_manager should be initialized with and output_manager before windows can be created"),
window_manager: self.clone(),
layer,
surface,
mapped: RefCell::new(false),
top_left: RefCell::new(Point::ZERO),
translate: RefCell::new(Displacement::ZERO),
outputs: RefCell::new(vec![]),
minimize_targets: RefCell::new(vec![]),
pending_updates: RefCell::new(BTreeMap::new()),
on_entered_output: Event::default(),
on_left_output: Event::default(),
on_destroy: EventOnce::default(),
event_manager: RefCell::new(None),
});
// If the window can receive focus, add it to the back so that
// the window management policy can choose if it want to focus the
// window
if window.can_receive_focus() | else {
self.layers.borrow_mut().update(layer, |windows| {
windows.push(window.clone());
})
}
window
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::input::{cursor::CursorManager, event_filter::EventFilterManager};
use crate::output_manager::OutputManager;
use crate::window::WindowEventHandler;
use crate::{config::ConfigManager, window_management_policy::WmPolicyManager};
use std::ptr;
use std::rc::Rc;
#[test]
fn it_drops_and_cleans_up_on_destroy() {
let config_manager = Rc::new(ConfigManager::default());
let wm_policy_manager = Rc::new(WmPolicyManager::new());
let seat_manager = SeatManager::mock(ptr::null_mut(), ptr::null_mut());
let window_manager = Rc::new(WindowManager::init(
wm_policy_manager.clone(),
seat_manager.clone(),
ptr::null_mut(),
));
let output_manager = OutputManager::mock(
config_manager,
wm_policy_manager.clone(),
window_manager.clone(),
);
let cursor_manager = CursorManager::mock(
output_manager.clone(),
window_manager.clone(),
seat_manager.clone(),
Rc::new(EventFilterManager::new()),
ptr::null_mut(),
ptr::null_mut(),
);
window_manager.set_output_manager(output_manager.clone());
let window = window_manager.new_window(WindowLayer::Normal, Surface::Null);
let mut event_handler = WindowEventHandler {
wm_policy_manager,
output_manager: output_manager.clone(),
window_manager: window_manager.clone(),
cursor_manager: cursor_manager.clone(),
window: Rc::downgrade(&window),
foreign_toplevel_handle: None,
foreign_toplevel_event_manager: None,
};
let weak_window = Rc::downgrade(&window);
drop(window);
assert!(window_manager.windows().count() == 1);
assert!(weak_window.upgrade().is_some());
event_handler.destroy();
assert!(window_manager.windows().count() == 0);
assert!(weak_window.upgrade().is_none());
}
}
#[cfg(test)]
unsafe fn wlr_foreign_toplevel_manager_v1_create(
_display: *mut wl_display,
) -> *mut wlr_foreign_toplevel_manager_v1 {
std::ptr::null_mut()
}
| {
self.layers.borrow_mut().update(layer, |windows| {
windows.insert(0, window.clone());
})
} | conditional_block |
window_manager.rs | use crate::geometry::{Displacement, Point};
use crate::surface::{Surface, SurfaceExt};
use crate::{
event::{Event, EventOnce},
input::seat::SeatManager,
output_manager::OutputManager,
window::Window,
window_management_policy::WmPolicyManager,
};
use log::{trace, warn};
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::rc::{Rc, Weak};
use wlroots_sys::*;
#[derive(Debug, Copy, Clone)]
pub enum WindowLayer {
Background,
Bottom,
Normal,
Top,
Overlay,
}
#[derive(Default)]
struct WindowLayers {
background: Vec<Rc<Window>>,
bottom: Vec<Rc<Window>>,
normal: Vec<Rc<Window>>,
top: Vec<Rc<Window>>,
overlay: Vec<Rc<Window>>,
}
impl WindowLayers {
fn all_windows(&self) -> impl '_ + DoubleEndedIterator<Item = Rc<Window>> {
self
.background
.iter()
.chain(self.bottom.iter())
.chain(self.normal.iter())
.chain(self.top.iter())
.chain(self.overlay.iter())
.cloned()
}
fn update<F>(&mut self, layer: WindowLayer, mut f: F)
where
F: FnMut(&mut Vec<Rc<Window>>),
{
match layer {
WindowLayer::Background => f(&mut self.background),
WindowLayer::Bottom => f(&mut self.bottom),
WindowLayer::Normal => f(&mut self.normal),
WindowLayer::Top => f(&mut self.top),
WindowLayer::Overlay => f(&mut self.overlay),
}
}
}
pub struct WindowManager {
wm_policy_manager: Rc<WmPolicyManager>,
seat_manager: Rc<SeatManager>,
output_manager: RefCell<Weak<OutputManager>>,
layers: RefCell<WindowLayers>,
foreign_toplevel_manager: *mut wlr_foreign_toplevel_manager_v1,
}
impl std::fmt::Debug for WindowManager {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
write!(
fmt,
"WindowManager {{windows: {0}}}",
self.layers.borrow().normal.len()
)
}
}
impl WindowManager {
pub(crate) fn init(
wm_policy_manager: Rc<WmPolicyManager>,
seat_manager: Rc<SeatManager>,
display: *mut wl_display,
) -> WindowManager {
let foreign_toplevel_manager = unsafe { wlr_foreign_toplevel_manager_v1_create(display) };
WindowManager {
wm_policy_manager,
seat_manager,
output_manager: RefCell::new(Weak::<OutputManager>::new()),
layers: RefCell::new(WindowLayers::default()),
foreign_toplevel_manager,
}
}
pub fn raw_foreign_toplevel_manager(&self) -> *mut wlr_foreign_toplevel_manager_v1 {
self.foreign_toplevel_manager
}
pub fn | (&self) -> impl '_ + Iterator<Item = Rc<Window>> {
self.windows().filter(|window| *window.mapped.borrow())
}
pub fn window_at(&self, point: &Point) -> Option<Rc<Window>> {
self
.layers
.borrow()
.all_windows()
// Reverse as windows is from back to front
.rev()
.find(|window| window.extents().contains(point))
}
pub(crate) fn window_buffer_at(&self, point: &Point) -> Option<Rc<Window>> {
self
.layers
.borrow()
.all_windows()
// Reverse as windows is from back to front
.rev()
.find(|window| window.buffer_extents().contains(point))
}
pub(crate) fn destroy_window(&self, destroyed_window: Rc<Window>) {
self
.layers
.borrow_mut()
.update(destroyed_window.layer, |windows| {
windows.retain(|window| *window != destroyed_window)
});
}
pub fn windows(&self) -> impl '_ + DoubleEndedIterator<Item = Rc<Window>> {
let windows = self.layers.borrow().all_windows().collect::<Vec<_>>();
windows.into_iter()
}
/// Returns the window that holds keyboard focus
pub fn focused_window(&self) -> Option<Rc<Window>> {
let focused_surface = unsafe {
(*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface
};
self
.layers
.borrow()
.all_windows()
.find(|w| w.wlr_surface() == focused_surface)
}
/// If the window have keyboard focus
pub fn window_has_focus(&self, window: &Window) -> bool {
let wlr_surface = window.wlr_surface();
let focused_surface = unsafe {
(*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface
};
wlr_surface == focused_surface
}
/// Gives keyboard focus to the window
pub fn focus_window(&self, window: Rc<Window>) {
if !window.can_receive_focus() {
warn!("Window can not receive focus");
return;
}
if !self.seat_manager.is_input_allowed(&window) {
warn!("Refusing to set focus, input is inhibited");
return;
}
let wlr_surface = window.wlr_surface();
unsafe {
let old_wlr_surface = (*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface;
if wlr_surface == old_wlr_surface {
return;
}
trace!("Focusing window \"{:?}\"", window.title());
if !old_wlr_surface.is_null() {
// Deactivate the previously focused window. This lets the client know
// it no longer has focus and the client will repaint accordingly, e.g.
// stop displaying a caret.
let surface = Surface::from_wlr_surface(old_wlr_surface);
surface.set_activated(false);
}
// Move the view to the front
self.layers.borrow_mut().update(window.layer, |windows| {
windows.retain(|s| *s != window);
windows.push(window.clone());
});
// Activate the new window
window.surface().set_activated(true);
// Tell the seat to have the keyboard enter this window. wlroots will keep
// track of this and automatically send key events to the appropriate
// clients without additional work on your part.
let keyboard = wlr_seat_get_keyboard(self.seat_manager.raw_seat());
wlr_seat_keyboard_notify_enter(
self.seat_manager.raw_seat(),
wlr_surface,
(*keyboard).keycodes.as_mut_ptr(),
(*keyboard).num_keycodes,
&mut (*keyboard).modifiers,
);
}
self.wm_policy_manager.advise_focused_window(window);
}
/// Blurs the currently focused window without focusing another one
pub fn blur(&self) {
unsafe {
let old_wlr_surface = (*self.seat_manager.raw_seat())
.keyboard_state
.focused_surface;
if !old_wlr_surface.is_null() {
// Deactivate the previously focused window. This lets the client know
// it no longer has focus and the client will repaint accordingly, e.g.
// stop displaying a caret.
let surface = Surface::from_wlr_surface(old_wlr_surface);
surface.set_activated(false);
}
wlr_seat_keyboard_clear_focus(self.seat_manager.raw_seat());
}
}
}
pub(crate) trait WindowManagerExt {
fn set_output_manager(&self, output_manager: Rc<OutputManager>);
fn new_window(&self, layer: WindowLayer, surface: Surface) -> Rc<Window>;
}
impl WindowManagerExt for Rc<WindowManager> {
fn set_output_manager(&self, output_manager: Rc<OutputManager>) {
*self.output_manager.borrow_mut() = Rc::downgrade(&output_manager);
let window_manager = self.clone();
output_manager
.on_output_layout_change()
.subscribe(Box::new(move |_| {
for window in window_manager.layers.borrow().all_windows() {
window.update_outputs();
}
}));
}
fn new_window(&self, layer: WindowLayer, surface: Surface) -> Rc<Window> {
let window = Rc::new(Window {
output_manager: self.output_manager.borrow().upgrade().expect("window_manager should be initialized with and output_manager before windows can be created"),
window_manager: self.clone(),
layer,
surface,
mapped: RefCell::new(false),
top_left: RefCell::new(Point::ZERO),
translate: RefCell::new(Displacement::ZERO),
outputs: RefCell::new(vec![]),
minimize_targets: RefCell::new(vec![]),
pending_updates: RefCell::new(BTreeMap::new()),
on_entered_output: Event::default(),
on_left_output: Event::default(),
on_destroy: EventOnce::default(),
event_manager: RefCell::new(None),
});
// If the window can receive focus, add it to the back so that
// the window management policy can choose if it want to focus the
// window
if window.can_receive_focus() {
self.layers.borrow_mut().update(layer, |windows| {
windows.insert(0, window.clone());
})
} else {
self.layers.borrow_mut().update(layer, |windows| {
windows.push(window.clone());
})
}
window
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::input::{cursor::CursorManager, event_filter::EventFilterManager};
use crate::output_manager::OutputManager;
use crate::window::WindowEventHandler;
use crate::{config::ConfigManager, window_management_policy::WmPolicyManager};
use std::ptr;
use std::rc::Rc;
#[test]
fn it_drops_and_cleans_up_on_destroy() {
let config_manager = Rc::new(ConfigManager::default());
let wm_policy_manager = Rc::new(WmPolicyManager::new());
let seat_manager = SeatManager::mock(ptr::null_mut(), ptr::null_mut());
let window_manager = Rc::new(WindowManager::init(
wm_policy_manager.clone(),
seat_manager.clone(),
ptr::null_mut(),
));
let output_manager = OutputManager::mock(
config_manager,
wm_policy_manager.clone(),
window_manager.clone(),
);
let cursor_manager = CursorManager::mock(
output_manager.clone(),
window_manager.clone(),
seat_manager.clone(),
Rc::new(EventFilterManager::new()),
ptr::null_mut(),
ptr::null_mut(),
);
window_manager.set_output_manager(output_manager.clone());
let window = window_manager.new_window(WindowLayer::Normal, Surface::Null);
let mut event_handler = WindowEventHandler {
wm_policy_manager,
output_manager: output_manager.clone(),
window_manager: window_manager.clone(),
cursor_manager: cursor_manager.clone(),
window: Rc::downgrade(&window),
foreign_toplevel_handle: None,
foreign_toplevel_event_manager: None,
};
let weak_window = Rc::downgrade(&window);
drop(window);
assert!(window_manager.windows().count() == 1);
assert!(weak_window.upgrade().is_some());
event_handler.destroy();
assert!(window_manager.windows().count() == 0);
assert!(weak_window.upgrade().is_none());
}
}
#[cfg(test)]
unsafe fn wlr_foreign_toplevel_manager_v1_create(
_display: *mut wl_display,
) -> *mut wlr_foreign_toplevel_manager_v1 {
std::ptr::null_mut()
}
| windows_to_render | identifier_name |
plugin.go | package scheduler
import (
"fmt"
"sync"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/envvars"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
plugin "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
log "github.com/golang/glog"
"github.com/mesos/mesos-go/mesos"
"github.com/mesosphere/kubernetes-mesos/pkg/queue"
"gopkg.in/v2/yaml"
)
const (
enqueuePopTimeout = 200 * time.Millisecond
enqueueWaitTimeout = 3 * time.Second
yieldPopTimeout = 200 * time.Millisecond
yieldWaitTimeout = 3 * time.Second
)
// scheduler abstraction to allow for easier unit testing
type SchedulerInterface interface {
sync.Locker
RLocker() sync.Locker
SlaveIndex
algorithm() PodScheduleFunc
createPodTask(api.Context, *api.Pod) (*PodTask, error)
getTask(taskId string) (task *PodTask, currentState stateType)
offers() OfferRegistry
registerPodTask(*PodTask, error) (*PodTask, error)
taskForPod(podID string) (taskID string, ok bool)
unregisterPodTask(*PodTask)
killTask(taskId string) error
launchTask(*PodTask) error
}
type k8smScheduler struct {
*KubernetesScheduler
}
func (k *k8smScheduler) algorithm() PodScheduleFunc {
return k.KubernetesScheduler.scheduleFunc
}
func (k *k8smScheduler) offers() OfferRegistry {
return k.KubernetesScheduler.offers
}
func (k *k8smScheduler) taskForPod(podID string) (taskID string, ok bool) {
// assume caller is holding scheduler lock
taskID, ok = k.podToTask[podID]
return
}
func (k *k8smScheduler) createPodTask(ctx api.Context, pod *api.Pod) (*PodTask, error) {
return newPodTask(ctx, pod, k.executor)
}
func (k *k8smScheduler) registerPodTask(task *PodTask, err error) (*PodTask, error) {
if err == nil {
// assume caller is holding scheduler lock
k.podToTask[task.podKey] = task.ID
k.pendingTasks[task.ID] = task
}
return task, err
}
func (k *k8smScheduler) slaveFor(id string) (slave *Slave, ok bool) {
slave, ok = k.slaves[id]
return
}
func (k *k8smScheduler) unregisterPodTask(task *PodTask) {
// assume caller is holding scheduler lock
delete(k.podToTask, task.podKey)
delete(k.pendingTasks, task.ID)
}
func (k *k8smScheduler) killTask(taskId string) error {
// assume caller is holding scheduler lock
killTaskId := newTaskID(taskId)
return k.KubernetesScheduler.driver.KillTask(killTaskId)
}
func (k *k8smScheduler) launchTask(task *PodTask) error {
// assume caller is holding scheduler lock
taskList := []*mesos.TaskInfo{task.TaskInfo}
return k.KubernetesScheduler.driver.LaunchTasks(task.Offer.Details().Id, taskList, nil)
}
type binder struct {
api SchedulerInterface
client *client.Client
}
// implements binding.Registry, launches the pod-associated-task in mesos
func (b *binder) Bind(binding *api.Binding) error {
ctx := api.WithNamespace(api.NewContext(), binding.Namespace)
// default upstream scheduler passes pod.Name as binding.PodID
podKey, err := makePodKey(ctx, binding.PodID)
if err != nil {
return err
}
b.api.Lock()
defer b.api.Unlock()
taskId, exists := b.api.taskForPod(podKey)
if !exists {
log.Infof("Could not resolve pod %s to task id", podKey)
return noSuchPodErr
}
switch task, state := b.api.getTask(taskId); state {
case statePending:
return b.bind(ctx, binding, task)
default:
// in this case it's likely that the pod has been deleted between Schedule
// and Bind calls
log.Infof("No pending task for pod %s", podKey)
return noSuchPodErr
}
}
// assumes that: caller has acquired scheduler lock and that the PodTask is still pending
func (b *binder) bind(ctx api.Context, binding *api.Binding, task *PodTask) (err error) {
// sanity check: ensure that the task hasAcceptedOffer(), it's possible that between
// Schedule() and now that the offer for this task was rescinded or invalidated.
// ((we should never see this here))
if !task.hasAcceptedOffer() {
return fmt.Errorf("task has not accepted a valid offer %v", task.ID)
}
// By this time, there is a chance that the slave is disconnected.
offerId := task.GetOfferId()
if offer, ok := b.api.offers().Get(offerId); !ok || offer.HasExpired() {
// already rescinded or timed out or otherwise invalidated
task.Offer.Release()
task.ClearTaskInfo()
return fmt.Errorf("failed prior to launchTask due to expired offer for task %v", task.ID)
}
if err = b.prepareTaskForLaunch(ctx, binding.Host, task); err == nil {
log.V(2).Infof("Attempting to bind %v to %v", binding.PodID, binding.Host)
if err = b.client.Post().Namespace(api.Namespace(ctx)).Resource("bindings").Body(binding).Do().Error(); err == nil {
log.V(2).Infof("launching task : %v", task)
if err = b.api.launchTask(task); err == nil {
b.api.offers().Invalidate(offerId)
task.Pod.Status.Host = binding.Host
task.launched = true
return
}
}
}
task.Offer.Release()
task.ClearTaskInfo()
return fmt.Errorf("Failed to launch task %v: %v", task.ID, err)
}
func (b *binder) prepareTaskForLaunch(ctx api.Context, machine string, task *PodTask) error {
pod, err := b.client.Pods(api.Namespace(ctx)).Get(task.Pod.Name)
if err != nil {
return err
}
//HACK(jdef): adapted from https://github.com/GoogleCloudPlatform/kubernetes/blob/release-0.6/pkg/registry/pod/bound_pod_factory.go
envVars, err := b.getServiceEnvironmentVariables(ctx)
if err != nil {
return err
}
boundPod := &api.BoundPod{}
if err := api.Scheme.Convert(pod, boundPod); err != nil {
return err
}
for ix, container := range boundPod.Spec.Containers {
boundPod.Spec.Containers[ix].Env = append(container.Env, envVars...)
}
// Make a dummy self link so that references to this bound pod will work.
boundPod.SelfLink = "/api/v1beta1/boundPods/" + boundPod.Name
// update the boundPod here to pick up things like environment variables that
// pod containers will use for service discovery. the kubelet-executor uses this
// boundPod to instantiate the pods and this is the last update we make before
// firing up the pod.
task.TaskInfo.Data, err = yaml.Marshal(&boundPod)
if err != nil {
log.V(2).Infof("Failed to marshal the updated boundPod")
return err
}
return nil
}
// getServiceEnvironmentVariables populates a list of environment variables that are use
// in the container environment to get access to services.
// HACK(jdef): adapted from https://github.com/GoogleCloudPlatform/kubernetes/blob/release-0.6/pkg/registry/pod/bound_pod_factory.go
func (b *binder) getServiceEnvironmentVariables(ctx api.Context) (result []api.EnvVar, err error) {
var services *api.ServiceList
if services, err = b.client.Services(api.Namespace(ctx)).List(labels.Everything()); err == nil {
result = envvars.FromServices(services)
}
return
}
type kubeScheduler struct {
api SchedulerInterface
podStore queue.FIFO
}
// Schedule implements the Scheduler interface of the Kubernetes.
// It returns the selectedMachine's name and error (if there's any).
func (k *kubeScheduler) Schedule(pod api.Pod, unused algorithm.MinionLister) (string, error) {
log.Infof("Try to schedule pod %v\n", pod.Name)
ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
// default upstream scheduler passes pod.Name as binding.PodID
podKey, err := makePodKey(ctx, pod.Name)
if err != nil {
return "", err
}
k.api.Lock()
defer k.api.Unlock()
if taskID, ok := k.api.taskForPod(podKey); !ok {
// There's a bit of a potential race here, a pod could have been yielded() but
// and then before we get *here* it could be deleted. We use meta to index the pod
// in the store since that's what k8s client/cache/reflector does.
meta, err := meta.Accessor(&pod)
if err != nil {
log.Warningf("aborting Schedule, unable to understand pod object %+v", &pod)
return "", noSuchPodErr
}
if deleted := k.podStore.Poll(meta.Name(), queue.DELETE_EVENT); deleted {
// avoid scheduling a pod that's been deleted between yieldPod() and Schedule()
log.Infof("aborting Schedule, pod has been deleted %+v", &pod)
return "", noSuchPodErr
}
return k.doSchedule(k.api.registerPodTask(k.api.createPodTask(ctx, &pod)))
} else {
switch task, state := k.api.getTask(taskID); state {
case statePending:
if task.launched {
return "", fmt.Errorf("task %s has already been launched, aborting schedule", taskID)
} else {
return k.doSchedule(task, nil)
}
default:
return "", fmt.Errorf("task %s is not pending, nothing to schedule", taskID)
}
}
}
// Call ScheduleFunc and subtract some resources, returning the name of the machine the task is scheduled on
func (k *kubeScheduler) doSchedule(task *PodTask, err error) (string, error) {
var offer PerishableOffer
if err == nil {
offer, err = k.api.algorithm()(k.api.offers(), k.api, task)
}
if err != nil {
return "", err
}
slaveId := offer.Details().GetSlaveId().GetValue()
if slave, ok := k.api.slaveFor(slaveId); !ok {
// not much sense in Release()ing the offer here since its owner died
offer.Release()
k.api.offers().Invalidate(offer.Details().Id.GetValue())
task.ClearTaskInfo()
return "", fmt.Errorf("Slave disappeared (%v) while scheduling task %v", slaveId, task.ID)
} else {
task.FillTaskInfo(offer)
return slave.HostName, nil
}
}
type queuer struct {
lock sync.Mutex // shared by condition variables of this struct
podStore queue.FIFO // cache of pod updates to be processed
podQueue *queue.DelayFIFO // queue of pods to be scheduled
deltaCond sync.Cond // pod changes are available for processing
unscheduledCond sync.Cond // there are unscheduled pods for processing
}
func newQueuer(store queue.FIFO) *queuer {
q := &queuer{
podQueue: queue.NewDelayFIFO(),
podStore: store,
}
q.deltaCond.L = &q.lock
q.unscheduledCond.L = &q.lock
return q
}
// signal that there are probably pod updates waiting to be processed
func (q *queuer) updatesAvailable() {
q.deltaCond.Broadcast()
}
// delete a pod from the to-be-scheduled queue
func (q *queuer) dequeue(id string) {
q.podQueue.Delete(id)
}
// re-add a pod to the to-be-scheduled queue, will not overwrite existing pod data (that
// may have already changed).
func (q *queuer) requeue(pod *Pod) {
// use KeepExisting in case the pod has already been updated (can happen if binding fails
// due to constraint voilations); we don't want to overwrite a newer entry with stale data.
q.podQueue.Add(pod, queue.KeepExisting)
q.unscheduledCond.Broadcast()
}
// spawns a go-routine to watch for unscheduled pods and queue them up
// for scheduling. returns immediately. Pods that already have a host are
// removed from the queue; the rest are (re)offered for scheduling.
// NOTE(review): util.Forever restarts the loop body after a panic — presumably
// that is why the 1s interval is passed; confirm against util docs.
func (q *queuer) Run() {
go util.Forever(func() {
log.Info("Watching for newly created pods")
q.lock.Lock()
defer q.lock.Unlock()
for {
// limit blocking here for short intervals so that scheduling
// may proceed even if there have been no recent pod changes
p := q.podStore.Await(enqueuePopTimeout)
if p == nil {
// no update available: block on deltaCond, but do the Wait() from a
// helper goroutine so the wait can be bounded by a timeout below.
signalled := make(chan struct{})
go func() {
defer close(signalled)
q.deltaCond.Wait()
}()
// we've yielded the lock
select {
case <-time.After(enqueueWaitTimeout):
// self-broadcast to abort our own Wait(); this may also wake other
// deltaCond waiters, which appears harmless since they re-check state.
q.deltaCond.Broadcast() // abort Wait()
<-signalled // wait for lock re-acquisition
log.V(4).Infoln("timed out waiting for a pod update")
case <-signalled:
// we've acquired the lock and there may be
// changes for us to process now
}
continue
}
pod := p.(*Pod)
if pod.Status.Host != "" {
// already scheduled: make sure it is not sitting in the queue
q.dequeue(pod.GetUID())
} else {
// use ReplaceExisting because we are always pushing the latest state
now := time.Now()
pod.deadline = &now
q.podQueue.Offer(pod, queue.ReplaceExisting)
q.unscheduledCond.Broadcast()
log.V(3).Infof("queued pod for scheduling: %v", pod.Pod.Name)
}
}
}, 1*time.Second)
}
// implementation of scheduling plugin's NextPod func; see k8s plugin/pkg/scheduler.
// Blocks until an unscheduled pod is available, skipping entries that have
// transitioned (deleted/updated) or were already scheduled.
func (q *queuer) yield() *api.Pod {
log.V(2).Info("attempting to yield a pod")
q.lock.Lock()
defer q.lock.Unlock()
for {
// limit blocking here to short intervals so that we don't block the
// enqueuer Run() routine for very long
kpod := q.podQueue.Await(yieldPopTimeout)
if kpod == nil {
// queue empty: block on unscheduledCond via a helper goroutine so the
// wait can be bounded by a timeout below.
signalled := make(chan struct{})
go func() {
defer close(signalled)
q.unscheduledCond.Wait()
}()
// lock is yielded at this point and we're going to wait for either
// a timeout, or a signal that there's data
select {
case <-time.After(yieldWaitTimeout):
// self-broadcast aborts our own Wait(); may wake other waiters too,
// which appears harmless since they re-check state.
q.unscheduledCond.Broadcast() // abort Wait()
<-signalled // wait for the go-routine, and the lock
log.V(4).Infoln("timed out waiting for a pod to yield")
case <-signalled:
// we have acquired the lock, and there
// may be a pod for us to pop now
}
continue
}
pod := kpod.(*Pod).Pod
if meta, err := meta.Accessor(pod); err != nil {
log.Warningf("yield unable to understand pod object %+v, will skip", pod)
} else if !q.podStore.Poll(meta.Name(), queue.POP_EVENT) {
// store says the pod changed under us; drop this stale queue entry
log.V(1).Infof("yield popped a transitioning pod, skipping: %+v", pod)
} else if pod.Status.Host != "" {
// should never happen if enqueuePods is filtering properly
log.Warningf("yield popped an already-scheduled pod, skipping: %+v", pod)
} else {
return pod
}
}
}
// errorHandler retries pods whose scheduling attempt failed, applying a
// per-pod exponential backoff before requeuing.
type errorHandler struct {
api SchedulerInterface
backoff *podBackoff
qr *queuer
}
// implementation of scheduling plugin's Error func; see plugin/pkg/scheduler.
// Invoked when Schedule() fails for a pod: unless the pod or its task has
// disappeared in the meantime, the pod is requeued after a backoff delay.
// When the failure was "no suitable offers", the delay can be cut short as
// soon as a matching offer arrives (breakout listener below).
func (k *errorHandler) handleSchedulingError(pod *api.Pod, schedulingErr error) {
if schedulingErr == noSuchPodErr {
log.V(2).Infof("Not rescheduling non-existent pod %v", pod.Name)
return
}
log.Infof("Error scheduling %v: %v; retrying", pod.Name, schedulingErr)
defer util.HandleCrash()
// default upstream scheduler passes pod.Name as binding.PodID
ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
podKey, err := makePodKey(ctx, pod.Name)
if err != nil {
log.Errorf("Failed to construct pod key, aborting scheduling for pod %v: %v", pod.Name, err)
return
}
// expire stale backoff entries before computing a new delay
k.backoff.gc()
k.api.RLocker().Lock()
defer k.api.RLocker().Unlock()
taskId, exists := k.api.taskForPod(podKey)
if !exists {
// if we don't have a mapping here any more then someone deleted the pod
// (fixed typo: was "reschdule")
log.V(2).Infof("Could not resolve pod to task, aborting pod reschedule: %s", podKey)
return
}
switch task, state := k.api.getTask(taskId); state {
case statePending:
if task.launched {
log.V(2).Infof("Skipping re-scheduling for already-launched pod %v", podKey)
return
}
offersAvailable := queue.BreakChan(nil)
if schedulingErr == noSuitableOffersErr {
// register a breakout channel: if an acceptable offer shows up before
// the backoff expires, the requeued pod becomes schedulable immediately
log.V(3).Infof("adding backoff breakout handler for pod %v", podKey)
offersAvailable = queue.BreakChan(k.api.offers().Listen(podKey, func(offer *mesos.Offer) bool {
k.api.RLocker().Lock()
defer k.api.RLocker().Unlock()
switch task, state := k.api.getTask(taskId); state {
case statePending:
return !task.launched && task.AcceptOffer(offer)
}
return false
}))
}
delay := k.backoff.getBackoff(podKey)
k.qr.requeue(&Pod{Pod: pod, delay: &delay, notify: offersAvailable})
default:
log.V(2).Infof("Task is no longer pending, aborting reschedule for pod %v", podKey)
}
}
// deleter reacts to pod-deletion events by tearing down the pod's pending or
// running mesos task and removing the pod from the scheduling queue.
type deleter struct {
api SchedulerInterface
qr *queuer
}
// currently monitors for "pod deleted" events, upon which deleteOne()
// is invoked. All other (non-pop) events merely notify the queuer that
// updates are available. Spawns a goroutine; returns immediately.
func (k *deleter) Run(updates <-chan queue.Entry) {
go util.Forever(func() {
for {
entry := <-updates
pod := entry.Value().(*Pod)
if entry.Is(queue.DELETE_EVENT) {
if err := k.deleteOne(pod); err != nil {
log.Error(err)
}
} else if !entry.Is(queue.POP_EVENT) {
k.qr.updatesAvailable()
}
}
}, 1*time.Second)
}
// deleteOne tears down scheduling state for a deleted pod: it is removed from
// the scheduling queue, and its task is either unregistered (if never
// launched) or killed via mesos (if launched/running).
// Returns noSuchPodErr/noSuchTaskErr when the mapping is already gone.
func (k *deleter) deleteOne(pod *Pod) error {
ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
podKey, err := makePodKey(ctx, pod.Name)
if err != nil {
return err
}
log.V(2).Infof("pod deleted: %v", podKey)
// order is important here: we want to make sure we have the lock before
// removing the pod from the scheduling queue. this makes the concurrent
// execution of scheduler-error-handling and delete-handling easier to
// reason about.
k.api.Lock()
defer k.api.Unlock()
// prevent the scheduler from attempting to pop this; it's also possible that
// it's concurrently being scheduled (somewhere between pod scheduling and
// binding) - if so, then we'll end up removing it from pendingTasks which
// will abort Bind()ing
k.qr.dequeue(pod.GetUID())
taskId, exists := k.api.taskForPod(podKey)
if !exists {
log.V(2).Infof("Could not resolve pod '%s' to task id", podKey)
return noSuchPodErr
}
// determine if the task has already been launched to mesos, if not then
// cleanup is easier (unregister) since there's no state to sync
switch task, state := k.api.getTask(taskId); state {
case statePending:
if !task.launched {
// we've been invoked in between Schedule() and Bind()
if task.hasAcceptedOffer() {
task.Offer.Release()
task.ClearTaskInfo()
}
k.api.unregisterPodTask(task)
return nil
}
// launched but still pending: treat like a running task and kill it
fallthrough
case stateRunning:
// signal to watchers that the related pod is going down
task.deleted = true
task.Pod.Status.Host = ""
return k.api.killTask(taskId)
default:
log.Warningf("cannot kill pod '%s': task not found %v", podKey, taskId)
return noSuchTaskErr
}
}
// NewPluginConfig creates a scheduler plugin configuration and starts all
// supporting background functions: the pod reflector, the deleter, and the
// queuer. The returned Config plugs this scheduler's Algorithm/Binder/
// NextPod/Error implementations into the upstream scheduler framework.
func (k *KubernetesScheduler) NewPluginConfig() *plugin.Config {
// Watch and queue pods that need scheduling.
updates := make(chan queue.Entry, defaultUpdatesBacklog)
podStore := &podStoreAdapter{queue.NewHistorical(updates)}
cache.NewReflector(createAllPodsLW(k.client), &api.Pod{}, podStore).Run()
// lock that guards critical sections that involve transferring pods from
// the store (cache) to the scheduling queue; its purpose is to maintain
// an ordering (vs interleaving) of operations that's easier to reason about.
kapi := &k8smScheduler{k}
q := newQueuer(podStore)
podDeleter := &deleter{
api: kapi,
qr: q,
}
podDeleter.Run(updates)
q.Run()
eh := &errorHandler{
api: kapi,
backoff: &podBackoff{
perPodBackoff: map[string]*backoffEntry{},
clock: realClock{},
},
qr: q,
}
return &plugin.Config{
MinionLister: nil,
Algorithm: &kubeScheduler{
api: kapi,
podStore: podStore,
},
Binder: &binder{
api: kapi,
client: k.client,
},
NextPod: q.yield,
Error: eh.handleSchedulingError,
}
}
// listWatch is a minimal list/watch source for a single resource type,
// filtered by a field selector; consumed by cache.NewReflector.
type listWatch struct {
client *client.Client
fieldSelector labels.Selector
resource string
}
// List fetches, in a single request, every object of the configured
// resource that matches the configured field selector.
func (lw *listWatch) List() (runtime.Object, error) {
	req := lw.client.Get().Resource(lw.resource)
	req = req.SelectorParam("fields", lw.fieldSelector)
	return req.Do().Get()
}
// Watch opens a watch stream for the configured resource, filtered by the
// configured field selector, starting at the given resourceVersion.
func (lw *listWatch) Watch(resourceVersion string) (watch.Interface, error) {
	req := lw.client.Get().Prefix("watch").Resource(lw.resource)
	req = req.SelectorParam("fields", lw.fieldSelector)
	req = req.Param("resourceVersion", resourceVersion)
	return req.Watch()
}
// createAllPodsLW returns a listWatch that finds all pods
// (field selector matches everything).
func createAllPodsLW(cl *client.Client) *listWatch {
	result := &listWatch{
		resource:      "pods",
		fieldSelector: labels.Everything(),
		client:        cl,
	}
	return result
}
// Consumes *api.Pod, produces *Pod; the k8s reflector wants to push *api.Pod
// objects at us, but we want to store more flexible (Pod) type defined in
// this package. The adapter implementation facilitates this. It's a little
// hackish since the object type going in is different than the object type
// coming out -- you've been warned.
type podStoreAdapter struct {
queue.FIFO // embedded; Add/Update/Replace are overridden below to wrap pods
}
// Add wraps the incoming *api.Pod in this package's *Pod type before
// delegating to the embedded FIFO.
func (psa *podStoreAdapter) Add(id string, obj interface{}) {
	wrapped := &Pod{Pod: obj.(*api.Pod)}
	psa.FIFO.Add(id, wrapped)
}
// Update wraps the incoming *api.Pod in this package's *Pod type before
// delegating to the embedded FIFO.
func (psa *podStoreAdapter) Update(id string, obj interface{}) {
	wrapped := &Pod{Pod: obj.(*api.Pod)}
	psa.FIFO.Update(id, wrapped)
}
// Replace will delete the contents of the store, using instead the
// given map. This store implementation does NOT take ownership of the map.
func (psa *podStoreAdapter) Replace(idToObj map[string]interface{}) {
newmap := map[string]interface{}{}
for k, v := range idToObj |
psa.FIFO.Replace(newmap)
}
| {
pod := v.(*api.Pod)
newmap[k] = &Pod{Pod: pod}
} | conditional_block |
plugin.go | package scheduler
import (
"fmt"
"sync"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/envvars"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
plugin "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
log "github.com/golang/glog"
"github.com/mesos/mesos-go/mesos"
"github.com/mesosphere/kubernetes-mesos/pkg/queue"
"gopkg.in/v2/yaml"
)
// Timeouts for the queuer's two polling loops: the *PopTimeout values bound
// each blocking Await() on the store/queue, and the *WaitTimeout values bound
// how long a loop sleeps on its condition variable before re-checking.
const (
enqueuePopTimeout = 200 * time.Millisecond
enqueueWaitTimeout = 3 * time.Second
yieldPopTimeout = 200 * time.Millisecond
yieldWaitTimeout = 3 * time.Second
)
// scheduler abstraction to allow for easier unit testing.
// Combines locking (Locker + RLocker), slave lookup (SlaveIndex), and the
// task-registry operations the plugin components need. Callers of the
// registry methods are expected to hold the appropriate lock (see the
// "assume caller is holding scheduler lock" notes on the implementations).
type SchedulerInterface interface {
sync.Locker
RLocker() sync.Locker
SlaveIndex
algorithm() PodScheduleFunc
createPodTask(api.Context, *api.Pod) (*PodTask, error)
getTask(taskId string) (task *PodTask, currentState stateType)
offers() OfferRegistry
registerPodTask(*PodTask, error) (*PodTask, error)
taskForPod(podID string) (taskID string, ok bool)
unregisterPodTask(*PodTask)
killTask(taskId string) error
launchTask(*PodTask) error
}
// k8smScheduler adapts *KubernetesScheduler to the SchedulerInterface
// abstraction used by the plugin components (binder, deleter, errorHandler).
type k8smScheduler struct {
*KubernetesScheduler
}
// algorithm exposes the underlying scheduler's pod-scheduling function.
func (k *k8smScheduler) algorithm() PodScheduleFunc {
return k.KubernetesScheduler.scheduleFunc
}
// offers exposes the underlying scheduler's offer registry.
func (k *k8smScheduler) offers() OfferRegistry {
return k.KubernetesScheduler.offers
}
// taskForPod looks up the task ID registered for the given pod key.
func (k *k8smScheduler) taskForPod(podID string) (taskID string, ok bool) {
	// assume caller is holding scheduler lock
	id, found := k.podToTask[podID]
	return id, found
}
// createPodTask builds a new PodTask for the pod, bound to this
// scheduler's executor.
func (k *k8smScheduler) createPodTask(ctx api.Context, pod *api.Pod) (*PodTask, error) {
return newPodTask(ctx, pod, k.executor)
}
// registerPodTask records the pod->task and task-ID->task mappings, but only
// when err is nil; the (task, err) pair is passed through either way so the
// call composes with createPodTask.
func (k *k8smScheduler) registerPodTask(task *PodTask, err error) (*PodTask, error) {
	if err != nil {
		return task, err
	}
	// assume caller is holding scheduler lock
	k.pendingTasks[task.ID] = task
	k.podToTask[task.podKey] = task.ID
	return task, nil
}
// slaveFor looks up the slave record for the given slave ID.
func (k *k8smScheduler) slaveFor(id string) (slave *Slave, ok bool) {
	s, found := k.slaves[id]
	return s, found
}
// unregisterPodTask removes both registry mappings for the task.
func (k *k8smScheduler) unregisterPodTask(task *PodTask) {
	// assume caller is holding scheduler lock
	delete(k.pendingTasks, task.ID)
	delete(k.podToTask, task.podKey)
}
// killTask asks the mesos driver to kill the task with the given ID.
func (k *k8smScheduler) killTask(taskId string) error {
// assume caller is holding scheduler lock
killTaskId := newTaskID(taskId)
return k.KubernetesScheduler.driver.KillTask(killTaskId)
}
// launchTask submits the task to mesos against the offer the task accepted.
func (k *k8smScheduler) launchTask(task *PodTask) error {
// assume caller is holding scheduler lock
taskList := []*mesos.TaskInfo{task.TaskInfo}
return k.KubernetesScheduler.driver.LaunchTasks(task.Offer.Details().Id, taskList, nil)
}
// binder launches the mesos task associated with a pod once the upstream
// scheduler has chosen a host for it.
type binder struct {
api SchedulerInterface
client *client.Client
}
// implements binding.Registry, launches the pod-associated-task in mesos.
// Resolves the pod to its pending task under the scheduler lock and hands
// off to bind(); returns noSuchPodErr if the pod/task is gone.
func (b *binder) Bind(binding *api.Binding) error {
ctx := api.WithNamespace(api.NewContext(), binding.Namespace)
// default upstream scheduler passes pod.Name as binding.PodID
podKey, err := makePodKey(ctx, binding.PodID)
if err != nil {
return err
}
b.api.Lock()
defer b.api.Unlock()
taskId, exists := b.api.taskForPod(podKey)
if !exists {
log.Infof("Could not resolve pod %s to task id", podKey)
return noSuchPodErr
}
switch task, state := b.api.getTask(taskId); state {
case statePending:
return b.bind(ctx, binding, task)
default:
// in this case it's likely that the pod has been deleted between Schedule
// and Bind calls
log.Infof("No pending task for pod %s", podKey)
return noSuchPodErr
}
}
// assumes that: caller has acquired scheduler lock and that the PodTask is still pending
func (b *binder) bind(ctx api.Context, binding *api.Binding, task *PodTask) (err error) {
// sanity check: ensure that the task hasAcceptedOffer(), it's possible that between
// Schedule() and now that the offer for this task was rescinded or invalidated.
// ((we should never see this here))
if !task.hasAcceptedOffer() {
return fmt.Errorf("task has not accepted a valid offer %v", task.ID)
}
| offerId := task.GetOfferId()
if offer, ok := b.api.offers().Get(offerId); !ok || offer.HasExpired() {
// already rescinded or timed out or otherwise invalidated
task.Offer.Release()
task.ClearTaskInfo()
return fmt.Errorf("failed prior to launchTask due to expired offer for task %v", task.ID)
}
if err = b.prepareTaskForLaunch(ctx, binding.Host, task); err == nil {
log.V(2).Infof("Attempting to bind %v to %v", binding.PodID, binding.Host)
if err = b.client.Post().Namespace(api.Namespace(ctx)).Resource("bindings").Body(binding).Do().Error(); err == nil {
log.V(2).Infof("launching task : %v", task)
if err = b.api.launchTask(task); err == nil {
b.api.offers().Invalidate(offerId)
task.Pod.Status.Host = binding.Host
task.launched = true
return
}
}
}
task.Offer.Release()
task.ClearTaskInfo()
return fmt.Errorf("Failed to launch task %v: %v", task.ID, err)
}
// prepareTaskForLaunch converts the pod into a BoundPod, injects service
// environment variables, and serializes the result into the mesos TaskInfo
// payload that the kubelet-executor consumes.
// NOTE(review): the 'machine' parameter is unused in this body — presumably
// kept for interface symmetry; confirm before removing.
func (b *binder) prepareTaskForLaunch(ctx api.Context, machine string, task *PodTask) error {
pod, err := b.client.Pods(api.Namespace(ctx)).Get(task.Pod.Name)
if err != nil {
return err
}
//HACK(jdef): adapted from https://github.com/GoogleCloudPlatform/kubernetes/blob/release-0.6/pkg/registry/pod/bound_pod_factory.go
envVars, err := b.getServiceEnvironmentVariables(ctx)
if err != nil {
return err
}
boundPod := &api.BoundPod{}
if err := api.Scheme.Convert(pod, boundPod); err != nil {
return err
}
for ix, container := range boundPod.Spec.Containers {
boundPod.Spec.Containers[ix].Env = append(container.Env, envVars...)
}
// Make a dummy self link so that references to this bound pod will work.
boundPod.SelfLink = "/api/v1beta1/boundPods/" + boundPod.Name
// update the boundPod here to pick up things like environment variables that
// pod containers will use for service discovery. the kubelet-executor uses this
// boundPod to instantiate the pods and this is the last update we make before
// firing up the pod.
task.TaskInfo.Data, err = yaml.Marshal(&boundPod)
if err != nil {
log.V(2).Infof("Failed to marshal the updated boundPod")
return err
}
return nil
}
// getServiceEnvironmentVariables populates a list of environment variables that are used
// in the container environment to get access to services.
// HACK(jdef): adapted from https://github.com/GoogleCloudPlatform/kubernetes/blob/release-0.6/pkg/registry/pod/bound_pod_factory.go
func (b *binder) getServiceEnvironmentVariables(ctx api.Context) (result []api.EnvVar, err error) {
var services *api.ServiceList
if services, err = b.client.Services(api.Namespace(ctx)).List(labels.Everything()); err == nil {
result = envvars.FromServices(services)
}
return
}
// kubeScheduler implements the upstream scheduler's Algorithm interface,
// mapping pods to mesos offers/slaves via the scheduler's PodScheduleFunc.
type kubeScheduler struct {
api SchedulerInterface
podStore queue.FIFO
}
// Schedule implements the Scheduler interface of the Kubernetes.
// It returns the selectedMachine's name and error (if there's any).
// If the pod has no registered task yet, a new task is created and
// registered; otherwise the existing pending task is (re)scheduled.
func (k *kubeScheduler) Schedule(pod api.Pod, unused algorithm.MinionLister) (string, error) {
log.Infof("Try to schedule pod %v\n", pod.Name)
ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
// default upstream scheduler passes pod.Name as binding.PodID
podKey, err := makePodKey(ctx, pod.Name)
if err != nil {
return "", err
}
k.api.Lock()
defer k.api.Unlock()
if taskID, ok := k.api.taskForPod(podKey); !ok {
// There's a bit of a potential race here: a pod could have been yielded()
// and then before we get *here* it could be deleted. We use meta to index the pod
// in the store since that's what k8s client/cache/reflector does.
meta, err := meta.Accessor(&pod)
if err != nil {
log.Warningf("aborting Schedule, unable to understand pod object %+v", &pod)
return "", noSuchPodErr
}
if deleted := k.podStore.Poll(meta.Name(), queue.DELETE_EVENT); deleted {
// avoid scheduling a pod that's been deleted between yieldPod() and Schedule()
log.Infof("aborting Schedule, pod has been deleted %+v", &pod)
return "", noSuchPodErr
}
return k.doSchedule(k.api.registerPodTask(k.api.createPodTask(ctx, &pod)))
} else {
switch task, state := k.api.getTask(taskID); state {
case statePending:
if task.launched {
return "", fmt.Errorf("task %s has already been launched, aborting schedule", taskID)
} else {
return k.doSchedule(task, nil)
}
default:
return "", fmt.Errorf("task %s is not pending, nothing to schedule", taskID)
}
}
}
// Call ScheduleFunc and subtract some resources, returning the name of the machine the task is scheduled on.
// On success, fills the task's TaskInfo from the chosen offer; on a vanished
// slave, the offer is invalidated and the task info cleared.
func (k *kubeScheduler) doSchedule(task *PodTask, err error) (string, error) {
var offer PerishableOffer
if err == nil {
offer, err = k.api.algorithm()(k.api.offers(), k.api, task)
}
if err != nil {
return "", err
}
slaveId := offer.Details().GetSlaveId().GetValue()
if slave, ok := k.api.slaveFor(slaveId); !ok {
// not much sense in Release()ing the offer here since its owner died
offer.Release()
k.api.offers().Invalidate(offer.Details().Id.GetValue())
task.ClearTaskInfo()
return "", fmt.Errorf("Slave disappeared (%v) while scheduling task %v", slaveId, task.ID)
} else {
task.FillTaskInfo(offer)
return slave.HostName, nil
}
}
type queuer struct {
lock sync.Mutex // shared by condition variables of this struct
podStore queue.FIFO // cache of pod updates to be processed
podQueue *queue.DelayFIFO // queue of pods to be scheduled
deltaCond sync.Cond // pod changes are available for processing
unscheduledCond sync.Cond // there are unscheduled pods for processing
}
func newQueuer(store queue.FIFO) *queuer {
q := &queuer{
podQueue: queue.NewDelayFIFO(),
podStore: store,
}
q.deltaCond.L = &q.lock
q.unscheduledCond.L = &q.lock
return q
}
// signal that there are probably pod updates waiting to be processed
func (q *queuer) updatesAvailable() {
q.deltaCond.Broadcast()
}
// delete a pod from the to-be-scheduled queue
func (q *queuer) dequeue(id string) {
q.podQueue.Delete(id)
}
// re-add a pod to the to-be-scheduled queue, will not overwrite existing pod data (that
// may have already changed).
func (q *queuer) requeue(pod *Pod) {
// use KeepExisting in case the pod has already been updated (can happen if binding fails
// due to constraint voilations); we don't want to overwrite a newer entry with stale data.
q.podQueue.Add(pod, queue.KeepExisting)
q.unscheduledCond.Broadcast()
}
// spawns a go-routine to watch for unscheduled pods and queue them up
// for scheduling. returns immediately.
func (q *queuer) Run() {
go util.Forever(func() {
log.Info("Watching for newly created pods")
q.lock.Lock()
defer q.lock.Unlock()
for {
// limit blocking here for short intervals so that scheduling
// may proceed even if there have been no recent pod changes
p := q.podStore.Await(enqueuePopTimeout)
if p == nil {
signalled := make(chan struct{})
go func() {
defer close(signalled)
q.deltaCond.Wait()
}()
// we've yielded the lock
select {
case <-time.After(enqueueWaitTimeout):
q.deltaCond.Broadcast() // abort Wait()
<-signalled // wait for lock re-acquisition
log.V(4).Infoln("timed out waiting for a pod update")
case <-signalled:
// we've acquired the lock and there may be
// changes for us to process now
}
continue
}
pod := p.(*Pod)
if pod.Status.Host != "" {
q.dequeue(pod.GetUID())
} else {
// use ReplaceExisting because we are always pushing the latest state
now := time.Now()
pod.deadline = &now
q.podQueue.Offer(pod, queue.ReplaceExisting)
q.unscheduledCond.Broadcast()
log.V(3).Infof("queued pod for scheduling: %v", pod.Pod.Name)
}
}
}, 1*time.Second)
}
// implementation of scheduling plugin's NextPod func; see k8s plugin/pkg/scheduler
func (q *queuer) yield() *api.Pod {
log.V(2).Info("attempting to yield a pod")
q.lock.Lock()
defer q.lock.Unlock()
for {
// limit blocking here to short intervals so that we don't block the
// enqueuer Run() routine for very long
kpod := q.podQueue.Await(yieldPopTimeout)
if kpod == nil {
signalled := make(chan struct{})
go func() {
defer close(signalled)
q.unscheduledCond.Wait()
}()
// lock is yielded at this point and we're going to wait for either
// a timeout, or a signal that there's data
select {
case <-time.After(yieldWaitTimeout):
q.unscheduledCond.Broadcast() // abort Wait()
<-signalled // wait for the go-routine, and the lock
log.V(4).Infoln("timed out waiting for a pod to yield")
case <-signalled:
// we have acquired the lock, and there
// may be a pod for us to pop now
}
continue
}
pod := kpod.(*Pod).Pod
if meta, err := meta.Accessor(pod); err != nil {
log.Warningf("yield unable to understand pod object %+v, will skip", pod)
} else if !q.podStore.Poll(meta.Name(), queue.POP_EVENT) {
log.V(1).Infof("yield popped a transitioning pod, skipping: %+v", pod)
} else if pod.Status.Host != "" {
// should never happen if enqueuePods is filtering properly
log.Warningf("yield popped an already-scheduled pod, skipping: %+v", pod)
} else {
return pod
}
}
}
type errorHandler struct {
api SchedulerInterface
backoff *podBackoff
qr *queuer
}
// implementation of scheduling plugin's Error func; see plugin/pkg/scheduler
func (k *errorHandler) handleSchedulingError(pod *api.Pod, schedulingErr error) {
if schedulingErr == noSuchPodErr {
log.V(2).Infof("Not rescheduling non-existent pod %v", pod.Name)
return
}
log.Infof("Error scheduling %v: %v; retrying", pod.Name, schedulingErr)
defer util.HandleCrash()
// default upstream scheduler passes pod.Name as binding.PodID
ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
podKey, err := makePodKey(ctx, pod.Name)
if err != nil {
log.Errorf("Failed to construct pod key, aborting scheduling for pod %v: %v", pod.Name, err)
return
}
k.backoff.gc()
k.api.RLocker().Lock()
defer k.api.RLocker().Unlock()
taskId, exists := k.api.taskForPod(podKey)
if !exists {
// if we don't have a mapping here any more then someone deleted the pod
log.V(2).Infof("Could not resolve pod to task, aborting pod reschdule: %s", podKey)
return
}
switch task, state := k.api.getTask(taskId); state {
case statePending:
if task.launched {
log.V(2).Infof("Skipping re-scheduling for already-launched pod %v", podKey)
return
}
offersAvailable := queue.BreakChan(nil)
if schedulingErr == noSuitableOffersErr {
log.V(3).Infof("adding backoff breakout handler for pod %v", podKey)
offersAvailable = queue.BreakChan(k.api.offers().Listen(podKey, func(offer *mesos.Offer) bool {
k.api.RLocker().Lock()
defer k.api.RLocker().Unlock()
switch task, state := k.api.getTask(taskId); state {
case statePending:
return !task.launched && task.AcceptOffer(offer)
}
return false
}))
}
delay := k.backoff.getBackoff(podKey)
k.qr.requeue(&Pod{Pod: pod, delay: &delay, notify: offersAvailable})
default:
log.V(2).Infof("Task is no longer pending, aborting reschedule for pod %v", podKey)
}
}
type deleter struct {
api SchedulerInterface
qr *queuer
}
// currently monitors for "pod deleted" events, upon which handle()
// is invoked.
func (k *deleter) Run(updates <-chan queue.Entry) {
go util.Forever(func() {
for {
entry := <-updates
pod := entry.Value().(*Pod)
if entry.Is(queue.DELETE_EVENT) {
if err := k.deleteOne(pod); err != nil {
log.Error(err)
}
} else if !entry.Is(queue.POP_EVENT) {
k.qr.updatesAvailable()
}
}
}, 1*time.Second)
}
func (k *deleter) deleteOne(pod *Pod) error {
ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
podKey, err := makePodKey(ctx, pod.Name)
if err != nil {
return err
}
log.V(2).Infof("pod deleted: %v", podKey)
// order is important here: we want to make sure we have the lock before
// removing the pod from the scheduling queue. this makes the concurrent
// execution of scheduler-error-handling and delete-handling easier to
// reason about.
k.api.Lock()
defer k.api.Unlock()
// prevent the scheduler from attempting to pop this; it's also possible that
// it's concurrently being scheduled (somewhere between pod scheduling and
// binding) - if so, then we'll end up removing it from pendingTasks which
// will abort Bind()ing
k.qr.dequeue(pod.GetUID())
taskId, exists := k.api.taskForPod(podKey)
if !exists {
log.V(2).Infof("Could not resolve pod '%s' to task id", podKey)
return noSuchPodErr
}
// determine if the task has already been launched to mesos, if not then
// cleanup is easier (unregister) since there's no state to sync
switch task, state := k.api.getTask(taskId); state {
case statePending:
if !task.launched {
// we've been invoked in between Schedule() and Bind()
if task.hasAcceptedOffer() {
task.Offer.Release()
task.ClearTaskInfo()
}
k.api.unregisterPodTask(task)
return nil
}
fallthrough
case stateRunning:
// signal to watchers that the related pod is going down
task.deleted = true
task.Pod.Status.Host = ""
return k.api.killTask(taskId)
default:
log.Warningf("cannot kill pod '%s': task not found %v", podKey, taskId)
return noSuchTaskErr
}
}
// Create creates a scheduler plugin and all supporting background functions.
func (k *KubernetesScheduler) NewPluginConfig() *plugin.Config {
// Watch and queue pods that need scheduling.
updates := make(chan queue.Entry, defaultUpdatesBacklog)
podStore := &podStoreAdapter{queue.NewHistorical(updates)}
cache.NewReflector(createAllPodsLW(k.client), &api.Pod{}, podStore).Run()
// lock that guards critial sections that involve transferring pods from
// the store (cache) to the scheduling queue; its purpose is to maintain
// an ordering (vs interleaving) of operations that's easier to reason about.
kapi := &k8smScheduler{k}
q := newQueuer(podStore)
podDeleter := &deleter{
api: kapi,
qr: q,
}
podDeleter.Run(updates)
q.Run()
eh := &errorHandler{
api: kapi,
backoff: &podBackoff{
perPodBackoff: map[string]*backoffEntry{},
clock: realClock{},
},
qr: q,
}
return &plugin.Config{
MinionLister: nil,
Algorithm: &kubeScheduler{
api: kapi,
podStore: podStore,
},
Binder: &binder{
api: kapi,
client: k.client,
},
NextPod: q.yield,
Error: eh.handleSchedulingError,
}
}
type listWatch struct {
client *client.Client
fieldSelector labels.Selector
resource string
}
func (lw *listWatch) List() (runtime.Object, error) {
return lw.client.
Get().
Resource(lw.resource).
SelectorParam("fields", lw.fieldSelector).
Do().
Get()
}
func (lw *listWatch) Watch(resourceVersion string) (watch.Interface, error) {
return lw.client.
Get().
Prefix("watch").
Resource(lw.resource).
SelectorParam("fields", lw.fieldSelector).
Param("resourceVersion", resourceVersion).
Watch()
}
// createAllPodsLW returns a listWatch that finds all pods
func createAllPodsLW(cl *client.Client) *listWatch {
return &listWatch{
client: cl,
fieldSelector: labels.Everything(),
resource: "pods",
}
}
// Consumes *api.Pod, produces *Pod; the k8s reflector wants to push *api.Pod
// objects at us, but we want to store more flexible (Pod) type defined in
// this package. The adapter implementation facilitates this. It's a little
// hackish since the object type going in is different than the object type
// coming out -- you've been warned.
type podStoreAdapter struct {
queue.FIFO
}
func (psa *podStoreAdapter) Add(id string, obj interface{}) {
pod := obj.(*api.Pod)
psa.FIFO.Add(id, &Pod{Pod: pod})
}
func (psa *podStoreAdapter) Update(id string, obj interface{}) {
pod := obj.(*api.Pod)
psa.FIFO.Update(id, &Pod{Pod: pod})
}
// Replace will delete the contents of the store, using instead the
// given map. This store implementation does NOT take ownership of the map.
func (psa *podStoreAdapter) Replace(idToObj map[string]interface{}) {
newmap := map[string]interface{}{}
for k, v := range idToObj {
pod := v.(*api.Pod)
newmap[k] = &Pod{Pod: pod}
}
psa.FIFO.Replace(newmap)
} | // By this time, there is a chance that the slave is disconnected. | random_line_split |
plugin.go | package scheduler
import (
"fmt"
"sync"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/envvars"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
plugin "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
log "github.com/golang/glog"
"github.com/mesos/mesos-go/mesos"
"github.com/mesosphere/kubernetes-mesos/pkg/queue"
"gopkg.in/v2/yaml"
)
const (
enqueuePopTimeout = 200 * time.Millisecond
enqueueWaitTimeout = 3 * time.Second
yieldPopTimeout = 200 * time.Millisecond
yieldWaitTimeout = 3 * time.Second
)
// scheduler abstraction to allow for easier unit testing
type SchedulerInterface interface {
sync.Locker
RLocker() sync.Locker
SlaveIndex
algorithm() PodScheduleFunc
createPodTask(api.Context, *api.Pod) (*PodTask, error)
getTask(taskId string) (task *PodTask, currentState stateType)
offers() OfferRegistry
registerPodTask(*PodTask, error) (*PodTask, error)
taskForPod(podID string) (taskID string, ok bool)
unregisterPodTask(*PodTask)
killTask(taskId string) error
launchTask(*PodTask) error
}
type k8smScheduler struct {
*KubernetesScheduler
}
func (k *k8smScheduler) algorithm() PodScheduleFunc {
return k.KubernetesScheduler.scheduleFunc
}
func (k *k8smScheduler) offers() OfferRegistry {
return k.KubernetesScheduler.offers
}
func (k *k8smScheduler) taskForPod(podID string) (taskID string, ok bool) {
// assume caller is holding scheduler lock
taskID, ok = k.podToTask[podID]
return
}
func (k *k8smScheduler) createPodTask(ctx api.Context, pod *api.Pod) (*PodTask, error) {
return newPodTask(ctx, pod, k.executor)
}
func (k *k8smScheduler) registerPodTask(task *PodTask, err error) (*PodTask, error) {
if err == nil {
// assume caller is holding scheduler lock
k.podToTask[task.podKey] = task.ID
k.pendingTasks[task.ID] = task
}
return task, err
}
func (k *k8smScheduler) slaveFor(id string) (slave *Slave, ok bool) {
slave, ok = k.slaves[id]
return
}
func (k *k8smScheduler) unregisterPodTask(task *PodTask) {
// assume caller is holding scheduler lock
delete(k.podToTask, task.podKey)
delete(k.pendingTasks, task.ID)
}
func (k *k8smScheduler) killTask(taskId string) error {
// assume caller is holding scheduler lock
killTaskId := newTaskID(taskId)
return k.KubernetesScheduler.driver.KillTask(killTaskId)
}
func (k *k8smScheduler) launchTask(task *PodTask) error {
// assume caller is holding scheduler lock
taskList := []*mesos.TaskInfo{task.TaskInfo}
return k.KubernetesScheduler.driver.LaunchTasks(task.Offer.Details().Id, taskList, nil)
}
type binder struct {
api SchedulerInterface
client *client.Client
}
// implements binding.Registry, launches the pod-associated-task in mesos
func (b *binder) Bind(binding *api.Binding) error {
ctx := api.WithNamespace(api.NewContext(), binding.Namespace)
// default upstream scheduler passes pod.Name as binding.PodID
podKey, err := makePodKey(ctx, binding.PodID)
if err != nil {
return err
}
b.api.Lock()
defer b.api.Unlock()
taskId, exists := b.api.taskForPod(podKey)
if !exists {
log.Infof("Could not resolve pod %s to task id", podKey)
return noSuchPodErr
}
switch task, state := b.api.getTask(taskId); state {
case statePending:
return b.bind(ctx, binding, task)
default:
// in this case it's likely that the pod has been deleted between Schedule
// and Bind calls
log.Infof("No pending task for pod %s", podKey)
return noSuchPodErr
}
}
// assumes that: caller has acquired scheduler lock and that the PodTask is still pending.
// Launch pipeline: re-validate the accepted offer, prepare the TaskInfo
// payload, POST the binding to the apiserver, then launch on mesos.
// Any failure along the way releases the offer and clears the task info.
func (b *binder) bind(ctx api.Context, binding *api.Binding, task *PodTask) (err error) {
// sanity check: ensure that the task hasAcceptedOffer(), it's possible that between
// Schedule() and now that the offer for this task was rescinded or invalidated.
// ((we should never see this here))
if !task.hasAcceptedOffer() {
return fmt.Errorf("task has not accepted a valid offer %v", task.ID)
}
// By this time, there is a chance that the slave is disconnected.
offerId := task.GetOfferId()
if offer, ok := b.api.offers().Get(offerId); !ok || offer.HasExpired() {
// already rescinded or timed out or otherwise invalidated
task.Offer.Release()
task.ClearTaskInfo()
return fmt.Errorf("failed prior to launchTask due to expired offer for task %v", task.ID)
}
if err = b.prepareTaskForLaunch(ctx, binding.Host, task); err == nil {
log.V(2).Infof("Attempting to bind %v to %v", binding.PodID, binding.Host)
if err = b.client.Post().Namespace(api.Namespace(ctx)).Resource("bindings").Body(binding).Do().Error(); err == nil {
log.V(2).Infof("launching task : %v", task)
if err = b.api.launchTask(task); err == nil {
// launched: consume the offer and record the host on the pod
b.api.offers().Invalidate(offerId)
task.Pod.Status.Host = binding.Host
task.launched = true
return
}
}
}
// cleanup on any failure path above
task.Offer.Release()
task.ClearTaskInfo()
return fmt.Errorf("Failed to launch task %v: %v", task.ID, err)
}
func (b *binder) prepareTaskForLaunch(ctx api.Context, machine string, task *PodTask) error {
pod, err := b.client.Pods(api.Namespace(ctx)).Get(task.Pod.Name)
if err != nil {
return err
}
//HACK(jdef): adapted from https://github.com/GoogleCloudPlatform/kubernetes/blob/release-0.6/pkg/registry/pod/bound_pod_factory.go
envVars, err := b.getServiceEnvironmentVariables(ctx)
if err != nil {
return err
}
boundPod := &api.BoundPod{}
if err := api.Scheme.Convert(pod, boundPod); err != nil {
return err
}
for ix, container := range boundPod.Spec.Containers {
boundPod.Spec.Containers[ix].Env = append(container.Env, envVars...)
}
// Make a dummy self link so that references to this bound pod will work.
boundPod.SelfLink = "/api/v1beta1/boundPods/" + boundPod.Name
// update the boundPod here to pick up things like environment variables that
// pod containers will use for service discovery. the kubelet-executor uses this
// boundPod to instantiate the pods and this is the last update we make before
// firing up the pod.
task.TaskInfo.Data, err = yaml.Marshal(&boundPod)
if err != nil {
log.V(2).Infof("Failed to marshal the updated boundPod")
return err
}
return nil
}
// getServiceEnvironmentVariables populates a list of environment variables that are use
// in the container environment to get access to services.
// HACK(jdef): adapted from https://github.com/GoogleCloudPlatform/kubernetes/blob/release-0.6/pkg/registry/pod/bound_pod_factory.go
func (b *binder) getServiceEnvironmentVariables(ctx api.Context) (result []api.EnvVar, err error) {
var services *api.ServiceList
if services, err = b.client.Services(api.Namespace(ctx)).List(labels.Everything()); err == nil {
result = envvars.FromServices(services)
}
return
}
type kubeScheduler struct {
api SchedulerInterface
podStore queue.FIFO
}
// Schedule implements the Scheduler interface of the Kubernetes.
// It returns the selectedMachine's name and error (if there's any).
func (k *kubeScheduler) Schedule(pod api.Pod, unused algorithm.MinionLister) (string, error) {
log.Infof("Try to schedule pod %v\n", pod.Name)
ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
// default upstream scheduler passes pod.Name as binding.PodID
podKey, err := makePodKey(ctx, pod.Name)
if err != nil {
return "", err
}
k.api.Lock()
defer k.api.Unlock()
if taskID, ok := k.api.taskForPod(podKey); !ok {
// There's a bit of a potential race here, a pod could have been yielded() but
// and then before we get *here* it could be deleted. We use meta to index the pod
// in the store since that's what k8s client/cache/reflector does.
meta, err := meta.Accessor(&pod)
if err != nil {
log.Warningf("aborting Schedule, unable to understand pod object %+v", &pod)
return "", noSuchPodErr
}
if deleted := k.podStore.Poll(meta.Name(), queue.DELETE_EVENT); deleted {
// avoid scheduling a pod that's been deleted between yieldPod() and Schedule()
log.Infof("aborting Schedule, pod has been deleted %+v", &pod)
return "", noSuchPodErr
}
return k.doSchedule(k.api.registerPodTask(k.api.createPodTask(ctx, &pod)))
} else {
switch task, state := k.api.getTask(taskID); state {
case statePending:
if task.launched {
return "", fmt.Errorf("task %s has already been launched, aborting schedule", taskID)
} else {
return k.doSchedule(task, nil)
}
default:
return "", fmt.Errorf("task %s is not pending, nothing to schedule", taskID)
}
}
}
// Call ScheduleFunc and subtract some resources, returning the name of the machine the task is scheduled on
func (k *kubeScheduler) doSchedule(task *PodTask, err error) (string, error) {
var offer PerishableOffer
if err == nil {
offer, err = k.api.algorithm()(k.api.offers(), k.api, task)
}
if err != nil {
return "", err
}
slaveId := offer.Details().GetSlaveId().GetValue()
if slave, ok := k.api.slaveFor(slaveId); !ok {
// not much sense in Release()ing the offer here since its owner died
offer.Release()
k.api.offers().Invalidate(offer.Details().Id.GetValue())
task.ClearTaskInfo()
return "", fmt.Errorf("Slave disappeared (%v) while scheduling task %v", slaveId, task.ID)
} else {
task.FillTaskInfo(offer)
return slave.HostName, nil
}
}
type queuer struct {
lock sync.Mutex // shared by condition variables of this struct
podStore queue.FIFO // cache of pod updates to be processed
podQueue *queue.DelayFIFO // queue of pods to be scheduled
deltaCond sync.Cond // pod changes are available for processing
unscheduledCond sync.Cond // there are unscheduled pods for processing
}
func newQueuer(store queue.FIFO) *queuer {
q := &queuer{
podQueue: queue.NewDelayFIFO(),
podStore: store,
}
q.deltaCond.L = &q.lock
q.unscheduledCond.L = &q.lock
return q
}
// signal that there are probably pod updates waiting to be processed
func (q *queuer) updatesAvailable() {
q.deltaCond.Broadcast()
}
// delete a pod from the to-be-scheduled queue
func (q *queuer) dequeue(id string) {
q.podQueue.Delete(id)
}
// re-add a pod to the to-be-scheduled queue, will not overwrite existing pod data (that
// may have already changed).
func (q *queuer) requeue(pod *Pod) {
// use KeepExisting in case the pod has already been updated (can happen if binding fails
// due to constraint voilations); we don't want to overwrite a newer entry with stale data.
q.podQueue.Add(pod, queue.KeepExisting)
q.unscheduledCond.Broadcast()
}
// spawns a go-routine to watch for unscheduled pods and queue them up
// for scheduling. returns immediately.
func (q *queuer) Run() {
go util.Forever(func() {
log.Info("Watching for newly created pods")
q.lock.Lock()
defer q.lock.Unlock()
for {
// limit blocking here for short intervals so that scheduling
// may proceed even if there have been no recent pod changes
p := q.podStore.Await(enqueuePopTimeout)
if p == nil {
signalled := make(chan struct{})
go func() {
defer close(signalled)
q.deltaCond.Wait()
}()
// we've yielded the lock
select {
case <-time.After(enqueueWaitTimeout):
q.deltaCond.Broadcast() // abort Wait()
<-signalled // wait for lock re-acquisition
log.V(4).Infoln("timed out waiting for a pod update")
case <-signalled:
// we've acquired the lock and there may be
// changes for us to process now
}
continue
}
pod := p.(*Pod)
if pod.Status.Host != "" {
q.dequeue(pod.GetUID())
} else {
// use ReplaceExisting because we are always pushing the latest state
now := time.Now()
pod.deadline = &now
q.podQueue.Offer(pod, queue.ReplaceExisting)
q.unscheduledCond.Broadcast()
log.V(3).Infof("queued pod for scheduling: %v", pod.Pod.Name)
}
}
}, 1*time.Second)
}
// implementation of scheduling plugin's NextPod func; see k8s plugin/pkg/scheduler
func (q *queuer) yield() *api.Pod {
log.V(2).Info("attempting to yield a pod")
q.lock.Lock()
defer q.lock.Unlock()
for {
// limit blocking here to short intervals so that we don't block the
// enqueuer Run() routine for very long
kpod := q.podQueue.Await(yieldPopTimeout)
if kpod == nil {
signalled := make(chan struct{})
go func() {
defer close(signalled)
q.unscheduledCond.Wait()
}()
// lock is yielded at this point and we're going to wait for either
// a timeout, or a signal that there's data
select {
case <-time.After(yieldWaitTimeout):
q.unscheduledCond.Broadcast() // abort Wait()
<-signalled // wait for the go-routine, and the lock
log.V(4).Infoln("timed out waiting for a pod to yield")
case <-signalled:
// we have acquired the lock, and there
// may be a pod for us to pop now
}
continue
}
pod := kpod.(*Pod).Pod
if meta, err := meta.Accessor(pod); err != nil {
log.Warningf("yield unable to understand pod object %+v, will skip", pod)
} else if !q.podStore.Poll(meta.Name(), queue.POP_EVENT) {
log.V(1).Infof("yield popped a transitioning pod, skipping: %+v", pod)
} else if pod.Status.Host != "" {
// should never happen if enqueuePods is filtering properly
log.Warningf("yield popped an already-scheduled pod, skipping: %+v", pod)
} else {
return pod
}
}
}
type errorHandler struct {
api SchedulerInterface
backoff *podBackoff
qr *queuer
}
// implementation of scheduling plugin's Error func; see plugin/pkg/scheduler
func (k *errorHandler) handleSchedulingError(pod *api.Pod, schedulingErr error) {
if schedulingErr == noSuchPodErr {
log.V(2).Infof("Not rescheduling non-existent pod %v", pod.Name)
return
}
log.Infof("Error scheduling %v: %v; retrying", pod.Name, schedulingErr)
defer util.HandleCrash()
// default upstream scheduler passes pod.Name as binding.PodID
ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
podKey, err := makePodKey(ctx, pod.Name)
if err != nil {
log.Errorf("Failed to construct pod key, aborting scheduling for pod %v: %v", pod.Name, err)
return
}
k.backoff.gc()
k.api.RLocker().Lock()
defer k.api.RLocker().Unlock()
taskId, exists := k.api.taskForPod(podKey)
if !exists {
// if we don't have a mapping here any more then someone deleted the pod
log.V(2).Infof("Could not resolve pod to task, aborting pod reschdule: %s", podKey)
return
}
switch task, state := k.api.getTask(taskId); state {
case statePending:
if task.launched {
log.V(2).Infof("Skipping re-scheduling for already-launched pod %v", podKey)
return
}
offersAvailable := queue.BreakChan(nil)
if schedulingErr == noSuitableOffersErr {
log.V(3).Infof("adding backoff breakout handler for pod %v", podKey)
offersAvailable = queue.BreakChan(k.api.offers().Listen(podKey, func(offer *mesos.Offer) bool {
k.api.RLocker().Lock()
defer k.api.RLocker().Unlock()
switch task, state := k.api.getTask(taskId); state {
case statePending:
return !task.launched && task.AcceptOffer(offer)
}
return false
}))
}
delay := k.backoff.getBackoff(podKey)
k.qr.requeue(&Pod{Pod: pod, delay: &delay, notify: offersAvailable})
default:
log.V(2).Infof("Task is no longer pending, aborting reschedule for pod %v", podKey)
}
}
type deleter struct {
api SchedulerInterface
qr *queuer
}
// currently monitors for "pod deleted" events, upon which handle()
// is invoked.
func (k *deleter) Run(updates <-chan queue.Entry) {
go util.Forever(func() {
for {
entry := <-updates
pod := entry.Value().(*Pod)
if entry.Is(queue.DELETE_EVENT) {
if err := k.deleteOne(pod); err != nil {
log.Error(err)
}
} else if !entry.Is(queue.POP_EVENT) {
k.qr.updatesAvailable()
}
}
}, 1*time.Second)
}
func (k *deleter) deleteOne(pod *Pod) error {
ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
podKey, err := makePodKey(ctx, pod.Name)
if err != nil {
return err
}
log.V(2).Infof("pod deleted: %v", podKey)
// order is important here: we want to make sure we have the lock before
// removing the pod from the scheduling queue. this makes the concurrent
// execution of scheduler-error-handling and delete-handling easier to
// reason about.
k.api.Lock()
defer k.api.Unlock()
// prevent the scheduler from attempting to pop this; it's also possible that
// it's concurrently being scheduled (somewhere between pod scheduling and
// binding) - if so, then we'll end up removing it from pendingTasks which
// will abort Bind()ing
k.qr.dequeue(pod.GetUID())
taskId, exists := k.api.taskForPod(podKey)
if !exists {
log.V(2).Infof("Could not resolve pod '%s' to task id", podKey)
return noSuchPodErr
}
// determine if the task has already been launched to mesos, if not then
// cleanup is easier (unregister) since there's no state to sync
switch task, state := k.api.getTask(taskId); state {
case statePending:
if !task.launched {
// we've been invoked in between Schedule() and Bind()
if task.hasAcceptedOffer() {
task.Offer.Release()
task.ClearTaskInfo()
}
k.api.unregisterPodTask(task)
return nil
}
fallthrough
case stateRunning:
// signal to watchers that the related pod is going down
task.deleted = true
task.Pod.Status.Host = ""
return k.api.killTask(taskId)
default:
log.Warningf("cannot kill pod '%s': task not found %v", podKey, taskId)
return noSuchTaskErr
}
}
// Create creates a scheduler plugin and all supporting background functions.
func (k *KubernetesScheduler) NewPluginConfig() *plugin.Config {
// Watch and queue pods that need scheduling.
updates := make(chan queue.Entry, defaultUpdatesBacklog)
podStore := &podStoreAdapter{queue.NewHistorical(updates)}
cache.NewReflector(createAllPodsLW(k.client), &api.Pod{}, podStore).Run()
// lock that guards critial sections that involve transferring pods from
// the store (cache) to the scheduling queue; its purpose is to maintain
// an ordering (vs interleaving) of operations that's easier to reason about.
kapi := &k8smScheduler{k}
q := newQueuer(podStore)
podDeleter := &deleter{
api: kapi,
qr: q,
}
podDeleter.Run(updates)
q.Run()
eh := &errorHandler{
api: kapi,
backoff: &podBackoff{
perPodBackoff: map[string]*backoffEntry{},
clock: realClock{},
},
qr: q,
}
return &plugin.Config{
MinionLister: nil,
Algorithm: &kubeScheduler{
api: kapi,
podStore: podStore,
},
Binder: &binder{
api: kapi,
client: k.client,
},
NextPod: q.yield,
Error: eh.handleSchedulingError,
}
}
type listWatch struct {
client *client.Client
fieldSelector labels.Selector
resource string
}
func (lw *listWatch) List() (runtime.Object, error) {
return lw.client.
Get().
Resource(lw.resource).
SelectorParam("fields", lw.fieldSelector).
Do().
Get()
}
func (lw *listWatch) Watch(resourceVersion string) (watch.Interface, error) {
return lw.client.
Get().
Prefix("watch").
Resource(lw.resource).
SelectorParam("fields", lw.fieldSelector).
Param("resourceVersion", resourceVersion).
Watch()
}
// createAllPodsLW returns a listWatch that finds all pods
func createAllPodsLW(cl *client.Client) *listWatch {
return &listWatch{
client: cl,
fieldSelector: labels.Everything(),
resource: "pods",
}
}
// Consumes *api.Pod, produces *Pod; the k8s reflector wants to push *api.Pod
// objects at us, but we want to store more flexible (Pod) type defined in
// this package. The adapter implementation facilitates this. It's a little
// hackish since the object type going in is different than the object type
// coming out -- you've been warned.
type podStoreAdapter struct {
queue.FIFO
}
func (psa *podStoreAdapter) Add(id string, obj interface{}) |
func (psa *podStoreAdapter) Update(id string, obj interface{}) {
pod := obj.(*api.Pod)
psa.FIFO.Update(id, &Pod{Pod: pod})
}
// Replace will delete the contents of the store, using instead the
// given map. This store implementation does NOT take ownership of the map.
func (psa *podStoreAdapter) Replace(idToObj map[string]interface{}) {
newmap := map[string]interface{}{}
for k, v := range idToObj {
pod := v.(*api.Pod)
newmap[k] = &Pod{Pod: pod}
}
psa.FIFO.Replace(newmap)
}
| {
pod := obj.(*api.Pod)
psa.FIFO.Add(id, &Pod{Pod: pod})
} | identifier_body |
plugin.go | package scheduler
import (
"fmt"
"sync"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/envvars"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
plugin "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
log "github.com/golang/glog"
"github.com/mesos/mesos-go/mesos"
"github.com/mesosphere/kubernetes-mesos/pkg/queue"
"gopkg.in/v2/yaml"
)
const (
enqueuePopTimeout = 200 * time.Millisecond
enqueueWaitTimeout = 3 * time.Second
yieldPopTimeout = 200 * time.Millisecond
yieldWaitTimeout = 3 * time.Second
)
// scheduler abstraction to allow for easier unit testing
type SchedulerInterface interface {
sync.Locker
RLocker() sync.Locker
SlaveIndex
algorithm() PodScheduleFunc
createPodTask(api.Context, *api.Pod) (*PodTask, error)
getTask(taskId string) (task *PodTask, currentState stateType)
offers() OfferRegistry
registerPodTask(*PodTask, error) (*PodTask, error)
taskForPod(podID string) (taskID string, ok bool)
unregisterPodTask(*PodTask)
killTask(taskId string) error
launchTask(*PodTask) error
}
type k8smScheduler struct {
*KubernetesScheduler
}
func (k *k8smScheduler) algorithm() PodScheduleFunc {
return k.KubernetesScheduler.scheduleFunc
}
func (k *k8smScheduler) offers() OfferRegistry {
return k.KubernetesScheduler.offers
}
func (k *k8smScheduler) taskForPod(podID string) (taskID string, ok bool) {
// assume caller is holding scheduler lock
taskID, ok = k.podToTask[podID]
return
}
func (k *k8smScheduler) createPodTask(ctx api.Context, pod *api.Pod) (*PodTask, error) {
return newPodTask(ctx, pod, k.executor)
}
func (k *k8smScheduler) registerPodTask(task *PodTask, err error) (*PodTask, error) {
if err == nil {
// assume caller is holding scheduler lock
k.podToTask[task.podKey] = task.ID
k.pendingTasks[task.ID] = task
}
return task, err
}
func (k *k8smScheduler) slaveFor(id string) (slave *Slave, ok bool) {
slave, ok = k.slaves[id]
return
}
func (k *k8smScheduler) unregisterPodTask(task *PodTask) {
// assume caller is holding scheduler lock
delete(k.podToTask, task.podKey)
delete(k.pendingTasks, task.ID)
}
func (k *k8smScheduler) killTask(taskId string) error {
// assume caller is holding scheduler lock
killTaskId := newTaskID(taskId)
return k.KubernetesScheduler.driver.KillTask(killTaskId)
}
func (k *k8smScheduler) launchTask(task *PodTask) error {
// assume caller is holding scheduler lock
taskList := []*mesos.TaskInfo{task.TaskInfo}
return k.KubernetesScheduler.driver.LaunchTasks(task.Offer.Details().Id, taskList, nil)
}
type binder struct {
api SchedulerInterface
client *client.Client
}
// implements binding.Registry, launches the pod-associated-task in mesos
func (b *binder) Bind(binding *api.Binding) error {
ctx := api.WithNamespace(api.NewContext(), binding.Namespace)
// default upstream scheduler passes pod.Name as binding.PodID
podKey, err := makePodKey(ctx, binding.PodID)
if err != nil {
return err
}
b.api.Lock()
defer b.api.Unlock()
taskId, exists := b.api.taskForPod(podKey)
if !exists {
log.Infof("Could not resolve pod %s to task id", podKey)
return noSuchPodErr
}
switch task, state := b.api.getTask(taskId); state {
case statePending:
return b.bind(ctx, binding, task)
default:
// in this case it's likely that the pod has been deleted between Schedule
// and Bind calls
log.Infof("No pending task for pod %s", podKey)
return noSuchPodErr
}
}
// assumes that: caller has acquired scheduler lock and that the PodTask is still pending
func (b *binder) bind(ctx api.Context, binding *api.Binding, task *PodTask) (err error) {
// sanity check: ensure that the task hasAcceptedOffer(), it's possible that between
// Schedule() and now that the offer for this task was rescinded or invalidated.
// ((we should never see this here))
if !task.hasAcceptedOffer() {
return fmt.Errorf("task has not accepted a valid offer %v", task.ID)
}
// By this time, there is a chance that the slave is disconnected.
offerId := task.GetOfferId()
if offer, ok := b.api.offers().Get(offerId); !ok || offer.HasExpired() {
// already rescinded or timed out or otherwise invalidated
task.Offer.Release()
task.ClearTaskInfo()
return fmt.Errorf("failed prior to launchTask due to expired offer for task %v", task.ID)
}
if err = b.prepareTaskForLaunch(ctx, binding.Host, task); err == nil {
log.V(2).Infof("Attempting to bind %v to %v", binding.PodID, binding.Host)
if err = b.client.Post().Namespace(api.Namespace(ctx)).Resource("bindings").Body(binding).Do().Error(); err == nil {
log.V(2).Infof("launching task : %v", task)
if err = b.api.launchTask(task); err == nil {
b.api.offers().Invalidate(offerId)
task.Pod.Status.Host = binding.Host
task.launched = true
return
}
}
}
task.Offer.Release()
task.ClearTaskInfo()
return fmt.Errorf("Failed to launch task %v: %v", task.ID, err)
}
func (b *binder) prepareTaskForLaunch(ctx api.Context, machine string, task *PodTask) error {
pod, err := b.client.Pods(api.Namespace(ctx)).Get(task.Pod.Name)
if err != nil {
return err
}
//HACK(jdef): adapted from https://github.com/GoogleCloudPlatform/kubernetes/blob/release-0.6/pkg/registry/pod/bound_pod_factory.go
envVars, err := b.getServiceEnvironmentVariables(ctx)
if err != nil {
return err
}
boundPod := &api.BoundPod{}
if err := api.Scheme.Convert(pod, boundPod); err != nil {
return err
}
for ix, container := range boundPod.Spec.Containers {
boundPod.Spec.Containers[ix].Env = append(container.Env, envVars...)
}
// Make a dummy self link so that references to this bound pod will work.
boundPod.SelfLink = "/api/v1beta1/boundPods/" + boundPod.Name
// update the boundPod here to pick up things like environment variables that
// pod containers will use for service discovery. the kubelet-executor uses this
// boundPod to instantiate the pods and this is the last update we make before
// firing up the pod.
task.TaskInfo.Data, err = yaml.Marshal(&boundPod)
if err != nil {
log.V(2).Infof("Failed to marshal the updated boundPod")
return err
}
return nil
}
// getServiceEnvironmentVariables populates a list of environment variables that are use
// in the container environment to get access to services.
// HACK(jdef): adapted from https://github.com/GoogleCloudPlatform/kubernetes/blob/release-0.6/pkg/registry/pod/bound_pod_factory.go
func (b *binder) getServiceEnvironmentVariables(ctx api.Context) (result []api.EnvVar, err error) {
var services *api.ServiceList
if services, err = b.client.Services(api.Namespace(ctx)).List(labels.Everything()); err == nil {
result = envvars.FromServices(services)
}
return
}
type kubeScheduler struct {
api SchedulerInterface
podStore queue.FIFO
}
// Schedule implements the Scheduler interface of the Kubernetes.
// It returns the selectedMachine's name and error (if there's any).
func (k *kubeScheduler) Schedule(pod api.Pod, unused algorithm.MinionLister) (string, error) {
log.Infof("Try to schedule pod %v\n", pod.Name)
ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
// default upstream scheduler passes pod.Name as binding.PodID
podKey, err := makePodKey(ctx, pod.Name)
if err != nil {
return "", err
}
k.api.Lock()
defer k.api.Unlock()
if taskID, ok := k.api.taskForPod(podKey); !ok {
// There's a bit of a potential race here, a pod could have been yielded() but
// and then before we get *here* it could be deleted. We use meta to index the pod
// in the store since that's what k8s client/cache/reflector does.
meta, err := meta.Accessor(&pod)
if err != nil {
log.Warningf("aborting Schedule, unable to understand pod object %+v", &pod)
return "", noSuchPodErr
}
if deleted := k.podStore.Poll(meta.Name(), queue.DELETE_EVENT); deleted {
// avoid scheduling a pod that's been deleted between yieldPod() and Schedule()
log.Infof("aborting Schedule, pod has been deleted %+v", &pod)
return "", noSuchPodErr
}
return k.doSchedule(k.api.registerPodTask(k.api.createPodTask(ctx, &pod)))
} else {
switch task, state := k.api.getTask(taskID); state {
case statePending:
if task.launched {
return "", fmt.Errorf("task %s has already been launched, aborting schedule", taskID)
} else {
return k.doSchedule(task, nil)
}
default:
return "", fmt.Errorf("task %s is not pending, nothing to schedule", taskID)
}
}
}
// Call ScheduleFunc and subtract some resources, returning the name of the machine the task is scheduled on
func (k *kubeScheduler) doSchedule(task *PodTask, err error) (string, error) {
var offer PerishableOffer
if err == nil {
offer, err = k.api.algorithm()(k.api.offers(), k.api, task)
}
if err != nil {
return "", err
}
slaveId := offer.Details().GetSlaveId().GetValue()
if slave, ok := k.api.slaveFor(slaveId); !ok {
// not much sense in Release()ing the offer here since its owner died
offer.Release()
k.api.offers().Invalidate(offer.Details().Id.GetValue())
task.ClearTaskInfo()
return "", fmt.Errorf("Slave disappeared (%v) while scheduling task %v", slaveId, task.ID)
} else {
task.FillTaskInfo(offer)
return slave.HostName, nil
}
}
type queuer struct {
lock sync.Mutex // shared by condition variables of this struct
podStore queue.FIFO // cache of pod updates to be processed
podQueue *queue.DelayFIFO // queue of pods to be scheduled
deltaCond sync.Cond // pod changes are available for processing
unscheduledCond sync.Cond // there are unscheduled pods for processing
}
func newQueuer(store queue.FIFO) *queuer {
q := &queuer{
podQueue: queue.NewDelayFIFO(),
podStore: store,
}
q.deltaCond.L = &q.lock
q.unscheduledCond.L = &q.lock
return q
}
// signal that there are probably pod updates waiting to be processed
func (q *queuer) updatesAvailable() {
q.deltaCond.Broadcast()
}
// delete a pod from the to-be-scheduled queue
func (q *queuer) dequeue(id string) {
q.podQueue.Delete(id)
}
// re-add a pod to the to-be-scheduled queue, will not overwrite existing pod data (that
// may have already changed).
func (q *queuer) requeue(pod *Pod) {
// use KeepExisting in case the pod has already been updated (can happen if binding fails
// due to constraint voilations); we don't want to overwrite a newer entry with stale data.
q.podQueue.Add(pod, queue.KeepExisting)
q.unscheduledCond.Broadcast()
}
// spawns a go-routine to watch for unscheduled pods and queue them up
// for scheduling. returns immediately.
func (q *queuer) Run() {
go util.Forever(func() {
log.Info("Watching for newly created pods")
q.lock.Lock()
defer q.lock.Unlock()
for {
// limit blocking here for short intervals so that scheduling
// may proceed even if there have been no recent pod changes
p := q.podStore.Await(enqueuePopTimeout)
if p == nil {
signalled := make(chan struct{})
go func() {
defer close(signalled)
q.deltaCond.Wait()
}()
// we've yielded the lock
select {
case <-time.After(enqueueWaitTimeout):
q.deltaCond.Broadcast() // abort Wait()
<-signalled // wait for lock re-acquisition
log.V(4).Infoln("timed out waiting for a pod update")
case <-signalled:
// we've acquired the lock and there may be
// changes for us to process now
}
continue
}
pod := p.(*Pod)
if pod.Status.Host != "" {
q.dequeue(pod.GetUID())
} else {
// use ReplaceExisting because we are always pushing the latest state
now := time.Now()
pod.deadline = &now
q.podQueue.Offer(pod, queue.ReplaceExisting)
q.unscheduledCond.Broadcast()
log.V(3).Infof("queued pod for scheduling: %v", pod.Pod.Name)
}
}
}, 1*time.Second)
}
// implementation of scheduling plugin's NextPod func; see k8s plugin/pkg/scheduler
func (q *queuer) yield() *api.Pod {
log.V(2).Info("attempting to yield a pod")
q.lock.Lock()
defer q.lock.Unlock()
for {
// limit blocking here to short intervals so that we don't block the
// enqueuer Run() routine for very long
kpod := q.podQueue.Await(yieldPopTimeout)
if kpod == nil {
signalled := make(chan struct{})
go func() {
defer close(signalled)
q.unscheduledCond.Wait()
}()
// lock is yielded at this point and we're going to wait for either
// a timeout, or a signal that there's data
select {
case <-time.After(yieldWaitTimeout):
q.unscheduledCond.Broadcast() // abort Wait()
<-signalled // wait for the go-routine, and the lock
log.V(4).Infoln("timed out waiting for a pod to yield")
case <-signalled:
// we have acquired the lock, and there
// may be a pod for us to pop now
}
continue
}
pod := kpod.(*Pod).Pod
if meta, err := meta.Accessor(pod); err != nil {
log.Warningf("yield unable to understand pod object %+v, will skip", pod)
} else if !q.podStore.Poll(meta.Name(), queue.POP_EVENT) {
log.V(1).Infof("yield popped a transitioning pod, skipping: %+v", pod)
} else if pod.Status.Host != "" {
// should never happen if enqueuePods is filtering properly
log.Warningf("yield popped an already-scheduled pod, skipping: %+v", pod)
} else {
return pod
}
}
}
type errorHandler struct {
api SchedulerInterface
backoff *podBackoff
qr *queuer
}
// implementation of scheduling plugin's Error func; see plugin/pkg/scheduler
func (k *errorHandler) handleSchedulingError(pod *api.Pod, schedulingErr error) {
if schedulingErr == noSuchPodErr {
log.V(2).Infof("Not rescheduling non-existent pod %v", pod.Name)
return
}
log.Infof("Error scheduling %v: %v; retrying", pod.Name, schedulingErr)
defer util.HandleCrash()
// default upstream scheduler passes pod.Name as binding.PodID
ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
podKey, err := makePodKey(ctx, pod.Name)
if err != nil {
log.Errorf("Failed to construct pod key, aborting scheduling for pod %v: %v", pod.Name, err)
return
}
k.backoff.gc()
k.api.RLocker().Lock()
defer k.api.RLocker().Unlock()
taskId, exists := k.api.taskForPod(podKey)
if !exists {
// if we don't have a mapping here any more then someone deleted the pod
log.V(2).Infof("Could not resolve pod to task, aborting pod reschdule: %s", podKey)
return
}
switch task, state := k.api.getTask(taskId); state {
case statePending:
if task.launched {
log.V(2).Infof("Skipping re-scheduling for already-launched pod %v", podKey)
return
}
offersAvailable := queue.BreakChan(nil)
if schedulingErr == noSuitableOffersErr {
log.V(3).Infof("adding backoff breakout handler for pod %v", podKey)
offersAvailable = queue.BreakChan(k.api.offers().Listen(podKey, func(offer *mesos.Offer) bool {
k.api.RLocker().Lock()
defer k.api.RLocker().Unlock()
switch task, state := k.api.getTask(taskId); state {
case statePending:
return !task.launched && task.AcceptOffer(offer)
}
return false
}))
}
delay := k.backoff.getBackoff(podKey)
k.qr.requeue(&Pod{Pod: pod, delay: &delay, notify: offersAvailable})
default:
log.V(2).Infof("Task is no longer pending, aborting reschedule for pod %v", podKey)
}
}
type deleter struct {
api SchedulerInterface
qr *queuer
}
// currently monitors for "pod deleted" events, upon which handle()
// is invoked.
func (k *deleter) Run(updates <-chan queue.Entry) {
go util.Forever(func() {
for {
entry := <-updates
pod := entry.Value().(*Pod)
if entry.Is(queue.DELETE_EVENT) {
if err := k.deleteOne(pod); err != nil {
log.Error(err)
}
} else if !entry.Is(queue.POP_EVENT) {
k.qr.updatesAvailable()
}
}
}, 1*time.Second)
}
func (k *deleter) deleteOne(pod *Pod) error {
ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
podKey, err := makePodKey(ctx, pod.Name)
if err != nil {
return err
}
log.V(2).Infof("pod deleted: %v", podKey)
// order is important here: we want to make sure we have the lock before
// removing the pod from the scheduling queue. this makes the concurrent
// execution of scheduler-error-handling and delete-handling easier to
// reason about.
k.api.Lock()
defer k.api.Unlock()
// prevent the scheduler from attempting to pop this; it's also possible that
// it's concurrently being scheduled (somewhere between pod scheduling and
// binding) - if so, then we'll end up removing it from pendingTasks which
// will abort Bind()ing
k.qr.dequeue(pod.GetUID())
taskId, exists := k.api.taskForPod(podKey)
if !exists {
log.V(2).Infof("Could not resolve pod '%s' to task id", podKey)
return noSuchPodErr
}
// determine if the task has already been launched to mesos, if not then
// cleanup is easier (unregister) since there's no state to sync
switch task, state := k.api.getTask(taskId); state {
case statePending:
if !task.launched {
// we've been invoked in between Schedule() and Bind()
if task.hasAcceptedOffer() {
task.Offer.Release()
task.ClearTaskInfo()
}
k.api.unregisterPodTask(task)
return nil
}
fallthrough
case stateRunning:
// signal to watchers that the related pod is going down
task.deleted = true
task.Pod.Status.Host = ""
return k.api.killTask(taskId)
default:
log.Warningf("cannot kill pod '%s': task not found %v", podKey, taskId)
return noSuchTaskErr
}
}
// Create creates a scheduler plugin and all supporting background functions.
func (k *KubernetesScheduler) NewPluginConfig() *plugin.Config {
// Watch and queue pods that need scheduling.
updates := make(chan queue.Entry, defaultUpdatesBacklog)
podStore := &podStoreAdapter{queue.NewHistorical(updates)}
cache.NewReflector(createAllPodsLW(k.client), &api.Pod{}, podStore).Run()
// lock that guards critial sections that involve transferring pods from
// the store (cache) to the scheduling queue; its purpose is to maintain
// an ordering (vs interleaving) of operations that's easier to reason about.
kapi := &k8smScheduler{k}
q := newQueuer(podStore)
podDeleter := &deleter{
api: kapi,
qr: q,
}
podDeleter.Run(updates)
q.Run()
eh := &errorHandler{
api: kapi,
backoff: &podBackoff{
perPodBackoff: map[string]*backoffEntry{},
clock: realClock{},
},
qr: q,
}
return &plugin.Config{
MinionLister: nil,
Algorithm: &kubeScheduler{
api: kapi,
podStore: podStore,
},
Binder: &binder{
api: kapi,
client: k.client,
},
NextPod: q.yield,
Error: eh.handleSchedulingError,
}
}
type listWatch struct {
client *client.Client
fieldSelector labels.Selector
resource string
}
func (lw *listWatch) List() (runtime.Object, error) {
return lw.client.
Get().
Resource(lw.resource).
SelectorParam("fields", lw.fieldSelector).
Do().
Get()
}
func (lw *listWatch) Watch(resourceVersion string) (watch.Interface, error) {
return lw.client.
Get().
Prefix("watch").
Resource(lw.resource).
SelectorParam("fields", lw.fieldSelector).
Param("resourceVersion", resourceVersion).
Watch()
}
// createAllPodsLW returns a listWatch that finds all pods
func createAllPodsLW(cl *client.Client) *listWatch {
return &listWatch{
client: cl,
fieldSelector: labels.Everything(),
resource: "pods",
}
}
// Consumes *api.Pod, produces *Pod; the k8s reflector wants to push *api.Pod
// objects at us, but we want to store more flexible (Pod) type defined in
// this package. The adapter implementation facilitates this. It's a little
// hackish since the object type going in is different than the object type
// coming out -- you've been warned.
type podStoreAdapter struct {
queue.FIFO
}
func (psa *podStoreAdapter) Add(id string, obj interface{}) {
pod := obj.(*api.Pod)
psa.FIFO.Add(id, &Pod{Pod: pod})
}
func (psa *podStoreAdapter) | (id string, obj interface{}) {
pod := obj.(*api.Pod)
psa.FIFO.Update(id, &Pod{Pod: pod})
}
// Replace will delete the contents of the store, using instead the
// given map. This store implementation does NOT take ownership of the map.
func (psa *podStoreAdapter) Replace(idToObj map[string]interface{}) {
newmap := map[string]interface{}{}
for k, v := range idToObj {
pod := v.(*api.Pod)
newmap[k] = &Pod{Pod: pod}
}
psa.FIFO.Replace(newmap)
}
| Update | identifier_name |
lib.rs | //! This crate should eventually represent the structure at this repo:
//!
//! https://github.com/eth2-clients/eth2-testnets/tree/master/nimbus/testnet1
//!
//! It is not accurate at the moment, we include extra files and we also don't support a few
//! others. We are unable to conform to the repo until we have the following PR merged:
//!
//! https://github.com/sigp/lighthouse/pull/605
//!
use eth2_config::{testnets_dir, *};
use enr::{CombinedKey, Enr};
use ssz::Decode;
use std::fs::{create_dir_all, File};
use std::io::{Read, Write};
use std::path::PathBuf;
use types::{Address, BeaconState, EthSpec, EthSpecId, YamlConfig};
pub const ADDRESS_FILE: &str = "deposit_contract.txt";
pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt";
pub const BOOT_ENR_FILE: &str = "boot_enr.yaml";
pub const GENESIS_STATE_FILE: &str = "genesis.ssz";
pub const YAML_CONFIG_FILE: &str = "config.yaml";
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct HardcodedNet {
pub name: &'static str,
pub genesis_is_known: bool,
pub yaml_config: &'static [u8],
pub deploy_block: &'static [u8],
pub boot_enr: &'static [u8],
pub deposit_contract_address: &'static [u8],
pub genesis_state_bytes: &'static [u8],
}
macro_rules! define_net {
($mod: ident, $include_file: tt) => {{
use eth2_config::$mod::ETH2_NET_DIR;
HardcodedNet {
name: ETH2_NET_DIR.name,
genesis_is_known: ETH2_NET_DIR.genesis_is_known,
yaml_config: $include_file!("../", "config.yaml"),
deploy_block: $include_file!("../", "deploy_block.txt"),
boot_enr: $include_file!("../", "boot_enr.yaml"),
deposit_contract_address: $include_file!("../", "deposit_contract.txt"),
genesis_state_bytes: $include_file!("../", "genesis.ssz"),
}
}};
}
const ALTONA: HardcodedNet = define_net!(altona, include_altona_file);
const MEDALLA: HardcodedNet = define_net!(medalla, include_medalla_file);
const SPADINA: HardcodedNet = define_net!(spadina, include_spadina_file);
const PYRMONT: HardcodedNet = define_net!(pyrmont, include_pyrmont_file);
const MAINNET: HardcodedNet = define_net!(mainnet, include_mainnet_file);
const TOLEDO: HardcodedNet = define_net!(toledo, include_toledo_file);
const HARDCODED_NETS: &[HardcodedNet] = &[ALTONA, MEDALLA, SPADINA, PYRMONT, MAINNET, TOLEDO];
pub const DEFAULT_HARDCODED_TESTNET: &str = "medalla";
/// Specifies an Eth2 testnet.
///
/// See the crate-level documentation for more details.
#[derive(Clone, PartialEq, Debug)]
pub struct Eth2TestnetConfig {
pub deposit_contract_address: String,
/// Note: instead of the block where the contract is deployed, it is acceptable to set this
/// value to be the block number where the first deposit occurs.
pub deposit_contract_deploy_block: u64,
pub boot_enr: Option<Vec<Enr<CombinedKey>>>,
pub genesis_state_bytes: Option<Vec<u8>>,
pub yaml_config: Option<YamlConfig>,
}
impl Eth2TestnetConfig {
/// Returns the default hard coded testnet.
pub fn hard_coded_default() -> Result<Option<Self>, String> {
Self::constant(DEFAULT_HARDCODED_TESTNET)
}
/// When Lighthouse is built it includes zero or more "hardcoded" network specifications. This
/// function allows for instantiating one of these nets by name.
pub fn constant(name: &str) -> Result<Option<Self>, String> {
HARDCODED_NETS
.iter()
.find(|net| net.name == name)
.map(Self::from_hardcoded_net)
.transpose()
}
/// Instantiates `Self` from a `HardcodedNet`.
fn from_hardcoded_net(net: &HardcodedNet) -> Result<Self, String> {
Ok(Self {
deposit_contract_address: serde_yaml::from_reader(net.deposit_contract_address)
.map_err(|e| format!("Unable to parse contract address: {:?}", e))?,
deposit_contract_deploy_block: serde_yaml::from_reader(net.deploy_block)
.map_err(|e| format!("Unable to parse deploy block: {:?}", e))?,
boot_enr: Some(
serde_yaml::from_reader(net.boot_enr)
.map_err(|e| format!("Unable to parse boot enr: {:?}", e))?,
),
genesis_state_bytes: Some(net.genesis_state_bytes.to_vec())
.filter(|bytes| !bytes.is_empty()),
yaml_config: Some(
serde_yaml::from_reader(net.yaml_config)
.map_err(|e| format!("Unable to parse yaml config: {:?}", e))?,
),
})
}
/// Returns an identifier that should be used for selecting an `EthSpec` instance for this
/// testnet.
pub fn eth_spec_id(&self) -> Result<EthSpecId, String> {
self.yaml_config
.as_ref()
.ok_or_else(|| "YAML specification file missing".to_string())
.and_then(|config| {
config
.eth_spec_id()
.ok_or_else(|| format!("Unknown CONFIG_NAME: {}", config.config_name))
})
}
/// Returns `true` if this configuration contains a `BeaconState`.
pub fn beacon_state_is_known(&self) -> bool {
self.genesis_state_bytes.is_some()
}
/// Attempts to deserialize `self.beacon_state`, returning an error if it's missing or invalid.
pub fn beacon_state<E: EthSpec>(&self) -> Result<BeaconState<E>, String> {
let genesis_state_bytes = self
.genesis_state_bytes
.as_ref()
.ok_or_else(|| "Genesis state is unknown".to_string())?;
BeaconState::from_ssz_bytes(genesis_state_bytes)
.map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e))
}
/// Write the files to the directory.
///
/// Overwrites files if specified to do so.
pub fn write_to_file(&self, base_dir: PathBuf, overwrite: bool) -> Result<(), String> {
if base_dir.exists() && !overwrite {
return Err("Testnet directory already exists".to_string());
}
self.force_write_to_file(base_dir)
}
/// Write the files to the directory, even if the directory already exists.
pub fn force_write_to_file(&self, base_dir: PathBuf) -> Result<(), String> {
create_dir_all(&base_dir)
.map_err(|e| format!("Unable to create testnet directory: {:?}", e))?;
macro_rules! write_to_yaml_file {
($file: ident, $variable: expr) => {
File::create(base_dir.join($file))
.map_err(|e| format!("Unable to create {}: {:?}", $file, e))
.and_then(|mut file| {
let yaml = serde_yaml::to_string(&$variable)
.map_err(|e| format!("Unable to YAML encode {}: {:?}", $file, e))?;
// Remove the doc header from the YAML file.
//
// This allows us to play nice with other clients that are expecting
// plain-text, not YAML.
let no_doc_header = if yaml.starts_with("---\n") {
&yaml[4..]
} else {
&yaml
};
file.write_all(no_doc_header.as_bytes())
.map_err(|e| format!("Unable to write {}: {:?}", $file, e))
})?;
};
}
write_to_yaml_file!(ADDRESS_FILE, self.deposit_contract_address);
write_to_yaml_file!(DEPLOY_BLOCK_FILE, self.deposit_contract_deploy_block);
if let Some(boot_enr) = &self.boot_enr {
write_to_yaml_file!(BOOT_ENR_FILE, boot_enr);
}
if let Some(yaml_config) = &self.yaml_config {
write_to_yaml_file!(YAML_CONFIG_FILE, yaml_config);
}
// The genesis state is a special case because it uses SSZ, not YAML.
if let Some(genesis_state_bytes) = &self.genesis_state_bytes {
let file = base_dir.join(GENESIS_STATE_FILE);
File::create(&file)
.map_err(|e| format!("Unable to create {:?}: {:?}", file, e))
.and_then(|mut file| {
file.write_all(genesis_state_bytes)
.map_err(|e| format!("Unable to write {:?}: {:?}", file, e))
})?;
}
Ok(())
}
pub fn load(base_dir: PathBuf) -> Result<Self, String> {
macro_rules! load_from_file {
($file: ident) => {
File::open(base_dir.join($file))
.map_err(|e| format!("Unable to open {}: {:?}", $file, e))
.and_then(|file| {
serde_yaml::from_reader(file)
.map_err(|e| format!("Unable to parse {}: {:?}", $file, e))
})?;
};
}
macro_rules! optional_load_from_file {
($file: ident) => {
if base_dir.join($file).exists() {
Some(load_from_file!($file))
} else {
None
}
};
}
let deposit_contract_address = load_from_file!(ADDRESS_FILE);
let deposit_contract_deploy_block = load_from_file!(DEPLOY_BLOCK_FILE);
let boot_enr = optional_load_from_file!(BOOT_ENR_FILE);
let yaml_config = optional_load_from_file!(YAML_CONFIG_FILE);
// The genesis state is a special case because it uses SSZ, not YAML.
let genesis_file_path = base_dir.join(GENESIS_STATE_FILE);
let genesis_state_bytes = if genesis_file_path.exists() {
let mut bytes = vec![];
File::open(&genesis_file_path)
.map_err(|e| format!("Unable to open {:?}: {:?}", genesis_file_path, e))
.and_then(|mut file| {
file.read_to_end(&mut bytes)
.map_err(|e| format!("Unable to read {:?}: {:?}", file, e))
})?;
Some(bytes).filter(|bytes| !bytes.is_empty())
} else {
None
};
Ok(Self {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes,
yaml_config,
})
}
pub fn deposit_contract_address(&self) -> Result<Address, String> {
if self.deposit_contract_address.starts_with("0x") {
self.deposit_contract_address[2..]
.parse()
.map_err(|e| format!("Corrupted address, unable to parse: {:?}", e))
} else {
Err("Corrupted address, must start with 0x".to_string())
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use ssz::Encode;
use tempdir::TempDir;
use types::{Eth1Data, Hash256, MainnetEthSpec, V012LegacyEthSpec, YamlConfig};
type E = V012LegacyEthSpec;
#[test]
fn hard_coded_nets_work() {
for net in HARDCODED_NETS {
let config =
Eth2TestnetConfig::from_hardcoded_net(net).expect(&format!("{:?}", net.name));
if net.name == "mainnet" || net.name == "toledo" || net.name == "pyrmont" | else {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<V012LegacyEthSpec>(&E::default_spec())
.unwrap();
}
assert_eq!(
config.genesis_state_bytes.is_some(),
net.genesis_is_known,
"{:?}",
net.name
);
}
}
#[test]
fn round_trip() {
let spec = &E::default_spec();
let eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
deposit_count: 0,
block_hash: Hash256::zero(),
};
// TODO: figure out how to generate ENR and add some here.
let boot_enr = None;
let genesis_state = Some(BeaconState::new(42, eth1_data, spec));
let yaml_config = Some(YamlConfig::from_spec::<E>(spec));
do_test::<E>(boot_enr, genesis_state, yaml_config);
do_test::<E>(None, None, None);
}
fn do_test<E: EthSpec>(
boot_enr: Option<Vec<Enr<CombinedKey>>>,
genesis_state: Option<BeaconState<E>>,
yaml_config: Option<YamlConfig>,
) {
let temp_dir = TempDir::new("eth2_testnet_test").expect("should create temp dir");
let base_dir = temp_dir.path().join("my_testnet");
let deposit_contract_address = "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413".to_string();
let deposit_contract_deploy_block = 42;
let testnet: Eth2TestnetConfig = Eth2TestnetConfig {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes),
yaml_config,
};
testnet
.write_to_file(base_dir.clone(), false)
.expect("should write to file");
let decoded = Eth2TestnetConfig::load(base_dir).expect("should load struct");
assert_eq!(testnet, decoded, "should decode as encoded");
}
}
| {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<MainnetEthSpec>(&E::default_spec())
.unwrap();
} | conditional_block |
lib.rs | //! This crate should eventually represent the structure at this repo:
//!
//! https://github.com/eth2-clients/eth2-testnets/tree/master/nimbus/testnet1
//!
//! It is not accurate at the moment, we include extra files and we also don't support a few
//! others. We are unable to conform to the repo until we have the following PR merged:
//!
//! https://github.com/sigp/lighthouse/pull/605
//!
use eth2_config::{testnets_dir, *};
use enr::{CombinedKey, Enr};
use ssz::Decode;
use std::fs::{create_dir_all, File};
use std::io::{Read, Write};
use std::path::PathBuf;
use types::{Address, BeaconState, EthSpec, EthSpecId, YamlConfig};
pub const ADDRESS_FILE: &str = "deposit_contract.txt";
pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt";
pub const BOOT_ENR_FILE: &str = "boot_enr.yaml";
pub const GENESIS_STATE_FILE: &str = "genesis.ssz";
pub const YAML_CONFIG_FILE: &str = "config.yaml";
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct HardcodedNet {
pub name: &'static str,
pub genesis_is_known: bool,
pub yaml_config: &'static [u8],
pub deploy_block: &'static [u8],
pub boot_enr: &'static [u8],
pub deposit_contract_address: &'static [u8],
pub genesis_state_bytes: &'static [u8],
}
macro_rules! define_net {
($mod: ident, $include_file: tt) => {{
use eth2_config::$mod::ETH2_NET_DIR;
HardcodedNet {
name: ETH2_NET_DIR.name,
genesis_is_known: ETH2_NET_DIR.genesis_is_known,
yaml_config: $include_file!("../", "config.yaml"),
deploy_block: $include_file!("../", "deploy_block.txt"),
boot_enr: $include_file!("../", "boot_enr.yaml"),
deposit_contract_address: $include_file!("../", "deposit_contract.txt"),
genesis_state_bytes: $include_file!("../", "genesis.ssz"),
}
}};
}
const ALTONA: HardcodedNet = define_net!(altona, include_altona_file);
const MEDALLA: HardcodedNet = define_net!(medalla, include_medalla_file);
const SPADINA: HardcodedNet = define_net!(spadina, include_spadina_file);
const PYRMONT: HardcodedNet = define_net!(pyrmont, include_pyrmont_file);
const MAINNET: HardcodedNet = define_net!(mainnet, include_mainnet_file);
const TOLEDO: HardcodedNet = define_net!(toledo, include_toledo_file);
const HARDCODED_NETS: &[HardcodedNet] = &[ALTONA, MEDALLA, SPADINA, PYRMONT, MAINNET, TOLEDO];
pub const DEFAULT_HARDCODED_TESTNET: &str = "medalla";
/// Specifies an Eth2 testnet.
///
/// See the crate-level documentation for more details.
#[derive(Clone, PartialEq, Debug)]
pub struct Eth2TestnetConfig {
pub deposit_contract_address: String,
/// Note: instead of the block where the contract is deployed, it is acceptable to set this
/// value to be the block number where the first deposit occurs.
pub deposit_contract_deploy_block: u64,
pub boot_enr: Option<Vec<Enr<CombinedKey>>>,
pub genesis_state_bytes: Option<Vec<u8>>,
pub yaml_config: Option<YamlConfig>,
}
impl Eth2TestnetConfig {
/// Returns the default hard coded testnet.
pub fn hard_coded_default() -> Result<Option<Self>, String> {
Self::constant(DEFAULT_HARDCODED_TESTNET)
}
/// When Lighthouse is built it includes zero or more "hardcoded" network specifications. This
/// function allows for instantiating one of these nets by name.
pub fn constant(name: &str) -> Result<Option<Self>, String> {
HARDCODED_NETS
.iter()
.find(|net| net.name == name)
.map(Self::from_hardcoded_net)
.transpose()
}
/// Instantiates `Self` from a `HardcodedNet`.
fn from_hardcoded_net(net: &HardcodedNet) -> Result<Self, String> {
Ok(Self {
deposit_contract_address: serde_yaml::from_reader(net.deposit_contract_address)
.map_err(|e| format!("Unable to parse contract address: {:?}", e))?,
deposit_contract_deploy_block: serde_yaml::from_reader(net.deploy_block)
.map_err(|e| format!("Unable to parse deploy block: {:?}", e))?,
boot_enr: Some(
serde_yaml::from_reader(net.boot_enr)
.map_err(|e| format!("Unable to parse boot enr: {:?}", e))?,
),
genesis_state_bytes: Some(net.genesis_state_bytes.to_vec())
.filter(|bytes| !bytes.is_empty()),
yaml_config: Some(
serde_yaml::from_reader(net.yaml_config)
.map_err(|e| format!("Unable to parse yaml config: {:?}", e))?,
),
})
}
/// Returns an identifier that should be used for selecting an `EthSpec` instance for this
/// testnet.
pub fn eth_spec_id(&self) -> Result<EthSpecId, String> {
self.yaml_config
.as_ref()
.ok_or_else(|| "YAML specification file missing".to_string())
.and_then(|config| {
config
.eth_spec_id()
.ok_or_else(|| format!("Unknown CONFIG_NAME: {}", config.config_name))
})
}
/// Returns `true` if this configuration contains a `BeaconState`.
pub fn beacon_state_is_known(&self) -> bool {
self.genesis_state_bytes.is_some()
}
| .as_ref()
.ok_or_else(|| "Genesis state is unknown".to_string())?;
BeaconState::from_ssz_bytes(genesis_state_bytes)
.map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e))
}
/// Write the files to the directory.
///
/// Overwrites files if specified to do so.
pub fn write_to_file(&self, base_dir: PathBuf, overwrite: bool) -> Result<(), String> {
if base_dir.exists() && !overwrite {
return Err("Testnet directory already exists".to_string());
}
self.force_write_to_file(base_dir)
}
/// Write the files to the directory, even if the directory already exists.
pub fn force_write_to_file(&self, base_dir: PathBuf) -> Result<(), String> {
create_dir_all(&base_dir)
.map_err(|e| format!("Unable to create testnet directory: {:?}", e))?;
macro_rules! write_to_yaml_file {
($file: ident, $variable: expr) => {
File::create(base_dir.join($file))
.map_err(|e| format!("Unable to create {}: {:?}", $file, e))
.and_then(|mut file| {
let yaml = serde_yaml::to_string(&$variable)
.map_err(|e| format!("Unable to YAML encode {}: {:?}", $file, e))?;
// Remove the doc header from the YAML file.
//
// This allows us to play nice with other clients that are expecting
// plain-text, not YAML.
let no_doc_header = if yaml.starts_with("---\n") {
&yaml[4..]
} else {
&yaml
};
file.write_all(no_doc_header.as_bytes())
.map_err(|e| format!("Unable to write {}: {:?}", $file, e))
})?;
};
}
write_to_yaml_file!(ADDRESS_FILE, self.deposit_contract_address);
write_to_yaml_file!(DEPLOY_BLOCK_FILE, self.deposit_contract_deploy_block);
if let Some(boot_enr) = &self.boot_enr {
write_to_yaml_file!(BOOT_ENR_FILE, boot_enr);
}
if let Some(yaml_config) = &self.yaml_config {
write_to_yaml_file!(YAML_CONFIG_FILE, yaml_config);
}
// The genesis state is a special case because it uses SSZ, not YAML.
if let Some(genesis_state_bytes) = &self.genesis_state_bytes {
let file = base_dir.join(GENESIS_STATE_FILE);
File::create(&file)
.map_err(|e| format!("Unable to create {:?}: {:?}", file, e))
.and_then(|mut file| {
file.write_all(genesis_state_bytes)
.map_err(|e| format!("Unable to write {:?}: {:?}", file, e))
})?;
}
Ok(())
}
pub fn load(base_dir: PathBuf) -> Result<Self, String> {
macro_rules! load_from_file {
($file: ident) => {
File::open(base_dir.join($file))
.map_err(|e| format!("Unable to open {}: {:?}", $file, e))
.and_then(|file| {
serde_yaml::from_reader(file)
.map_err(|e| format!("Unable to parse {}: {:?}", $file, e))
})?;
};
}
macro_rules! optional_load_from_file {
($file: ident) => {
if base_dir.join($file).exists() {
Some(load_from_file!($file))
} else {
None
}
};
}
let deposit_contract_address = load_from_file!(ADDRESS_FILE);
let deposit_contract_deploy_block = load_from_file!(DEPLOY_BLOCK_FILE);
let boot_enr = optional_load_from_file!(BOOT_ENR_FILE);
let yaml_config = optional_load_from_file!(YAML_CONFIG_FILE);
// The genesis state is a special case because it uses SSZ, not YAML.
let genesis_file_path = base_dir.join(GENESIS_STATE_FILE);
let genesis_state_bytes = if genesis_file_path.exists() {
let mut bytes = vec![];
File::open(&genesis_file_path)
.map_err(|e| format!("Unable to open {:?}: {:?}", genesis_file_path, e))
.and_then(|mut file| {
file.read_to_end(&mut bytes)
.map_err(|e| format!("Unable to read {:?}: {:?}", file, e))
})?;
Some(bytes).filter(|bytes| !bytes.is_empty())
} else {
None
};
Ok(Self {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes,
yaml_config,
})
}
pub fn deposit_contract_address(&self) -> Result<Address, String> {
if self.deposit_contract_address.starts_with("0x") {
self.deposit_contract_address[2..]
.parse()
.map_err(|e| format!("Corrupted address, unable to parse: {:?}", e))
} else {
Err("Corrupted address, must start with 0x".to_string())
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use ssz::Encode;
use tempdir::TempDir;
use types::{Eth1Data, Hash256, MainnetEthSpec, V012LegacyEthSpec, YamlConfig};
type E = V012LegacyEthSpec;
#[test]
fn hard_coded_nets_work() {
for net in HARDCODED_NETS {
let config =
Eth2TestnetConfig::from_hardcoded_net(net).expect(&format!("{:?}", net.name));
if net.name == "mainnet" || net.name == "toledo" || net.name == "pyrmont" {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<MainnetEthSpec>(&E::default_spec())
.unwrap();
} else {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<V012LegacyEthSpec>(&E::default_spec())
.unwrap();
}
assert_eq!(
config.genesis_state_bytes.is_some(),
net.genesis_is_known,
"{:?}",
net.name
);
}
}
#[test]
fn round_trip() {
let spec = &E::default_spec();
let eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
deposit_count: 0,
block_hash: Hash256::zero(),
};
// TODO: figure out how to generate ENR and add some here.
let boot_enr = None;
let genesis_state = Some(BeaconState::new(42, eth1_data, spec));
let yaml_config = Some(YamlConfig::from_spec::<E>(spec));
do_test::<E>(boot_enr, genesis_state, yaml_config);
do_test::<E>(None, None, None);
}
fn do_test<E: EthSpec>(
boot_enr: Option<Vec<Enr<CombinedKey>>>,
genesis_state: Option<BeaconState<E>>,
yaml_config: Option<YamlConfig>,
) {
let temp_dir = TempDir::new("eth2_testnet_test").expect("should create temp dir");
let base_dir = temp_dir.path().join("my_testnet");
let deposit_contract_address = "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413".to_string();
let deposit_contract_deploy_block = 42;
let testnet: Eth2TestnetConfig = Eth2TestnetConfig {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes),
yaml_config,
};
testnet
.write_to_file(base_dir.clone(), false)
.expect("should write to file");
let decoded = Eth2TestnetConfig::load(base_dir).expect("should load struct");
assert_eq!(testnet, decoded, "should decode as encoded");
}
} | /// Attempts to deserialize `self.beacon_state`, returning an error if it's missing or invalid.
pub fn beacon_state<E: EthSpec>(&self) -> Result<BeaconState<E>, String> {
let genesis_state_bytes = self
.genesis_state_bytes | random_line_split |
lib.rs | //! This crate should eventually represent the structure at this repo:
//!
//! https://github.com/eth2-clients/eth2-testnets/tree/master/nimbus/testnet1
//!
//! It is not accurate at the moment, we include extra files and we also don't support a few
//! others. We are unable to conform to the repo until we have the following PR merged:
//!
//! https://github.com/sigp/lighthouse/pull/605
//!
use eth2_config::{testnets_dir, *};
use enr::{CombinedKey, Enr};
use ssz::Decode;
use std::fs::{create_dir_all, File};
use std::io::{Read, Write};
use std::path::PathBuf;
use types::{Address, BeaconState, EthSpec, EthSpecId, YamlConfig};
pub const ADDRESS_FILE: &str = "deposit_contract.txt";
pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt";
pub const BOOT_ENR_FILE: &str = "boot_enr.yaml";
pub const GENESIS_STATE_FILE: &str = "genesis.ssz";
pub const YAML_CONFIG_FILE: &str = "config.yaml";
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct HardcodedNet {
pub name: &'static str,
pub genesis_is_known: bool,
pub yaml_config: &'static [u8],
pub deploy_block: &'static [u8],
pub boot_enr: &'static [u8],
pub deposit_contract_address: &'static [u8],
pub genesis_state_bytes: &'static [u8],
}
macro_rules! define_net {
($mod: ident, $include_file: tt) => {{
use eth2_config::$mod::ETH2_NET_DIR;
HardcodedNet {
name: ETH2_NET_DIR.name,
genesis_is_known: ETH2_NET_DIR.genesis_is_known,
yaml_config: $include_file!("../", "config.yaml"),
deploy_block: $include_file!("../", "deploy_block.txt"),
boot_enr: $include_file!("../", "boot_enr.yaml"),
deposit_contract_address: $include_file!("../", "deposit_contract.txt"),
genesis_state_bytes: $include_file!("../", "genesis.ssz"),
}
}};
}
const ALTONA: HardcodedNet = define_net!(altona, include_altona_file);
const MEDALLA: HardcodedNet = define_net!(medalla, include_medalla_file);
const SPADINA: HardcodedNet = define_net!(spadina, include_spadina_file);
const PYRMONT: HardcodedNet = define_net!(pyrmont, include_pyrmont_file);
const MAINNET: HardcodedNet = define_net!(mainnet, include_mainnet_file);
const TOLEDO: HardcodedNet = define_net!(toledo, include_toledo_file);
const HARDCODED_NETS: &[HardcodedNet] = &[ALTONA, MEDALLA, SPADINA, PYRMONT, MAINNET, TOLEDO];
pub const DEFAULT_HARDCODED_TESTNET: &str = "medalla";
/// Specifies an Eth2 testnet.
///
/// See the crate-level documentation for more details.
#[derive(Clone, PartialEq, Debug)]
pub struct Eth2TestnetConfig {
pub deposit_contract_address: String,
/// Note: instead of the block where the contract is deployed, it is acceptable to set this
/// value to be the block number where the first deposit occurs.
pub deposit_contract_deploy_block: u64,
pub boot_enr: Option<Vec<Enr<CombinedKey>>>,
pub genesis_state_bytes: Option<Vec<u8>>,
pub yaml_config: Option<YamlConfig>,
}
impl Eth2TestnetConfig {
/// Returns the default hard coded testnet.
pub fn hard_coded_default() -> Result<Option<Self>, String> {
Self::constant(DEFAULT_HARDCODED_TESTNET)
}
/// When Lighthouse is built it includes zero or more "hardcoded" network specifications. This
/// function allows for instantiating one of these nets by name.
pub fn constant(name: &str) -> Result<Option<Self>, String> {
HARDCODED_NETS
.iter()
.find(|net| net.name == name)
.map(Self::from_hardcoded_net)
.transpose()
}
/// Instantiates `Self` from a `HardcodedNet`.
fn from_hardcoded_net(net: &HardcodedNet) -> Result<Self, String> {
Ok(Self {
deposit_contract_address: serde_yaml::from_reader(net.deposit_contract_address)
.map_err(|e| format!("Unable to parse contract address: {:?}", e))?,
deposit_contract_deploy_block: serde_yaml::from_reader(net.deploy_block)
.map_err(|e| format!("Unable to parse deploy block: {:?}", e))?,
boot_enr: Some(
serde_yaml::from_reader(net.boot_enr)
.map_err(|e| format!("Unable to parse boot enr: {:?}", e))?,
),
genesis_state_bytes: Some(net.genesis_state_bytes.to_vec())
.filter(|bytes| !bytes.is_empty()),
yaml_config: Some(
serde_yaml::from_reader(net.yaml_config)
.map_err(|e| format!("Unable to parse yaml config: {:?}", e))?,
),
})
}
/// Returns an identifier that should be used for selecting an `EthSpec` instance for this
/// testnet.
pub fn eth_spec_id(&self) -> Result<EthSpecId, String> {
self.yaml_config
.as_ref()
.ok_or_else(|| "YAML specification file missing".to_string())
.and_then(|config| {
config
.eth_spec_id()
.ok_or_else(|| format!("Unknown CONFIG_NAME: {}", config.config_name))
})
}
/// Returns `true` if this configuration contains a `BeaconState`.
pub fn beacon_state_is_known(&self) -> bool {
self.genesis_state_bytes.is_some()
}
/// Attempts to deserialize `self.beacon_state`, returning an error if it's missing or invalid.
pub fn beacon_state<E: EthSpec>(&self) -> Result<BeaconState<E>, String> {
let genesis_state_bytes = self
.genesis_state_bytes
.as_ref()
.ok_or_else(|| "Genesis state is unknown".to_string())?;
BeaconState::from_ssz_bytes(genesis_state_bytes)
.map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e))
}
/// Write the files to the directory.
///
/// Overwrites files if specified to do so.
pub fn | (&self, base_dir: PathBuf, overwrite: bool) -> Result<(), String> {
if base_dir.exists() && !overwrite {
return Err("Testnet directory already exists".to_string());
}
self.force_write_to_file(base_dir)
}
/// Write the files to the directory, even if the directory already exists.
pub fn force_write_to_file(&self, base_dir: PathBuf) -> Result<(), String> {
create_dir_all(&base_dir)
.map_err(|e| format!("Unable to create testnet directory: {:?}", e))?;
macro_rules! write_to_yaml_file {
($file: ident, $variable: expr) => {
File::create(base_dir.join($file))
.map_err(|e| format!("Unable to create {}: {:?}", $file, e))
.and_then(|mut file| {
let yaml = serde_yaml::to_string(&$variable)
.map_err(|e| format!("Unable to YAML encode {}: {:?}", $file, e))?;
// Remove the doc header from the YAML file.
//
// This allows us to play nice with other clients that are expecting
// plain-text, not YAML.
let no_doc_header = if yaml.starts_with("---\n") {
&yaml[4..]
} else {
&yaml
};
file.write_all(no_doc_header.as_bytes())
.map_err(|e| format!("Unable to write {}: {:?}", $file, e))
})?;
};
}
write_to_yaml_file!(ADDRESS_FILE, self.deposit_contract_address);
write_to_yaml_file!(DEPLOY_BLOCK_FILE, self.deposit_contract_deploy_block);
if let Some(boot_enr) = &self.boot_enr {
write_to_yaml_file!(BOOT_ENR_FILE, boot_enr);
}
if let Some(yaml_config) = &self.yaml_config {
write_to_yaml_file!(YAML_CONFIG_FILE, yaml_config);
}
// The genesis state is a special case because it uses SSZ, not YAML.
if let Some(genesis_state_bytes) = &self.genesis_state_bytes {
let file = base_dir.join(GENESIS_STATE_FILE);
File::create(&file)
.map_err(|e| format!("Unable to create {:?}: {:?}", file, e))
.and_then(|mut file| {
file.write_all(genesis_state_bytes)
.map_err(|e| format!("Unable to write {:?}: {:?}", file, e))
})?;
}
Ok(())
}
pub fn load(base_dir: PathBuf) -> Result<Self, String> {
macro_rules! load_from_file {
($file: ident) => {
File::open(base_dir.join($file))
.map_err(|e| format!("Unable to open {}: {:?}", $file, e))
.and_then(|file| {
serde_yaml::from_reader(file)
.map_err(|e| format!("Unable to parse {}: {:?}", $file, e))
})?;
};
}
macro_rules! optional_load_from_file {
($file: ident) => {
if base_dir.join($file).exists() {
Some(load_from_file!($file))
} else {
None
}
};
}
let deposit_contract_address = load_from_file!(ADDRESS_FILE);
let deposit_contract_deploy_block = load_from_file!(DEPLOY_BLOCK_FILE);
let boot_enr = optional_load_from_file!(BOOT_ENR_FILE);
let yaml_config = optional_load_from_file!(YAML_CONFIG_FILE);
// The genesis state is a special case because it uses SSZ, not YAML.
let genesis_file_path = base_dir.join(GENESIS_STATE_FILE);
let genesis_state_bytes = if genesis_file_path.exists() {
let mut bytes = vec![];
File::open(&genesis_file_path)
.map_err(|e| format!("Unable to open {:?}: {:?}", genesis_file_path, e))
.and_then(|mut file| {
file.read_to_end(&mut bytes)
.map_err(|e| format!("Unable to read {:?}: {:?}", file, e))
})?;
Some(bytes).filter(|bytes| !bytes.is_empty())
} else {
None
};
Ok(Self {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes,
yaml_config,
})
}
pub fn deposit_contract_address(&self) -> Result<Address, String> {
if self.deposit_contract_address.starts_with("0x") {
self.deposit_contract_address[2..]
.parse()
.map_err(|e| format!("Corrupted address, unable to parse: {:?}", e))
} else {
Err("Corrupted address, must start with 0x".to_string())
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use ssz::Encode;
use tempdir::TempDir;
use types::{Eth1Data, Hash256, MainnetEthSpec, V012LegacyEthSpec, YamlConfig};
type E = V012LegacyEthSpec;
#[test]
fn hard_coded_nets_work() {
for net in HARDCODED_NETS {
let config =
Eth2TestnetConfig::from_hardcoded_net(net).expect(&format!("{:?}", net.name));
if net.name == "mainnet" || net.name == "toledo" || net.name == "pyrmont" {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<MainnetEthSpec>(&E::default_spec())
.unwrap();
} else {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<V012LegacyEthSpec>(&E::default_spec())
.unwrap();
}
assert_eq!(
config.genesis_state_bytes.is_some(),
net.genesis_is_known,
"{:?}",
net.name
);
}
}
#[test]
fn round_trip() {
let spec = &E::default_spec();
let eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
deposit_count: 0,
block_hash: Hash256::zero(),
};
// TODO: figure out how to generate ENR and add some here.
let boot_enr = None;
let genesis_state = Some(BeaconState::new(42, eth1_data, spec));
let yaml_config = Some(YamlConfig::from_spec::<E>(spec));
do_test::<E>(boot_enr, genesis_state, yaml_config);
do_test::<E>(None, None, None);
}
fn do_test<E: EthSpec>(
boot_enr: Option<Vec<Enr<CombinedKey>>>,
genesis_state: Option<BeaconState<E>>,
yaml_config: Option<YamlConfig>,
) {
let temp_dir = TempDir::new("eth2_testnet_test").expect("should create temp dir");
let base_dir = temp_dir.path().join("my_testnet");
let deposit_contract_address = "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413".to_string();
let deposit_contract_deploy_block = 42;
let testnet: Eth2TestnetConfig = Eth2TestnetConfig {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes),
yaml_config,
};
testnet
.write_to_file(base_dir.clone(), false)
.expect("should write to file");
let decoded = Eth2TestnetConfig::load(base_dir).expect("should load struct");
assert_eq!(testnet, decoded, "should decode as encoded");
}
}
| write_to_file | identifier_name |
lib.rs | //! This crate should eventually represent the structure at this repo:
//!
//! https://github.com/eth2-clients/eth2-testnets/tree/master/nimbus/testnet1
//!
//! It is not accurate at the moment, we include extra files and we also don't support a few
//! others. We are unable to conform to the repo until we have the following PR merged:
//!
//! https://github.com/sigp/lighthouse/pull/605
//!
use eth2_config::{testnets_dir, *};
use enr::{CombinedKey, Enr};
use ssz::Decode;
use std::fs::{create_dir_all, File};
use std::io::{Read, Write};
use std::path::PathBuf;
use types::{Address, BeaconState, EthSpec, EthSpecId, YamlConfig};
pub const ADDRESS_FILE: &str = "deposit_contract.txt";
pub const DEPLOY_BLOCK_FILE: &str = "deploy_block.txt";
pub const BOOT_ENR_FILE: &str = "boot_enr.yaml";
pub const GENESIS_STATE_FILE: &str = "genesis.ssz";
pub const YAML_CONFIG_FILE: &str = "config.yaml";
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct HardcodedNet {
pub name: &'static str,
pub genesis_is_known: bool,
pub yaml_config: &'static [u8],
pub deploy_block: &'static [u8],
pub boot_enr: &'static [u8],
pub deposit_contract_address: &'static [u8],
pub genesis_state_bytes: &'static [u8],
}
macro_rules! define_net {
($mod: ident, $include_file: tt) => {{
use eth2_config::$mod::ETH2_NET_DIR;
HardcodedNet {
name: ETH2_NET_DIR.name,
genesis_is_known: ETH2_NET_DIR.genesis_is_known,
yaml_config: $include_file!("../", "config.yaml"),
deploy_block: $include_file!("../", "deploy_block.txt"),
boot_enr: $include_file!("../", "boot_enr.yaml"),
deposit_contract_address: $include_file!("../", "deposit_contract.txt"),
genesis_state_bytes: $include_file!("../", "genesis.ssz"),
}
}};
}
const ALTONA: HardcodedNet = define_net!(altona, include_altona_file);
const MEDALLA: HardcodedNet = define_net!(medalla, include_medalla_file);
const SPADINA: HardcodedNet = define_net!(spadina, include_spadina_file);
const PYRMONT: HardcodedNet = define_net!(pyrmont, include_pyrmont_file);
const MAINNET: HardcodedNet = define_net!(mainnet, include_mainnet_file);
const TOLEDO: HardcodedNet = define_net!(toledo, include_toledo_file);
const HARDCODED_NETS: &[HardcodedNet] = &[ALTONA, MEDALLA, SPADINA, PYRMONT, MAINNET, TOLEDO];
pub const DEFAULT_HARDCODED_TESTNET: &str = "medalla";
/// Specifies an Eth2 testnet.
///
/// See the crate-level documentation for more details.
#[derive(Clone, PartialEq, Debug)]
pub struct Eth2TestnetConfig {
pub deposit_contract_address: String,
/// Note: instead of the block where the contract is deployed, it is acceptable to set this
/// value to be the block number where the first deposit occurs.
pub deposit_contract_deploy_block: u64,
pub boot_enr: Option<Vec<Enr<CombinedKey>>>,
pub genesis_state_bytes: Option<Vec<u8>>,
pub yaml_config: Option<YamlConfig>,
}
impl Eth2TestnetConfig {
/// Returns the default hard coded testnet.
pub fn hard_coded_default() -> Result<Option<Self>, String> {
Self::constant(DEFAULT_HARDCODED_TESTNET)
}
/// When Lighthouse is built it includes zero or more "hardcoded" network specifications. This
/// function allows for instantiating one of these nets by name.
pub fn constant(name: &str) -> Result<Option<Self>, String> {
HARDCODED_NETS
.iter()
.find(|net| net.name == name)
.map(Self::from_hardcoded_net)
.transpose()
}
/// Instantiates `Self` from a `HardcodedNet`.
fn from_hardcoded_net(net: &HardcodedNet) -> Result<Self, String> {
Ok(Self {
deposit_contract_address: serde_yaml::from_reader(net.deposit_contract_address)
.map_err(|e| format!("Unable to parse contract address: {:?}", e))?,
deposit_contract_deploy_block: serde_yaml::from_reader(net.deploy_block)
.map_err(|e| format!("Unable to parse deploy block: {:?}", e))?,
boot_enr: Some(
serde_yaml::from_reader(net.boot_enr)
.map_err(|e| format!("Unable to parse boot enr: {:?}", e))?,
),
genesis_state_bytes: Some(net.genesis_state_bytes.to_vec())
.filter(|bytes| !bytes.is_empty()),
yaml_config: Some(
serde_yaml::from_reader(net.yaml_config)
.map_err(|e| format!("Unable to parse yaml config: {:?}", e))?,
),
})
}
/// Returns an identifier that should be used for selecting an `EthSpec` instance for this
/// testnet.
pub fn eth_spec_id(&self) -> Result<EthSpecId, String> {
self.yaml_config
.as_ref()
.ok_or_else(|| "YAML specification file missing".to_string())
.and_then(|config| {
config
.eth_spec_id()
.ok_or_else(|| format!("Unknown CONFIG_NAME: {}", config.config_name))
})
}
/// Returns `true` if this configuration contains a `BeaconState`.
pub fn beacon_state_is_known(&self) -> bool |
/// Attempts to deserialize `self.beacon_state`, returning an error if it's missing or invalid.
pub fn beacon_state<E: EthSpec>(&self) -> Result<BeaconState<E>, String> {
let genesis_state_bytes = self
.genesis_state_bytes
.as_ref()
.ok_or_else(|| "Genesis state is unknown".to_string())?;
BeaconState::from_ssz_bytes(genesis_state_bytes)
.map_err(|e| format!("Genesis state SSZ bytes are invalid: {:?}", e))
}
/// Write the files to the directory.
///
/// Overwrites files if specified to do so.
pub fn write_to_file(&self, base_dir: PathBuf, overwrite: bool) -> Result<(), String> {
if base_dir.exists() && !overwrite {
return Err("Testnet directory already exists".to_string());
}
self.force_write_to_file(base_dir)
}
/// Write the files to the directory, even if the directory already exists.
pub fn force_write_to_file(&self, base_dir: PathBuf) -> Result<(), String> {
create_dir_all(&base_dir)
.map_err(|e| format!("Unable to create testnet directory: {:?}", e))?;
macro_rules! write_to_yaml_file {
($file: ident, $variable: expr) => {
File::create(base_dir.join($file))
.map_err(|e| format!("Unable to create {}: {:?}", $file, e))
.and_then(|mut file| {
let yaml = serde_yaml::to_string(&$variable)
.map_err(|e| format!("Unable to YAML encode {}: {:?}", $file, e))?;
// Remove the doc header from the YAML file.
//
// This allows us to play nice with other clients that are expecting
// plain-text, not YAML.
let no_doc_header = if yaml.starts_with("---\n") {
&yaml[4..]
} else {
&yaml
};
file.write_all(no_doc_header.as_bytes())
.map_err(|e| format!("Unable to write {}: {:?}", $file, e))
})?;
};
}
write_to_yaml_file!(ADDRESS_FILE, self.deposit_contract_address);
write_to_yaml_file!(DEPLOY_BLOCK_FILE, self.deposit_contract_deploy_block);
if let Some(boot_enr) = &self.boot_enr {
write_to_yaml_file!(BOOT_ENR_FILE, boot_enr);
}
if let Some(yaml_config) = &self.yaml_config {
write_to_yaml_file!(YAML_CONFIG_FILE, yaml_config);
}
// The genesis state is a special case because it uses SSZ, not YAML.
if let Some(genesis_state_bytes) = &self.genesis_state_bytes {
let file = base_dir.join(GENESIS_STATE_FILE);
File::create(&file)
.map_err(|e| format!("Unable to create {:?}: {:?}", file, e))
.and_then(|mut file| {
file.write_all(genesis_state_bytes)
.map_err(|e| format!("Unable to write {:?}: {:?}", file, e))
})?;
}
Ok(())
}
pub fn load(base_dir: PathBuf) -> Result<Self, String> {
macro_rules! load_from_file {
($file: ident) => {
File::open(base_dir.join($file))
.map_err(|e| format!("Unable to open {}: {:?}", $file, e))
.and_then(|file| {
serde_yaml::from_reader(file)
.map_err(|e| format!("Unable to parse {}: {:?}", $file, e))
})?;
};
}
macro_rules! optional_load_from_file {
($file: ident) => {
if base_dir.join($file).exists() {
Some(load_from_file!($file))
} else {
None
}
};
}
let deposit_contract_address = load_from_file!(ADDRESS_FILE);
let deposit_contract_deploy_block = load_from_file!(DEPLOY_BLOCK_FILE);
let boot_enr = optional_load_from_file!(BOOT_ENR_FILE);
let yaml_config = optional_load_from_file!(YAML_CONFIG_FILE);
// The genesis state is a special case because it uses SSZ, not YAML.
let genesis_file_path = base_dir.join(GENESIS_STATE_FILE);
let genesis_state_bytes = if genesis_file_path.exists() {
let mut bytes = vec![];
File::open(&genesis_file_path)
.map_err(|e| format!("Unable to open {:?}: {:?}", genesis_file_path, e))
.and_then(|mut file| {
file.read_to_end(&mut bytes)
.map_err(|e| format!("Unable to read {:?}: {:?}", file, e))
})?;
Some(bytes).filter(|bytes| !bytes.is_empty())
} else {
None
};
Ok(Self {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes,
yaml_config,
})
}
pub fn deposit_contract_address(&self) -> Result<Address, String> {
if self.deposit_contract_address.starts_with("0x") {
self.deposit_contract_address[2..]
.parse()
.map_err(|e| format!("Corrupted address, unable to parse: {:?}", e))
} else {
Err("Corrupted address, must start with 0x".to_string())
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use ssz::Encode;
use tempdir::TempDir;
use types::{Eth1Data, Hash256, MainnetEthSpec, V012LegacyEthSpec, YamlConfig};
type E = V012LegacyEthSpec;
#[test]
fn hard_coded_nets_work() {
for net in HARDCODED_NETS {
let config =
Eth2TestnetConfig::from_hardcoded_net(net).expect(&format!("{:?}", net.name));
if net.name == "mainnet" || net.name == "toledo" || net.name == "pyrmont" {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<MainnetEthSpec>(&E::default_spec())
.unwrap();
} else {
// Ensure we can parse the YAML config to a chain spec.
config
.yaml_config
.as_ref()
.unwrap()
.apply_to_chain_spec::<V012LegacyEthSpec>(&E::default_spec())
.unwrap();
}
assert_eq!(
config.genesis_state_bytes.is_some(),
net.genesis_is_known,
"{:?}",
net.name
);
}
}
#[test]
fn round_trip() {
let spec = &E::default_spec();
let eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
deposit_count: 0,
block_hash: Hash256::zero(),
};
// TODO: figure out how to generate ENR and add some here.
let boot_enr = None;
let genesis_state = Some(BeaconState::new(42, eth1_data, spec));
let yaml_config = Some(YamlConfig::from_spec::<E>(spec));
do_test::<E>(boot_enr, genesis_state, yaml_config);
do_test::<E>(None, None, None);
}
fn do_test<E: EthSpec>(
boot_enr: Option<Vec<Enr<CombinedKey>>>,
genesis_state: Option<BeaconState<E>>,
yaml_config: Option<YamlConfig>,
) {
let temp_dir = TempDir::new("eth2_testnet_test").expect("should create temp dir");
let base_dir = temp_dir.path().join("my_testnet");
let deposit_contract_address = "0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413".to_string();
let deposit_contract_deploy_block = 42;
let testnet: Eth2TestnetConfig = Eth2TestnetConfig {
deposit_contract_address,
deposit_contract_deploy_block,
boot_enr,
genesis_state_bytes: genesis_state.as_ref().map(Encode::as_ssz_bytes),
yaml_config,
};
testnet
.write_to_file(base_dir.clone(), false)
.expect("should write to file");
let decoded = Eth2TestnetConfig::load(base_dir).expect("should load struct");
assert_eq!(testnet, decoded, "should decode as encoded");
}
}
| {
self.genesis_state_bytes.is_some()
} | identifier_body |
input_selection_wb_test.go | // Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package votingpool
import (
"bytes"
"reflect"
"sort"
"testing"
"github.com/gcash/bchd/chaincfg/chainhash"
"github.com/gcash/bchd/wire"
"github.com/gcash/bchutil"
"github.com/gcash/bchwallet/walletdb"
"github.com/gcash/bchwallet/wtxmgr"
)
var (
// random small number of satoshis used as dustThreshold
dustThreshold bchutil.Amount = 1e4
)
func TestGetEligibleInputs(t *testing.T) {
tearDown, db, pool, store := TstCreatePoolAndTxStore(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
ns, addrmgrNs := TstRWNamespaces(dbtx)
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
{ReqSigs: 2, PubKeys: TstPubKeys[3:6], SeriesID: 2},
}
TstCreateSeries(t, dbtx, pool, series)
scripts := append(
getPKScriptsForAddressRange(t, dbtx, pool, 1, 0, 2, 0, 4),
getPKScriptsForAddressRange(t, dbtx, pool, 2, 0, 2, 0, 6)...)
// Create two eligible inputs locked to each of the PKScripts above.
expNoEligibleInputs := 2 * len(scripts)
eligibleAmounts := []int64{int64(dustThreshold + 1), int64(dustThreshold + 1)}
var inputs []wtxmgr.Credit
for i := 0; i < len(scripts); i++ {
created := TstCreateCreditsOnStore(t, dbtx, store, scripts[i], eligibleAmounts)
inputs = append(inputs, created...)
}
startAddr := TstNewWithdrawalAddress(t, dbtx, pool, 1, 0, 0)
lastSeriesID := uint32(2)
currentBlock := TstInputsBlock + eligibleInputMinConfirmations + 1
var eligibles []Credit
txmgrNs := dbtx.ReadBucket(txmgrNamespaceKey)
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
eligibles, err = pool.getEligibleInputs(ns, addrmgrNs,
store, txmgrNs, *startAddr, lastSeriesID, dustThreshold, currentBlock,
eligibleInputMinConfirmations)
})
if err != nil {
t.Fatal("InputSelection failed:", err)
}
// Check we got the expected number of eligible inputs.
if len(eligibles) != expNoEligibleInputs {
t.Fatalf("Wrong number of eligible inputs returned. Got: %d, want: %d.",
len(eligibles), expNoEligibleInputs)
}
// Check that the returned eligibles are reverse sorted by address.
if !sort.IsSorted(sort.Reverse(byAddress(eligibles))) |
// Check that all credits are unique
checkUniqueness(t, eligibles)
}
func TestNextAddrWithVaryingHighestIndices(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
ns, addrmgrNs := TstRWNamespaces(dbtx)
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
}
TstCreateSeries(t, dbtx, pool, series)
stopSeriesID := uint32(2)
// Populate the used addr DB for branch 0 and indices ranging from 0 to 2.
TstEnsureUsedAddr(t, dbtx, pool, 1, Branch(0), 2)
// Populate the used addr DB for branch 1 and indices ranging from 0 to 1.
TstEnsureUsedAddr(t, dbtx, pool, 1, Branch(1), 1)
// Start with the address for branch==0, index==1.
addr := TstNewWithdrawalAddress(t, dbtx, pool, 1, 0, 1)
// The first call to nextAddr() should give us the address for branch==1
// and index==1.
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(1), 1)
// The next call should give us the address for branch==0, index==2 since
// there are no used addresses for branch==2.
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(0), 2)
// Since the last addr for branch==1 was the one with index==1, a subsequent
// call will return nil.
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
if addr != nil {
t.Fatalf("Wrong next addr; got '%s', want 'nil'", addr.addrIdentifier())
}
}
func TestNextAddr(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
ns, addrmgrNs := TstRWNamespaces(dbtx)
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
{ReqSigs: 2, PubKeys: TstPubKeys[3:6], SeriesID: 2},
}
TstCreateSeries(t, dbtx, pool, series)
stopSeriesID := uint32(3)
lastIdx := Index(10)
// Populate used addresses DB with entries for seriesID==1, branch==0..3,
// idx==0..10.
for _, i := range []int{0, 1, 2, 3} {
TstEnsureUsedAddr(t, dbtx, pool, 1, Branch(i), lastIdx)
}
addr := TstNewWithdrawalAddress(t, dbtx, pool, 1, 0, lastIdx-1)
// nextAddr() first increments just the branch, which ranges from 0 to 3
// here (because our series has 3 public keys).
for _, i := range []int{1, 2, 3} {
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(i), lastIdx-1)
}
// The last nextAddr() above gave us the addr with branch=3,
// idx=lastIdx-1, so the next 4 calls should give us the addresses with
// branch=[0-3] and idx=lastIdx.
for _, i := range []int{0, 1, 2, 3} {
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(i), lastIdx)
}
// Populate used addresses DB with entries for seriesID==2, branch==0..3,
// idx==0..10.
for _, i := range []int{0, 1, 2, 3} {
TstEnsureUsedAddr(t, dbtx, pool, 2, Branch(i), lastIdx)
}
// Now we've gone through all the available branch/idx combinations, so
// we should move to the next series and start again with branch=0, idx=0.
for _, i := range []int{0, 1, 2, 3} {
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 2, Branch(i), 0)
}
// Finally check that nextAddr() returns nil when we've reached the last
// available address before stopSeriesID.
addr = TstNewWithdrawalAddress(t, dbtx, pool, 2, 3, lastIdx)
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
if addr != nil {
t.Fatalf("Wrong WithdrawalAddress; got %s, want nil", addr.addrIdentifier())
}
}
func TestEligibleInputsAreEligible(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
var chainHeight int32 = 1000
_, credits := tstCreateCreditsOnNewSeries(t, dbtx, pool, []int64{int64(dustThreshold)})
c := credits[0]
// Make sure Credit is old enough to pass the minConf check.
c.BlockMeta.Height = int32(eligibleInputMinConfirmations)
if !pool.isCreditEligible(c, eligibleInputMinConfirmations, chainHeight, dustThreshold) {
t.Errorf("Input is not eligible and it should be.")
}
}
func TestNonEligibleInputsAreNotEligible(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
var chainHeight int32 = 1000
_, credits := tstCreateCreditsOnNewSeries(t, dbtx, pool, []int64{int64(dustThreshold - 1)})
c := credits[0]
// Make sure Credit is old enough to pass the minConf check.
c.BlockMeta.Height = int32(eligibleInputMinConfirmations)
// Check that Credit below dustThreshold is rejected.
if pool.isCreditEligible(c, eligibleInputMinConfirmations, chainHeight, dustThreshold) {
t.Errorf("Input is eligible and it should not be.")
}
// Check that a Credit with not enough confirmations is rejected.
_, credits = tstCreateCreditsOnNewSeries(t, dbtx, pool, []int64{int64(dustThreshold)})
c = credits[0]
// The calculation of if it has been confirmed does this: chainheigt - bh +
// 1 >= target, which is quite weird, but the reason why I need to put 902
// is *that* makes 1000 - 902 +1 = 99 >= 100 false
c.BlockMeta.Height = int32(902)
if pool.isCreditEligible(c, eligibleInputMinConfirmations, chainHeight, dustThreshold) {
t.Errorf("Input is eligible and it should not be.")
}
}
func TestCreditSortingByAddress(t *testing.T) {
teardown, db, pool := TstCreatePool(t)
defer teardown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
{ReqSigs: 2, PubKeys: TstPubKeys[3:6], SeriesID: 2},
}
TstCreateSeries(t, dbtx, pool, series)
shaHash0 := bytes.Repeat([]byte{0}, 32)
shaHash1 := bytes.Repeat([]byte{1}, 32)
shaHash2 := bytes.Repeat([]byte{2}, 32)
c0 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash0, 0)
c1 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash0, 1)
c2 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash1, 0)
c3 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash2, 0)
c4 := newDummyCredit(t, dbtx, pool, 1, 0, 1, shaHash0, 0)
c5 := newDummyCredit(t, dbtx, pool, 1, 1, 0, shaHash0, 0)
c6 := newDummyCredit(t, dbtx, pool, 2, 0, 0, shaHash0, 0)
randomCredits := [][]Credit{
{c6, c5, c4, c3, c2, c1, c0},
{c2, c1, c0, c6, c5, c4, c3},
{c6, c4, c5, c2, c3, c0, c1},
}
want := []Credit{c0, c1, c2, c3, c4, c5, c6}
for _, random := range randomCredits {
sort.Sort(byAddress(random))
got := random
if len(got) != len(want) {
t.Fatalf("Sorted Credit slice size wrong: Got: %d, want: %d",
len(got), len(want))
}
for idx := 0; idx < len(want); idx++ {
if !reflect.DeepEqual(got[idx], want[idx]) {
t.Errorf("Wrong output index. Got: %v, want: %v",
got[idx], want[idx])
}
}
}
}
// newDummyCredit creates a new Credit with the given hash and outpointIdx,
// locked to the votingpool address identified by the given
// series/index/branch.
func newDummyCredit(t *testing.T, dbtx walletdb.ReadWriteTx, pool *Pool, series uint32, index Index, branch Branch,
txHash []byte, outpointIdx uint32) Credit {
var hash chainhash.Hash
if err := hash.SetBytes(txHash); err != nil {
t.Fatal(err)
}
// Ensure the address defined by the given series/branch/index is present on
// the set of used addresses as that's a requirement of WithdrawalAddress.
TstEnsureUsedAddr(t, dbtx, pool, series, branch, index)
addr := TstNewWithdrawalAddress(t, dbtx, pool, series, branch, index)
c := wtxmgr.Credit{
OutPoint: wire.OutPoint{
Hash: hash,
Index: outpointIdx,
},
}
return newCredit(c, *addr)
}
func checkUniqueness(t *testing.T, credits byAddress) {
type uniq struct {
series uint32
branch Branch
index Index
hash chainhash.Hash
outputIndex uint32
}
uniqMap := make(map[uniq]bool)
for _, c := range credits {
u := uniq{
series: c.addr.SeriesID(),
branch: c.addr.Branch(),
index: c.addr.Index(),
hash: c.OutPoint.Hash,
outputIndex: c.OutPoint.Index,
}
if _, exists := uniqMap[u]; exists {
t.Fatalf("Duplicate found: %v", u)
} else {
uniqMap[u] = true
}
}
}
func getPKScriptsForAddressRange(t *testing.T, dbtx walletdb.ReadWriteTx, pool *Pool, seriesID uint32,
startBranch, stopBranch Branch, startIdx, stopIdx Index) [][]byte {
var pkScripts [][]byte
for idx := startIdx; idx <= stopIdx; idx++ {
for branch := startBranch; branch <= stopBranch; branch++ {
pkScripts = append(pkScripts, TstCreatePkScript(t, dbtx, pool, seriesID, branch, idx))
}
}
return pkScripts
}
func checkWithdrawalAddressMatches(t *testing.T, addr *WithdrawalAddress, seriesID uint32,
branch Branch, index Index) {
if addr.SeriesID() != seriesID {
t.Fatalf("Wrong seriesID; got %d, want %d", addr.SeriesID(), seriesID)
}
if addr.Branch() != branch {
t.Fatalf("Wrong branch; got %d, want %d", addr.Branch(), branch)
}
if addr.Index() != index {
t.Fatalf("Wrong index; got %d, want %d", addr.Index(), index)
}
}
| {
t.Fatal("Eligible inputs are not sorted.")
} | conditional_block |
input_selection_wb_test.go | // Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package votingpool
import (
"bytes"
"reflect"
"sort"
"testing"
"github.com/gcash/bchd/chaincfg/chainhash"
"github.com/gcash/bchd/wire"
"github.com/gcash/bchutil"
"github.com/gcash/bchwallet/walletdb"
"github.com/gcash/bchwallet/wtxmgr"
)
var (
// random small number of satoshis used as dustThreshold
dustThreshold bchutil.Amount = 1e4
)
func TestGetEligibleInputs(t *testing.T) {
tearDown, db, pool, store := TstCreatePoolAndTxStore(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
ns, addrmgrNs := TstRWNamespaces(dbtx)
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
{ReqSigs: 2, PubKeys: TstPubKeys[3:6], SeriesID: 2},
}
TstCreateSeries(t, dbtx, pool, series)
scripts := append(
getPKScriptsForAddressRange(t, dbtx, pool, 1, 0, 2, 0, 4),
getPKScriptsForAddressRange(t, dbtx, pool, 2, 0, 2, 0, 6)...)
// Create two eligible inputs locked to each of the PKScripts above.
expNoEligibleInputs := 2 * len(scripts)
eligibleAmounts := []int64{int64(dustThreshold + 1), int64(dustThreshold + 1)}
var inputs []wtxmgr.Credit
for i := 0; i < len(scripts); i++ {
created := TstCreateCreditsOnStore(t, dbtx, store, scripts[i], eligibleAmounts)
inputs = append(inputs, created...)
}
startAddr := TstNewWithdrawalAddress(t, dbtx, pool, 1, 0, 0)
lastSeriesID := uint32(2)
currentBlock := TstInputsBlock + eligibleInputMinConfirmations + 1
var eligibles []Credit
txmgrNs := dbtx.ReadBucket(txmgrNamespaceKey)
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
eligibles, err = pool.getEligibleInputs(ns, addrmgrNs,
store, txmgrNs, *startAddr, lastSeriesID, dustThreshold, currentBlock,
eligibleInputMinConfirmations)
})
if err != nil {
t.Fatal("InputSelection failed:", err)
}
// Check we got the expected number of eligible inputs.
if len(eligibles) != expNoEligibleInputs {
t.Fatalf("Wrong number of eligible inputs returned. Got: %d, want: %d.",
len(eligibles), expNoEligibleInputs)
}
// Check that the returned eligibles are reverse sorted by address.
if !sort.IsSorted(sort.Reverse(byAddress(eligibles))) {
t.Fatal("Eligible inputs are not sorted.")
}
// Check that all credits are unique
checkUniqueness(t, eligibles)
}
func TestNextAddrWithVaryingHighestIndices(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown() | }
defer dbtx.Commit()
ns, addrmgrNs := TstRWNamespaces(dbtx)
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
}
TstCreateSeries(t, dbtx, pool, series)
stopSeriesID := uint32(2)
// Populate the used addr DB for branch 0 and indices ranging from 0 to 2.
TstEnsureUsedAddr(t, dbtx, pool, 1, Branch(0), 2)
// Populate the used addr DB for branch 1 and indices ranging from 0 to 1.
TstEnsureUsedAddr(t, dbtx, pool, 1, Branch(1), 1)
// Start with the address for branch==0, index==1.
addr := TstNewWithdrawalAddress(t, dbtx, pool, 1, 0, 1)
// The first call to nextAddr() should give us the address for branch==1
// and index==1.
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(1), 1)
// The next call should give us the address for branch==0, index==2 since
// there are no used addresses for branch==2.
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(0), 2)
// Since the last addr for branch==1 was the one with index==1, a subsequent
// call will return nil.
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
if addr != nil {
t.Fatalf("Wrong next addr; got '%s', want 'nil'", addr.addrIdentifier())
}
}
func TestNextAddr(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
ns, addrmgrNs := TstRWNamespaces(dbtx)
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
{ReqSigs: 2, PubKeys: TstPubKeys[3:6], SeriesID: 2},
}
TstCreateSeries(t, dbtx, pool, series)
stopSeriesID := uint32(3)
lastIdx := Index(10)
// Populate used addresses DB with entries for seriesID==1, branch==0..3,
// idx==0..10.
for _, i := range []int{0, 1, 2, 3} {
TstEnsureUsedAddr(t, dbtx, pool, 1, Branch(i), lastIdx)
}
addr := TstNewWithdrawalAddress(t, dbtx, pool, 1, 0, lastIdx-1)
// nextAddr() first increments just the branch, which ranges from 0 to 3
// here (because our series has 3 public keys).
for _, i := range []int{1, 2, 3} {
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(i), lastIdx-1)
}
// The last nextAddr() above gave us the addr with branch=3,
// idx=lastIdx-1, so the next 4 calls should give us the addresses with
// branch=[0-3] and idx=lastIdx.
for _, i := range []int{0, 1, 2, 3} {
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(i), lastIdx)
}
// Populate used addresses DB with entries for seriesID==2, branch==0..3,
// idx==0..10.
for _, i := range []int{0, 1, 2, 3} {
TstEnsureUsedAddr(t, dbtx, pool, 2, Branch(i), lastIdx)
}
// Now we've gone through all the available branch/idx combinations, so
// we should move to the next series and start again with branch=0, idx=0.
for _, i := range []int{0, 1, 2, 3} {
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 2, Branch(i), 0)
}
// Finally check that nextAddr() returns nil when we've reached the last
// available address before stopSeriesID.
addr = TstNewWithdrawalAddress(t, dbtx, pool, 2, 3, lastIdx)
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
if addr != nil {
t.Fatalf("Wrong WithdrawalAddress; got %s, want nil", addr.addrIdentifier())
}
}
func TestEligibleInputsAreEligible(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
var chainHeight int32 = 1000
_, credits := tstCreateCreditsOnNewSeries(t, dbtx, pool, []int64{int64(dustThreshold)})
c := credits[0]
// Make sure Credit is old enough to pass the minConf check.
c.BlockMeta.Height = int32(eligibleInputMinConfirmations)
if !pool.isCreditEligible(c, eligibleInputMinConfirmations, chainHeight, dustThreshold) {
t.Errorf("Input is not eligible and it should be.")
}
}
func TestNonEligibleInputsAreNotEligible(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
var chainHeight int32 = 1000
_, credits := tstCreateCreditsOnNewSeries(t, dbtx, pool, []int64{int64(dustThreshold - 1)})
c := credits[0]
// Make sure Credit is old enough to pass the minConf check.
c.BlockMeta.Height = int32(eligibleInputMinConfirmations)
// Check that Credit below dustThreshold is rejected.
if pool.isCreditEligible(c, eligibleInputMinConfirmations, chainHeight, dustThreshold) {
t.Errorf("Input is eligible and it should not be.")
}
// Check that a Credit with not enough confirmations is rejected.
_, credits = tstCreateCreditsOnNewSeries(t, dbtx, pool, []int64{int64(dustThreshold)})
c = credits[0]
// The calculation of if it has been confirmed does this: chainheigt - bh +
// 1 >= target, which is quite weird, but the reason why I need to put 902
// is *that* makes 1000 - 902 +1 = 99 >= 100 false
c.BlockMeta.Height = int32(902)
if pool.isCreditEligible(c, eligibleInputMinConfirmations, chainHeight, dustThreshold) {
t.Errorf("Input is eligible and it should not be.")
}
}
func TestCreditSortingByAddress(t *testing.T) {
teardown, db, pool := TstCreatePool(t)
defer teardown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
{ReqSigs: 2, PubKeys: TstPubKeys[3:6], SeriesID: 2},
}
TstCreateSeries(t, dbtx, pool, series)
shaHash0 := bytes.Repeat([]byte{0}, 32)
shaHash1 := bytes.Repeat([]byte{1}, 32)
shaHash2 := bytes.Repeat([]byte{2}, 32)
c0 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash0, 0)
c1 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash0, 1)
c2 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash1, 0)
c3 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash2, 0)
c4 := newDummyCredit(t, dbtx, pool, 1, 0, 1, shaHash0, 0)
c5 := newDummyCredit(t, dbtx, pool, 1, 1, 0, shaHash0, 0)
c6 := newDummyCredit(t, dbtx, pool, 2, 0, 0, shaHash0, 0)
randomCredits := [][]Credit{
{c6, c5, c4, c3, c2, c1, c0},
{c2, c1, c0, c6, c5, c4, c3},
{c6, c4, c5, c2, c3, c0, c1},
}
want := []Credit{c0, c1, c2, c3, c4, c5, c6}
for _, random := range randomCredits {
sort.Sort(byAddress(random))
got := random
if len(got) != len(want) {
t.Fatalf("Sorted Credit slice size wrong: Got: %d, want: %d",
len(got), len(want))
}
for idx := 0; idx < len(want); idx++ {
if !reflect.DeepEqual(got[idx], want[idx]) {
t.Errorf("Wrong output index. Got: %v, want: %v",
got[idx], want[idx])
}
}
}
}
// newDummyCredit creates a new Credit with the given hash and outpointIdx,
// locked to the votingpool address identified by the given
// series/index/branch.
func newDummyCredit(t *testing.T, dbtx walletdb.ReadWriteTx, pool *Pool, series uint32, index Index, branch Branch,
txHash []byte, outpointIdx uint32) Credit {
var hash chainhash.Hash
if err := hash.SetBytes(txHash); err != nil {
t.Fatal(err)
}
// Ensure the address defined by the given series/branch/index is present on
// the set of used addresses as that's a requirement of WithdrawalAddress.
TstEnsureUsedAddr(t, dbtx, pool, series, branch, index)
addr := TstNewWithdrawalAddress(t, dbtx, pool, series, branch, index)
c := wtxmgr.Credit{
OutPoint: wire.OutPoint{
Hash: hash,
Index: outpointIdx,
},
}
return newCredit(c, *addr)
}
func checkUniqueness(t *testing.T, credits byAddress) {
type uniq struct {
series uint32
branch Branch
index Index
hash chainhash.Hash
outputIndex uint32
}
uniqMap := make(map[uniq]bool)
for _, c := range credits {
u := uniq{
series: c.addr.SeriesID(),
branch: c.addr.Branch(),
index: c.addr.Index(),
hash: c.OutPoint.Hash,
outputIndex: c.OutPoint.Index,
}
if _, exists := uniqMap[u]; exists {
t.Fatalf("Duplicate found: %v", u)
} else {
uniqMap[u] = true
}
}
}
func getPKScriptsForAddressRange(t *testing.T, dbtx walletdb.ReadWriteTx, pool *Pool, seriesID uint32,
startBranch, stopBranch Branch, startIdx, stopIdx Index) [][]byte {
var pkScripts [][]byte
for idx := startIdx; idx <= stopIdx; idx++ {
for branch := startBranch; branch <= stopBranch; branch++ {
pkScripts = append(pkScripts, TstCreatePkScript(t, dbtx, pool, seriesID, branch, idx))
}
}
return pkScripts
}
func checkWithdrawalAddressMatches(t *testing.T, addr *WithdrawalAddress, seriesID uint32,
branch Branch, index Index) {
if addr.SeriesID() != seriesID {
t.Fatalf("Wrong seriesID; got %d, want %d", addr.SeriesID(), seriesID)
}
if addr.Branch() != branch {
t.Fatalf("Wrong branch; got %d, want %d", addr.Branch(), branch)
}
if addr.Index() != index {
t.Fatalf("Wrong index; got %d, want %d", addr.Index(), index)
}
} |
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err) | random_line_split |
input_selection_wb_test.go | // Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package votingpool
import (
"bytes"
"reflect"
"sort"
"testing"
"github.com/gcash/bchd/chaincfg/chainhash"
"github.com/gcash/bchd/wire"
"github.com/gcash/bchutil"
"github.com/gcash/bchwallet/walletdb"
"github.com/gcash/bchwallet/wtxmgr"
)
var (
// random small number of satoshis used as dustThreshold
dustThreshold bchutil.Amount = 1e4
)
func TestGetEligibleInputs(t *testing.T) {
tearDown, db, pool, store := TstCreatePoolAndTxStore(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
ns, addrmgrNs := TstRWNamespaces(dbtx)
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
{ReqSigs: 2, PubKeys: TstPubKeys[3:6], SeriesID: 2},
}
TstCreateSeries(t, dbtx, pool, series)
scripts := append(
getPKScriptsForAddressRange(t, dbtx, pool, 1, 0, 2, 0, 4),
getPKScriptsForAddressRange(t, dbtx, pool, 2, 0, 2, 0, 6)...)
// Create two eligible inputs locked to each of the PKScripts above.
expNoEligibleInputs := 2 * len(scripts)
eligibleAmounts := []int64{int64(dustThreshold + 1), int64(dustThreshold + 1)}
var inputs []wtxmgr.Credit
for i := 0; i < len(scripts); i++ {
created := TstCreateCreditsOnStore(t, dbtx, store, scripts[i], eligibleAmounts)
inputs = append(inputs, created...)
}
startAddr := TstNewWithdrawalAddress(t, dbtx, pool, 1, 0, 0)
lastSeriesID := uint32(2)
currentBlock := TstInputsBlock + eligibleInputMinConfirmations + 1
var eligibles []Credit
txmgrNs := dbtx.ReadBucket(txmgrNamespaceKey)
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
eligibles, err = pool.getEligibleInputs(ns, addrmgrNs,
store, txmgrNs, *startAddr, lastSeriesID, dustThreshold, currentBlock,
eligibleInputMinConfirmations)
})
if err != nil {
t.Fatal("InputSelection failed:", err)
}
// Check we got the expected number of eligible inputs.
if len(eligibles) != expNoEligibleInputs {
t.Fatalf("Wrong number of eligible inputs returned. Got: %d, want: %d.",
len(eligibles), expNoEligibleInputs)
}
// Check that the returned eligibles are reverse sorted by address.
if !sort.IsSorted(sort.Reverse(byAddress(eligibles))) {
t.Fatal("Eligible inputs are not sorted.")
}
// Check that all credits are unique
checkUniqueness(t, eligibles)
}
func TestNextAddrWithVaryingHighestIndices(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
ns, addrmgrNs := TstRWNamespaces(dbtx)
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
}
TstCreateSeries(t, dbtx, pool, series)
stopSeriesID := uint32(2)
// Populate the used addr DB for branch 0 and indices ranging from 0 to 2.
TstEnsureUsedAddr(t, dbtx, pool, 1, Branch(0), 2)
// Populate the used addr DB for branch 1 and indices ranging from 0 to 1.
TstEnsureUsedAddr(t, dbtx, pool, 1, Branch(1), 1)
// Start with the address for branch==0, index==1.
addr := TstNewWithdrawalAddress(t, dbtx, pool, 1, 0, 1)
// The first call to nextAddr() should give us the address for branch==1
// and index==1.
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(1), 1)
// The next call should give us the address for branch==0, index==2 since
// there are no used addresses for branch==2.
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(0), 2)
// Since the last addr for branch==1 was the one with index==1, a subsequent
// call will return nil.
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
if addr != nil {
t.Fatalf("Wrong next addr; got '%s', want 'nil'", addr.addrIdentifier())
}
}
func TestNextAddr(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
ns, addrmgrNs := TstRWNamespaces(dbtx)
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
{ReqSigs: 2, PubKeys: TstPubKeys[3:6], SeriesID: 2},
}
TstCreateSeries(t, dbtx, pool, series)
stopSeriesID := uint32(3)
lastIdx := Index(10)
// Populate used addresses DB with entries for seriesID==1, branch==0..3,
// idx==0..10.
for _, i := range []int{0, 1, 2, 3} {
TstEnsureUsedAddr(t, dbtx, pool, 1, Branch(i), lastIdx)
}
addr := TstNewWithdrawalAddress(t, dbtx, pool, 1, 0, lastIdx-1)
// nextAddr() first increments just the branch, which ranges from 0 to 3
// here (because our series has 3 public keys).
for _, i := range []int{1, 2, 3} {
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(i), lastIdx-1)
}
// The last nextAddr() above gave us the addr with branch=3,
// idx=lastIdx-1, so the next 4 calls should give us the addresses with
// branch=[0-3] and idx=lastIdx.
for _, i := range []int{0, 1, 2, 3} {
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(i), lastIdx)
}
// Populate used addresses DB with entries for seriesID==2, branch==0..3,
// idx==0..10.
for _, i := range []int{0, 1, 2, 3} {
TstEnsureUsedAddr(t, dbtx, pool, 2, Branch(i), lastIdx)
}
// Now we've gone through all the available branch/idx combinations, so
// we should move to the next series and start again with branch=0, idx=0.
for _, i := range []int{0, 1, 2, 3} {
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 2, Branch(i), 0)
}
// Finally check that nextAddr() returns nil when we've reached the last
// available address before stopSeriesID.
addr = TstNewWithdrawalAddress(t, dbtx, pool, 2, 3, lastIdx)
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
if addr != nil {
t.Fatalf("Wrong WithdrawalAddress; got %s, want nil", addr.addrIdentifier())
}
}
func TestEligibleInputsAreEligible(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
var chainHeight int32 = 1000
_, credits := tstCreateCreditsOnNewSeries(t, dbtx, pool, []int64{int64(dustThreshold)})
c := credits[0]
// Make sure Credit is old enough to pass the minConf check.
c.BlockMeta.Height = int32(eligibleInputMinConfirmations)
if !pool.isCreditEligible(c, eligibleInputMinConfirmations, chainHeight, dustThreshold) {
t.Errorf("Input is not eligible and it should be.")
}
}
func TestNonEligibleInputsAreNotEligible(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
var chainHeight int32 = 1000
_, credits := tstCreateCreditsOnNewSeries(t, dbtx, pool, []int64{int64(dustThreshold - 1)})
c := credits[0]
// Make sure Credit is old enough to pass the minConf check.
c.BlockMeta.Height = int32(eligibleInputMinConfirmations)
// Check that Credit below dustThreshold is rejected.
if pool.isCreditEligible(c, eligibleInputMinConfirmations, chainHeight, dustThreshold) {
t.Errorf("Input is eligible and it should not be.")
}
// Check that a Credit with not enough confirmations is rejected.
_, credits = tstCreateCreditsOnNewSeries(t, dbtx, pool, []int64{int64(dustThreshold)})
c = credits[0]
// The calculation of if it has been confirmed does this: chainheigt - bh +
// 1 >= target, which is quite weird, but the reason why I need to put 902
// is *that* makes 1000 - 902 +1 = 99 >= 100 false
c.BlockMeta.Height = int32(902)
if pool.isCreditEligible(c, eligibleInputMinConfirmations, chainHeight, dustThreshold) {
t.Errorf("Input is eligible and it should not be.")
}
}
func TestCreditSortingByAddress(t *testing.T) {
teardown, db, pool := TstCreatePool(t)
defer teardown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
{ReqSigs: 2, PubKeys: TstPubKeys[3:6], SeriesID: 2},
}
TstCreateSeries(t, dbtx, pool, series)
shaHash0 := bytes.Repeat([]byte{0}, 32)
shaHash1 := bytes.Repeat([]byte{1}, 32)
shaHash2 := bytes.Repeat([]byte{2}, 32)
c0 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash0, 0)
c1 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash0, 1)
c2 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash1, 0)
c3 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash2, 0)
c4 := newDummyCredit(t, dbtx, pool, 1, 0, 1, shaHash0, 0)
c5 := newDummyCredit(t, dbtx, pool, 1, 1, 0, shaHash0, 0)
c6 := newDummyCredit(t, dbtx, pool, 2, 0, 0, shaHash0, 0)
randomCredits := [][]Credit{
{c6, c5, c4, c3, c2, c1, c0},
{c2, c1, c0, c6, c5, c4, c3},
{c6, c4, c5, c2, c3, c0, c1},
}
want := []Credit{c0, c1, c2, c3, c4, c5, c6}
for _, random := range randomCredits {
sort.Sort(byAddress(random))
got := random
if len(got) != len(want) {
t.Fatalf("Sorted Credit slice size wrong: Got: %d, want: %d",
len(got), len(want))
}
for idx := 0; idx < len(want); idx++ {
if !reflect.DeepEqual(got[idx], want[idx]) {
t.Errorf("Wrong output index. Got: %v, want: %v",
got[idx], want[idx])
}
}
}
}
// newDummyCredit creates a new Credit with the given hash and outpointIdx,
// locked to the votingpool address identified by the given
// series/index/branch.
func newDummyCredit(t *testing.T, dbtx walletdb.ReadWriteTx, pool *Pool, series uint32, index Index, branch Branch,
txHash []byte, outpointIdx uint32) Credit {
var hash chainhash.Hash
if err := hash.SetBytes(txHash); err != nil {
t.Fatal(err)
}
// Ensure the address defined by the given series/branch/index is present on
// the set of used addresses as that's a requirement of WithdrawalAddress.
TstEnsureUsedAddr(t, dbtx, pool, series, branch, index)
addr := TstNewWithdrawalAddress(t, dbtx, pool, series, branch, index)
c := wtxmgr.Credit{
OutPoint: wire.OutPoint{
Hash: hash,
Index: outpointIdx,
},
}
return newCredit(c, *addr)
}
func checkUniqueness(t *testing.T, credits byAddress) {
type uniq struct {
series uint32
branch Branch
index Index
hash chainhash.Hash
outputIndex uint32
}
uniqMap := make(map[uniq]bool)
for _, c := range credits {
u := uniq{
series: c.addr.SeriesID(),
branch: c.addr.Branch(),
index: c.addr.Index(),
hash: c.OutPoint.Hash,
outputIndex: c.OutPoint.Index,
}
if _, exists := uniqMap[u]; exists {
t.Fatalf("Duplicate found: %v", u)
} else {
uniqMap[u] = true
}
}
}
func getPKScriptsForAddressRange(t *testing.T, dbtx walletdb.ReadWriteTx, pool *Pool, seriesID uint32,
startBranch, stopBranch Branch, startIdx, stopIdx Index) [][]byte {
var pkScripts [][]byte
for idx := startIdx; idx <= stopIdx; idx++ {
for branch := startBranch; branch <= stopBranch; branch++ {
pkScripts = append(pkScripts, TstCreatePkScript(t, dbtx, pool, seriesID, branch, idx))
}
}
return pkScripts
}
func checkWithdrawalAddressMatches(t *testing.T, addr *WithdrawalAddress, seriesID uint32,
branch Branch, index Index) | {
if addr.SeriesID() != seriesID {
t.Fatalf("Wrong seriesID; got %d, want %d", addr.SeriesID(), seriesID)
}
if addr.Branch() != branch {
t.Fatalf("Wrong branch; got %d, want %d", addr.Branch(), branch)
}
if addr.Index() != index {
t.Fatalf("Wrong index; got %d, want %d", addr.Index(), index)
}
} | identifier_body | |
input_selection_wb_test.go | // Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package votingpool
import (
"bytes"
"reflect"
"sort"
"testing"
"github.com/gcash/bchd/chaincfg/chainhash"
"github.com/gcash/bchd/wire"
"github.com/gcash/bchutil"
"github.com/gcash/bchwallet/walletdb"
"github.com/gcash/bchwallet/wtxmgr"
)
var (
// random small number of satoshis used as dustThreshold
dustThreshold bchutil.Amount = 1e4
)
func TestGetEligibleInputs(t *testing.T) {
tearDown, db, pool, store := TstCreatePoolAndTxStore(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
ns, addrmgrNs := TstRWNamespaces(dbtx)
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
{ReqSigs: 2, PubKeys: TstPubKeys[3:6], SeriesID: 2},
}
TstCreateSeries(t, dbtx, pool, series)
scripts := append(
getPKScriptsForAddressRange(t, dbtx, pool, 1, 0, 2, 0, 4),
getPKScriptsForAddressRange(t, dbtx, pool, 2, 0, 2, 0, 6)...)
// Create two eligible inputs locked to each of the PKScripts above.
expNoEligibleInputs := 2 * len(scripts)
eligibleAmounts := []int64{int64(dustThreshold + 1), int64(dustThreshold + 1)}
var inputs []wtxmgr.Credit
for i := 0; i < len(scripts); i++ {
created := TstCreateCreditsOnStore(t, dbtx, store, scripts[i], eligibleAmounts)
inputs = append(inputs, created...)
}
startAddr := TstNewWithdrawalAddress(t, dbtx, pool, 1, 0, 0)
lastSeriesID := uint32(2)
currentBlock := TstInputsBlock + eligibleInputMinConfirmations + 1
var eligibles []Credit
txmgrNs := dbtx.ReadBucket(txmgrNamespaceKey)
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
eligibles, err = pool.getEligibleInputs(ns, addrmgrNs,
store, txmgrNs, *startAddr, lastSeriesID, dustThreshold, currentBlock,
eligibleInputMinConfirmations)
})
if err != nil {
t.Fatal("InputSelection failed:", err)
}
// Check we got the expected number of eligible inputs.
if len(eligibles) != expNoEligibleInputs {
t.Fatalf("Wrong number of eligible inputs returned. Got: %d, want: %d.",
len(eligibles), expNoEligibleInputs)
}
// Check that the returned eligibles are reverse sorted by address.
if !sort.IsSorted(sort.Reverse(byAddress(eligibles))) {
t.Fatal("Eligible inputs are not sorted.")
}
// Check that all credits are unique
checkUniqueness(t, eligibles)
}
func TestNextAddrWithVaryingHighestIndices(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
ns, addrmgrNs := TstRWNamespaces(dbtx)
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
}
TstCreateSeries(t, dbtx, pool, series)
stopSeriesID := uint32(2)
// Populate the used addr DB for branch 0 and indices ranging from 0 to 2.
TstEnsureUsedAddr(t, dbtx, pool, 1, Branch(0), 2)
// Populate the used addr DB for branch 1 and indices ranging from 0 to 1.
TstEnsureUsedAddr(t, dbtx, pool, 1, Branch(1), 1)
// Start with the address for branch==0, index==1.
addr := TstNewWithdrawalAddress(t, dbtx, pool, 1, 0, 1)
// The first call to nextAddr() should give us the address for branch==1
// and index==1.
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(1), 1)
// The next call should give us the address for branch==0, index==2 since
// there are no used addresses for branch==2.
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(0), 2)
// Since the last addr for branch==1 was the one with index==1, a subsequent
// call will return nil.
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
if addr != nil {
t.Fatalf("Wrong next addr; got '%s', want 'nil'", addr.addrIdentifier())
}
}
func TestNextAddr(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
ns, addrmgrNs := TstRWNamespaces(dbtx)
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
{ReqSigs: 2, PubKeys: TstPubKeys[3:6], SeriesID: 2},
}
TstCreateSeries(t, dbtx, pool, series)
stopSeriesID := uint32(3)
lastIdx := Index(10)
// Populate used addresses DB with entries for seriesID==1, branch==0..3,
// idx==0..10.
for _, i := range []int{0, 1, 2, 3} {
TstEnsureUsedAddr(t, dbtx, pool, 1, Branch(i), lastIdx)
}
addr := TstNewWithdrawalAddress(t, dbtx, pool, 1, 0, lastIdx-1)
// nextAddr() first increments just the branch, which ranges from 0 to 3
// here (because our series has 3 public keys).
for _, i := range []int{1, 2, 3} {
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(i), lastIdx-1)
}
// The last nextAddr() above gave us the addr with branch=3,
// idx=lastIdx-1, so the next 4 calls should give us the addresses with
// branch=[0-3] and idx=lastIdx.
for _, i := range []int{0, 1, 2, 3} {
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 1, Branch(i), lastIdx)
}
// Populate used addresses DB with entries for seriesID==2, branch==0..3,
// idx==0..10.
for _, i := range []int{0, 1, 2, 3} {
TstEnsureUsedAddr(t, dbtx, pool, 2, Branch(i), lastIdx)
}
// Now we've gone through all the available branch/idx combinations, so
// we should move to the next series and start again with branch=0, idx=0.
for _, i := range []int{0, 1, 2, 3} {
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
checkWithdrawalAddressMatches(t, addr, 2, Branch(i), 0)
}
// Finally check that nextAddr() returns nil when we've reached the last
// available address before stopSeriesID.
addr = TstNewWithdrawalAddress(t, dbtx, pool, 2, 3, lastIdx)
TstRunWithManagerUnlocked(t, pool.Manager(), addrmgrNs, func() {
addr, err = nextAddr(pool, ns, addrmgrNs, addr.seriesID, addr.branch, addr.index, stopSeriesID)
})
if err != nil {
t.Fatalf("Failed to get next address: %v", err)
}
if addr != nil {
t.Fatalf("Wrong WithdrawalAddress; got %s, want nil", addr.addrIdentifier())
}
}
func TestEligibleInputsAreEligible(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
var chainHeight int32 = 1000
_, credits := tstCreateCreditsOnNewSeries(t, dbtx, pool, []int64{int64(dustThreshold)})
c := credits[0]
// Make sure Credit is old enough to pass the minConf check.
c.BlockMeta.Height = int32(eligibleInputMinConfirmations)
if !pool.isCreditEligible(c, eligibleInputMinConfirmations, chainHeight, dustThreshold) {
t.Errorf("Input is not eligible and it should be.")
}
}
func TestNonEligibleInputsAreNotEligible(t *testing.T) {
tearDown, db, pool := TstCreatePool(t)
defer tearDown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
var chainHeight int32 = 1000
_, credits := tstCreateCreditsOnNewSeries(t, dbtx, pool, []int64{int64(dustThreshold - 1)})
c := credits[0]
// Make sure Credit is old enough to pass the minConf check.
c.BlockMeta.Height = int32(eligibleInputMinConfirmations)
// Check that Credit below dustThreshold is rejected.
if pool.isCreditEligible(c, eligibleInputMinConfirmations, chainHeight, dustThreshold) {
t.Errorf("Input is eligible and it should not be.")
}
// Check that a Credit with not enough confirmations is rejected.
_, credits = tstCreateCreditsOnNewSeries(t, dbtx, pool, []int64{int64(dustThreshold)})
c = credits[0]
// The calculation of if it has been confirmed does this: chainheigt - bh +
// 1 >= target, which is quite weird, but the reason why I need to put 902
// is *that* makes 1000 - 902 +1 = 99 >= 100 false
c.BlockMeta.Height = int32(902)
if pool.isCreditEligible(c, eligibleInputMinConfirmations, chainHeight, dustThreshold) {
t.Errorf("Input is eligible and it should not be.")
}
}
func TestCreditSortingByAddress(t *testing.T) {
teardown, db, pool := TstCreatePool(t)
defer teardown()
dbtx, err := db.BeginReadWriteTx()
if err != nil {
t.Fatal(err)
}
defer dbtx.Commit()
series := []TstSeriesDef{
{ReqSigs: 2, PubKeys: TstPubKeys[1:4], SeriesID: 1},
{ReqSigs: 2, PubKeys: TstPubKeys[3:6], SeriesID: 2},
}
TstCreateSeries(t, dbtx, pool, series)
shaHash0 := bytes.Repeat([]byte{0}, 32)
shaHash1 := bytes.Repeat([]byte{1}, 32)
shaHash2 := bytes.Repeat([]byte{2}, 32)
c0 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash0, 0)
c1 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash0, 1)
c2 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash1, 0)
c3 := newDummyCredit(t, dbtx, pool, 1, 0, 0, shaHash2, 0)
c4 := newDummyCredit(t, dbtx, pool, 1, 0, 1, shaHash0, 0)
c5 := newDummyCredit(t, dbtx, pool, 1, 1, 0, shaHash0, 0)
c6 := newDummyCredit(t, dbtx, pool, 2, 0, 0, shaHash0, 0)
randomCredits := [][]Credit{
{c6, c5, c4, c3, c2, c1, c0},
{c2, c1, c0, c6, c5, c4, c3},
{c6, c4, c5, c2, c3, c0, c1},
}
want := []Credit{c0, c1, c2, c3, c4, c5, c6}
for _, random := range randomCredits {
sort.Sort(byAddress(random))
got := random
if len(got) != len(want) {
t.Fatalf("Sorted Credit slice size wrong: Got: %d, want: %d",
len(got), len(want))
}
for idx := 0; idx < len(want); idx++ {
if !reflect.DeepEqual(got[idx], want[idx]) {
t.Errorf("Wrong output index. Got: %v, want: %v",
got[idx], want[idx])
}
}
}
}
// newDummyCredit creates a new Credit with the given hash and outpointIdx,
// locked to the votingpool address identified by the given
// series/index/branch.
func newDummyCredit(t *testing.T, dbtx walletdb.ReadWriteTx, pool *Pool, series uint32, index Index, branch Branch,
txHash []byte, outpointIdx uint32) Credit {
var hash chainhash.Hash
if err := hash.SetBytes(txHash); err != nil {
t.Fatal(err)
}
// Ensure the address defined by the given series/branch/index is present on
// the set of used addresses as that's a requirement of WithdrawalAddress.
TstEnsureUsedAddr(t, dbtx, pool, series, branch, index)
addr := TstNewWithdrawalAddress(t, dbtx, pool, series, branch, index)
c := wtxmgr.Credit{
OutPoint: wire.OutPoint{
Hash: hash,
Index: outpointIdx,
},
}
return newCredit(c, *addr)
}
func | (t *testing.T, credits byAddress) {
type uniq struct {
series uint32
branch Branch
index Index
hash chainhash.Hash
outputIndex uint32
}
uniqMap := make(map[uniq]bool)
for _, c := range credits {
u := uniq{
series: c.addr.SeriesID(),
branch: c.addr.Branch(),
index: c.addr.Index(),
hash: c.OutPoint.Hash,
outputIndex: c.OutPoint.Index,
}
if _, exists := uniqMap[u]; exists {
t.Fatalf("Duplicate found: %v", u)
} else {
uniqMap[u] = true
}
}
}
func getPKScriptsForAddressRange(t *testing.T, dbtx walletdb.ReadWriteTx, pool *Pool, seriesID uint32,
startBranch, stopBranch Branch, startIdx, stopIdx Index) [][]byte {
var pkScripts [][]byte
for idx := startIdx; idx <= stopIdx; idx++ {
for branch := startBranch; branch <= stopBranch; branch++ {
pkScripts = append(pkScripts, TstCreatePkScript(t, dbtx, pool, seriesID, branch, idx))
}
}
return pkScripts
}
func checkWithdrawalAddressMatches(t *testing.T, addr *WithdrawalAddress, seriesID uint32,
branch Branch, index Index) {
if addr.SeriesID() != seriesID {
t.Fatalf("Wrong seriesID; got %d, want %d", addr.SeriesID(), seriesID)
}
if addr.Branch() != branch {
t.Fatalf("Wrong branch; got %d, want %d", addr.Branch(), branch)
}
if addr.Index() != index {
t.Fatalf("Wrong index; got %d, want %d", addr.Index(), index)
}
}
| checkUniqueness | identifier_name |
showcase_custom_layout.js |
const ColumnsNumber = {
One: 1,
Two: 2,
Three: 3
}
const LayoutShowcaseConsts = {
margin: 15,
minPageWidth: 270
}
var LayoutShowcaseState = {
columns: ColumnsNumber.Three,
layoutVisuals: null,
layoutReport: null,
layoutPageName: null
}
// Embed the report and retrieve all report visuals
function embedCustomLayoutReport() {
// Load custom layout report properties into session
LoadLayoutShowcaseReportIntoSession().then(function () {
// Get models. models contains enums that can be used
const models = window['powerbi-client'].models;
// Get embed application token from session
var accessToken = GetSession(SessionKeys.AccessToken);
// Get embed URL from session
var embedUrl = GetSession(SessionKeys.EmbedUrl);
// Get report Id from session
var embedReportId = GetSession(SessionKeys.EmbedId);
// We give the user View permissions
var permissions = models.Permissions.View;
// Embed configuration used to describe the what and how to embed
// This object is used when calling powerbi.embed
// This also includes settings and options such as filters
// You can find more information at https://github.com/Microsoft/PowerBI-JavaScript/wiki/Embed-Configuration-Details
var config= {
type: 'report',
tokenType: models.TokenType.Embed,
accessToken: accessToken,
embedUrl: embedUrl,
id: embedReportId,
permissions: permissions,
settings: {
filterPaneEnabled: false,
navContentPaneEnabled: false
}
};
// Get a reference to the embedded report HTML element
var embedContainer = $('#embedContainer')[0];
// Embed the report and display it within the div container
LayoutShowcaseState.layoutReport = powerbi.embed(embedContainer, config);
// Report.on will add an event handler for report loaded event
LayoutShowcaseState.layoutReport.on("loaded", function() {
// After report is loaded, we find the active page and get all the visuals on it
// Retrieve the page collection
LayoutShowcaseState.layoutReport.getPages().then(function (pages) {
// Retrieve active page
let activePage = jQuery.grep(pages, function (page) { return page.isActive })[0];
// Set layoutPageName to active page name
LayoutShowcaseState.layoutPageName = activePage.name;
// Retrieve active page visuals.
activePage.getVisuals().then(function (visuals) {
var reportVisuals = visuals.map(function (visual) {
return {
name: visual.name,
title: visual.title,
checked: true
};
});
// Create visuals array from the visuals of the active page
createVisualsArray(reportVisuals);
});
});
});
});
}
// Create visuals array from the report visuals and update the HTML
function createVisualsArray(reportVisuals) |
// Render all visuals with current configuration
function renderVisuals() {
// render only if report and visuals initialized
if (!LayoutShowcaseState.layoutReport || !LayoutShowcaseState.layoutVisuals)
return;
// Get models. models contains enums that can be used
const models = window['powerbi-client'].models;
// Get embedContainer width and height
let pageWidth = $('#embedContainer').width();
let pageHeight = $('#embedContainer').height();
// Calculating the overall width of the visuals in each row
let visualsTotalWidth = pageWidth - (LayoutShowcaseConsts.margin * (LayoutShowcaseState.columns + 1));
// Calculate the width of a single visual, according to the number of columns
// For one and three columns visuals width will be a third of visuals total width
let width = (LayoutShowcaseState.columns === ColumnsNumber.Two) ? (visualsTotalWidth / 2) : (visualsTotalWidth / 3);
// For one column, set page width to visual's width with margins
if (LayoutShowcaseState.columns === ColumnsNumber.One) {
pageWidth = width + 2 * LayoutShowcaseConsts.margin;
// Check if page width is smaller than minimum width and update accordingly
if (pageWidth < LayoutShowcaseConsts.minPageWidth) {
pageWidth = LayoutShowcaseConsts.minPageWidth;
// Visuals width is set to fit minimum page width with margins on both sides
width = LayoutShowcaseConsts.minPageWidth - 2 * LayoutShowcaseConsts.margin;
}
}
// Set visuals height according to width - 9:16 ratio
const height = width * (9 / 16);
// Visuals starting point
let x = LayoutShowcaseConsts.margin, y = LayoutShowcaseConsts.margin;
// Filter the visuals list to display only the checked visuals
let checkedVisuals = LayoutShowcaseState.layoutVisuals.filter(function (visual) { return visual.checked; });
// Calculate the number of lines
const lines = Math.ceil(checkedVisuals.length / LayoutShowcaseState.columns);
// Calculate page height with margins
pageHeight = Math.max(pageHeight, ((lines * height) + ((lines + 1) * LayoutShowcaseConsts.margin)));
// Building visualsLayout object
// You can find more information at https://github.com/Microsoft/PowerBI-JavaScript/wiki/Custom-Layout
let visualsLayout = {};
for (let i = 0; i < checkedVisuals.length; i++) {
visualsLayout[checkedVisuals[i].name] = {
x: x,
y: y,
width: width,
height: height,
displayState: {
// Change the selected visuals display mode to visible
mode: models.VisualContainerDisplayMode.Visible
}
}
// Calculating (x,y) position for the next visual
x += width + LayoutShowcaseConsts.margin;
if (x >= pageWidth) {
x = LayoutShowcaseConsts.margin;
y += height + LayoutShowcaseConsts.margin;
}
}
// Building pagesLayout object
let pagesLayout = {};
pagesLayout[LayoutShowcaseState.layoutPageName] = {
defaultLayout: {
displayState: {
// Default display mode for visuals is hidden
mode: models.VisualContainerDisplayMode.Hidden
}
},
visualsLayout: visualsLayout
};
// Building settings object
let settings = {
layoutType: models.LayoutType.Custom,
customLayout: {
pageSize: {
type: models.PageSizeType.Custom,
width: pageWidth - 10,
height: pageHeight - 20
},
displayOption: models.DisplayOption.FitToPage,
pagesLayout: pagesLayout
}
};
// If pageWidth or pageHeight is changed, change display option to actual size to add scroll bar
if (pageWidth !== $('#embedContainer').width() || pageHeight !== $('#embedContainer').height()) {
settings.customLayout.displayOption = models.DisplayOption.ActualSize;
}
// Change page background to transparent on Two / Three columns configuration
settings.background = (LayoutShowcaseState.columns === ColumnsNumber.One) ? models.BackgroundType.Default : models.BackgroundType.Transparent;
// Call updateSettings with the new settings object
LayoutShowcaseState.layoutReport.updateSettings(settings);
}
// Update the visuals list with the change and rerender all visuals
function onCheckboxClicked(checkbox) {
let visual = jQuery.grep(LayoutShowcaseState.layoutVisuals, function (visual) { return visual.name === checkbox.value })[0];
visual.checked = $(checkbox).is(':checked');
renderVisuals();
};
// Update columns number and rerender the visuals
function onColumnsClicked(num) {
LayoutShowcaseState.columns = num;
setColumnButtonActive(num);
renderVisuals();
}
// Build visual checkbox HTML element
function buildVisualElement(visual) {
var labelElement = document.createElement("label");
labelElement.setAttribute("class", "checkboxContainer checked");
var inputElement = document.createElement("input");
inputElement.setAttribute("type", "checkbox");
inputElement.setAttribute("id", 'visual_' + visual.name);
inputElement.setAttribute("value", visual.name);
inputElement.setAttribute("onclick", "onCheckboxClicked(this);");
inputElement.setAttribute("checked", "true");
labelElement.appendChild(inputElement);
var spanElement = document.createElement("span");
spanElement.setAttribute("class", "checkboxCheckmark");
labelElement.appendChild(spanElement);
var secondSpanElement = document.createElement("span");
secondSpanElement.setAttribute("class", "checkboxTitle");
var checkboxTitleElement = document.createTextNode(visual.title);
secondSpanElement.appendChild(checkboxTitleElement);
labelElement.appendChild(secondSpanElement);
return labelElement;
}
// Set clicked columns button active
function setColumnButtonActive(num) {
const active_btn_class = "active-columns-btn";
$('#btnOneCol').removeClass(active_btn_class);
$('#btnTwoCols').removeClass(active_btn_class);
$('#btnThreeCols').removeClass(active_btn_class);
if (num === ColumnsNumber.Three) {
$('#btnThreeCols').addClass(active_btn_class);
} else if (num === ColumnsNumber.Two) {
$('#btnTwoCols').addClass(active_btn_class);
} else {
$('#btnOneCol').addClass(active_btn_class);
}
}
| {
// Remove all visuals without titles (i.e cards)
LayoutShowcaseState.layoutVisuals = reportVisuals.filter(function (visual) {
return visual.title !== undefined;
});
// Clear visuals list div
$('#visualsList').empty();
// Build checkbox html list and insert the html code to visualsList div
for (let i = 0; i < LayoutShowcaseState.layoutVisuals.length; i++) {
$('#visualsList').append(buildVisualElement(LayoutShowcaseState.layoutVisuals[i]));
}
// Render all visuals
renderVisuals();
} | identifier_body |
showcase_custom_layout.js | const ColumnsNumber = {
One: 1,
Two: 2,
Three: 3
}
const LayoutShowcaseConsts = {
margin: 15,
minPageWidth: 270
}
var LayoutShowcaseState = {
columns: ColumnsNumber.Three,
layoutVisuals: null,
layoutReport: null,
layoutPageName: null
}
// Embed the report and retrieve all report visuals
function embedCustomLayoutReport() {
// Load custom layout report properties into session
LoadLayoutShowcaseReportIntoSession().then(function () {
// Get models. models contains enums that can be used
const models = window['powerbi-client'].models;
// Get embed application token from session
var accessToken = GetSession(SessionKeys.AccessToken);
// Get embed URL from session
var embedUrl = GetSession(SessionKeys.EmbedUrl);
// Get report Id from session
var embedReportId = GetSession(SessionKeys.EmbedId);
// We give the user View permissions
var permissions = models.Permissions.View;
// Embed configuration used to describe the what and how to embed
// This object is used when calling powerbi.embed
// This also includes settings and options such as filters
// You can find more information at https://github.com/Microsoft/PowerBI-JavaScript/wiki/Embed-Configuration-Details
var config= {
type: 'report',
tokenType: models.TokenType.Embed,
accessToken: accessToken,
embedUrl: embedUrl,
id: embedReportId,
permissions: permissions,
settings: {
filterPaneEnabled: false,
navContentPaneEnabled: false
}
};
// Get a reference to the embedded report HTML element
var embedContainer = $('#embedContainer')[0];
// Embed the report and display it within the div container
LayoutShowcaseState.layoutReport = powerbi.embed(embedContainer, config);
// Report.on will add an event handler for report loaded event
LayoutShowcaseState.layoutReport.on("loaded", function() {
// After report is loaded, we find the active page and get all the visuals on it
// Retrieve the page collection
LayoutShowcaseState.layoutReport.getPages().then(function (pages) {
// Retrieve active page
let activePage = jQuery.grep(pages, function (page) { return page.isActive })[0];
// Set layoutPageName to active page name
LayoutShowcaseState.layoutPageName = activePage.name;
// Retrieve active page visuals.
activePage.getVisuals().then(function (visuals) {
var reportVisuals = visuals.map(function (visual) {
return {
name: visual.name,
title: visual.title,
checked: true
};
});
// Create visuals array from the visuals of the active page
createVisualsArray(reportVisuals);
});
});
});
});
}
// Create visuals array from the report visuals and update the HTML
function createVisualsArray(reportVisuals) {
// Remove all visuals without titles (i.e cards)
LayoutShowcaseState.layoutVisuals = reportVisuals.filter(function (visual) {
return visual.title !== undefined;
});
// Clear visuals list div
$('#visualsList').empty();
// Build checkbox html list and insert the html code to visualsList div
for (let i = 0; i < LayoutShowcaseState.layoutVisuals.length; i++) {
$('#visualsList').append(buildVisualElement(LayoutShowcaseState.layoutVisuals[i]));
}
// Render all visuals
renderVisuals();
}
// Render all visuals with current configuration
function renderVisuals() {
// render only if report and visuals initialized
if (!LayoutShowcaseState.layoutReport || !LayoutShowcaseState.layoutVisuals)
return;
// Get models. models contains enums that can be used
const models = window['powerbi-client'].models;
// Get embedContainer width and height
let pageWidth = $('#embedContainer').width();
let pageHeight = $('#embedContainer').height();
// Calculating the overall width of the visuals in each row
let visualsTotalWidth = pageWidth - (LayoutShowcaseConsts.margin * (LayoutShowcaseState.columns + 1));
// Calculate the width of a single visual, according to the number of columns
// For one and three columns visuals width will be a third of visuals total width
let width = (LayoutShowcaseState.columns === ColumnsNumber.Two) ? (visualsTotalWidth / 2) : (visualsTotalWidth / 3);
// For one column, set page width to visual's width with margins
if (LayoutShowcaseState.columns === ColumnsNumber.One) {
pageWidth = width + 2 * LayoutShowcaseConsts.margin;
// Check if page width is smaller than minimum width and update accordingly
if (pageWidth < LayoutShowcaseConsts.minPageWidth) {
pageWidth = LayoutShowcaseConsts.minPageWidth;
// Visuals width is set to fit minimum page width with margins on both sides
width = LayoutShowcaseConsts.minPageWidth - 2 * LayoutShowcaseConsts.margin;
}
}
// Set visuals height according to width - 9:16 ratio
const height = width * (9 / 16);
// Visuals starting point
let x = LayoutShowcaseConsts.margin, y = LayoutShowcaseConsts.margin;
// Filter the visuals list to display only the checked visuals
let checkedVisuals = LayoutShowcaseState.layoutVisuals.filter(function (visual) { return visual.checked; });
// Calculate the number of lines
const lines = Math.ceil(checkedVisuals.length / LayoutShowcaseState.columns);
// Calculate page height with margins
pageHeight = Math.max(pageHeight, ((lines * height) + ((lines + 1) * LayoutShowcaseConsts.margin)));
// Building visualsLayout object
// You can find more information at https://github.com/Microsoft/PowerBI-JavaScript/wiki/Custom-Layout
let visualsLayout = {};
for (let i = 0; i < checkedVisuals.length; i++) {
visualsLayout[checkedVisuals[i].name] = {
x: x,
y: y,
width: width,
height: height,
displayState: {
// Change the selected visuals display mode to visible
mode: models.VisualContainerDisplayMode.Visible
}
}
// Calculating (x,y) position for the next visual
x += width + LayoutShowcaseConsts.margin;
if (x >= pageWidth) {
x = LayoutShowcaseConsts.margin;
y += height + LayoutShowcaseConsts.margin;
}
}
// Building pagesLayout object
let pagesLayout = {};
pagesLayout[LayoutShowcaseState.layoutPageName] = {
defaultLayout: {
displayState: {
// Default display mode for visuals is hidden
mode: models.VisualContainerDisplayMode.Hidden
}
},
visualsLayout: visualsLayout
};
// Building settings object
let settings = {
layoutType: models.LayoutType.Custom,
customLayout: {
pageSize: {
type: models.PageSizeType.Custom,
width: pageWidth - 10,
height: pageHeight - 20
},
displayOption: models.DisplayOption.FitToPage,
pagesLayout: pagesLayout
}
};
// If pageWidth or pageHeight is changed, change display option to actual size to add scroll bar
if (pageWidth !== $('#embedContainer').width() || pageHeight !== $('#embedContainer').height()) {
settings.customLayout.displayOption = models.DisplayOption.ActualSize;
}
// Change page background to transparent on Two / Three columns configuration
settings.background = (LayoutShowcaseState.columns === ColumnsNumber.One) ? models.BackgroundType.Default : models.BackgroundType.Transparent;
// Call updateSettings with the new settings object
LayoutShowcaseState.layoutReport.updateSettings(settings);
}
// Update the visuals list with the change and rerender all visuals
function onCheckboxClicked(checkbox) {
let visual = jQuery.grep(LayoutShowcaseState.layoutVisuals, function (visual) { return visual.name === checkbox.value })[0];
visual.checked = $(checkbox).is(':checked');
renderVisuals();
};
// Update columns number and rerender the visuals
function onColumnsClicked(num) {
LayoutShowcaseState.columns = num;
setColumnButtonActive(num);
renderVisuals();
}
// Build visual checkbox HTML element
function buildVisualElement(visual) {
var labelElement = document.createElement("label");
labelElement.setAttribute("class", "checkboxContainer checked");
var inputElement = document.createElement("input");
inputElement.setAttribute("type", "checkbox");
inputElement.setAttribute("id", 'visual_' + visual.name);
inputElement.setAttribute("value", visual.name);
inputElement.setAttribute("onclick", "onCheckboxClicked(this);");
inputElement.setAttribute("checked", "true");
labelElement.appendChild(inputElement);
var spanElement = document.createElement("span");
spanElement.setAttribute("class", "checkboxCheckmark");
labelElement.appendChild(spanElement);
var secondSpanElement = document.createElement("span");
secondSpanElement.setAttribute("class", "checkboxTitle");
var checkboxTitleElement = document.createTextNode(visual.title);
secondSpanElement.appendChild(checkboxTitleElement);
labelElement.appendChild(secondSpanElement);
return labelElement;
}
// Set clicked columns button active |
if (num === ColumnsNumber.Three) {
$('#btnThreeCols').addClass(active_btn_class);
} else if (num === ColumnsNumber.Two) {
$('#btnTwoCols').addClass(active_btn_class);
} else {
$('#btnOneCol').addClass(active_btn_class);
}
} | function setColumnButtonActive(num) {
const active_btn_class = "active-columns-btn";
$('#btnOneCol').removeClass(active_btn_class);
$('#btnTwoCols').removeClass(active_btn_class);
$('#btnThreeCols').removeClass(active_btn_class); | random_line_split |
showcase_custom_layout.js |
const ColumnsNumber = {
One: 1,
Two: 2,
Three: 3
}
const LayoutShowcaseConsts = {
margin: 15,
minPageWidth: 270
}
var LayoutShowcaseState = {
columns: ColumnsNumber.Three,
layoutVisuals: null,
layoutReport: null,
layoutPageName: null
}
// Embed the report and retrieve all report visuals
function embedCustomLayoutReport() {
// Load custom layout report properties into session
LoadLayoutShowcaseReportIntoSession().then(function () {
// Get models. models contains enums that can be used
const models = window['powerbi-client'].models;
// Get embed application token from session
var accessToken = GetSession(SessionKeys.AccessToken);
// Get embed URL from session
var embedUrl = GetSession(SessionKeys.EmbedUrl);
// Get report Id from session
var embedReportId = GetSession(SessionKeys.EmbedId);
// We give the user View permissions
var permissions = models.Permissions.View;
// Embed configuration used to describe the what and how to embed
// This object is used when calling powerbi.embed
// This also includes settings and options such as filters
// You can find more information at https://github.com/Microsoft/PowerBI-JavaScript/wiki/Embed-Configuration-Details
var config= {
type: 'report',
tokenType: models.TokenType.Embed,
accessToken: accessToken,
embedUrl: embedUrl,
id: embedReportId,
permissions: permissions,
settings: {
filterPaneEnabled: false,
navContentPaneEnabled: false
}
};
// Get a reference to the embedded report HTML element
var embedContainer = $('#embedContainer')[0];
// Embed the report and display it within the div container
LayoutShowcaseState.layoutReport = powerbi.embed(embedContainer, config);
// Report.on will add an event handler for report loaded event
LayoutShowcaseState.layoutReport.on("loaded", function() {
// After report is loaded, we find the active page and get all the visuals on it
// Retrieve the page collection
LayoutShowcaseState.layoutReport.getPages().then(function (pages) {
// Retrieve active page
let activePage = jQuery.grep(pages, function (page) { return page.isActive })[0];
// Set layoutPageName to active page name
LayoutShowcaseState.layoutPageName = activePage.name;
// Retrieve active page visuals.
activePage.getVisuals().then(function (visuals) {
var reportVisuals = visuals.map(function (visual) {
return {
name: visual.name,
title: visual.title,
checked: true
};
});
// Create visuals array from the visuals of the active page
createVisualsArray(reportVisuals);
});
});
});
});
}
// Create visuals array from the report visuals and update the HTML
function createVisualsArray(reportVisuals) {
// Remove all visuals without titles (i.e cards)
LayoutShowcaseState.layoutVisuals = reportVisuals.filter(function (visual) {
return visual.title !== undefined;
});
// Clear visuals list div
$('#visualsList').empty();
// Build checkbox html list and insert the html code to visualsList div
for (let i = 0; i < LayoutShowcaseState.layoutVisuals.length; i++) {
$('#visualsList').append(buildVisualElement(LayoutShowcaseState.layoutVisuals[i]));
}
// Render all visuals
renderVisuals();
}
// Render all visuals with current configuration
function renderVisuals() {
// render only if report and visuals initialized
if (!LayoutShowcaseState.layoutReport || !LayoutShowcaseState.layoutVisuals)
return;
// Get models. models contains enums that can be used
const models = window['powerbi-client'].models;
// Get embedContainer width and height
let pageWidth = $('#embedContainer').width();
let pageHeight = $('#embedContainer').height();
// Calculating the overall width of the visuals in each row
let visualsTotalWidth = pageWidth - (LayoutShowcaseConsts.margin * (LayoutShowcaseState.columns + 1));
// Calculate the width of a single visual, according to the number of columns
// For one and three columns visuals width will be a third of visuals total width
let width = (LayoutShowcaseState.columns === ColumnsNumber.Two) ? (visualsTotalWidth / 2) : (visualsTotalWidth / 3);
// For one column, set page width to visual's width with margins
if (LayoutShowcaseState.columns === ColumnsNumber.One) |
// Set visuals height according to width - 9:16 ratio
const height = width * (9 / 16);
// Visuals starting point
let x = LayoutShowcaseConsts.margin, y = LayoutShowcaseConsts.margin;
// Filter the visuals list to display only the checked visuals
let checkedVisuals = LayoutShowcaseState.layoutVisuals.filter(function (visual) { return visual.checked; });
// Calculate the number of lines
const lines = Math.ceil(checkedVisuals.length / LayoutShowcaseState.columns);
// Calculate page height with margins
pageHeight = Math.max(pageHeight, ((lines * height) + ((lines + 1) * LayoutShowcaseConsts.margin)));
// Building visualsLayout object
// You can find more information at https://github.com/Microsoft/PowerBI-JavaScript/wiki/Custom-Layout
let visualsLayout = {};
for (let i = 0; i < checkedVisuals.length; i++) {
visualsLayout[checkedVisuals[i].name] = {
x: x,
y: y,
width: width,
height: height,
displayState: {
// Change the selected visuals display mode to visible
mode: models.VisualContainerDisplayMode.Visible
}
}
// Calculating (x,y) position for the next visual
x += width + LayoutShowcaseConsts.margin;
if (x >= pageWidth) {
x = LayoutShowcaseConsts.margin;
y += height + LayoutShowcaseConsts.margin;
}
}
// Building pagesLayout object
let pagesLayout = {};
pagesLayout[LayoutShowcaseState.layoutPageName] = {
defaultLayout: {
displayState: {
// Default display mode for visuals is hidden
mode: models.VisualContainerDisplayMode.Hidden
}
},
visualsLayout: visualsLayout
};
// Building settings object
let settings = {
layoutType: models.LayoutType.Custom,
customLayout: {
pageSize: {
type: models.PageSizeType.Custom,
width: pageWidth - 10,
height: pageHeight - 20
},
displayOption: models.DisplayOption.FitToPage,
pagesLayout: pagesLayout
}
};
// If pageWidth or pageHeight is changed, change display option to actual size to add scroll bar
if (pageWidth !== $('#embedContainer').width() || pageHeight !== $('#embedContainer').height()) {
settings.customLayout.displayOption = models.DisplayOption.ActualSize;
}
// Change page background to transparent on Two / Three columns configuration
settings.background = (LayoutShowcaseState.columns === ColumnsNumber.One) ? models.BackgroundType.Default : models.BackgroundType.Transparent;
// Call updateSettings with the new settings object
LayoutShowcaseState.layoutReport.updateSettings(settings);
}
// Update the visuals list with the change and rerender all visuals
function onCheckboxClicked(checkbox) {
let visual = jQuery.grep(LayoutShowcaseState.layoutVisuals, function (visual) { return visual.name === checkbox.value })[0];
visual.checked = $(checkbox).is(':checked');
renderVisuals();
};
// Update columns number and rerender the visuals
function onColumnsClicked(num) {
LayoutShowcaseState.columns = num;
setColumnButtonActive(num);
renderVisuals();
}
// Build visual checkbox HTML element
function buildVisualElement(visual) {
var labelElement = document.createElement("label");
labelElement.setAttribute("class", "checkboxContainer checked");
var inputElement = document.createElement("input");
inputElement.setAttribute("type", "checkbox");
inputElement.setAttribute("id", 'visual_' + visual.name);
inputElement.setAttribute("value", visual.name);
inputElement.setAttribute("onclick", "onCheckboxClicked(this);");
inputElement.setAttribute("checked", "true");
labelElement.appendChild(inputElement);
var spanElement = document.createElement("span");
spanElement.setAttribute("class", "checkboxCheckmark");
labelElement.appendChild(spanElement);
var secondSpanElement = document.createElement("span");
secondSpanElement.setAttribute("class", "checkboxTitle");
var checkboxTitleElement = document.createTextNode(visual.title);
secondSpanElement.appendChild(checkboxTitleElement);
labelElement.appendChild(secondSpanElement);
return labelElement;
}
// Set clicked columns button active
function setColumnButtonActive(num) {
const active_btn_class = "active-columns-btn";
$('#btnOneCol').removeClass(active_btn_class);
$('#btnTwoCols').removeClass(active_btn_class);
$('#btnThreeCols').removeClass(active_btn_class);
if (num === ColumnsNumber.Three) {
$('#btnThreeCols').addClass(active_btn_class);
} else if (num === ColumnsNumber.Two) {
$('#btnTwoCols').addClass(active_btn_class);
} else {
$('#btnOneCol').addClass(active_btn_class);
}
}
| {
pageWidth = width + 2 * LayoutShowcaseConsts.margin;
// Check if page width is smaller than minimum width and update accordingly
if (pageWidth < LayoutShowcaseConsts.minPageWidth) {
pageWidth = LayoutShowcaseConsts.minPageWidth;
// Visuals width is set to fit minimum page width with margins on both sides
width = LayoutShowcaseConsts.minPageWidth - 2 * LayoutShowcaseConsts.margin;
}
} | conditional_block |
showcase_custom_layout.js |
const ColumnsNumber = {
One: 1,
Two: 2,
Three: 3
}
const LayoutShowcaseConsts = {
margin: 15,
minPageWidth: 270
}
var LayoutShowcaseState = {
columns: ColumnsNumber.Three,
layoutVisuals: null,
layoutReport: null,
layoutPageName: null
}
// Embed the report and retrieve all report visuals
function embedCustomLayoutReport() {
// Load custom layout report properties into session
LoadLayoutShowcaseReportIntoSession().then(function () {
// Get models. models contains enums that can be used
const models = window['powerbi-client'].models;
// Get embed application token from session
var accessToken = GetSession(SessionKeys.AccessToken);
// Get embed URL from session
var embedUrl = GetSession(SessionKeys.EmbedUrl);
// Get report Id from session
var embedReportId = GetSession(SessionKeys.EmbedId);
// We give the user View permissions
var permissions = models.Permissions.View;
// Embed configuration used to describe the what and how to embed
// This object is used when calling powerbi.embed
// This also includes settings and options such as filters
// You can find more information at https://github.com/Microsoft/PowerBI-JavaScript/wiki/Embed-Configuration-Details
var config= {
type: 'report',
tokenType: models.TokenType.Embed,
accessToken: accessToken,
embedUrl: embedUrl,
id: embedReportId,
permissions: permissions,
settings: {
filterPaneEnabled: false,
navContentPaneEnabled: false
}
};
// Get a reference to the embedded report HTML element
var embedContainer = $('#embedContainer')[0];
// Embed the report and display it within the div container
LayoutShowcaseState.layoutReport = powerbi.embed(embedContainer, config);
// Report.on will add an event handler for report loaded event
LayoutShowcaseState.layoutReport.on("loaded", function() {
// After report is loaded, we find the active page and get all the visuals on it
// Retrieve the page collection
LayoutShowcaseState.layoutReport.getPages().then(function (pages) {
// Retrieve active page
let activePage = jQuery.grep(pages, function (page) { return page.isActive })[0];
// Set layoutPageName to active page name
LayoutShowcaseState.layoutPageName = activePage.name;
// Retrieve active page visuals.
activePage.getVisuals().then(function (visuals) {
var reportVisuals = visuals.map(function (visual) {
return {
name: visual.name,
title: visual.title,
checked: true
};
});
// Create visuals array from the visuals of the active page
createVisualsArray(reportVisuals);
});
});
});
});
}
// Create visuals array from the report visuals and update the HTML
function createVisualsArray(reportVisuals) {
// Remove all visuals without titles (i.e cards)
LayoutShowcaseState.layoutVisuals = reportVisuals.filter(function (visual) {
return visual.title !== undefined;
});
// Clear visuals list div
$('#visualsList').empty();
// Build checkbox html list and insert the html code to visualsList div
for (let i = 0; i < LayoutShowcaseState.layoutVisuals.length; i++) {
$('#visualsList').append(buildVisualElement(LayoutShowcaseState.layoutVisuals[i]));
}
// Render all visuals
renderVisuals();
}
// Render all visuals with current configuration
function renderVisuals() {
// render only if report and visuals initialized
if (!LayoutShowcaseState.layoutReport || !LayoutShowcaseState.layoutVisuals)
return;
// Get models. models contains enums that can be used
const models = window['powerbi-client'].models;
// Get embedContainer width and height
let pageWidth = $('#embedContainer').width();
let pageHeight = $('#embedContainer').height();
// Calculating the overall width of the visuals in each row
let visualsTotalWidth = pageWidth - (LayoutShowcaseConsts.margin * (LayoutShowcaseState.columns + 1));
// Calculate the width of a single visual, according to the number of columns
// For one and three columns visuals width will be a third of visuals total width
let width = (LayoutShowcaseState.columns === ColumnsNumber.Two) ? (visualsTotalWidth / 2) : (visualsTotalWidth / 3);
// For one column, set page width to visual's width with margins
if (LayoutShowcaseState.columns === ColumnsNumber.One) {
pageWidth = width + 2 * LayoutShowcaseConsts.margin;
// Check if page width is smaller than minimum width and update accordingly
if (pageWidth < LayoutShowcaseConsts.minPageWidth) {
pageWidth = LayoutShowcaseConsts.minPageWidth;
// Visuals width is set to fit minimum page width with margins on both sides
width = LayoutShowcaseConsts.minPageWidth - 2 * LayoutShowcaseConsts.margin;
}
}
// Set visuals height according to width - 9:16 ratio
const height = width * (9 / 16);
// Visuals starting point
let x = LayoutShowcaseConsts.margin, y = LayoutShowcaseConsts.margin;
// Filter the visuals list to display only the checked visuals
let checkedVisuals = LayoutShowcaseState.layoutVisuals.filter(function (visual) { return visual.checked; });
// Calculate the number of lines
const lines = Math.ceil(checkedVisuals.length / LayoutShowcaseState.columns);
// Calculate page height with margins
pageHeight = Math.max(pageHeight, ((lines * height) + ((lines + 1) * LayoutShowcaseConsts.margin)));
// Building visualsLayout object
// You can find more information at https://github.com/Microsoft/PowerBI-JavaScript/wiki/Custom-Layout
let visualsLayout = {};
for (let i = 0; i < checkedVisuals.length; i++) {
visualsLayout[checkedVisuals[i].name] = {
x: x,
y: y,
width: width,
height: height,
displayState: {
// Change the selected visuals display mode to visible
mode: models.VisualContainerDisplayMode.Visible
}
}
// Calculating (x,y) position for the next visual
x += width + LayoutShowcaseConsts.margin;
if (x >= pageWidth) {
x = LayoutShowcaseConsts.margin;
y += height + LayoutShowcaseConsts.margin;
}
}
// Building pagesLayout object
let pagesLayout = {};
pagesLayout[LayoutShowcaseState.layoutPageName] = {
defaultLayout: {
displayState: {
// Default display mode for visuals is hidden
mode: models.VisualContainerDisplayMode.Hidden
}
},
visualsLayout: visualsLayout
};
// Building settings object
let settings = {
layoutType: models.LayoutType.Custom,
customLayout: {
pageSize: {
type: models.PageSizeType.Custom,
width: pageWidth - 10,
height: pageHeight - 20
},
displayOption: models.DisplayOption.FitToPage,
pagesLayout: pagesLayout
}
};
// If pageWidth or pageHeight is changed, change display option to actual size to add scroll bar
if (pageWidth !== $('#embedContainer').width() || pageHeight !== $('#embedContainer').height()) {
settings.customLayout.displayOption = models.DisplayOption.ActualSize;
}
// Change page background to transparent on Two / Three columns configuration
settings.background = (LayoutShowcaseState.columns === ColumnsNumber.One) ? models.BackgroundType.Default : models.BackgroundType.Transparent;
// Call updateSettings with the new settings object
LayoutShowcaseState.layoutReport.updateSettings(settings);
}
// Update the visuals list with the change and rerender all visuals
function onCheckboxClicked(checkbox) {
let visual = jQuery.grep(LayoutShowcaseState.layoutVisuals, function (visual) { return visual.name === checkbox.value })[0];
visual.checked = $(checkbox).is(':checked');
renderVisuals();
};
// Update columns number and rerender the visuals
function onColumnsClicked(num) {
LayoutShowcaseState.columns = num;
setColumnButtonActive(num);
renderVisuals();
}
// Build visual checkbox HTML element
function buildVisualElement(visual) {
var labelElement = document.createElement("label");
labelElement.setAttribute("class", "checkboxContainer checked");
var inputElement = document.createElement("input");
inputElement.setAttribute("type", "checkbox");
inputElement.setAttribute("id", 'visual_' + visual.name);
inputElement.setAttribute("value", visual.name);
inputElement.setAttribute("onclick", "onCheckboxClicked(this);");
inputElement.setAttribute("checked", "true");
labelElement.appendChild(inputElement);
var spanElement = document.createElement("span");
spanElement.setAttribute("class", "checkboxCheckmark");
labelElement.appendChild(spanElement);
var secondSpanElement = document.createElement("span");
secondSpanElement.setAttribute("class", "checkboxTitle");
var checkboxTitleElement = document.createTextNode(visual.title);
secondSpanElement.appendChild(checkboxTitleElement);
labelElement.appendChild(secondSpanElement);
return labelElement;
}
// Set clicked columns button active
function | (num) {
const active_btn_class = "active-columns-btn";
$('#btnOneCol').removeClass(active_btn_class);
$('#btnTwoCols').removeClass(active_btn_class);
$('#btnThreeCols').removeClass(active_btn_class);
if (num === ColumnsNumber.Three) {
$('#btnThreeCols').addClass(active_btn_class);
} else if (num === ColumnsNumber.Two) {
$('#btnTwoCols').addClass(active_btn_class);
} else {
$('#btnOneCol').addClass(active_btn_class);
}
}
| setColumnButtonActive | identifier_name |
driver_ceph.go | package drivers
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os/exec"
"strings"
"github.com/lxc/incus/incusd/migration"
"github.com/lxc/incus/incusd/operations"
"github.com/lxc/incus/incusd/revert"
"github.com/lxc/incus/shared"
"github.com/lxc/incus/shared/api"
"github.com/lxc/incus/shared/logger"
"github.com/lxc/incus/shared/units"
"github.com/lxc/incus/shared/validate"
)
var cephVersion string
var cephLoaded bool
type ceph struct {
common
}
// load is used to run one-time action per-driver rather than per-pool.
func (d *ceph) load() error {
// Register the patches.
d.patches = map[string]func() error{
"storage_lvm_skipactivation": nil,
"storage_missing_snapshot_records": nil,
"storage_delete_old_snapshot_records": nil,
"storage_zfs_drop_block_volume_filesystem_extension": nil,
"storage_prefix_bucket_names_with_project": nil,
}
// Done if previously loaded.
if cephLoaded {
return nil
}
// Validate the required binaries.
for _, tool := range []string{"ceph", "rbd"} {
_, err := exec.LookPath(tool)
if err != nil {
return fmt.Errorf("Required tool '%s' is missing", tool)
}
}
// Detect and record the version.
if cephVersion == "" {
out, err := shared.RunCommand("rbd", "--version")
if err != nil {
return err
}
out = strings.TrimSpace(out)
fields := strings.Split(out, " ")
if strings.HasPrefix(out, "ceph version ") && len(fields) > 2 {
cephVersion = fields[2]
} else {
cephVersion = out
}
}
cephLoaded = true
return nil
}
// isRemote returns true indicating this driver uses remote storage.
func (d *ceph) isRemote() bool {
return true
}
// Info returns info about the driver and its environment.
func (d *ceph) Info() Info {
return Info{
Name: "ceph",
Version: cephVersion,
OptimizedImages: true,
PreservesInodes: false,
Remote: d.isRemote(),
VolumeTypes: []VolumeType{VolumeTypeCustom, VolumeTypeImage, VolumeTypeContainer, VolumeTypeVM},
BlockBacking: true,
RunningCopyFreeze: true,
DirectIO: true,
IOUring: true,
MountedRoot: false,
}
}
// getPlaceholderVolume returns the volume used to indicate if the pool is in use.
func (d *ceph) getPlaceholderVolume() Volume {
return NewVolume(d, d.name, VolumeType("incus"), ContentTypeFS, d.config["ceph.osd.pool_name"], nil, nil)
}
// FillConfig populates the storage pool's configuration file with the default values.
func (d *ceph) FillConfig() error {
if d.config["ceph.cluster_name"] == "" {
d.config["ceph.cluster_name"] = CephDefaultCluster
}
if d.config["ceph.user.name"] == "" {
d.config["ceph.user.name"] = CephDefaultUser
}
if d.config["ceph.osd.pg_num"] == "" {
d.config["ceph.osd.pg_num"] = "32"
}
return nil
}
// Create is called during pool creation and is effectively using an empty driver struct.
// WARNING: The Create() function cannot rely on any of the struct attributes being set.
func (d *ceph) Create() error {
revert := revert.New()
defer revert.Fail()
d.config["volatile.initial_source"] = d.config["source"]
err := d.FillConfig()
if err != nil |
// Validate.
_, err = units.ParseByteSizeString(d.config["ceph.osd.pg_num"])
if err != nil {
return err
}
// Quick check.
if d.config["source"] != "" && d.config["ceph.osd.pool_name"] != "" && d.config["source"] != d.config["ceph.osd.pool_name"] {
return fmt.Errorf(`The "source" and "ceph.osd.pool_name" property must not differ for Ceph OSD storage pools`)
}
// Use an existing OSD pool.
if d.config["source"] != "" {
d.config["ceph.osd.pool_name"] = d.config["source"]
}
if d.config["ceph.osd.pool_name"] == "" {
d.config["ceph.osd.pool_name"] = d.name
d.config["source"] = d.name
}
placeholderVol := d.getPlaceholderVolume()
if !d.osdPoolExists() {
// Create new osd pool.
_, err := shared.TryRunCommand("ceph",
"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
"--cluster", d.config["ceph.cluster_name"],
"osd",
"pool",
"create",
d.config["ceph.osd.pool_name"],
d.config["ceph.osd.pg_num"])
if err != nil {
return err
}
revert.Add(func() { _ = d.osdDeletePool() })
// Initialize the pool. This is not necessary but allows the pool to be monitored.
_, err = shared.TryRunCommand("rbd",
"--id", d.config["ceph.user.name"],
"--cluster", d.config["ceph.cluster_name"],
"pool",
"init",
d.config["ceph.osd.pool_name"])
if err != nil {
d.logger.Warn("Failed to initialize pool", logger.Ctx{"pool": d.config["ceph.osd.pool_name"], "cluster": d.config["ceph.cluster_name"]})
}
// Create placeholder storage volume. Other instances will use this to detect whether this osd
// pool is already in use by another instance.
err = d.rbdCreateVolume(placeholderVol, "0")
if err != nil {
return err
}
d.config["volatile.pool.pristine"] = "true"
} else {
volExists, err := d.HasVolume(placeholderVol)
if err != nil {
return err
}
if volExists {
// ceph.osd.force_reuse is deprecated and should not be used. OSD pools are a logical
// construct there is no good reason not to create one for dedicated use by the daemon.
if shared.IsFalseOrEmpty(d.config["ceph.osd.force_reuse"]) {
return fmt.Errorf("Pool '%s' in cluster '%s' seems to be in use by another Incus instance. Use 'ceph.osd.force_reuse=true' to force", d.config["ceph.osd.pool_name"], d.config["ceph.cluster_name"])
}
d.config["volatile.pool.pristine"] = "false"
} else {
// Create placeholder storage volume. Other instances will use this to detect whether this osd
// pool is already in use by another instance.
err := d.rbdCreateVolume(placeholderVol, "0")
if err != nil {
return err
}
d.config["volatile.pool.pristine"] = "true"
}
// Use existing OSD pool.
msg, err := shared.RunCommand("ceph",
"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
"--cluster", d.config["ceph.cluster_name"],
"osd",
"pool",
"get",
d.config["ceph.osd.pool_name"],
"pg_num")
if err != nil {
return err
}
idx := strings.Index(msg, "pg_num:")
if idx == -1 {
return fmt.Errorf("Failed to parse number of placement groups for pool: %s", msg)
}
msg = msg[(idx + len("pg_num:")):]
msg = strings.TrimSpace(msg)
// It is ok to update the pool configuration since storage pool
// creation via API is implemented such that the storage pool is
// checked for a changed config after this function returns and
// if so the db for it is updated.
d.config["ceph.osd.pg_num"] = msg
}
revert.Success()
return nil
}
// Delete removes the storage pool from the storage device.
func (d *ceph) Delete(op *operations.Operation) error {
// Test if the pool exists.
poolExists := d.osdPoolExists()
if !poolExists {
d.logger.Warn("Pool does not exist", logger.Ctx{"pool": d.config["ceph.osd.pool_name"], "cluster": d.config["ceph.cluster_name"]})
}
// Check whether we own the pool and only remove in this case.
if shared.IsTrue(d.config["volatile.pool.pristine"]) {
// Delete the osd pool.
if poolExists {
err := d.osdDeletePool()
if err != nil {
return err
}
}
}
// If the user completely destroyed it, call it done.
if !shared.PathExists(GetPoolMountPath(d.name)) {
return nil
}
// On delete, wipe everything in the directory.
err := wipeDirectory(GetPoolMountPath(d.name))
if err != nil {
return err
}
return nil
}
// Validate checks that all provide keys are supported and that no conflicting or missing configuration is present.
func (d *ceph) Validate(config map[string]string) error {
rules := map[string]func(value string) error{
"ceph.cluster_name": validate.IsAny,
"ceph.osd.force_reuse": validate.Optional(validate.IsBool), // Deprecated, should not be used.
"ceph.osd.pg_num": validate.IsAny,
"ceph.osd.pool_name": validate.IsAny,
"ceph.osd.data_pool_name": validate.IsAny,
"ceph.rbd.clone_copy": validate.Optional(validate.IsBool),
"ceph.rbd.du": validate.Optional(validate.IsBool),
"ceph.rbd.features": validate.IsAny,
"ceph.user.name": validate.IsAny,
"volatile.pool.pristine": validate.IsAny,
}
return d.validatePool(config, rules, d.commonVolumeRules())
}
// Update applies any driver changes required from a configuration change.
func (d *ceph) Update(changedConfig map[string]string) error {
return nil
}
// Mount mounts the storage pool.
func (d *ceph) Mount() (bool, error) {
placeholderVol := d.getPlaceholderVolume()
volExists, err := d.HasVolume(placeholderVol)
if err != nil {
return false, err
}
if !volExists {
return false, fmt.Errorf("Placeholder volume does not exist")
}
return true, nil
}
// Unmount unmounts the storage pool.
func (d *ceph) Unmount() (bool, error) {
// Nothing to do here.
return true, nil
}
// GetResources returns the pool resource usage information.
func (d *ceph) GetResources() (*api.ResourcesStoragePool, error) {
var stdout bytes.Buffer
err := shared.RunCommandWithFds(context.TODO(), nil, &stdout,
"ceph",
"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
"--cluster", d.config["ceph.cluster_name"],
"df",
"-f", "json")
if err != nil {
return nil, err
}
// Temporary structs for parsing.
type cephDfPoolStats struct {
BytesUsed int64 `json:"bytes_used"`
BytesAvailable int64 `json:"max_avail"`
}
type cephDfPool struct {
Name string `json:"name"`
Stats cephDfPoolStats `json:"stats"`
}
type cephDf struct {
Pools []cephDfPool `json:"pools"`
}
// Parse the JSON output.
df := cephDf{}
err = json.NewDecoder(&stdout).Decode(&df)
if err != nil {
return nil, err
}
var pool *cephDfPool
for _, entry := range df.Pools {
if entry.Name == d.config["ceph.osd.pool_name"] {
pool = &entry
break
}
}
if pool == nil {
return nil, fmt.Errorf("OSD pool missing in df output")
}
spaceUsed := uint64(pool.Stats.BytesUsed)
spaceAvailable := uint64(pool.Stats.BytesAvailable)
res := api.ResourcesStoragePool{}
res.Space.Total = spaceAvailable + spaceUsed
res.Space.Used = spaceUsed
return &res, nil
}
// MigrationType returns the type of transfer methods to be used when doing migrations between pools in preference order.
func (d *ceph) MigrationTypes(contentType ContentType, refresh bool, copySnapshots bool) []migration.Type {
var rsyncFeatures []string
// Do not pass compression argument to rsync if the associated
// config key, that is rsync.compression, is set to false.
if shared.IsFalse(d.Config()["rsync.compression"]) {
rsyncFeatures = []string{"xattrs", "delete", "bidirectional"}
} else {
rsyncFeatures = []string{"xattrs", "delete", "compress", "bidirectional"}
}
if refresh {
var transportType migration.MigrationFSType
if IsContentBlock(contentType) {
transportType = migration.MigrationFSType_BLOCK_AND_RSYNC
} else {
transportType = migration.MigrationFSType_RSYNC
}
return []migration.Type{
{
FSType: transportType,
Features: rsyncFeatures,
},
}
}
if contentType == ContentTypeBlock {
return []migration.Type{
{
FSType: migration.MigrationFSType_RBD,
},
{
FSType: migration.MigrationFSType_BLOCK_AND_RSYNC,
Features: rsyncFeatures,
},
}
}
return []migration.Type{
{
FSType: migration.MigrationFSType_RBD,
},
{
FSType: migration.MigrationFSType_RSYNC,
Features: rsyncFeatures,
},
}
}
| {
return err
} | conditional_block |
driver_ceph.go | package drivers
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os/exec"
"strings"
"github.com/lxc/incus/incusd/migration"
"github.com/lxc/incus/incusd/operations"
"github.com/lxc/incus/incusd/revert"
"github.com/lxc/incus/shared"
"github.com/lxc/incus/shared/api"
"github.com/lxc/incus/shared/logger"
"github.com/lxc/incus/shared/units"
"github.com/lxc/incus/shared/validate"
)
var cephVersion string
var cephLoaded bool
type ceph struct {
common
}
// load is used to run one-time action per-driver rather than per-pool.
func (d *ceph) load() error {
// Register the patches.
d.patches = map[string]func() error{
"storage_lvm_skipactivation": nil,
"storage_missing_snapshot_records": nil,
"storage_delete_old_snapshot_records": nil,
"storage_zfs_drop_block_volume_filesystem_extension": nil,
"storage_prefix_bucket_names_with_project": nil,
}
// Done if previously loaded.
if cephLoaded {
return nil
}
// Validate the required binaries.
for _, tool := range []string{"ceph", "rbd"} {
_, err := exec.LookPath(tool)
if err != nil {
return fmt.Errorf("Required tool '%s' is missing", tool)
}
}
// Detect and record the version.
if cephVersion == "" {
out, err := shared.RunCommand("rbd", "--version")
if err != nil {
return err
}
out = strings.TrimSpace(out)
fields := strings.Split(out, " ")
if strings.HasPrefix(out, "ceph version ") && len(fields) > 2 {
cephVersion = fields[2]
} else {
cephVersion = out
}
}
cephLoaded = true
return nil
}
// isRemote returns true indicating this driver uses remote storage.
func (d *ceph) isRemote() bool {
return true
}
// Info returns info about the driver and its environment.
func (d *ceph) | () Info {
return Info{
Name: "ceph",
Version: cephVersion,
OptimizedImages: true,
PreservesInodes: false,
Remote: d.isRemote(),
VolumeTypes: []VolumeType{VolumeTypeCustom, VolumeTypeImage, VolumeTypeContainer, VolumeTypeVM},
BlockBacking: true,
RunningCopyFreeze: true,
DirectIO: true,
IOUring: true,
MountedRoot: false,
}
}
// getPlaceholderVolume returns the volume used to indicate if the pool is in use.
func (d *ceph) getPlaceholderVolume() Volume {
return NewVolume(d, d.name, VolumeType("incus"), ContentTypeFS, d.config["ceph.osd.pool_name"], nil, nil)
}
// FillConfig populates the storage pool's configuration file with the default values.
func (d *ceph) FillConfig() error {
if d.config["ceph.cluster_name"] == "" {
d.config["ceph.cluster_name"] = CephDefaultCluster
}
if d.config["ceph.user.name"] == "" {
d.config["ceph.user.name"] = CephDefaultUser
}
if d.config["ceph.osd.pg_num"] == "" {
d.config["ceph.osd.pg_num"] = "32"
}
return nil
}
// Create is called during pool creation and is effectively using an empty driver struct.
// WARNING: The Create() function cannot rely on any of the struct attributes being set.
func (d *ceph) Create() error {
revert := revert.New()
defer revert.Fail()
d.config["volatile.initial_source"] = d.config["source"]
err := d.FillConfig()
if err != nil {
return err
}
// Validate.
_, err = units.ParseByteSizeString(d.config["ceph.osd.pg_num"])
if err != nil {
return err
}
// Quick check.
if d.config["source"] != "" && d.config["ceph.osd.pool_name"] != "" && d.config["source"] != d.config["ceph.osd.pool_name"] {
return fmt.Errorf(`The "source" and "ceph.osd.pool_name" property must not differ for Ceph OSD storage pools`)
}
// Use an existing OSD pool.
if d.config["source"] != "" {
d.config["ceph.osd.pool_name"] = d.config["source"]
}
if d.config["ceph.osd.pool_name"] == "" {
d.config["ceph.osd.pool_name"] = d.name
d.config["source"] = d.name
}
placeholderVol := d.getPlaceholderVolume()
if !d.osdPoolExists() {
// Create new osd pool.
_, err := shared.TryRunCommand("ceph",
"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
"--cluster", d.config["ceph.cluster_name"],
"osd",
"pool",
"create",
d.config["ceph.osd.pool_name"],
d.config["ceph.osd.pg_num"])
if err != nil {
return err
}
revert.Add(func() { _ = d.osdDeletePool() })
// Initialize the pool. This is not necessary but allows the pool to be monitored.
_, err = shared.TryRunCommand("rbd",
"--id", d.config["ceph.user.name"],
"--cluster", d.config["ceph.cluster_name"],
"pool",
"init",
d.config["ceph.osd.pool_name"])
if err != nil {
d.logger.Warn("Failed to initialize pool", logger.Ctx{"pool": d.config["ceph.osd.pool_name"], "cluster": d.config["ceph.cluster_name"]})
}
// Create placeholder storage volume. Other instances will use this to detect whether this osd
// pool is already in use by another instance.
err = d.rbdCreateVolume(placeholderVol, "0")
if err != nil {
return err
}
d.config["volatile.pool.pristine"] = "true"
} else {
volExists, err := d.HasVolume(placeholderVol)
if err != nil {
return err
}
if volExists {
// ceph.osd.force_reuse is deprecated and should not be used. OSD pools are a logical
// construct there is no good reason not to create one for dedicated use by the daemon.
if shared.IsFalseOrEmpty(d.config["ceph.osd.force_reuse"]) {
return fmt.Errorf("Pool '%s' in cluster '%s' seems to be in use by another Incus instance. Use 'ceph.osd.force_reuse=true' to force", d.config["ceph.osd.pool_name"], d.config["ceph.cluster_name"])
}
d.config["volatile.pool.pristine"] = "false"
} else {
// Create placeholder storage volume. Other instances will use this to detect whether this osd
// pool is already in use by another instance.
err := d.rbdCreateVolume(placeholderVol, "0")
if err != nil {
return err
}
d.config["volatile.pool.pristine"] = "true"
}
// Use existing OSD pool.
msg, err := shared.RunCommand("ceph",
"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
"--cluster", d.config["ceph.cluster_name"],
"osd",
"pool",
"get",
d.config["ceph.osd.pool_name"],
"pg_num")
if err != nil {
return err
}
idx := strings.Index(msg, "pg_num:")
if idx == -1 {
return fmt.Errorf("Failed to parse number of placement groups for pool: %s", msg)
}
msg = msg[(idx + len("pg_num:")):]
msg = strings.TrimSpace(msg)
// It is ok to update the pool configuration since storage pool
// creation via API is implemented such that the storage pool is
// checked for a changed config after this function returns and
// if so the db for it is updated.
d.config["ceph.osd.pg_num"] = msg
}
revert.Success()
return nil
}
// Delete removes the storage pool from the storage device.
func (d *ceph) Delete(op *operations.Operation) error {
// Test if the pool exists.
poolExists := d.osdPoolExists()
if !poolExists {
d.logger.Warn("Pool does not exist", logger.Ctx{"pool": d.config["ceph.osd.pool_name"], "cluster": d.config["ceph.cluster_name"]})
}
// Check whether we own the pool and only remove in this case.
if shared.IsTrue(d.config["volatile.pool.pristine"]) {
// Delete the osd pool.
if poolExists {
err := d.osdDeletePool()
if err != nil {
return err
}
}
}
// If the user completely destroyed it, call it done.
if !shared.PathExists(GetPoolMountPath(d.name)) {
return nil
}
// On delete, wipe everything in the directory.
err := wipeDirectory(GetPoolMountPath(d.name))
if err != nil {
return err
}
return nil
}
// Validate checks that all provide keys are supported and that no conflicting or missing configuration is present.
func (d *ceph) Validate(config map[string]string) error {
rules := map[string]func(value string) error{
"ceph.cluster_name": validate.IsAny,
"ceph.osd.force_reuse": validate.Optional(validate.IsBool), // Deprecated, should not be used.
"ceph.osd.pg_num": validate.IsAny,
"ceph.osd.pool_name": validate.IsAny,
"ceph.osd.data_pool_name": validate.IsAny,
"ceph.rbd.clone_copy": validate.Optional(validate.IsBool),
"ceph.rbd.du": validate.Optional(validate.IsBool),
"ceph.rbd.features": validate.IsAny,
"ceph.user.name": validate.IsAny,
"volatile.pool.pristine": validate.IsAny,
}
return d.validatePool(config, rules, d.commonVolumeRules())
}
// Update applies any driver changes required from a configuration change.
func (d *ceph) Update(changedConfig map[string]string) error {
return nil
}
// Mount mounts the storage pool.
func (d *ceph) Mount() (bool, error) {
placeholderVol := d.getPlaceholderVolume()
volExists, err := d.HasVolume(placeholderVol)
if err != nil {
return false, err
}
if !volExists {
return false, fmt.Errorf("Placeholder volume does not exist")
}
return true, nil
}
// Unmount unmounts the storage pool.
func (d *ceph) Unmount() (bool, error) {
// Nothing to do here.
return true, nil
}
// GetResources returns the pool resource usage information.
func (d *ceph) GetResources() (*api.ResourcesStoragePool, error) {
var stdout bytes.Buffer
err := shared.RunCommandWithFds(context.TODO(), nil, &stdout,
"ceph",
"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
"--cluster", d.config["ceph.cluster_name"],
"df",
"-f", "json")
if err != nil {
return nil, err
}
// Temporary structs for parsing.
type cephDfPoolStats struct {
BytesUsed int64 `json:"bytes_used"`
BytesAvailable int64 `json:"max_avail"`
}
type cephDfPool struct {
Name string `json:"name"`
Stats cephDfPoolStats `json:"stats"`
}
type cephDf struct {
Pools []cephDfPool `json:"pools"`
}
// Parse the JSON output.
df := cephDf{}
err = json.NewDecoder(&stdout).Decode(&df)
if err != nil {
return nil, err
}
var pool *cephDfPool
for _, entry := range df.Pools {
if entry.Name == d.config["ceph.osd.pool_name"] {
pool = &entry
break
}
}
if pool == nil {
return nil, fmt.Errorf("OSD pool missing in df output")
}
spaceUsed := uint64(pool.Stats.BytesUsed)
spaceAvailable := uint64(pool.Stats.BytesAvailable)
res := api.ResourcesStoragePool{}
res.Space.Total = spaceAvailable + spaceUsed
res.Space.Used = spaceUsed
return &res, nil
}
// MigrationType returns the type of transfer methods to be used when doing migrations between pools in preference order.
func (d *ceph) MigrationTypes(contentType ContentType, refresh bool, copySnapshots bool) []migration.Type {
var rsyncFeatures []string
// Do not pass compression argument to rsync if the associated
// config key, that is rsync.compression, is set to false.
if shared.IsFalse(d.Config()["rsync.compression"]) {
rsyncFeatures = []string{"xattrs", "delete", "bidirectional"}
} else {
rsyncFeatures = []string{"xattrs", "delete", "compress", "bidirectional"}
}
if refresh {
var transportType migration.MigrationFSType
if IsContentBlock(contentType) {
transportType = migration.MigrationFSType_BLOCK_AND_RSYNC
} else {
transportType = migration.MigrationFSType_RSYNC
}
return []migration.Type{
{
FSType: transportType,
Features: rsyncFeatures,
},
}
}
if contentType == ContentTypeBlock {
return []migration.Type{
{
FSType: migration.MigrationFSType_RBD,
},
{
FSType: migration.MigrationFSType_BLOCK_AND_RSYNC,
Features: rsyncFeatures,
},
}
}
return []migration.Type{
{
FSType: migration.MigrationFSType_RBD,
},
{
FSType: migration.MigrationFSType_RSYNC,
Features: rsyncFeatures,
},
}
}
| Info | identifier_name |
driver_ceph.go | package drivers
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os/exec"
"strings"
"github.com/lxc/incus/incusd/migration"
"github.com/lxc/incus/incusd/operations"
"github.com/lxc/incus/incusd/revert"
"github.com/lxc/incus/shared"
"github.com/lxc/incus/shared/api"
"github.com/lxc/incus/shared/logger"
"github.com/lxc/incus/shared/units"
"github.com/lxc/incus/shared/validate"
)
var cephVersion string
var cephLoaded bool
type ceph struct {
common | }
// load is used to run one-time action per-driver rather than per-pool.
func (d *ceph) load() error {
// Register the patches.
d.patches = map[string]func() error{
"storage_lvm_skipactivation": nil,
"storage_missing_snapshot_records": nil,
"storage_delete_old_snapshot_records": nil,
"storage_zfs_drop_block_volume_filesystem_extension": nil,
"storage_prefix_bucket_names_with_project": nil,
}
// Done if previously loaded.
if cephLoaded {
return nil
}
// Validate the required binaries.
for _, tool := range []string{"ceph", "rbd"} {
_, err := exec.LookPath(tool)
if err != nil {
return fmt.Errorf("Required tool '%s' is missing", tool)
}
}
// Detect and record the version.
if cephVersion == "" {
out, err := shared.RunCommand("rbd", "--version")
if err != nil {
return err
}
out = strings.TrimSpace(out)
fields := strings.Split(out, " ")
if strings.HasPrefix(out, "ceph version ") && len(fields) > 2 {
cephVersion = fields[2]
} else {
cephVersion = out
}
}
cephLoaded = true
return nil
}
// isRemote returns true indicating this driver uses remote storage.
func (d *ceph) isRemote() bool {
return true
}
// Info returns info about the driver and its environment.
func (d *ceph) Info() Info {
return Info{
Name: "ceph",
Version: cephVersion,
OptimizedImages: true,
PreservesInodes: false,
Remote: d.isRemote(),
VolumeTypes: []VolumeType{VolumeTypeCustom, VolumeTypeImage, VolumeTypeContainer, VolumeTypeVM},
BlockBacking: true,
RunningCopyFreeze: true,
DirectIO: true,
IOUring: true,
MountedRoot: false,
}
}
// getPlaceholderVolume returns the volume used to indicate if the pool is in use.
func (d *ceph) getPlaceholderVolume() Volume {
return NewVolume(d, d.name, VolumeType("incus"), ContentTypeFS, d.config["ceph.osd.pool_name"], nil, nil)
}
// FillConfig populates the storage pool's configuration file with the default values.
func (d *ceph) FillConfig() error {
if d.config["ceph.cluster_name"] == "" {
d.config["ceph.cluster_name"] = CephDefaultCluster
}
if d.config["ceph.user.name"] == "" {
d.config["ceph.user.name"] = CephDefaultUser
}
if d.config["ceph.osd.pg_num"] == "" {
d.config["ceph.osd.pg_num"] = "32"
}
return nil
}
// Create is called during pool creation and is effectively using an empty driver struct.
// WARNING: The Create() function cannot rely on any of the struct attributes being set.
func (d *ceph) Create() error {
revert := revert.New()
defer revert.Fail()
d.config["volatile.initial_source"] = d.config["source"]
err := d.FillConfig()
if err != nil {
return err
}
// Validate.
_, err = units.ParseByteSizeString(d.config["ceph.osd.pg_num"])
if err != nil {
return err
}
// Quick check.
if d.config["source"] != "" && d.config["ceph.osd.pool_name"] != "" && d.config["source"] != d.config["ceph.osd.pool_name"] {
return fmt.Errorf(`The "source" and "ceph.osd.pool_name" property must not differ for Ceph OSD storage pools`)
}
// Use an existing OSD pool.
if d.config["source"] != "" {
d.config["ceph.osd.pool_name"] = d.config["source"]
}
if d.config["ceph.osd.pool_name"] == "" {
d.config["ceph.osd.pool_name"] = d.name
d.config["source"] = d.name
}
placeholderVol := d.getPlaceholderVolume()
if !d.osdPoolExists() {
// Create new osd pool.
_, err := shared.TryRunCommand("ceph",
"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
"--cluster", d.config["ceph.cluster_name"],
"osd",
"pool",
"create",
d.config["ceph.osd.pool_name"],
d.config["ceph.osd.pg_num"])
if err != nil {
return err
}
revert.Add(func() { _ = d.osdDeletePool() })
// Initialize the pool. This is not necessary but allows the pool to be monitored.
_, err = shared.TryRunCommand("rbd",
"--id", d.config["ceph.user.name"],
"--cluster", d.config["ceph.cluster_name"],
"pool",
"init",
d.config["ceph.osd.pool_name"])
if err != nil {
d.logger.Warn("Failed to initialize pool", logger.Ctx{"pool": d.config["ceph.osd.pool_name"], "cluster": d.config["ceph.cluster_name"]})
}
// Create placeholder storage volume. Other instances will use this to detect whether this osd
// pool is already in use by another instance.
err = d.rbdCreateVolume(placeholderVol, "0")
if err != nil {
return err
}
d.config["volatile.pool.pristine"] = "true"
} else {
volExists, err := d.HasVolume(placeholderVol)
if err != nil {
return err
}
if volExists {
// ceph.osd.force_reuse is deprecated and should not be used. OSD pools are a logical
// construct there is no good reason not to create one for dedicated use by the daemon.
if shared.IsFalseOrEmpty(d.config["ceph.osd.force_reuse"]) {
return fmt.Errorf("Pool '%s' in cluster '%s' seems to be in use by another Incus instance. Use 'ceph.osd.force_reuse=true' to force", d.config["ceph.osd.pool_name"], d.config["ceph.cluster_name"])
}
d.config["volatile.pool.pristine"] = "false"
} else {
// Create placeholder storage volume. Other instances will use this to detect whether this osd
// pool is already in use by another instance.
err := d.rbdCreateVolume(placeholderVol, "0")
if err != nil {
return err
}
d.config["volatile.pool.pristine"] = "true"
}
// Use existing OSD pool.
msg, err := shared.RunCommand("ceph",
"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
"--cluster", d.config["ceph.cluster_name"],
"osd",
"pool",
"get",
d.config["ceph.osd.pool_name"],
"pg_num")
if err != nil {
return err
}
idx := strings.Index(msg, "pg_num:")
if idx == -1 {
return fmt.Errorf("Failed to parse number of placement groups for pool: %s", msg)
}
msg = msg[(idx + len("pg_num:")):]
msg = strings.TrimSpace(msg)
// It is ok to update the pool configuration since storage pool
// creation via API is implemented such that the storage pool is
// checked for a changed config after this function returns and
// if so the db for it is updated.
d.config["ceph.osd.pg_num"] = msg
}
revert.Success()
return nil
}
// Delete removes the storage pool from the storage device.
func (d *ceph) Delete(op *operations.Operation) error {
// Test if the pool exists.
poolExists := d.osdPoolExists()
if !poolExists {
d.logger.Warn("Pool does not exist", logger.Ctx{"pool": d.config["ceph.osd.pool_name"], "cluster": d.config["ceph.cluster_name"]})
}
// Check whether we own the pool and only remove in this case.
if shared.IsTrue(d.config["volatile.pool.pristine"]) {
// Delete the osd pool.
if poolExists {
err := d.osdDeletePool()
if err != nil {
return err
}
}
}
// If the user completely destroyed it, call it done.
if !shared.PathExists(GetPoolMountPath(d.name)) {
return nil
}
// On delete, wipe everything in the directory.
err := wipeDirectory(GetPoolMountPath(d.name))
if err != nil {
return err
}
return nil
}
// Validate checks that all provide keys are supported and that no conflicting or missing configuration is present.
func (d *ceph) Validate(config map[string]string) error {
rules := map[string]func(value string) error{
"ceph.cluster_name": validate.IsAny,
"ceph.osd.force_reuse": validate.Optional(validate.IsBool), // Deprecated, should not be used.
"ceph.osd.pg_num": validate.IsAny,
"ceph.osd.pool_name": validate.IsAny,
"ceph.osd.data_pool_name": validate.IsAny,
"ceph.rbd.clone_copy": validate.Optional(validate.IsBool),
"ceph.rbd.du": validate.Optional(validate.IsBool),
"ceph.rbd.features": validate.IsAny,
"ceph.user.name": validate.IsAny,
"volatile.pool.pristine": validate.IsAny,
}
return d.validatePool(config, rules, d.commonVolumeRules())
}
// Update applies any driver changes required from a configuration change.
func (d *ceph) Update(changedConfig map[string]string) error {
return nil
}
// Mount mounts the storage pool.
func (d *ceph) Mount() (bool, error) {
placeholderVol := d.getPlaceholderVolume()
volExists, err := d.HasVolume(placeholderVol)
if err != nil {
return false, err
}
if !volExists {
return false, fmt.Errorf("Placeholder volume does not exist")
}
return true, nil
}
// Unmount unmounts the storage pool.
func (d *ceph) Unmount() (bool, error) {
// Nothing to do here.
return true, nil
}
// GetResources returns the pool resource usage information.
func (d *ceph) GetResources() (*api.ResourcesStoragePool, error) {
var stdout bytes.Buffer
err := shared.RunCommandWithFds(context.TODO(), nil, &stdout,
"ceph",
"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
"--cluster", d.config["ceph.cluster_name"],
"df",
"-f", "json")
if err != nil {
return nil, err
}
// Temporary structs for parsing.
type cephDfPoolStats struct {
BytesUsed int64 `json:"bytes_used"`
BytesAvailable int64 `json:"max_avail"`
}
type cephDfPool struct {
Name string `json:"name"`
Stats cephDfPoolStats `json:"stats"`
}
type cephDf struct {
Pools []cephDfPool `json:"pools"`
}
// Parse the JSON output.
df := cephDf{}
err = json.NewDecoder(&stdout).Decode(&df)
if err != nil {
return nil, err
}
var pool *cephDfPool
for _, entry := range df.Pools {
if entry.Name == d.config["ceph.osd.pool_name"] {
pool = &entry
break
}
}
if pool == nil {
return nil, fmt.Errorf("OSD pool missing in df output")
}
spaceUsed := uint64(pool.Stats.BytesUsed)
spaceAvailable := uint64(pool.Stats.BytesAvailable)
res := api.ResourcesStoragePool{}
res.Space.Total = spaceAvailable + spaceUsed
res.Space.Used = spaceUsed
return &res, nil
}
// MigrationType returns the type of transfer methods to be used when doing migrations between pools in preference order.
func (d *ceph) MigrationTypes(contentType ContentType, refresh bool, copySnapshots bool) []migration.Type {
var rsyncFeatures []string
// Do not pass compression argument to rsync if the associated
// config key, that is rsync.compression, is set to false.
if shared.IsFalse(d.Config()["rsync.compression"]) {
rsyncFeatures = []string{"xattrs", "delete", "bidirectional"}
} else {
rsyncFeatures = []string{"xattrs", "delete", "compress", "bidirectional"}
}
if refresh {
var transportType migration.MigrationFSType
if IsContentBlock(contentType) {
transportType = migration.MigrationFSType_BLOCK_AND_RSYNC
} else {
transportType = migration.MigrationFSType_RSYNC
}
return []migration.Type{
{
FSType: transportType,
Features: rsyncFeatures,
},
}
}
if contentType == ContentTypeBlock {
return []migration.Type{
{
FSType: migration.MigrationFSType_RBD,
},
{
FSType: migration.MigrationFSType_BLOCK_AND_RSYNC,
Features: rsyncFeatures,
},
}
}
return []migration.Type{
{
FSType: migration.MigrationFSType_RBD,
},
{
FSType: migration.MigrationFSType_RSYNC,
Features: rsyncFeatures,
},
}
} | random_line_split | |
driver_ceph.go | package drivers
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os/exec"
"strings"
"github.com/lxc/incus/incusd/migration"
"github.com/lxc/incus/incusd/operations"
"github.com/lxc/incus/incusd/revert"
"github.com/lxc/incus/shared"
"github.com/lxc/incus/shared/api"
"github.com/lxc/incus/shared/logger"
"github.com/lxc/incus/shared/units"
"github.com/lxc/incus/shared/validate"
)
var cephVersion string
var cephLoaded bool
type ceph struct {
common
}
// load is used to run one-time action per-driver rather than per-pool.
func (d *ceph) load() error {
// Register the patches.
d.patches = map[string]func() error{
"storage_lvm_skipactivation": nil,
"storage_missing_snapshot_records": nil,
"storage_delete_old_snapshot_records": nil,
"storage_zfs_drop_block_volume_filesystem_extension": nil,
"storage_prefix_bucket_names_with_project": nil,
}
// Done if previously loaded.
if cephLoaded {
return nil
}
// Validate the required binaries.
for _, tool := range []string{"ceph", "rbd"} {
_, err := exec.LookPath(tool)
if err != nil {
return fmt.Errorf("Required tool '%s' is missing", tool)
}
}
// Detect and record the version.
if cephVersion == "" {
out, err := shared.RunCommand("rbd", "--version")
if err != nil {
return err
}
out = strings.TrimSpace(out)
fields := strings.Split(out, " ")
if strings.HasPrefix(out, "ceph version ") && len(fields) > 2 {
cephVersion = fields[2]
} else {
cephVersion = out
}
}
cephLoaded = true
return nil
}
// isRemote returns true indicating this driver uses remote storage.
func (d *ceph) isRemote() bool {
return true
}
// Info returns info about the driver and its environment.
func (d *ceph) Info() Info {
return Info{
Name: "ceph",
Version: cephVersion,
OptimizedImages: true,
PreservesInodes: false,
Remote: d.isRemote(),
VolumeTypes: []VolumeType{VolumeTypeCustom, VolumeTypeImage, VolumeTypeContainer, VolumeTypeVM},
BlockBacking: true,
RunningCopyFreeze: true,
DirectIO: true,
IOUring: true,
MountedRoot: false,
}
}
// getPlaceholderVolume returns the volume used to indicate if the pool is in use.
func (d *ceph) getPlaceholderVolume() Volume {
return NewVolume(d, d.name, VolumeType("incus"), ContentTypeFS, d.config["ceph.osd.pool_name"], nil, nil)
}
// FillConfig populates the storage pool's configuration file with the default values.
func (d *ceph) FillConfig() error {
if d.config["ceph.cluster_name"] == "" {
d.config["ceph.cluster_name"] = CephDefaultCluster
}
if d.config["ceph.user.name"] == "" {
d.config["ceph.user.name"] = CephDefaultUser
}
if d.config["ceph.osd.pg_num"] == "" {
d.config["ceph.osd.pg_num"] = "32"
}
return nil
}
// Create is called during pool creation and is effectively using an empty driver struct.
// WARNING: The Create() function cannot rely on any of the struct attributes being set.
func (d *ceph) Create() error {
revert := revert.New()
defer revert.Fail()
d.config["volatile.initial_source"] = d.config["source"]
err := d.FillConfig()
if err != nil {
return err
}
// Validate.
_, err = units.ParseByteSizeString(d.config["ceph.osd.pg_num"])
if err != nil {
return err
}
// Quick check.
if d.config["source"] != "" && d.config["ceph.osd.pool_name"] != "" && d.config["source"] != d.config["ceph.osd.pool_name"] {
return fmt.Errorf(`The "source" and "ceph.osd.pool_name" property must not differ for Ceph OSD storage pools`)
}
// Use an existing OSD pool.
if d.config["source"] != "" {
d.config["ceph.osd.pool_name"] = d.config["source"]
}
if d.config["ceph.osd.pool_name"] == "" {
d.config["ceph.osd.pool_name"] = d.name
d.config["source"] = d.name
}
placeholderVol := d.getPlaceholderVolume()
if !d.osdPoolExists() {
// Create new osd pool.
_, err := shared.TryRunCommand("ceph",
"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
"--cluster", d.config["ceph.cluster_name"],
"osd",
"pool",
"create",
d.config["ceph.osd.pool_name"],
d.config["ceph.osd.pg_num"])
if err != nil {
return err
}
revert.Add(func() { _ = d.osdDeletePool() })
// Initialize the pool. This is not necessary but allows the pool to be monitored.
_, err = shared.TryRunCommand("rbd",
"--id", d.config["ceph.user.name"],
"--cluster", d.config["ceph.cluster_name"],
"pool",
"init",
d.config["ceph.osd.pool_name"])
if err != nil {
d.logger.Warn("Failed to initialize pool", logger.Ctx{"pool": d.config["ceph.osd.pool_name"], "cluster": d.config["ceph.cluster_name"]})
}
// Create placeholder storage volume. Other instances will use this to detect whether this osd
// pool is already in use by another instance.
err = d.rbdCreateVolume(placeholderVol, "0")
if err != nil {
return err
}
d.config["volatile.pool.pristine"] = "true"
} else {
volExists, err := d.HasVolume(placeholderVol)
if err != nil {
return err
}
if volExists {
// ceph.osd.force_reuse is deprecated and should not be used. OSD pools are a logical
// construct there is no good reason not to create one for dedicated use by the daemon.
if shared.IsFalseOrEmpty(d.config["ceph.osd.force_reuse"]) {
return fmt.Errorf("Pool '%s' in cluster '%s' seems to be in use by another Incus instance. Use 'ceph.osd.force_reuse=true' to force", d.config["ceph.osd.pool_name"], d.config["ceph.cluster_name"])
}
d.config["volatile.pool.pristine"] = "false"
} else {
// Create placeholder storage volume. Other instances will use this to detect whether this osd
// pool is already in use by another instance.
err := d.rbdCreateVolume(placeholderVol, "0")
if err != nil {
return err
}
d.config["volatile.pool.pristine"] = "true"
}
// Use existing OSD pool.
msg, err := shared.RunCommand("ceph",
"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
"--cluster", d.config["ceph.cluster_name"],
"osd",
"pool",
"get",
d.config["ceph.osd.pool_name"],
"pg_num")
if err != nil {
return err
}
idx := strings.Index(msg, "pg_num:")
if idx == -1 {
return fmt.Errorf("Failed to parse number of placement groups for pool: %s", msg)
}
msg = msg[(idx + len("pg_num:")):]
msg = strings.TrimSpace(msg)
// It is ok to update the pool configuration since storage pool
// creation via API is implemented such that the storage pool is
// checked for a changed config after this function returns and
// if so the db for it is updated.
d.config["ceph.osd.pg_num"] = msg
}
revert.Success()
return nil
}
// Delete removes the storage pool from the storage device.
func (d *ceph) Delete(op *operations.Operation) error |
// Validate checks that all provide keys are supported and that no conflicting or missing configuration is present.
func (d *ceph) Validate(config map[string]string) error {
rules := map[string]func(value string) error{
"ceph.cluster_name": validate.IsAny,
"ceph.osd.force_reuse": validate.Optional(validate.IsBool), // Deprecated, should not be used.
"ceph.osd.pg_num": validate.IsAny,
"ceph.osd.pool_name": validate.IsAny,
"ceph.osd.data_pool_name": validate.IsAny,
"ceph.rbd.clone_copy": validate.Optional(validate.IsBool),
"ceph.rbd.du": validate.Optional(validate.IsBool),
"ceph.rbd.features": validate.IsAny,
"ceph.user.name": validate.IsAny,
"volatile.pool.pristine": validate.IsAny,
}
return d.validatePool(config, rules, d.commonVolumeRules())
}
// Update applies any driver changes required from a configuration change.
func (d *ceph) Update(changedConfig map[string]string) error {
return nil
}
// Mount mounts the storage pool.
func (d *ceph) Mount() (bool, error) {
placeholderVol := d.getPlaceholderVolume()
volExists, err := d.HasVolume(placeholderVol)
if err != nil {
return false, err
}
if !volExists {
return false, fmt.Errorf("Placeholder volume does not exist")
}
return true, nil
}
// Unmount unmounts the storage pool.
func (d *ceph) Unmount() (bool, error) {
// Nothing to do here.
return true, nil
}
// GetResources returns the pool resource usage information.
func (d *ceph) GetResources() (*api.ResourcesStoragePool, error) {
var stdout bytes.Buffer
err := shared.RunCommandWithFds(context.TODO(), nil, &stdout,
"ceph",
"--name", fmt.Sprintf("client.%s", d.config["ceph.user.name"]),
"--cluster", d.config["ceph.cluster_name"],
"df",
"-f", "json")
if err != nil {
return nil, err
}
// Temporary structs for parsing.
type cephDfPoolStats struct {
BytesUsed int64 `json:"bytes_used"`
BytesAvailable int64 `json:"max_avail"`
}
type cephDfPool struct {
Name string `json:"name"`
Stats cephDfPoolStats `json:"stats"`
}
type cephDf struct {
Pools []cephDfPool `json:"pools"`
}
// Parse the JSON output.
df := cephDf{}
err = json.NewDecoder(&stdout).Decode(&df)
if err != nil {
return nil, err
}
var pool *cephDfPool
for _, entry := range df.Pools {
if entry.Name == d.config["ceph.osd.pool_name"] {
pool = &entry
break
}
}
if pool == nil {
return nil, fmt.Errorf("OSD pool missing in df output")
}
spaceUsed := uint64(pool.Stats.BytesUsed)
spaceAvailable := uint64(pool.Stats.BytesAvailable)
res := api.ResourcesStoragePool{}
res.Space.Total = spaceAvailable + spaceUsed
res.Space.Used = spaceUsed
return &res, nil
}
// MigrationType returns the type of transfer methods to be used when doing migrations between pools in preference order.
func (d *ceph) MigrationTypes(contentType ContentType, refresh bool, copySnapshots bool) []migration.Type {
var rsyncFeatures []string
// Do not pass compression argument to rsync if the associated
// config key, that is rsync.compression, is set to false.
if shared.IsFalse(d.Config()["rsync.compression"]) {
rsyncFeatures = []string{"xattrs", "delete", "bidirectional"}
} else {
rsyncFeatures = []string{"xattrs", "delete", "compress", "bidirectional"}
}
if refresh {
var transportType migration.MigrationFSType
if IsContentBlock(contentType) {
transportType = migration.MigrationFSType_BLOCK_AND_RSYNC
} else {
transportType = migration.MigrationFSType_RSYNC
}
return []migration.Type{
{
FSType: transportType,
Features: rsyncFeatures,
},
}
}
if contentType == ContentTypeBlock {
return []migration.Type{
{
FSType: migration.MigrationFSType_RBD,
},
{
FSType: migration.MigrationFSType_BLOCK_AND_RSYNC,
Features: rsyncFeatures,
},
}
}
return []migration.Type{
{
FSType: migration.MigrationFSType_RBD,
},
{
FSType: migration.MigrationFSType_RSYNC,
Features: rsyncFeatures,
},
}
}
| {
// Test if the pool exists.
poolExists := d.osdPoolExists()
if !poolExists {
d.logger.Warn("Pool does not exist", logger.Ctx{"pool": d.config["ceph.osd.pool_name"], "cluster": d.config["ceph.cluster_name"]})
}
// Check whether we own the pool and only remove in this case.
if shared.IsTrue(d.config["volatile.pool.pristine"]) {
// Delete the osd pool.
if poolExists {
err := d.osdDeletePool()
if err != nil {
return err
}
}
}
// If the user completely destroyed it, call it done.
if !shared.PathExists(GetPoolMountPath(d.name)) {
return nil
}
// On delete, wipe everything in the directory.
err := wipeDirectory(GetPoolMountPath(d.name))
if err != nil {
return err
}
return nil
} | identifier_body |
getinfo.go | package main
import (
"fmt"
"log"
"time"
"strings"
"strconv"
"net/http"
"net/url"
"github.com/PuerkitoBio/goquery"
)
type GetInfo struct {}
type SearchResult struct {
Items []SearchItem
Total int64
Page int64
TotalPage int64
}
type SearchItem struct {
Name string
Link string
Ids []string
Type string
Status string
Law string
Price string
Currency string
Customer string
CustomerLink string
Description string
Lots []SearchItemLot
PublishDate int64
UpdateDate int64
Actions []SearchItemAction
}
type SearchItemLot struct {
Name string
Description string
Price string
Currency string
}
type SearchItemAction struct {
Name string
Link string
}
func (_ GetInfo) SearchQueryToParams(searchQuery SearchQuery) string {
params := ""
for _, item := range searchQuery.LawNumber {
if len(params) > 0 {
params += "&"
}
switch item {
case "44-fz":
params += "fz44=on"
case "223-fz":
params += "fz223=on"
case "pp_rf_615":
params += "ppRf615=on"
case "94-fz":
params += "fz94=on"
}
}
for _, item := range searchQuery.ProcedureStatus {
if len(params) > 0 {
params += "&"
}
switch item {
case "applicationSubmission":
params += "af=on"
case "commissionWork":
params += "ca=on"
case "procedureCompleted":
params += "pc=on"
case "procedureAborted":
params += "pa=on"
}
}
if len(searchQuery.SortDirection) > 0 {
if len(params) > 0 {
params += "&"
}
switch searchQuery.SortDirection {
case "up":
params += "sortDirection=true"
case "down":
params += "sortDirection=false"
}
}
if len(searchQuery.SortBy) > 0 {
if len(params) > 0 {
params += "&"
}
switch searchQuery.SortBy {
case "updateDate":
params += "sortBy=UPDATE_DATE"
case "publishDate":
params += "sortBy=PUBLISH_DATE"
case "price":
params += "sortBy=PRICE"
case "relevance":
params += "sortBy=RELEVANCE"
}
}
if len(searchQuery.CityName) > 0 {
if len(params) > 0 {
params += "&"
}
switch searchQuery.CityName {
case "st_petersburg":
params += "regions=5277347"
case "moscow":
params += "regions=5277335"
}
}
if searchQuery.PublishDateFrom > 0 {
if len(params) > 0 {
params += "&"
}
dateFrom := time.Unix(searchQuery.PublishDateFrom, 0)
params += "publishDateFrom=" + url.QueryEscape(dateFrom.Format("02.01.2006"))
}
if searchQuery.PublishDateTo > 0 {
if len(params) > 0 {
params += "&"
}
dateTo := time.Unix(searchQuery.PublishDateTo, 0)
params += "publishDateTo=" + url.QueryEscape(dateTo.Format("02.01.2006"))
}
if searchQuery.PageNumber > 0 {
if len(params) > 0 {
params += "&"
}
params += "pageNumber=" + strconv.FormatInt(searchQuery.PageNumber, 10)
}
if len(params) > 0 {
params += "&"
}
params += "searchString=" + url.QueryEscape(searchQuery.SearchString)
return params
}
func (m GetInfo) | (searchQuery SearchQuery) (SearchResult, error) {
var result SearchResult
var err error
searchUrl := "http://zakupki.gov.ru/epz/order/quicksearch/search.html?" + m.SearchQueryToParams(searchQuery)
page, err := http.Get(searchUrl)
if err != nil {
return SearchResult{}, err
}
defer page.Body.Close()
if page.StatusCode != 200 {
return SearchResult{}, fmt.Errorf("Search page status error: %s", page.Status)
}
doc, err := goquery.NewDocumentFromReader(page.Body)
if err != nil {
return SearchResult{}, err
}
currentPageElement := doc.Find(".paginator .page__link_active").First()
if currentPageElement.Length() > 0 {
currentPageString := strings.TrimSpace(currentPageElement.Text())
currentPage, err := strconv.ParseInt(currentPageString, 10, 64)
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting current page number: %s\n", err)
}
result.Page = currentPage
} else {
result.Page = 1
}
totalPageElement := doc.Find(".paginator .page__link").Last()
if totalPageElement.Length() > 0 {
totalPageString := strings.TrimSpace(totalPageElement.Text())
totalPage, err := strconv.ParseInt(totalPageString, 10, 64)
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting total page number: %s\n", err)
}
result.TotalPage = totalPage
} else {
result.TotalPage = 1
}
totalNode := doc.Find(".allRecords > strong").First()
totalNumber := int64(0)
if totalNode.Length() > 0 {
totalComment := totalNode.Get(0).NextSibling
if len(strings.TrimSpace(totalComment.Data)) > 0 {
totalString := strings.Split(totalComment.Data, ": ")[1]
totalNumber, err = strconv.ParseInt(strings.ReplaceAll(totalString, "\u00a0", ""), 10, 64) // there is a no-break space
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting exact total records number: %s\n", err.Error())
}
} else {
totalString := strings.TrimSpace(totalNode.Text())
totalNumber, err = strconv.ParseInt(strings.ReplaceAll(totalString, " ", ""), 10, 64) // there is a no-break space
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting approximate total records number: %s\n", err.Error())
}
}
}
result.Total = totalNumber
result.Items = make([]SearchItem, 0)
doc.Find("div.registerBox.registerBoxBank.margBtm20").Each(func(i int, s *goquery.Selection) {
var itemStruct SearchItem
itemTable := s.ChildrenFiltered("table").First()
itemHeader := itemTable.Find(".descriptTenderTd > dl > dt").First()
itemHeaderLink := itemHeader.Find("a")
itemHeaderLinkHref, _ := itemHeaderLink.Attr("href")
itemUrl, err := url.Parse(itemHeaderLinkHref)
if err != nil {
log.Print(err)
return
}
itemUrl.Scheme = "http"
itemUrl.Host = "zakupki.gov.ru"
itemName := strings.TrimSpace(itemHeaderLink.Text())
itemHeader.Remove()
itemOrganization := itemTable.Find(".descriptTenderTd > dl > .nameOrganization").First()
organizationLink := itemOrganization.Find("a").First()
organizationLinkHref, _ := organizationLink.Attr("href")
organizationUrl, err := url.Parse(organizationLinkHref)
if err != nil {
log.Print(err)
return
}
//fix relative urls
organizationUrl.Scheme = "http"
organizationUrl.Host = "zakupki.gov.ru"
organizationName := strings.TrimSpace(organizationLink.Text())
itemOrganization.Remove()
itemIdNode := itemTable.Find(".descriptTenderTd > dl > dd.padTop10 > dl.greyText.margTop0.padTop8").First()
itemIdNode.Find("script").Remove()
itemId := strings.ReplaceAll(itemIdNode.Text(), " ", "")
itemIdNode.Parent().Remove()
itemIds := strings.Split(itemId, "\n")
tempItemIds := make([]string, 0)
for i := 0; i < len(itemIds); i++ {
itemIds[i] = strings.TrimSpace(itemIds[i])
if len(itemIds[i]) > 0 {
tempItemIds = append(tempItemIds, itemIds[i])
}
}
itemIds = tempItemIds
itemDescription := strings.TrimSpace(itemTable.Find(".descriptTenderTd > dl > *").First().Text())
itemType := strings.TrimSpace(itemTable.Find(".tenderTd > dl > dt > strong").First().Text())
itemStatusSlice := strings.Split(itemTable.Find(".tenderTd > dl > dt > span.noWrap").First().Text(), "/")
itemStatus := strings.TrimSpace(itemStatusSlice[0])
itemLaw := strings.TrimSpace(itemStatusSlice[1])
itemPriceSlice := strings.Split(itemTable.Find(".tenderTd > dl > dd .fractionalNumber").First().Parent().Text(), ",")
for i2, priceItem := range itemPriceSlice {
itemPriceSlice[i2] = strings.TrimSpace(priceItem)
}
itemPrice := strings.Join(itemPriceSlice, ",")
itemCurrency := strings.TrimSpace(itemTable.Find(".tenderTd > dl > dd > .currency").First().Text())
itemStruct.Lots = make([]SearchItemLot, 0)
itemTable.Find(".lotsInfo .descriptTenderTd").Each(func(i2 int, s2 *goquery.Selection) {
var lotInfo SearchItemLot
lotDesciptionElem := s2.Find("dl > dt").First()
lotNameElem := lotDesciptionElem.ChildrenFiltered("strong").First()
lotPriceElem := s2.Find("dl > dt > i > strong").First()
lotInfo.Name = strings.TrimSpace(lotNameElem.Text())
lotNameElem.Remove()
lotInfo.Description = strings.TrimSpace(lotDesciptionElem.Text())
lotInfo.Price = strings.TrimSpace(lotPriceElem.Text())
lotInfo.Currency = strings.TrimSpace(lotPriceElem.Get(0).NextSibling.Data)
itemStruct.Lots = append(itemStruct.Lots, lotInfo)
})
amountNodes := itemTable.Find(".amountTenderTd > ul > li > label")
publishDateString := strings.TrimSpace(amountNodes.Get(0).NextSibling.Data)
updateDateString := strings.TrimSpace(amountNodes.Get(1).NextSibling.Data)
publishDate, err := time.Parse("02.01.2006", publishDateString)
if err != nil {
log.Print(err)
return
}
updateDate, err := time.Parse("02.01.2006", updateDateString)
if err != nil {
log.Print(err)
return
}
reportBox := itemTable.Next()
reportBoxList := reportBox.Find("ul > ul").First()
reportBoxList.Find("a").Each(func(i2 int, s2 *goquery.Selection) {
reportHref, reportHrefExists := s2.Attr("href")
if !reportHrefExists {
s2.Remove()
return
}
reportUrl, err := url.Parse(reportHref)
if err != nil {
log.Print(err)
return
}
//fix relative urls
reportUrl.Scheme = "http"
reportUrl.Host = "zakupki.gov.ru"
var reportItem SearchItemAction
reportItem.Name = strings.TrimSpace(s2.Text())
reportItem.Link = reportUrl.String()
itemStruct.Actions = append(itemStruct.Actions, reportItem)
})
itemStruct.Name = itemName
itemStruct.Link = itemUrl.String()
itemStruct.Ids = itemIds
itemStruct.Type = itemType
itemStruct.Status = itemStatus
itemStruct.Law = itemLaw
itemStruct.Price = itemPrice
itemStruct.Currency = itemCurrency
itemStruct.Customer = organizationName
itemStruct.CustomerLink = organizationUrl.String()
itemStruct.Description = itemDescription
itemStruct.PublishDate = publishDate.Unix()
itemStruct.UpdateDate = updateDate.Unix()
result.Items = append(result.Items, itemStruct)
})
return result, nil
}
| Search | identifier_name |
getinfo.go | package main
import (
"fmt"
"log"
"time"
"strings"
"strconv"
"net/http"
"net/url"
"github.com/PuerkitoBio/goquery"
)
type GetInfo struct {}
type SearchResult struct {
Items []SearchItem
Total int64
Page int64
TotalPage int64
}
type SearchItem struct {
Name string
Link string
Ids []string
Type string
Status string
Law string
Price string
Currency string
Customer string
CustomerLink string
Description string
Lots []SearchItemLot
PublishDate int64
UpdateDate int64
Actions []SearchItemAction
}
type SearchItemLot struct {
Name string
Description string
Price string
Currency string
}
type SearchItemAction struct {
Name string
Link string
}
func (_ GetInfo) SearchQueryToParams(searchQuery SearchQuery) string |
func (m GetInfo) Search(searchQuery SearchQuery) (SearchResult, error) {
var result SearchResult
var err error
searchUrl := "http://zakupki.gov.ru/epz/order/quicksearch/search.html?" + m.SearchQueryToParams(searchQuery)
page, err := http.Get(searchUrl)
if err != nil {
return SearchResult{}, err
}
defer page.Body.Close()
if page.StatusCode != 200 {
return SearchResult{}, fmt.Errorf("Search page status error: %s", page.Status)
}
doc, err := goquery.NewDocumentFromReader(page.Body)
if err != nil {
return SearchResult{}, err
}
currentPageElement := doc.Find(".paginator .page__link_active").First()
if currentPageElement.Length() > 0 {
currentPageString := strings.TrimSpace(currentPageElement.Text())
currentPage, err := strconv.ParseInt(currentPageString, 10, 64)
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting current page number: %s\n", err)
}
result.Page = currentPage
} else {
result.Page = 1
}
totalPageElement := doc.Find(".paginator .page__link").Last()
if totalPageElement.Length() > 0 {
totalPageString := strings.TrimSpace(totalPageElement.Text())
totalPage, err := strconv.ParseInt(totalPageString, 10, 64)
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting total page number: %s\n", err)
}
result.TotalPage = totalPage
} else {
result.TotalPage = 1
}
totalNode := doc.Find(".allRecords > strong").First()
totalNumber := int64(0)
if totalNode.Length() > 0 {
totalComment := totalNode.Get(0).NextSibling
if len(strings.TrimSpace(totalComment.Data)) > 0 {
totalString := strings.Split(totalComment.Data, ": ")[1]
totalNumber, err = strconv.ParseInt(strings.ReplaceAll(totalString, "\u00a0", ""), 10, 64) // there is a no-break space
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting exact total records number: %s\n", err.Error())
}
} else {
totalString := strings.TrimSpace(totalNode.Text())
totalNumber, err = strconv.ParseInt(strings.ReplaceAll(totalString, " ", ""), 10, 64) // there is a no-break space
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting approximate total records number: %s\n", err.Error())
}
}
}
result.Total = totalNumber
result.Items = make([]SearchItem, 0)
doc.Find("div.registerBox.registerBoxBank.margBtm20").Each(func(i int, s *goquery.Selection) {
var itemStruct SearchItem
itemTable := s.ChildrenFiltered("table").First()
itemHeader := itemTable.Find(".descriptTenderTd > dl > dt").First()
itemHeaderLink := itemHeader.Find("a")
itemHeaderLinkHref, _ := itemHeaderLink.Attr("href")
itemUrl, err := url.Parse(itemHeaderLinkHref)
if err != nil {
log.Print(err)
return
}
itemUrl.Scheme = "http"
itemUrl.Host = "zakupki.gov.ru"
itemName := strings.TrimSpace(itemHeaderLink.Text())
itemHeader.Remove()
itemOrganization := itemTable.Find(".descriptTenderTd > dl > .nameOrganization").First()
organizationLink := itemOrganization.Find("a").First()
organizationLinkHref, _ := organizationLink.Attr("href")
organizationUrl, err := url.Parse(organizationLinkHref)
if err != nil {
log.Print(err)
return
}
//fix relative urls
organizationUrl.Scheme = "http"
organizationUrl.Host = "zakupki.gov.ru"
organizationName := strings.TrimSpace(organizationLink.Text())
itemOrganization.Remove()
itemIdNode := itemTable.Find(".descriptTenderTd > dl > dd.padTop10 > dl.greyText.margTop0.padTop8").First()
itemIdNode.Find("script").Remove()
itemId := strings.ReplaceAll(itemIdNode.Text(), " ", "")
itemIdNode.Parent().Remove()
itemIds := strings.Split(itemId, "\n")
tempItemIds := make([]string, 0)
for i := 0; i < len(itemIds); i++ {
itemIds[i] = strings.TrimSpace(itemIds[i])
if len(itemIds[i]) > 0 {
tempItemIds = append(tempItemIds, itemIds[i])
}
}
itemIds = tempItemIds
itemDescription := strings.TrimSpace(itemTable.Find(".descriptTenderTd > dl > *").First().Text())
itemType := strings.TrimSpace(itemTable.Find(".tenderTd > dl > dt > strong").First().Text())
itemStatusSlice := strings.Split(itemTable.Find(".tenderTd > dl > dt > span.noWrap").First().Text(), "/")
itemStatus := strings.TrimSpace(itemStatusSlice[0])
itemLaw := strings.TrimSpace(itemStatusSlice[1])
itemPriceSlice := strings.Split(itemTable.Find(".tenderTd > dl > dd .fractionalNumber").First().Parent().Text(), ",")
for i2, priceItem := range itemPriceSlice {
itemPriceSlice[i2] = strings.TrimSpace(priceItem)
}
itemPrice := strings.Join(itemPriceSlice, ",")
itemCurrency := strings.TrimSpace(itemTable.Find(".tenderTd > dl > dd > .currency").First().Text())
itemStruct.Lots = make([]SearchItemLot, 0)
itemTable.Find(".lotsInfo .descriptTenderTd").Each(func(i2 int, s2 *goquery.Selection) {
var lotInfo SearchItemLot
lotDesciptionElem := s2.Find("dl > dt").First()
lotNameElem := lotDesciptionElem.ChildrenFiltered("strong").First()
lotPriceElem := s2.Find("dl > dt > i > strong").First()
lotInfo.Name = strings.TrimSpace(lotNameElem.Text())
lotNameElem.Remove()
lotInfo.Description = strings.TrimSpace(lotDesciptionElem.Text())
lotInfo.Price = strings.TrimSpace(lotPriceElem.Text())
lotInfo.Currency = strings.TrimSpace(lotPriceElem.Get(0).NextSibling.Data)
itemStruct.Lots = append(itemStruct.Lots, lotInfo)
})
amountNodes := itemTable.Find(".amountTenderTd > ul > li > label")
publishDateString := strings.TrimSpace(amountNodes.Get(0).NextSibling.Data)
updateDateString := strings.TrimSpace(amountNodes.Get(1).NextSibling.Data)
publishDate, err := time.Parse("02.01.2006", publishDateString)
if err != nil {
log.Print(err)
return
}
updateDate, err := time.Parse("02.01.2006", updateDateString)
if err != nil {
log.Print(err)
return
}
reportBox := itemTable.Next()
reportBoxList := reportBox.Find("ul > ul").First()
reportBoxList.Find("a").Each(func(i2 int, s2 *goquery.Selection) {
reportHref, reportHrefExists := s2.Attr("href")
if !reportHrefExists {
s2.Remove()
return
}
reportUrl, err := url.Parse(reportHref)
if err != nil {
log.Print(err)
return
}
//fix relative urls
reportUrl.Scheme = "http"
reportUrl.Host = "zakupki.gov.ru"
var reportItem SearchItemAction
reportItem.Name = strings.TrimSpace(s2.Text())
reportItem.Link = reportUrl.String()
itemStruct.Actions = append(itemStruct.Actions, reportItem)
})
itemStruct.Name = itemName
itemStruct.Link = itemUrl.String()
itemStruct.Ids = itemIds
itemStruct.Type = itemType
itemStruct.Status = itemStatus
itemStruct.Law = itemLaw
itemStruct.Price = itemPrice
itemStruct.Currency = itemCurrency
itemStruct.Customer = organizationName
itemStruct.CustomerLink = organizationUrl.String()
itemStruct.Description = itemDescription
itemStruct.PublishDate = publishDate.Unix()
itemStruct.UpdateDate = updateDate.Unix()
result.Items = append(result.Items, itemStruct)
})
return result, nil
}
| {
params := ""
for _, item := range searchQuery.LawNumber {
if len(params) > 0 {
params += "&"
}
switch item {
case "44-fz":
params += "fz44=on"
case "223-fz":
params += "fz223=on"
case "pp_rf_615":
params += "ppRf615=on"
case "94-fz":
params += "fz94=on"
}
}
for _, item := range searchQuery.ProcedureStatus {
if len(params) > 0 {
params += "&"
}
switch item {
case "applicationSubmission":
params += "af=on"
case "commissionWork":
params += "ca=on"
case "procedureCompleted":
params += "pc=on"
case "procedureAborted":
params += "pa=on"
}
}
if len(searchQuery.SortDirection) > 0 {
if len(params) > 0 {
params += "&"
}
switch searchQuery.SortDirection {
case "up":
params += "sortDirection=true"
case "down":
params += "sortDirection=false"
}
}
if len(searchQuery.SortBy) > 0 {
if len(params) > 0 {
params += "&"
}
switch searchQuery.SortBy {
case "updateDate":
params += "sortBy=UPDATE_DATE"
case "publishDate":
params += "sortBy=PUBLISH_DATE"
case "price":
params += "sortBy=PRICE"
case "relevance":
params += "sortBy=RELEVANCE"
}
}
if len(searchQuery.CityName) > 0 {
if len(params) > 0 {
params += "&"
}
switch searchQuery.CityName {
case "st_petersburg":
params += "regions=5277347"
case "moscow":
params += "regions=5277335"
}
}
if searchQuery.PublishDateFrom > 0 {
if len(params) > 0 {
params += "&"
}
dateFrom := time.Unix(searchQuery.PublishDateFrom, 0)
params += "publishDateFrom=" + url.QueryEscape(dateFrom.Format("02.01.2006"))
}
if searchQuery.PublishDateTo > 0 {
if len(params) > 0 {
params += "&"
}
dateTo := time.Unix(searchQuery.PublishDateTo, 0)
params += "publishDateTo=" + url.QueryEscape(dateTo.Format("02.01.2006"))
}
if searchQuery.PageNumber > 0 {
if len(params) > 0 {
params += "&"
}
params += "pageNumber=" + strconv.FormatInt(searchQuery.PageNumber, 10)
}
if len(params) > 0 {
params += "&"
}
params += "searchString=" + url.QueryEscape(searchQuery.SearchString)
return params
} | identifier_body |
getinfo.go | package main
import (
"fmt"
"log"
"time"
"strings"
"strconv"
"net/http"
"net/url"
"github.com/PuerkitoBio/goquery"
)
type GetInfo struct {}
type SearchResult struct {
Items []SearchItem
Total int64
Page int64
TotalPage int64
}
type SearchItem struct {
Name string
Link string
Ids []string
Type string
Status string
Law string
Price string
Currency string
Customer string
CustomerLink string
Description string
Lots []SearchItemLot
PublishDate int64
UpdateDate int64
Actions []SearchItemAction
}
type SearchItemLot struct {
Name string
Description string
Price string
Currency string
}
type SearchItemAction struct {
Name string
Link string
}
func (_ GetInfo) SearchQueryToParams(searchQuery SearchQuery) string {
params := ""
for _, item := range searchQuery.LawNumber {
if len(params) > 0 {
params += "&"
}
switch item {
case "44-fz":
params += "fz44=on"
case "223-fz":
params += "fz223=on"
case "pp_rf_615":
params += "ppRf615=on"
case "94-fz":
params += "fz94=on"
}
}
for _, item := range searchQuery.ProcedureStatus {
if len(params) > 0 {
params += "&"
}
switch item {
case "applicationSubmission":
params += "af=on"
case "commissionWork":
params += "ca=on"
case "procedureCompleted":
params += "pc=on"
case "procedureAborted":
params += "pa=on"
}
}
if len(searchQuery.SortDirection) > 0 {
if len(params) > 0 {
params += "&"
}
switch searchQuery.SortDirection {
case "up":
params += "sortDirection=true"
case "down":
params += "sortDirection=false"
}
}
if len(searchQuery.SortBy) > 0 {
if len(params) > 0 {
params += "&"
}
switch searchQuery.SortBy {
case "updateDate":
params += "sortBy=UPDATE_DATE"
case "publishDate":
params += "sortBy=PUBLISH_DATE"
case "price":
params += "sortBy=PRICE"
case "relevance":
params += "sortBy=RELEVANCE"
}
}
if len(searchQuery.CityName) > 0 {
if len(params) > 0 {
params += "&"
}
switch searchQuery.CityName {
case "st_petersburg":
params += "regions=5277347"
case "moscow":
params += "regions=5277335"
}
}
if searchQuery.PublishDateFrom > 0 {
if len(params) > 0 {
params += "&"
}
dateFrom := time.Unix(searchQuery.PublishDateFrom, 0)
params += "publishDateFrom=" + url.QueryEscape(dateFrom.Format("02.01.2006"))
}
if searchQuery.PublishDateTo > 0 {
if len(params) > 0 {
params += "&"
}
dateTo := time.Unix(searchQuery.PublishDateTo, 0)
params += "publishDateTo=" + url.QueryEscape(dateTo.Format("02.01.2006"))
}
if searchQuery.PageNumber > 0 {
if len(params) > 0 {
params += "&"
}
params += "pageNumber=" + strconv.FormatInt(searchQuery.PageNumber, 10)
}
if len(params) > 0 {
params += "&"
}
params += "searchString=" + url.QueryEscape(searchQuery.SearchString)
return params
}
func (m GetInfo) Search(searchQuery SearchQuery) (SearchResult, error) {
var result SearchResult
var err error
searchUrl := "http://zakupki.gov.ru/epz/order/quicksearch/search.html?" + m.SearchQueryToParams(searchQuery)
page, err := http.Get(searchUrl)
if err != nil {
return SearchResult{}, err
}
defer page.Body.Close()
if page.StatusCode != 200 {
return SearchResult{}, fmt.Errorf("Search page status error: %s", page.Status)
}
doc, err := goquery.NewDocumentFromReader(page.Body)
if err != nil {
return SearchResult{}, err
}
currentPageElement := doc.Find(".paginator .page__link_active").First()
if currentPageElement.Length() > 0 {
currentPageString := strings.TrimSpace(currentPageElement.Text())
currentPage, err := strconv.ParseInt(currentPageString, 10, 64)
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting current page number: %s\n", err)
}
result.Page = currentPage
} else {
result.Page = 1
}
totalPageElement := doc.Find(".paginator .page__link").Last()
if totalPageElement.Length() > 0 {
totalPageString := strings.TrimSpace(totalPageElement.Text())
totalPage, err := strconv.ParseInt(totalPageString, 10, 64)
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting total page number: %s\n", err)
}
result.TotalPage = totalPage
} else {
result.TotalPage = 1
}
totalNode := doc.Find(".allRecords > strong").First()
totalNumber := int64(0)
if totalNode.Length() > 0 {
totalComment := totalNode.Get(0).NextSibling
if len(strings.TrimSpace(totalComment.Data)) > 0 | else {
totalString := strings.TrimSpace(totalNode.Text())
totalNumber, err = strconv.ParseInt(strings.ReplaceAll(totalString, " ", ""), 10, 64) // there is a no-break space
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting approximate total records number: %s\n", err.Error())
}
}
}
result.Total = totalNumber
result.Items = make([]SearchItem, 0)
doc.Find("div.registerBox.registerBoxBank.margBtm20").Each(func(i int, s *goquery.Selection) {
var itemStruct SearchItem
itemTable := s.ChildrenFiltered("table").First()
itemHeader := itemTable.Find(".descriptTenderTd > dl > dt").First()
itemHeaderLink := itemHeader.Find("a")
itemHeaderLinkHref, _ := itemHeaderLink.Attr("href")
itemUrl, err := url.Parse(itemHeaderLinkHref)
if err != nil {
log.Print(err)
return
}
itemUrl.Scheme = "http"
itemUrl.Host = "zakupki.gov.ru"
itemName := strings.TrimSpace(itemHeaderLink.Text())
itemHeader.Remove()
itemOrganization := itemTable.Find(".descriptTenderTd > dl > .nameOrganization").First()
organizationLink := itemOrganization.Find("a").First()
organizationLinkHref, _ := organizationLink.Attr("href")
organizationUrl, err := url.Parse(organizationLinkHref)
if err != nil {
log.Print(err)
return
}
//fix relative urls
organizationUrl.Scheme = "http"
organizationUrl.Host = "zakupki.gov.ru"
organizationName := strings.TrimSpace(organizationLink.Text())
itemOrganization.Remove()
itemIdNode := itemTable.Find(".descriptTenderTd > dl > dd.padTop10 > dl.greyText.margTop0.padTop8").First()
itemIdNode.Find("script").Remove()
itemId := strings.ReplaceAll(itemIdNode.Text(), " ", "")
itemIdNode.Parent().Remove()
itemIds := strings.Split(itemId, "\n")
tempItemIds := make([]string, 0)
for i := 0; i < len(itemIds); i++ {
itemIds[i] = strings.TrimSpace(itemIds[i])
if len(itemIds[i]) > 0 {
tempItemIds = append(tempItemIds, itemIds[i])
}
}
itemIds = tempItemIds
itemDescription := strings.TrimSpace(itemTable.Find(".descriptTenderTd > dl > *").First().Text())
itemType := strings.TrimSpace(itemTable.Find(".tenderTd > dl > dt > strong").First().Text())
itemStatusSlice := strings.Split(itemTable.Find(".tenderTd > dl > dt > span.noWrap").First().Text(), "/")
itemStatus := strings.TrimSpace(itemStatusSlice[0])
itemLaw := strings.TrimSpace(itemStatusSlice[1])
itemPriceSlice := strings.Split(itemTable.Find(".tenderTd > dl > dd .fractionalNumber").First().Parent().Text(), ",")
for i2, priceItem := range itemPriceSlice {
itemPriceSlice[i2] = strings.TrimSpace(priceItem)
}
itemPrice := strings.Join(itemPriceSlice, ",")
itemCurrency := strings.TrimSpace(itemTable.Find(".tenderTd > dl > dd > .currency").First().Text())
itemStruct.Lots = make([]SearchItemLot, 0)
itemTable.Find(".lotsInfo .descriptTenderTd").Each(func(i2 int, s2 *goquery.Selection) {
var lotInfo SearchItemLot
lotDesciptionElem := s2.Find("dl > dt").First()
lotNameElem := lotDesciptionElem.ChildrenFiltered("strong").First()
lotPriceElem := s2.Find("dl > dt > i > strong").First()
lotInfo.Name = strings.TrimSpace(lotNameElem.Text())
lotNameElem.Remove()
lotInfo.Description = strings.TrimSpace(lotDesciptionElem.Text())
lotInfo.Price = strings.TrimSpace(lotPriceElem.Text())
lotInfo.Currency = strings.TrimSpace(lotPriceElem.Get(0).NextSibling.Data)
itemStruct.Lots = append(itemStruct.Lots, lotInfo)
})
amountNodes := itemTable.Find(".amountTenderTd > ul > li > label")
publishDateString := strings.TrimSpace(amountNodes.Get(0).NextSibling.Data)
updateDateString := strings.TrimSpace(amountNodes.Get(1).NextSibling.Data)
publishDate, err := time.Parse("02.01.2006", publishDateString)
if err != nil {
log.Print(err)
return
}
updateDate, err := time.Parse("02.01.2006", updateDateString)
if err != nil {
log.Print(err)
return
}
reportBox := itemTable.Next()
reportBoxList := reportBox.Find("ul > ul").First()
reportBoxList.Find("a").Each(func(i2 int, s2 *goquery.Selection) {
reportHref, reportHrefExists := s2.Attr("href")
if !reportHrefExists {
s2.Remove()
return
}
reportUrl, err := url.Parse(reportHref)
if err != nil {
log.Print(err)
return
}
//fix relative urls
reportUrl.Scheme = "http"
reportUrl.Host = "zakupki.gov.ru"
var reportItem SearchItemAction
reportItem.Name = strings.TrimSpace(s2.Text())
reportItem.Link = reportUrl.String()
itemStruct.Actions = append(itemStruct.Actions, reportItem)
})
itemStruct.Name = itemName
itemStruct.Link = itemUrl.String()
itemStruct.Ids = itemIds
itemStruct.Type = itemType
itemStruct.Status = itemStatus
itemStruct.Law = itemLaw
itemStruct.Price = itemPrice
itemStruct.Currency = itemCurrency
itemStruct.Customer = organizationName
itemStruct.CustomerLink = organizationUrl.String()
itemStruct.Description = itemDescription
itemStruct.PublishDate = publishDate.Unix()
itemStruct.UpdateDate = updateDate.Unix()
result.Items = append(result.Items, itemStruct)
})
return result, nil
}
| {
totalString := strings.Split(totalComment.Data, ": ")[1]
totalNumber, err = strconv.ParseInt(strings.ReplaceAll(totalString, "\u00a0", ""), 10, 64) // there is a no-break space
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting exact total records number: %s\n", err.Error())
}
} | conditional_block |
getinfo.go | package main
import ( | "strconv"
"net/http"
"net/url"
"github.com/PuerkitoBio/goquery"
)
type GetInfo struct {}
type SearchResult struct {
Items []SearchItem
Total int64
Page int64
TotalPage int64
}
type SearchItem struct {
Name string
Link string
Ids []string
Type string
Status string
Law string
Price string
Currency string
Customer string
CustomerLink string
Description string
Lots []SearchItemLot
PublishDate int64
UpdateDate int64
Actions []SearchItemAction
}
type SearchItemLot struct {
Name string
Description string
Price string
Currency string
}
type SearchItemAction struct {
Name string
Link string
}
func (_ GetInfo) SearchQueryToParams(searchQuery SearchQuery) string {
params := ""
for _, item := range searchQuery.LawNumber {
if len(params) > 0 {
params += "&"
}
switch item {
case "44-fz":
params += "fz44=on"
case "223-fz":
params += "fz223=on"
case "pp_rf_615":
params += "ppRf615=on"
case "94-fz":
params += "fz94=on"
}
}
for _, item := range searchQuery.ProcedureStatus {
if len(params) > 0 {
params += "&"
}
switch item {
case "applicationSubmission":
params += "af=on"
case "commissionWork":
params += "ca=on"
case "procedureCompleted":
params += "pc=on"
case "procedureAborted":
params += "pa=on"
}
}
if len(searchQuery.SortDirection) > 0 {
if len(params) > 0 {
params += "&"
}
switch searchQuery.SortDirection {
case "up":
params += "sortDirection=true"
case "down":
params += "sortDirection=false"
}
}
if len(searchQuery.SortBy) > 0 {
if len(params) > 0 {
params += "&"
}
switch searchQuery.SortBy {
case "updateDate":
params += "sortBy=UPDATE_DATE"
case "publishDate":
params += "sortBy=PUBLISH_DATE"
case "price":
params += "sortBy=PRICE"
case "relevance":
params += "sortBy=RELEVANCE"
}
}
if len(searchQuery.CityName) > 0 {
if len(params) > 0 {
params += "&"
}
switch searchQuery.CityName {
case "st_petersburg":
params += "regions=5277347"
case "moscow":
params += "regions=5277335"
}
}
if searchQuery.PublishDateFrom > 0 {
if len(params) > 0 {
params += "&"
}
dateFrom := time.Unix(searchQuery.PublishDateFrom, 0)
params += "publishDateFrom=" + url.QueryEscape(dateFrom.Format("02.01.2006"))
}
if searchQuery.PublishDateTo > 0 {
if len(params) > 0 {
params += "&"
}
dateTo := time.Unix(searchQuery.PublishDateTo, 0)
params += "publishDateTo=" + url.QueryEscape(dateTo.Format("02.01.2006"))
}
if searchQuery.PageNumber > 0 {
if len(params) > 0 {
params += "&"
}
params += "pageNumber=" + strconv.FormatInt(searchQuery.PageNumber, 10)
}
if len(params) > 0 {
params += "&"
}
params += "searchString=" + url.QueryEscape(searchQuery.SearchString)
return params
}
func (m GetInfo) Search(searchQuery SearchQuery) (SearchResult, error) {
var result SearchResult
var err error
searchUrl := "http://zakupki.gov.ru/epz/order/quicksearch/search.html?" + m.SearchQueryToParams(searchQuery)
page, err := http.Get(searchUrl)
if err != nil {
return SearchResult{}, err
}
defer page.Body.Close()
if page.StatusCode != 200 {
return SearchResult{}, fmt.Errorf("Search page status error: %s", page.Status)
}
doc, err := goquery.NewDocumentFromReader(page.Body)
if err != nil {
return SearchResult{}, err
}
currentPageElement := doc.Find(".paginator .page__link_active").First()
if currentPageElement.Length() > 0 {
currentPageString := strings.TrimSpace(currentPageElement.Text())
currentPage, err := strconv.ParseInt(currentPageString, 10, 64)
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting current page number: %s\n", err)
}
result.Page = currentPage
} else {
result.Page = 1
}
totalPageElement := doc.Find(".paginator .page__link").Last()
if totalPageElement.Length() > 0 {
totalPageString := strings.TrimSpace(totalPageElement.Text())
totalPage, err := strconv.ParseInt(totalPageString, 10, 64)
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting total page number: %s\n", err)
}
result.TotalPage = totalPage
} else {
result.TotalPage = 1
}
totalNode := doc.Find(".allRecords > strong").First()
totalNumber := int64(0)
if totalNode.Length() > 0 {
totalComment := totalNode.Get(0).NextSibling
if len(strings.TrimSpace(totalComment.Data)) > 0 {
totalString := strings.Split(totalComment.Data, ": ")[1]
totalNumber, err = strconv.ParseInt(strings.ReplaceAll(totalString, "\u00a0", ""), 10, 64) // there is a no-break space
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting exact total records number: %s\n", err.Error())
}
} else {
totalString := strings.TrimSpace(totalNode.Text())
totalNumber, err = strconv.ParseInt(strings.ReplaceAll(totalString, " ", ""), 10, 64) // there is a no-break space
if err != nil {
return SearchResult{}, fmt.Errorf("Error on getting approximate total records number: %s\n", err.Error())
}
}
}
result.Total = totalNumber
result.Items = make([]SearchItem, 0)
doc.Find("div.registerBox.registerBoxBank.margBtm20").Each(func(i int, s *goquery.Selection) {
var itemStruct SearchItem
itemTable := s.ChildrenFiltered("table").First()
itemHeader := itemTable.Find(".descriptTenderTd > dl > dt").First()
itemHeaderLink := itemHeader.Find("a")
itemHeaderLinkHref, _ := itemHeaderLink.Attr("href")
itemUrl, err := url.Parse(itemHeaderLinkHref)
if err != nil {
log.Print(err)
return
}
itemUrl.Scheme = "http"
itemUrl.Host = "zakupki.gov.ru"
itemName := strings.TrimSpace(itemHeaderLink.Text())
itemHeader.Remove()
itemOrganization := itemTable.Find(".descriptTenderTd > dl > .nameOrganization").First()
organizationLink := itemOrganization.Find("a").First()
organizationLinkHref, _ := organizationLink.Attr("href")
organizationUrl, err := url.Parse(organizationLinkHref)
if err != nil {
log.Print(err)
return
}
//fix relative urls
organizationUrl.Scheme = "http"
organizationUrl.Host = "zakupki.gov.ru"
organizationName := strings.TrimSpace(organizationLink.Text())
itemOrganization.Remove()
itemIdNode := itemTable.Find(".descriptTenderTd > dl > dd.padTop10 > dl.greyText.margTop0.padTop8").First()
itemIdNode.Find("script").Remove()
itemId := strings.ReplaceAll(itemIdNode.Text(), " ", "")
itemIdNode.Parent().Remove()
itemIds := strings.Split(itemId, "\n")
tempItemIds := make([]string, 0)
for i := 0; i < len(itemIds); i++ {
itemIds[i] = strings.TrimSpace(itemIds[i])
if len(itemIds[i]) > 0 {
tempItemIds = append(tempItemIds, itemIds[i])
}
}
itemIds = tempItemIds
itemDescription := strings.TrimSpace(itemTable.Find(".descriptTenderTd > dl > *").First().Text())
itemType := strings.TrimSpace(itemTable.Find(".tenderTd > dl > dt > strong").First().Text())
itemStatusSlice := strings.Split(itemTable.Find(".tenderTd > dl > dt > span.noWrap").First().Text(), "/")
itemStatus := strings.TrimSpace(itemStatusSlice[0])
itemLaw := strings.TrimSpace(itemStatusSlice[1])
itemPriceSlice := strings.Split(itemTable.Find(".tenderTd > dl > dd .fractionalNumber").First().Parent().Text(), ",")
for i2, priceItem := range itemPriceSlice {
itemPriceSlice[i2] = strings.TrimSpace(priceItem)
}
itemPrice := strings.Join(itemPriceSlice, ",")
itemCurrency := strings.TrimSpace(itemTable.Find(".tenderTd > dl > dd > .currency").First().Text())
itemStruct.Lots = make([]SearchItemLot, 0)
itemTable.Find(".lotsInfo .descriptTenderTd").Each(func(i2 int, s2 *goquery.Selection) {
var lotInfo SearchItemLot
lotDesciptionElem := s2.Find("dl > dt").First()
lotNameElem := lotDesciptionElem.ChildrenFiltered("strong").First()
lotPriceElem := s2.Find("dl > dt > i > strong").First()
lotInfo.Name = strings.TrimSpace(lotNameElem.Text())
lotNameElem.Remove()
lotInfo.Description = strings.TrimSpace(lotDesciptionElem.Text())
lotInfo.Price = strings.TrimSpace(lotPriceElem.Text())
lotInfo.Currency = strings.TrimSpace(lotPriceElem.Get(0).NextSibling.Data)
itemStruct.Lots = append(itemStruct.Lots, lotInfo)
})
amountNodes := itemTable.Find(".amountTenderTd > ul > li > label")
publishDateString := strings.TrimSpace(amountNodes.Get(0).NextSibling.Data)
updateDateString := strings.TrimSpace(amountNodes.Get(1).NextSibling.Data)
publishDate, err := time.Parse("02.01.2006", publishDateString)
if err != nil {
log.Print(err)
return
}
updateDate, err := time.Parse("02.01.2006", updateDateString)
if err != nil {
log.Print(err)
return
}
reportBox := itemTable.Next()
reportBoxList := reportBox.Find("ul > ul").First()
reportBoxList.Find("a").Each(func(i2 int, s2 *goquery.Selection) {
reportHref, reportHrefExists := s2.Attr("href")
if !reportHrefExists {
s2.Remove()
return
}
reportUrl, err := url.Parse(reportHref)
if err != nil {
log.Print(err)
return
}
//fix relative urls
reportUrl.Scheme = "http"
reportUrl.Host = "zakupki.gov.ru"
var reportItem SearchItemAction
reportItem.Name = strings.TrimSpace(s2.Text())
reportItem.Link = reportUrl.String()
itemStruct.Actions = append(itemStruct.Actions, reportItem)
})
itemStruct.Name = itemName
itemStruct.Link = itemUrl.String()
itemStruct.Ids = itemIds
itemStruct.Type = itemType
itemStruct.Status = itemStatus
itemStruct.Law = itemLaw
itemStruct.Price = itemPrice
itemStruct.Currency = itemCurrency
itemStruct.Customer = organizationName
itemStruct.CustomerLink = organizationUrl.String()
itemStruct.Description = itemDescription
itemStruct.PublishDate = publishDate.Unix()
itemStruct.UpdateDate = updateDate.Unix()
result.Items = append(result.Items, itemStruct)
})
return result, nil
} | "fmt"
"log"
"time"
"strings" | random_line_split |
daemon.go | // Copyright (C) 2021 Tweag IO
// Copyright © 2020-2022 The Trustix Authors
//
// SPDX-License-Identifier: GPL-3.0-only
package cmd
import (
"crypto"
"fmt"
"math"
"net"
"net/http"
"net/url"
"os"
"os/signal"
"path"
"sync"
"syscall"
"time"
connect "github.com/bufbuild/connect-go"
"github.com/coreos/go-systemd/activation"
"github.com/nix-community/trustix/packages/go-lib/executor"
"github.com/nix-community/trustix/packages/trustix-proto/api"
"github.com/nix-community/trustix/packages/trustix-proto/api/apiconnect"
"github.com/nix-community/trustix/packages/trustix-proto/protocols"
"github.com/nix-community/trustix/packages/trustix-proto/rpc/rpcconnect"
"github.com/nix-community/trustix/packages/trustix/auth"
"github.com/nix-community/trustix/packages/trustix/client"
tapi "github.com/nix-community/trustix/packages/trustix/internal/api"
conf "github.com/nix-community/trustix/packages/trustix/internal/config"
"github.com/nix-community/trustix/packages/trustix/internal/constants"
"github.com/nix-community/trustix/packages/trustix/internal/decider"
"github.com/nix-community/trustix/packages/trustix/internal/lib"
"github.com/nix-community/trustix/packages/trustix/internal/pool"
pub "github.com/nix-community/trustix/packages/trustix/internal/publisher"
"github.com/nix-community/trustix/packages/trustix/internal/server"
"github.com/nix-community/trustix/packages/trustix/internal/signer"
"github.com/nix-community/trustix/packages/trustix/internal/sthsync"
"github.com/nix-community/trustix/packages/trustix/internal/storage"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
)
var daemonListenAddresses []string
var daemonConfigPath string
var daemonStateDirectory string
var daemonPollInterval float64
var daemonCmd = &cobra.Command{
Use: "daemon",
Short: "Trustix daemon",
RunE: func(cmd *cobra.Command, args []string) error {
if daemonConfigPath == "" {
return fmt.Errorf("Missing config flag")
}
config, err := conf.NewConfigFromFile(daemonConfigPath)
if err != nil {
log.Fatal(err)
}
log.WithFields(log.Fields{
"directory": daemonStateDirectory,
}).Info("Creating state directory")
err = os.MkdirAll(daemonStateDirectory, 0700)
if err != nil {
log.Fatalf("Could not create state directory: %s", daemonStateDirectory)
}
var store storage.Storage
{
switch config.Storage.Type {
case "native":
store, err = storage.NewNativeStorage(daemonStateDirectory)
case "memory":
store, err = storage.NewMemoryStorage()
}
if err != nil {
log.Fatalf("Could not initialise store: %v", err)
}
}
// Set up write access tokens
var authInterceptor connect.UnaryInterceptorFunc
{
writeTokens := make(map[string]*auth.PublicToken)
// From the TRUSTIX_TOKEN env var
// This is the default token used.
defaultTokenPath := os.Getenv("TRUSTIX_TOKEN")
if defaultTokenPath != "" {
f, err := os.Open(defaultTokenPath)
if err != nil {
log.Fatalf("Error opening private token file '%s': %v", defaultTokenPath, err)
}
tok, err := auth.NewPublicTokenFromPriv(f)
if err != nil {
log.Fatalf("Error creating token: %v", err)
}
writeTokens[tok.Name] = tok
}
for _, publicTokenStr := range config.WriteTokens {
tok, err := auth.NewPublicTokenFromPub(publicTokenStr)
if err != nil {
log.Fatalf("Error creating token: %v", err)
}
_, ok := writeTokens[tok.Name]
if ok {
log.Fatalf("Naming collision in tokens: '%s' exists more than once", tok.Name)
}
writeTokens[tok.Name] = tok
}
authInterceptor = auth.NewAuthInterceptor(nil, writeTokens)
}
signers := make(map[string]crypto.Signer)
{
for name, signerConfig := range config.Signers {
var sig crypto.Signer
log.WithFields(log.Fields{
"type": signerConfig.Type,
"name": name,
}).Info("Creating signer")
switch signerConfig.Type {
case "ed25519":
sig, err = signer.NewED25519Signer(signerConfig.ED25519.PrivateKeyPath)
if err != nil {
return err
}
default:
return fmt.Errorf("Signer type '%s' is not supported.", signerConfig.Type)
}
signers[name] = sig
}
}
// These APIs are static and fully controlled by the configuration file
logs := []*api.Log{}
logsPublished := []*api.Log{}
{
for _, pubConf := range config.Publishers {
pd, err := protocols.Get(pubConf.Protocol)
if err != nil {
return err
}
logMode := api.Log_LogModes(0)
logID, err := pubConf.PublicKey.LogID(pd, logMode)
if err != nil {
log.Fatal(err)
}
signer, err := pubConf.PublicKey.Signer()
if err != nil {
log.Fatal(err)
}
log := &api.Log{
LogID: &logID,
Meta: pubConf.GetMeta(),
Signer: signer,
Protocol: &pd.ID,
Mode: logMode.Enum(), // Hard-coded for now
}
logs = append(logs, log)
logsPublished = append(logsPublished, log)
}
for _, subConf := range config.Subscribers {
pd, err := protocols.Get(subConf.Protocol)
if err != nil {
return err
}
logMode := api.Log_LogModes(0)
logID, err := subConf.PublicKey.LogID(pd, logMode)
if err != nil {
log.Fatal(err)
}
signer, err := subConf.PublicKey.Signer()
if err != nil {
log.Fatal(err)
}
log := &api.Log{
LogID: &logID,
Meta: subConf.GetMeta(),
Signer: signer,
Protocol: &pd.ID,
Mode: logMode.Enum(),
}
logs = append(logs, log)
}
}
clientPool := pool.NewClientPool()
defer clientPool.Close()
for _, remote := range config.Remotes {
remote := remote
go func() {
pc, err := clientPool.Dial(remote)
if err != nil {
log.WithFields(log.Fields{
"remote": remote,
}).Error("Couldn't dial remote")
return
}
pc.Activate()
}()
}
rootBucket := &storage.Bucket{}
caValueBucket := rootBucket.Cd(constants.CaValueBucket)
nodeAPI := tapi.NewKVStoreNodeAPI(store, caValueBucket, logsPublished)
nodeAPIServer := server.NewNodeAPIServer(nodeAPI)
headSyncCloser := lib.NewMultiCloser()
defer headSyncCloser.Close()
pubMap := pub.NewPublisherMap()
defer pubMap.Close()
{
logInitExecutor := executor.NewParallellExecutor()
for _, subscriberConfig := range config.Subscribers {
subConf := subscriberConfig
logInitExecutor.Add(func() error { // nolint:errcheck
pubBytes, err := subConf.PublicKey.Decode()
if err != nil {
return err
}
pd, err := protocols.Get(subConf.Protocol)
if err != nil {
return err
}
logMode := api.Log_LogModes(0)
logID, err := subConf.PublicKey.LogID(pd, logMode)
if err != nil {
return err
}
log.WithFields(log.Fields{
"id": logID,
"pubkey": subConf.PublicKey.Pub,
}).Info("Adding log subscriber")
var verifier signer.Verifier
{
switch subConf.PublicKey.Type {
case "ed25519":
verifier, err = signer.NewED25519Verifier(pubBytes)
if err != nil {
return err
}
default:
return fmt.Errorf("Verifier type '%s' is not supported.", subConf.PublicKey.Type)
}
}
pollDuration := time.Millisecond * time.Duration(math.Round(daemonPollInterval/1000))
headSyncCloser.Add(sthsync.NewSTHSyncer(logID, store, rootBucket.Cd(logID), clientPool, verifier, pollDuration, pd))
return nil
})
}
for i, publisherConfig := range config.Publishers {
i := i
publisherConfig := publisherConfig
logInitExecutor.Add(func() error { // nolint:errcheck
logID := *logsPublished[i].LogID
log.WithFields(log.Fields{
"id": logID,
"pubkey": publisherConfig.PublicKey.Pub,
}).Info("Adding log")
pd, err := protocols.Get(publisherConfig.Protocol)
if err != nil {
return err
}
logAPI, err := tapi.NewKVStoreLogAPI(logID, store, rootBucket.Cd(logID), pd)
if err != nil {
return err
}
sig, ok := signers[publisherConfig.Signer]
if !ok {
return fmt.Errorf("Missing signer '%s'", publisherConfig.Signer)
}
publisher, err := pub.NewPublisher(logID, store, caValueBucket, rootBucket.Cd(logID), sig, pd)
if err != nil {
return err
}
if err = pubMap.Set(logID, publisher); err != nil {
return err
}
pc, err := clientPool.Add(&client.Client{
NodeAPI: nodeAPI,
LogAPI: logAPI,
Type: client.LocalClientType,
}, []string{logID})
if err != nil {
return err
}
pc.Activate()
return nil
})
}
err = logInitExecutor.Wait()
if err != nil {
return err
}
}
logAPIServer := server.NewLogAPIServer(logsPublished, clientPool)
deciders := make(map[string]decider.LogDecider)
{
for protocol, deciderConfigs := range config.Deciders { | }
// Private RPC methods to enumerate logs, decide on outputs and get raw values from storage
logRpcServer := server.NewLogRPCServer(store, rootBucket, clientPool, pubMap)
// Private RPC methods to get log heads, log entries, submit entries & commit queue
rpcServer := server.NewRPCServer(store, rootBucket, clientPool, pubMap, logs, deciders)
log.Debug("Creating gRPC servers")
errChan := make(chan error)
interceptors := connect.WithInterceptors(authInterceptor)
createServer := func(lis net.Listener) *http.Server {
mux := http.NewServeMux()
mux.Handle(rpcconnect.NewLogRPCHandler(logRpcServer, interceptors))
mux.Handle(rpcconnect.NewRPCApiHandler(rpcServer, interceptors))
mux.Handle(apiconnect.NewLogAPIHandler(logAPIServer, interceptors))
mux.Handle(apiconnect.NewNodeAPIHandler(nodeAPIServer, interceptors))
// Prometheus metrics
mux.Handle("/metrics", promhttp.Handler())
server := &http.Server{Handler: h2c.NewHandler(mux, &http2.Server{})}
go func() {
err := server.Serve(lis)
if err != nil {
errChan <- fmt.Errorf("failed to serve: %v", err)
}
}()
return server
}
var servers []*http.Server
// Systemd socket activation
listeners, err := activation.Listeners()
if err != nil {
log.Fatalf("cannot retrieve listeners: %s", err)
}
for _, lis := range listeners {
log.WithFields(log.Fields{
"address": lis.Addr(),
}).Info("Using socket activated listener")
servers = append(servers, createServer(lis))
}
// Create sockets
for _, addr := range daemonListenAddresses {
u, err := url.Parse(addr)
if err != nil {
log.Fatalf("Could not parse url: %v", err)
}
family := ""
host := ""
switch u.Scheme {
case "unix":
family = "unix"
host = u.Host + u.Path
case "http":
family = "tcp"
host = u.Host
default:
log.Fatalf("Socket with scheme '%s' unsupported", u.Scheme)
}
lis, err := net.Listen(family, host)
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
log.WithFields(log.Fields{
"address": addr,
}).Info("Listening to address")
servers = append(servers, createServer(lis))
}
if len(servers) <= 0 {
log.Fatal("No listeners configured!")
}
quit := make(chan os.Signal, 1)
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-quit
wg := new(sync.WaitGroup)
log.Info("Received shutdown signal, closing down server gracefully")
for _, server := range servers {
server := server
wg.Add(1)
go func() {
defer wg.Done()
server.Close()
}()
}
wg.Wait()
log.Info("Done closing down servers")
close(errChan)
}()
for err := range errChan {
log.Fatal(err)
}
return nil
},
}
func initDaemon() {
homeDir, _ := os.UserHomeDir()
defaultStateDir := path.Join(homeDir, ".local/share/trustix")
daemonCmd.Flags().StringVar(&daemonStateDirectory, "state", defaultStateDir, "State directory")
// Default poll every 30 minutes
daemonCmd.Flags().Float64Var(&daemonPollInterval, "interval", 60*30, "Log poll interval in seconds")
daemonCmd.Flags().StringSliceVar(&daemonListenAddresses, "listen", []string{}, "Listen to address")
daemonCmd.Flags().StringVar(&daemonConfigPath, "config", "", "Path to config.toml/json")
}
|
current := []decider.LogDecider{}
for _, deciderConfig := range deciderConfigs {
switch deciderConfig.Engine {
case "javascript":
decider, err := decider.NewJSDecider(deciderConfig.JS.Function)
if err != nil {
return err
}
current = append(current, decider)
case "percentage":
decider, err := decider.NewMinimumPercentDecider(deciderConfig.Percentage.Minimum)
if err != nil {
return err
}
current = append(current, decider)
case "logid":
decider, err := decider.NewLogIDDecider(deciderConfig.LogID.ID)
if err != nil {
return err
}
current = append(current, decider)
default:
return fmt.Errorf("No such engine: %s", deciderConfig.Engine)
}
}
pd, err := protocols.Get(protocol)
if err != nil {
return err
}
deciders[pd.ID] = decider.NewAggDecider(current...)
}
| conditional_block |
daemon.go | // Copyright (C) 2021 Tweag IO
// Copyright © 2020-2022 The Trustix Authors
//
// SPDX-License-Identifier: GPL-3.0-only
package cmd
import (
"crypto"
"fmt"
"math"
"net"
"net/http"
"net/url"
"os"
"os/signal"
"path"
"sync"
"syscall"
"time"
connect "github.com/bufbuild/connect-go"
"github.com/coreos/go-systemd/activation"
"github.com/nix-community/trustix/packages/go-lib/executor"
"github.com/nix-community/trustix/packages/trustix-proto/api"
"github.com/nix-community/trustix/packages/trustix-proto/api/apiconnect"
"github.com/nix-community/trustix/packages/trustix-proto/protocols"
"github.com/nix-community/trustix/packages/trustix-proto/rpc/rpcconnect"
"github.com/nix-community/trustix/packages/trustix/auth"
"github.com/nix-community/trustix/packages/trustix/client"
tapi "github.com/nix-community/trustix/packages/trustix/internal/api"
conf "github.com/nix-community/trustix/packages/trustix/internal/config"
"github.com/nix-community/trustix/packages/trustix/internal/constants"
"github.com/nix-community/trustix/packages/trustix/internal/decider"
"github.com/nix-community/trustix/packages/trustix/internal/lib"
"github.com/nix-community/trustix/packages/trustix/internal/pool"
pub "github.com/nix-community/trustix/packages/trustix/internal/publisher"
"github.com/nix-community/trustix/packages/trustix/internal/server"
"github.com/nix-community/trustix/packages/trustix/internal/signer"
"github.com/nix-community/trustix/packages/trustix/internal/sthsync"
"github.com/nix-community/trustix/packages/trustix/internal/storage"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
)
var daemonListenAddresses []string
var daemonConfigPath string
var daemonStateDirectory string
var daemonPollInterval float64
var daemonCmd = &cobra.Command{
Use: "daemon",
Short: "Trustix daemon",
RunE: func(cmd *cobra.Command, args []string) error {
if daemonConfigPath == "" {
return fmt.Errorf("Missing config flag")
}
config, err := conf.NewConfigFromFile(daemonConfigPath)
if err != nil {
log.Fatal(err)
}
log.WithFields(log.Fields{
"directory": daemonStateDirectory,
}).Info("Creating state directory")
err = os.MkdirAll(daemonStateDirectory, 0700)
if err != nil {
log.Fatalf("Could not create state directory: %s", daemonStateDirectory)
}
var store storage.Storage
{
switch config.Storage.Type {
case "native":
store, err = storage.NewNativeStorage(daemonStateDirectory)
case "memory":
store, err = storage.NewMemoryStorage()
}
if err != nil {
log.Fatalf("Could not initialise store: %v", err)
}
}
// Set up write access tokens
var authInterceptor connect.UnaryInterceptorFunc
{
writeTokens := make(map[string]*auth.PublicToken)
// From the TRUSTIX_TOKEN env var
// This is the default token used.
defaultTokenPath := os.Getenv("TRUSTIX_TOKEN")
if defaultTokenPath != "" {
f, err := os.Open(defaultTokenPath)
if err != nil {
log.Fatalf("Error opening private token file '%s': %v", defaultTokenPath, err)
}
tok, err := auth.NewPublicTokenFromPriv(f)
if err != nil {
log.Fatalf("Error creating token: %v", err)
}
writeTokens[tok.Name] = tok
}
for _, publicTokenStr := range config.WriteTokens {
tok, err := auth.NewPublicTokenFromPub(publicTokenStr)
if err != nil {
log.Fatalf("Error creating token: %v", err)
}
_, ok := writeTokens[tok.Name]
if ok {
log.Fatalf("Naming collision in tokens: '%s' exists more than once", tok.Name)
}
writeTokens[tok.Name] = tok
}
authInterceptor = auth.NewAuthInterceptor(nil, writeTokens)
}
signers := make(map[string]crypto.Signer)
{
for name, signerConfig := range config.Signers {
var sig crypto.Signer
log.WithFields(log.Fields{
"type": signerConfig.Type,
"name": name,
}).Info("Creating signer")
switch signerConfig.Type {
case "ed25519":
sig, err = signer.NewED25519Signer(signerConfig.ED25519.PrivateKeyPath)
if err != nil {
return err
}
default:
return fmt.Errorf("Signer type '%s' is not supported.", signerConfig.Type)
}
signers[name] = sig
}
}
// These APIs are static and fully controlled by the configuration file
logs := []*api.Log{}
logsPublished := []*api.Log{}
{
for _, pubConf := range config.Publishers {
pd, err := protocols.Get(pubConf.Protocol)
if err != nil {
return err
}
logMode := api.Log_LogModes(0)
logID, err := pubConf.PublicKey.LogID(pd, logMode)
if err != nil {
log.Fatal(err)
}
signer, err := pubConf.PublicKey.Signer()
if err != nil {
log.Fatal(err)
}
log := &api.Log{
LogID: &logID,
Meta: pubConf.GetMeta(),
Signer: signer,
Protocol: &pd.ID,
Mode: logMode.Enum(), // Hard-coded for now
}
logs = append(logs, log)
logsPublished = append(logsPublished, log)
}
for _, subConf := range config.Subscribers {
pd, err := protocols.Get(subConf.Protocol)
if err != nil {
return err
}
logMode := api.Log_LogModes(0)
logID, err := subConf.PublicKey.LogID(pd, logMode)
if err != nil {
log.Fatal(err)
}
signer, err := subConf.PublicKey.Signer()
if err != nil {
log.Fatal(err)
}
log := &api.Log{
LogID: &logID,
Meta: subConf.GetMeta(),
Signer: signer,
Protocol: &pd.ID,
Mode: logMode.Enum(),
}
logs = append(logs, log)
}
}
clientPool := pool.NewClientPool()
defer clientPool.Close()
for _, remote := range config.Remotes {
remote := remote
go func() {
pc, err := clientPool.Dial(remote)
if err != nil {
log.WithFields(log.Fields{
"remote": remote,
}).Error("Couldn't dial remote")
return
}
pc.Activate()
}()
}
rootBucket := &storage.Bucket{}
caValueBucket := rootBucket.Cd(constants.CaValueBucket)
nodeAPI := tapi.NewKVStoreNodeAPI(store, caValueBucket, logsPublished)
nodeAPIServer := server.NewNodeAPIServer(nodeAPI)
headSyncCloser := lib.NewMultiCloser()
defer headSyncCloser.Close()
pubMap := pub.NewPublisherMap()
defer pubMap.Close()
{
logInitExecutor := executor.NewParallellExecutor()
for _, subscriberConfig := range config.Subscribers {
subConf := subscriberConfig
logInitExecutor.Add(func() error { // nolint:errcheck
pubBytes, err := subConf.PublicKey.Decode()
if err != nil {
return err
}
pd, err := protocols.Get(subConf.Protocol)
if err != nil {
return err
}
logMode := api.Log_LogModes(0)
logID, err := subConf.PublicKey.LogID(pd, logMode)
if err != nil {
return err
}
log.WithFields(log.Fields{
"id": logID,
"pubkey": subConf.PublicKey.Pub,
}).Info("Adding log subscriber")
var verifier signer.Verifier
{
switch subConf.PublicKey.Type {
case "ed25519":
verifier, err = signer.NewED25519Verifier(pubBytes)
if err != nil {
return err
}
default:
return fmt.Errorf("Verifier type '%s' is not supported.", subConf.PublicKey.Type)
}
}
pollDuration := time.Millisecond * time.Duration(math.Round(daemonPollInterval/1000))
headSyncCloser.Add(sthsync.NewSTHSyncer(logID, store, rootBucket.Cd(logID), clientPool, verifier, pollDuration, pd))
return nil
})
}
for i, publisherConfig := range config.Publishers {
i := i
publisherConfig := publisherConfig
logInitExecutor.Add(func() error { // nolint:errcheck
logID := *logsPublished[i].LogID
log.WithFields(log.Fields{
"id": logID,
"pubkey": publisherConfig.PublicKey.Pub,
}).Info("Adding log")
pd, err := protocols.Get(publisherConfig.Protocol)
if err != nil {
return err
}
logAPI, err := tapi.NewKVStoreLogAPI(logID, store, rootBucket.Cd(logID), pd)
if err != nil {
return err
}
sig, ok := signers[publisherConfig.Signer]
if !ok {
return fmt.Errorf("Missing signer '%s'", publisherConfig.Signer)
}
publisher, err := pub.NewPublisher(logID, store, caValueBucket, rootBucket.Cd(logID), sig, pd)
if err != nil {
return err
}
if err = pubMap.Set(logID, publisher); err != nil {
return err
}
pc, err := clientPool.Add(&client.Client{
NodeAPI: nodeAPI,
LogAPI: logAPI,
Type: client.LocalClientType,
}, []string{logID})
if err != nil {
return err
}
pc.Activate()
return nil
})
}
err = logInitExecutor.Wait()
if err != nil {
return err
}
}
logAPIServer := server.NewLogAPIServer(logsPublished, clientPool)
deciders := make(map[string]decider.LogDecider)
{
for protocol, deciderConfigs := range config.Deciders {
current := []decider.LogDecider{}
for _, deciderConfig := range deciderConfigs {
switch deciderConfig.Engine {
case "javascript":
decider, err := decider.NewJSDecider(deciderConfig.JS.Function)
if err != nil {
return err
}
current = append(current, decider)
case "percentage":
decider, err := decider.NewMinimumPercentDecider(deciderConfig.Percentage.Minimum)
if err != nil {
return err
}
current = append(current, decider)
case "logid":
decider, err := decider.NewLogIDDecider(deciderConfig.LogID.ID)
if err != nil {
return err
}
current = append(current, decider)
default:
return fmt.Errorf("No such engine: %s", deciderConfig.Engine)
}
}
pd, err := protocols.Get(protocol)
if err != nil {
return err
}
deciders[pd.ID] = decider.NewAggDecider(current...)
}
}
// Private RPC methods to enumerate logs, decide on outputs and get raw values from storage
logRpcServer := server.NewLogRPCServer(store, rootBucket, clientPool, pubMap)
// Private RPC methods to get log heads, log entries, submit entries & commit queue
rpcServer := server.NewRPCServer(store, rootBucket, clientPool, pubMap, logs, deciders)
log.Debug("Creating gRPC servers")
errChan := make(chan error)
interceptors := connect.WithInterceptors(authInterceptor)
createServer := func(lis net.Listener) *http.Server {
mux := http.NewServeMux()
mux.Handle(rpcconnect.NewLogRPCHandler(logRpcServer, interceptors))
mux.Handle(rpcconnect.NewRPCApiHandler(rpcServer, interceptors))
mux.Handle(apiconnect.NewLogAPIHandler(logAPIServer, interceptors))
mux.Handle(apiconnect.NewNodeAPIHandler(nodeAPIServer, interceptors))
// Prometheus metrics
mux.Handle("/metrics", promhttp.Handler())
server := &http.Server{Handler: h2c.NewHandler(mux, &http2.Server{})}
go func() {
err := server.Serve(lis)
if err != nil {
errChan <- fmt.Errorf("failed to serve: %v", err)
}
}()
return server
}
var servers []*http.Server
// Systemd socket activation
listeners, err := activation.Listeners()
if err != nil {
log.Fatalf("cannot retrieve listeners: %s", err)
}
for _, lis := range listeners {
log.WithFields(log.Fields{
"address": lis.Addr(),
}).Info("Using socket activated listener")
servers = append(servers, createServer(lis))
}
// Create sockets
for _, addr := range daemonListenAddresses {
u, err := url.Parse(addr)
if err != nil {
log.Fatalf("Could not parse url: %v", err)
}
family := ""
host := ""
switch u.Scheme {
case "unix":
family = "unix"
host = u.Host + u.Path
case "http":
family = "tcp"
host = u.Host
default:
log.Fatalf("Socket with scheme '%s' unsupported", u.Scheme)
}
lis, err := net.Listen(family, host)
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
log.WithFields(log.Fields{
"address": addr,
}).Info("Listening to address")
servers = append(servers, createServer(lis))
}
if len(servers) <= 0 {
log.Fatal("No listeners configured!")
}
quit := make(chan os.Signal, 1)
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-quit
wg := new(sync.WaitGroup)
log.Info("Received shutdown signal, closing down server gracefully")
for _, server := range servers {
server := server
wg.Add(1)
go func() {
defer wg.Done()
server.Close()
}()
}
wg.Wait()
log.Info("Done closing down servers")
close(errChan)
}()
for err := range errChan {
log.Fatal(err)
}
return nil
},
}
func initDaemon() { |
homeDir, _ := os.UserHomeDir()
defaultStateDir := path.Join(homeDir, ".local/share/trustix")
daemonCmd.Flags().StringVar(&daemonStateDirectory, "state", defaultStateDir, "State directory")
// Default poll every 30 minutes
daemonCmd.Flags().Float64Var(&daemonPollInterval, "interval", 60*30, "Log poll interval in seconds")
daemonCmd.Flags().StringSliceVar(&daemonListenAddresses, "listen", []string{}, "Listen to address")
daemonCmd.Flags().StringVar(&daemonConfigPath, "config", "", "Path to config.toml/json")
}
| identifier_body | |
daemon.go | // Copyright (C) 2021 Tweag IO
// Copyright © 2020-2022 The Trustix Authors
//
// SPDX-License-Identifier: GPL-3.0-only
package cmd
import (
"crypto"
"fmt"
"math"
"net"
"net/http"
"net/url"
"os"
"os/signal"
"path"
"sync"
"syscall"
"time"
connect "github.com/bufbuild/connect-go"
"github.com/coreos/go-systemd/activation"
"github.com/nix-community/trustix/packages/go-lib/executor"
"github.com/nix-community/trustix/packages/trustix-proto/api"
"github.com/nix-community/trustix/packages/trustix-proto/api/apiconnect"
"github.com/nix-community/trustix/packages/trustix-proto/protocols"
"github.com/nix-community/trustix/packages/trustix-proto/rpc/rpcconnect"
"github.com/nix-community/trustix/packages/trustix/auth"
"github.com/nix-community/trustix/packages/trustix/client"
tapi "github.com/nix-community/trustix/packages/trustix/internal/api"
conf "github.com/nix-community/trustix/packages/trustix/internal/config"
"github.com/nix-community/trustix/packages/trustix/internal/constants"
"github.com/nix-community/trustix/packages/trustix/internal/decider"
"github.com/nix-community/trustix/packages/trustix/internal/lib"
"github.com/nix-community/trustix/packages/trustix/internal/pool"
pub "github.com/nix-community/trustix/packages/trustix/internal/publisher"
"github.com/nix-community/trustix/packages/trustix/internal/server"
"github.com/nix-community/trustix/packages/trustix/internal/signer"
"github.com/nix-community/trustix/packages/trustix/internal/sthsync"
"github.com/nix-community/trustix/packages/trustix/internal/storage"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
)
var daemonListenAddresses []string
var daemonConfigPath string
var daemonStateDirectory string
var daemonPollInterval float64
var daemonCmd = &cobra.Command{
Use: "daemon",
Short: "Trustix daemon",
RunE: func(cmd *cobra.Command, args []string) error {
if daemonConfigPath == "" {
return fmt.Errorf("Missing config flag")
}
config, err := conf.NewConfigFromFile(daemonConfigPath)
if err != nil {
log.Fatal(err)
}
log.WithFields(log.Fields{
"directory": daemonStateDirectory,
}).Info("Creating state directory")
err = os.MkdirAll(daemonStateDirectory, 0700)
if err != nil {
log.Fatalf("Could not create state directory: %s", daemonStateDirectory)
}
var store storage.Storage
{
switch config.Storage.Type {
case "native":
store, err = storage.NewNativeStorage(daemonStateDirectory)
case "memory":
store, err = storage.NewMemoryStorage()
}
if err != nil {
log.Fatalf("Could not initialise store: %v", err)
}
}
// Set up write access tokens
var authInterceptor connect.UnaryInterceptorFunc
{
writeTokens := make(map[string]*auth.PublicToken)
// From the TRUSTIX_TOKEN env var
// This is the default token used.
defaultTokenPath := os.Getenv("TRUSTIX_TOKEN")
if defaultTokenPath != "" {
f, err := os.Open(defaultTokenPath)
if err != nil {
log.Fatalf("Error opening private token file '%s': %v", defaultTokenPath, err)
}
tok, err := auth.NewPublicTokenFromPriv(f)
if err != nil {
log.Fatalf("Error creating token: %v", err)
}
writeTokens[tok.Name] = tok
}
for _, publicTokenStr := range config.WriteTokens {
tok, err := auth.NewPublicTokenFromPub(publicTokenStr)
if err != nil {
log.Fatalf("Error creating token: %v", err)
}
_, ok := writeTokens[tok.Name]
if ok {
log.Fatalf("Naming collision in tokens: '%s' exists more than once", tok.Name)
}
writeTokens[tok.Name] = tok
}
authInterceptor = auth.NewAuthInterceptor(nil, writeTokens)
}
signers := make(map[string]crypto.Signer)
{
for name, signerConfig := range config.Signers {
var sig crypto.Signer
log.WithFields(log.Fields{
"type": signerConfig.Type,
"name": name,
}).Info("Creating signer")
switch signerConfig.Type {
case "ed25519":
sig, err = signer.NewED25519Signer(signerConfig.ED25519.PrivateKeyPath)
if err != nil {
return err
}
default:
return fmt.Errorf("Signer type '%s' is not supported.", signerConfig.Type)
}
signers[name] = sig
}
}
// These APIs are static and fully controlled by the configuration file
logs := []*api.Log{}
logsPublished := []*api.Log{}
{
for _, pubConf := range config.Publishers {
pd, err := protocols.Get(pubConf.Protocol)
if err != nil {
return err
}
logMode := api.Log_LogModes(0)
logID, err := pubConf.PublicKey.LogID(pd, logMode)
if err != nil {
log.Fatal(err)
}
signer, err := pubConf.PublicKey.Signer()
if err != nil {
log.Fatal(err)
}
log := &api.Log{
LogID: &logID,
Meta: pubConf.GetMeta(),
Signer: signer,
Protocol: &pd.ID,
Mode: logMode.Enum(), // Hard-coded for now
}
logs = append(logs, log)
logsPublished = append(logsPublished, log)
}
for _, subConf := range config.Subscribers {
pd, err := protocols.Get(subConf.Protocol)
if err != nil {
return err
}
logMode := api.Log_LogModes(0)
logID, err := subConf.PublicKey.LogID(pd, logMode)
if err != nil {
log.Fatal(err)
}
signer, err := subConf.PublicKey.Signer()
if err != nil {
log.Fatal(err)
}
log := &api.Log{
LogID: &logID,
Meta: subConf.GetMeta(),
Signer: signer,
Protocol: &pd.ID,
Mode: logMode.Enum(),
}
logs = append(logs, log)
}
}
clientPool := pool.NewClientPool()
defer clientPool.Close()
for _, remote := range config.Remotes {
remote := remote
go func() {
pc, err := clientPool.Dial(remote)
if err != nil {
log.WithFields(log.Fields{
"remote": remote,
}).Error("Couldn't dial remote")
return
}
pc.Activate()
}()
}
rootBucket := &storage.Bucket{}
caValueBucket := rootBucket.Cd(constants.CaValueBucket)
nodeAPI := tapi.NewKVStoreNodeAPI(store, caValueBucket, logsPublished)
nodeAPIServer := server.NewNodeAPIServer(nodeAPI)
headSyncCloser := lib.NewMultiCloser()
defer headSyncCloser.Close()
pubMap := pub.NewPublisherMap()
defer pubMap.Close()
{
logInitExecutor := executor.NewParallellExecutor()
for _, subscriberConfig := range config.Subscribers {
subConf := subscriberConfig
logInitExecutor.Add(func() error { // nolint:errcheck
pubBytes, err := subConf.PublicKey.Decode()
if err != nil {
return err
}
pd, err := protocols.Get(subConf.Protocol)
if err != nil {
return err
}
logMode := api.Log_LogModes(0)
logID, err := subConf.PublicKey.LogID(pd, logMode)
if err != nil {
return err
}
log.WithFields(log.Fields{
"id": logID,
"pubkey": subConf.PublicKey.Pub,
}).Info("Adding log subscriber")
var verifier signer.Verifier
{
switch subConf.PublicKey.Type {
case "ed25519":
verifier, err = signer.NewED25519Verifier(pubBytes)
if err != nil {
return err
}
default:
return fmt.Errorf("Verifier type '%s' is not supported.", subConf.PublicKey.Type)
}
}
pollDuration := time.Millisecond * time.Duration(math.Round(daemonPollInterval/1000))
headSyncCloser.Add(sthsync.NewSTHSyncer(logID, store, rootBucket.Cd(logID), clientPool, verifier, pollDuration, pd))
return nil
})
}
for i, publisherConfig := range config.Publishers {
i := i
publisherConfig := publisherConfig
logInitExecutor.Add(func() error { // nolint:errcheck
logID := *logsPublished[i].LogID
log.WithFields(log.Fields{
"id": logID,
"pubkey": publisherConfig.PublicKey.Pub,
}).Info("Adding log")
pd, err := protocols.Get(publisherConfig.Protocol)
if err != nil {
return err
}
logAPI, err := tapi.NewKVStoreLogAPI(logID, store, rootBucket.Cd(logID), pd)
if err != nil {
return err
}
sig, ok := signers[publisherConfig.Signer]
if !ok {
return fmt.Errorf("Missing signer '%s'", publisherConfig.Signer)
}
publisher, err := pub.NewPublisher(logID, store, caValueBucket, rootBucket.Cd(logID), sig, pd)
if err != nil {
return err
}
if err = pubMap.Set(logID, publisher); err != nil {
return err
}
pc, err := clientPool.Add(&client.Client{
NodeAPI: nodeAPI,
LogAPI: logAPI,
Type: client.LocalClientType,
}, []string{logID})
if err != nil {
return err
}
pc.Activate()
return nil
})
}
err = logInitExecutor.Wait()
if err != nil {
return err
}
}
logAPIServer := server.NewLogAPIServer(logsPublished, clientPool)
deciders := make(map[string]decider.LogDecider)
{
for protocol, deciderConfigs := range config.Deciders {
current := []decider.LogDecider{}
for _, deciderConfig := range deciderConfigs {
switch deciderConfig.Engine {
case "javascript":
decider, err := decider.NewJSDecider(deciderConfig.JS.Function)
if err != nil {
return err
}
current = append(current, decider)
case "percentage":
decider, err := decider.NewMinimumPercentDecider(deciderConfig.Percentage.Minimum)
if err != nil {
return err
}
current = append(current, decider)
case "logid":
decider, err := decider.NewLogIDDecider(deciderConfig.LogID.ID)
if err != nil {
return err
}
current = append(current, decider)
default:
return fmt.Errorf("No such engine: %s", deciderConfig.Engine)
}
}
pd, err := protocols.Get(protocol)
if err != nil {
return err
}
deciders[pd.ID] = decider.NewAggDecider(current...)
}
}
// Private RPC methods to enumerate logs, decide on outputs and get raw values from storage
logRpcServer := server.NewLogRPCServer(store, rootBucket, clientPool, pubMap)
// Private RPC methods to get log heads, log entries, submit entries & commit queue
rpcServer := server.NewRPCServer(store, rootBucket, clientPool, pubMap, logs, deciders)
log.Debug("Creating gRPC servers")
errChan := make(chan error)
interceptors := connect.WithInterceptors(authInterceptor)
createServer := func(lis net.Listener) *http.Server {
mux := http.NewServeMux()
mux.Handle(rpcconnect.NewLogRPCHandler(logRpcServer, interceptors))
mux.Handle(rpcconnect.NewRPCApiHandler(rpcServer, interceptors))
mux.Handle(apiconnect.NewLogAPIHandler(logAPIServer, interceptors))
mux.Handle(apiconnect.NewNodeAPIHandler(nodeAPIServer, interceptors))
// Prometheus metrics
mux.Handle("/metrics", promhttp.Handler())
server := &http.Server{Handler: h2c.NewHandler(mux, &http2.Server{})}
go func() {
err := server.Serve(lis)
if err != nil {
errChan <- fmt.Errorf("failed to serve: %v", err)
}
}()
return server
}
var servers []*http.Server
// Systemd socket activation
listeners, err := activation.Listeners()
if err != nil {
log.Fatalf("cannot retrieve listeners: %s", err)
}
for _, lis := range listeners {
log.WithFields(log.Fields{
"address": lis.Addr(),
}).Info("Using socket activated listener")
servers = append(servers, createServer(lis))
}
// Create sockets
for _, addr := range daemonListenAddresses {
u, err := url.Parse(addr)
if err != nil {
log.Fatalf("Could not parse url: %v", err)
}
family := ""
host := ""
switch u.Scheme {
case "unix":
family = "unix"
host = u.Host + u.Path
case "http":
family = "tcp"
host = u.Host
default:
log.Fatalf("Socket with scheme '%s' unsupported", u.Scheme)
}
lis, err := net.Listen(family, host)
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
log.WithFields(log.Fields{
"address": addr,
}).Info("Listening to address")
servers = append(servers, createServer(lis))
}
if len(servers) <= 0 {
log.Fatal("No listeners configured!")
}
quit := make(chan os.Signal, 1)
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-quit
wg := new(sync.WaitGroup)
log.Info("Received shutdown signal, closing down server gracefully")
for _, server := range servers {
server := server
wg.Add(1)
go func() {
defer wg.Done()
server.Close()
}()
}
wg.Wait()
log.Info("Done closing down servers")
close(errChan)
}()
for err := range errChan {
log.Fatal(err) | }
func initDaemon() {
homeDir, _ := os.UserHomeDir()
defaultStateDir := path.Join(homeDir, ".local/share/trustix")
daemonCmd.Flags().StringVar(&daemonStateDirectory, "state", defaultStateDir, "State directory")
// Default poll every 30 minutes
daemonCmd.Flags().Float64Var(&daemonPollInterval, "interval", 60*30, "Log poll interval in seconds")
daemonCmd.Flags().StringSliceVar(&daemonListenAddresses, "listen", []string{}, "Listen to address")
daemonCmd.Flags().StringVar(&daemonConfigPath, "config", "", "Path to config.toml/json")
} | }
return nil
}, | random_line_split |
daemon.go | // Copyright (C) 2021 Tweag IO
// Copyright © 2020-2022 The Trustix Authors
//
// SPDX-License-Identifier: GPL-3.0-only
package cmd
import (
"crypto"
"fmt"
"math"
"net"
"net/http"
"net/url"
"os"
"os/signal"
"path"
"sync"
"syscall"
"time"
connect "github.com/bufbuild/connect-go"
"github.com/coreos/go-systemd/activation"
"github.com/nix-community/trustix/packages/go-lib/executor"
"github.com/nix-community/trustix/packages/trustix-proto/api"
"github.com/nix-community/trustix/packages/trustix-proto/api/apiconnect"
"github.com/nix-community/trustix/packages/trustix-proto/protocols"
"github.com/nix-community/trustix/packages/trustix-proto/rpc/rpcconnect"
"github.com/nix-community/trustix/packages/trustix/auth"
"github.com/nix-community/trustix/packages/trustix/client"
tapi "github.com/nix-community/trustix/packages/trustix/internal/api"
conf "github.com/nix-community/trustix/packages/trustix/internal/config"
"github.com/nix-community/trustix/packages/trustix/internal/constants"
"github.com/nix-community/trustix/packages/trustix/internal/decider"
"github.com/nix-community/trustix/packages/trustix/internal/lib"
"github.com/nix-community/trustix/packages/trustix/internal/pool"
pub "github.com/nix-community/trustix/packages/trustix/internal/publisher"
"github.com/nix-community/trustix/packages/trustix/internal/server"
"github.com/nix-community/trustix/packages/trustix/internal/signer"
"github.com/nix-community/trustix/packages/trustix/internal/sthsync"
"github.com/nix-community/trustix/packages/trustix/internal/storage"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
)
var daemonListenAddresses []string
var daemonConfigPath string
var daemonStateDirectory string
var daemonPollInterval float64
var daemonCmd = &cobra.Command{
Use: "daemon",
Short: "Trustix daemon",
RunE: func(cmd *cobra.Command, args []string) error {
if daemonConfigPath == "" {
return fmt.Errorf("Missing config flag")
}
config, err := conf.NewConfigFromFile(daemonConfigPath)
if err != nil {
log.Fatal(err)
}
log.WithFields(log.Fields{
"directory": daemonStateDirectory,
}).Info("Creating state directory")
err = os.MkdirAll(daemonStateDirectory, 0700)
if err != nil {
log.Fatalf("Could not create state directory: %s", daemonStateDirectory)
}
var store storage.Storage
{
switch config.Storage.Type {
case "native":
store, err = storage.NewNativeStorage(daemonStateDirectory)
case "memory":
store, err = storage.NewMemoryStorage()
}
if err != nil {
log.Fatalf("Could not initialise store: %v", err)
}
}
// Set up write access tokens
var authInterceptor connect.UnaryInterceptorFunc
{
writeTokens := make(map[string]*auth.PublicToken)
// From the TRUSTIX_TOKEN env var
// This is the default token used.
defaultTokenPath := os.Getenv("TRUSTIX_TOKEN")
if defaultTokenPath != "" {
f, err := os.Open(defaultTokenPath)
if err != nil {
log.Fatalf("Error opening private token file '%s': %v", defaultTokenPath, err)
}
tok, err := auth.NewPublicTokenFromPriv(f)
if err != nil {
log.Fatalf("Error creating token: %v", err)
}
writeTokens[tok.Name] = tok
}
for _, publicTokenStr := range config.WriteTokens {
tok, err := auth.NewPublicTokenFromPub(publicTokenStr)
if err != nil {
log.Fatalf("Error creating token: %v", err)
}
_, ok := writeTokens[tok.Name]
if ok {
log.Fatalf("Naming collision in tokens: '%s' exists more than once", tok.Name)
}
writeTokens[tok.Name] = tok
}
authInterceptor = auth.NewAuthInterceptor(nil, writeTokens)
}
signers := make(map[string]crypto.Signer)
{
for name, signerConfig := range config.Signers {
var sig crypto.Signer
log.WithFields(log.Fields{
"type": signerConfig.Type,
"name": name,
}).Info("Creating signer")
switch signerConfig.Type {
case "ed25519":
sig, err = signer.NewED25519Signer(signerConfig.ED25519.PrivateKeyPath)
if err != nil {
return err
}
default:
return fmt.Errorf("Signer type '%s' is not supported.", signerConfig.Type)
}
signers[name] = sig
}
}
// These APIs are static and fully controlled by the configuration file
logs := []*api.Log{}
logsPublished := []*api.Log{}
{
for _, pubConf := range config.Publishers {
pd, err := protocols.Get(pubConf.Protocol)
if err != nil {
return err
}
logMode := api.Log_LogModes(0)
logID, err := pubConf.PublicKey.LogID(pd, logMode)
if err != nil {
log.Fatal(err)
}
signer, err := pubConf.PublicKey.Signer()
if err != nil {
log.Fatal(err)
}
log := &api.Log{
LogID: &logID,
Meta: pubConf.GetMeta(),
Signer: signer,
Protocol: &pd.ID,
Mode: logMode.Enum(), // Hard-coded for now
}
logs = append(logs, log)
logsPublished = append(logsPublished, log)
}
for _, subConf := range config.Subscribers {
pd, err := protocols.Get(subConf.Protocol)
if err != nil {
return err
}
logMode := api.Log_LogModes(0)
logID, err := subConf.PublicKey.LogID(pd, logMode)
if err != nil {
log.Fatal(err)
}
signer, err := subConf.PublicKey.Signer()
if err != nil {
log.Fatal(err)
}
log := &api.Log{
LogID: &logID,
Meta: subConf.GetMeta(),
Signer: signer,
Protocol: &pd.ID,
Mode: logMode.Enum(),
}
logs = append(logs, log)
}
}
clientPool := pool.NewClientPool()
defer clientPool.Close()
for _, remote := range config.Remotes {
remote := remote
go func() {
pc, err := clientPool.Dial(remote)
if err != nil {
log.WithFields(log.Fields{
"remote": remote,
}).Error("Couldn't dial remote")
return
}
pc.Activate()
}()
}
rootBucket := &storage.Bucket{}
caValueBucket := rootBucket.Cd(constants.CaValueBucket)
nodeAPI := tapi.NewKVStoreNodeAPI(store, caValueBucket, logsPublished)
nodeAPIServer := server.NewNodeAPIServer(nodeAPI)
headSyncCloser := lib.NewMultiCloser()
defer headSyncCloser.Close()
pubMap := pub.NewPublisherMap()
defer pubMap.Close()
{
logInitExecutor := executor.NewParallellExecutor()
for _, subscriberConfig := range config.Subscribers {
subConf := subscriberConfig
logInitExecutor.Add(func() error { // nolint:errcheck
pubBytes, err := subConf.PublicKey.Decode()
if err != nil {
return err
}
pd, err := protocols.Get(subConf.Protocol)
if err != nil {
return err
}
logMode := api.Log_LogModes(0)
logID, err := subConf.PublicKey.LogID(pd, logMode)
if err != nil {
return err
}
log.WithFields(log.Fields{
"id": logID,
"pubkey": subConf.PublicKey.Pub,
}).Info("Adding log subscriber")
var verifier signer.Verifier
{
switch subConf.PublicKey.Type {
case "ed25519":
verifier, err = signer.NewED25519Verifier(pubBytes)
if err != nil {
return err
}
default:
return fmt.Errorf("Verifier type '%s' is not supported.", subConf.PublicKey.Type)
}
}
pollDuration := time.Millisecond * time.Duration(math.Round(daemonPollInterval/1000))
headSyncCloser.Add(sthsync.NewSTHSyncer(logID, store, rootBucket.Cd(logID), clientPool, verifier, pollDuration, pd))
return nil
})
}
for i, publisherConfig := range config.Publishers {
i := i
publisherConfig := publisherConfig
logInitExecutor.Add(func() error { // nolint:errcheck
logID := *logsPublished[i].LogID
log.WithFields(log.Fields{
"id": logID,
"pubkey": publisherConfig.PublicKey.Pub,
}).Info("Adding log")
pd, err := protocols.Get(publisherConfig.Protocol)
if err != nil {
return err
}
logAPI, err := tapi.NewKVStoreLogAPI(logID, store, rootBucket.Cd(logID), pd)
if err != nil {
return err
}
sig, ok := signers[publisherConfig.Signer]
if !ok {
return fmt.Errorf("Missing signer '%s'", publisherConfig.Signer)
}
publisher, err := pub.NewPublisher(logID, store, caValueBucket, rootBucket.Cd(logID), sig, pd)
if err != nil {
return err
}
if err = pubMap.Set(logID, publisher); err != nil {
return err
}
pc, err := clientPool.Add(&client.Client{
NodeAPI: nodeAPI,
LogAPI: logAPI,
Type: client.LocalClientType,
}, []string{logID})
if err != nil {
return err
}
pc.Activate()
return nil
})
}
err = logInitExecutor.Wait()
if err != nil {
return err
}
}
logAPIServer := server.NewLogAPIServer(logsPublished, clientPool)
deciders := make(map[string]decider.LogDecider)
{
for protocol, deciderConfigs := range config.Deciders {
current := []decider.LogDecider{}
for _, deciderConfig := range deciderConfigs {
switch deciderConfig.Engine {
case "javascript":
decider, err := decider.NewJSDecider(deciderConfig.JS.Function)
if err != nil {
return err
}
current = append(current, decider)
case "percentage":
decider, err := decider.NewMinimumPercentDecider(deciderConfig.Percentage.Minimum)
if err != nil {
return err
}
current = append(current, decider)
case "logid":
decider, err := decider.NewLogIDDecider(deciderConfig.LogID.ID)
if err != nil {
return err
}
current = append(current, decider)
default:
return fmt.Errorf("No such engine: %s", deciderConfig.Engine)
}
}
pd, err := protocols.Get(protocol)
if err != nil {
return err
}
deciders[pd.ID] = decider.NewAggDecider(current...)
}
}
// Private RPC methods to enumerate logs, decide on outputs and get raw values from storage
logRpcServer := server.NewLogRPCServer(store, rootBucket, clientPool, pubMap)
// Private RPC methods to get log heads, log entries, submit entries & commit queue
rpcServer := server.NewRPCServer(store, rootBucket, clientPool, pubMap, logs, deciders)
log.Debug("Creating gRPC servers")
errChan := make(chan error)
interceptors := connect.WithInterceptors(authInterceptor)
createServer := func(lis net.Listener) *http.Server {
mux := http.NewServeMux()
mux.Handle(rpcconnect.NewLogRPCHandler(logRpcServer, interceptors))
mux.Handle(rpcconnect.NewRPCApiHandler(rpcServer, interceptors))
mux.Handle(apiconnect.NewLogAPIHandler(logAPIServer, interceptors))
mux.Handle(apiconnect.NewNodeAPIHandler(nodeAPIServer, interceptors))
// Prometheus metrics
mux.Handle("/metrics", promhttp.Handler())
server := &http.Server{Handler: h2c.NewHandler(mux, &http2.Server{})}
go func() {
err := server.Serve(lis)
if err != nil {
errChan <- fmt.Errorf("failed to serve: %v", err)
}
}()
return server
}
var servers []*http.Server
// Systemd socket activation
listeners, err := activation.Listeners()
if err != nil {
log.Fatalf("cannot retrieve listeners: %s", err)
}
for _, lis := range listeners {
log.WithFields(log.Fields{
"address": lis.Addr(),
}).Info("Using socket activated listener")
servers = append(servers, createServer(lis))
}
// Create sockets
for _, addr := range daemonListenAddresses {
u, err := url.Parse(addr)
if err != nil {
log.Fatalf("Could not parse url: %v", err)
}
family := ""
host := ""
switch u.Scheme {
case "unix":
family = "unix"
host = u.Host + u.Path
case "http":
family = "tcp"
host = u.Host
default:
log.Fatalf("Socket with scheme '%s' unsupported", u.Scheme)
}
lis, err := net.Listen(family, host)
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
log.WithFields(log.Fields{
"address": addr,
}).Info("Listening to address")
servers = append(servers, createServer(lis))
}
if len(servers) <= 0 {
log.Fatal("No listeners configured!")
}
quit := make(chan os.Signal, 1)
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-quit
wg := new(sync.WaitGroup)
log.Info("Received shutdown signal, closing down server gracefully")
for _, server := range servers {
server := server
wg.Add(1)
go func() {
defer wg.Done()
server.Close()
}()
}
wg.Wait()
log.Info("Done closing down servers")
close(errChan)
}()
for err := range errChan {
log.Fatal(err)
}
return nil
},
}
func i | ) {
homeDir, _ := os.UserHomeDir()
defaultStateDir := path.Join(homeDir, ".local/share/trustix")
daemonCmd.Flags().StringVar(&daemonStateDirectory, "state", defaultStateDir, "State directory")
// Default poll every 30 minutes
daemonCmd.Flags().Float64Var(&daemonPollInterval, "interval", 60*30, "Log poll interval in seconds")
daemonCmd.Flags().StringSliceVar(&daemonListenAddresses, "listen", []string{}, "Listen to address")
daemonCmd.Flags().StringVar(&daemonConfigPath, "config", "", "Path to config.toml/json")
}
| nitDaemon( | identifier_name |
fetch_fnz_mf.py | import requests
import datetime
from dbConfig import *
import logging
FORMAT = "[f360 fetch_fnz_mf] %(asctime)s - [%(funcName)s()] %(message)s"
logging.basicConfig(format=FORMAT, level=runtime_cfg.LOGLEVEL)
logger = logging.getLogger(__name__)
def read_from_ms_database():
logger.info('Reading MS database for basic fund info')
conn = get_ms_db_conn()
try:
with conn.cursor(pymysql.cursors.DictCursor) as cursor:
sql = 'SELECT f.MStarID, f.ISIN, f.BrandingName, fcm.FundName, fcm.FundName_CN, f.InvestmentStrategy, ' \
' COALESCE(st.InvestmentStrategy_CHN, \'\') as InvestmentStrategy_CHN, ' \
' COALESCE(ftc.InvestmentStrategy_TCHN, \'\') as InvestmentStrategy_TCHN, ' \
' f.CategoryCode, f.BroadCategoryGroupID ' \
'from hkfundbasicinfo f ' \
' LEFT JOIN hkfundchineseinvestmentstrategy st on f.mstarId = st.mstarId, ' \
' hkfundchinesenamemapping fcm, hkfundtraditionalchinese ftc ' \
'where ' \
' f.mstarId = fcm.mstarId ' \
' and f.mstarId = ftc.mstarId '
cursor.execute(sql)
query_result = cursor.fetchall()
finally:
conn.close()
result = {}
#return dict((isin, {'mStarId': mStarId, 'strategy': strategy, 'mstar_category': mstar_category, 'branding': branding}) for isin, mStarId, strategy, mstar_category, branding in result)
for item in query_result:
result.setdefault(item['ISIN'], item)
return result
def get_fnz_funds():
logger.info('Getting FNZ Fund from ' + fnz_fund_api)
#url = 'https://distributionserviceofsu36.fnz.com/api/distribution/v3/funds?ProductCode='+arg_instcode
headers = {}
headers['Content-Type'] = 'application/json'
headers['X-ApplicationName'] = '1'
headers['X-UserContext'] = 'UserId=436476' # or UserId = 436477
headers['Accept'] = 'application/json'
req = requests.get(url=fnz_fund_api, headers=headers)
r = req.json()
if int(r['TotalNumberOfResults']) > 0:
return r['PageOfResults'] # from a list of dictionary
else:
return []
def insert_mutual_funds(isin_map):
logger.info('Inserting into mutual fund database')
conn = get_fund_db_conn()
cursor = conn.cursor()
fund_name_sql = "REPLACE INTO mstar_fund_name(mstar_id, name, name_cn) values(%s, %s, %s)"
mf_sql = "REPLACE INTO mutual_fund (name, product_id, isin, fnz_code, currency, inception_date, fund_size, investment_strategy, investment_strategy_cn, investment_strategy_tw, domicile, div_type, " \
"asset_class, sub_asset_class, branding, company_name, primary_index, mstar_id, mstar_category_code, mstar_broad_category_group_id, buyable) " \
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
charge_sql = "REPLACE INTO fund_charge (isin, initial_inv_amt, initial_charge, annual_charge, subscription_cost, redemption_cost) VALUES(%s, %s, %s, %s, %s, %s)"
performance_sql = "REPLACE INTO fnz_fund_performance_history(isin, pricing_date, currency, price, ytd, 3m, 1y, 2y, 3y, 5y) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
performance_sql_2 = "REPLACE INTO fnz_fund_performance(isin, pricing_date, currency, price, ytd, 3m, 1y, 2y, 3y, 5y) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
non_tradable_sql = 'UPDATE mutual_fund set buyable = 0 where product_id = %s'
all_funds = get_fnz_funds()
num_of_funds = len(all_funds)
logger.info('# of funds fetched from FNZ: ' + str(num_of_funds))
fund_name_stmt = []
mutual_fund_stmt = []
fund_charges_stmt = []
performance_stmt = []
non_tradable_funds_stmt = []
for row in all_funds:
sellable = row['Sellable']
if sellable != True:
product_id = row.get('ProductId', '')
non_tradable_funds_stmt.append((product_id))
continue
isin = row['FundIdentifiers'].get('Isin','')
if isin in isin_map:
branding = isin_map[isin]['BrandingName']
mStarId = isin_map[isin]['MStarID']
name = isin_map[isin]['FundName']
name_cn = isin_map[isin]['FundName_CN']
strategy = isin_map[isin]['InvestmentStrategy']
strategy_cn = '' if isin_map[isin]['InvestmentStrategy_CHN'] == 'NULL' else isin_map[isin]['InvestmentStrategy_CHN']
strategy_tw = '' if isin_map[isin]['InvestmentStrategy_TCHN'] == 'NULL' else isin_map[isin]['InvestmentStrategy_TCHN']
mstar_category = isin_map[isin]['CategoryCode']
mstar_broad_category_group_id = isin_map[isin]['BroadCategoryGroupID']
else:
if isin is not None and row['Sellable']:
log_msg = 'missing isin [%s], buyable [%s] description [%s] in morningStar' % (isin, row['Sellable'], row['Description'])
logger.info(log_msg)
continue
pricing_date = row['Price']['PricingDate']
price = "{0:.2f}".format(row['Price']['Value'].get('Value', 0.0))
fnz_code = row['FundIdentifiers'].get('ProductCode','')
#name = row.get('Name','')
product_id = row.get('ProductId','')
logger.info('Processing product with id %s' % product_id)
inception_date = row.get('LaunchDate','')
fund_size = row.get('FundSize', 0.0)
#strategy = row.get('Description','')
asset_class = row.get('AssetClass','')
sub_asset_class = row.get('AssetSubClass','')
buyable = row.get('Sellable','')
fund_manager_detail = row.get('FundManagerDetails', None)
if isinstance(fund_manager_detail, dict):
company = row['FundManagerDetails'].get('CompanyName', '')
else:
company = ''
primary_index_obj = row.get('PrimaryIndex', None)
if isinstance(primary_index_obj, dict):
primary_index = row['PrimaryIndex'].get('Name', '')
else:
primary_index = ''
domicile = row.get('Domicile','')
currency = row.get('Currency','')
div_type = row.get('DividendReinvestment','')
min_initial_inv_amt = str(row.get('MinInitialInvestmentAmount', ''))
charges = row.get('Charges', None)
if isinstance(charges, dict):
initial_charge = str(row['Charges']['InitialCharge'].get('Value', ''))
annual_management_charge = str(row['Charges']['AnualManagementCharge'].get('Value', ''))
subscription_charge = str(row['Charges']['SubScriptionCost'].get('Value', ''))
redemption_charge = str(row['Charges']['RedemptionCost'].get('Value', ''))
else:
initial_charge = ''
annual_management_charge = ''
subscription_charge = ''
redemption_charge = ''
#Performance
ytd = row['HistoricalPerformance'].get('YearToDate', 0.0)
three_m = row['HistoricalPerformance'].get('ThreeMonth', 0.0)
one_y = row['HistoricalPerformance'].get('OneYear', 0.0)
two_y = row['HistoricalPerformance'].get('TwoYear', 0.0)
three_y = row['HistoricalPerformance'].get('ThreeYear', 0.0)
five_y = row['HistoricalPerformance'].get('FiveYear', 0.0)
ytd = '' if ytd is None else "{0:.5f}".format(ytd)
three_m = '' if three_m is None else "{0:.5f}".format(three_m)
one_y = '' if one_y is None else "{0:.5f}".format(one_y)
two_y = '' if two_y is None else "{0:.5f}".format(two_y)
three_y = '' if three_y is None else "{0:.5f}".format(three_y)
five_y = '' if five_y is None else "{0:.5f}".format(five_y)
fund_name_stmt.append((mStarId, name, name_cn))
mutual_fund_stmt.append((name, product_id, isin, fnz_code, currency, inception_date, fund_size,
strategy, strategy_cn, strategy_tw, domicile, div_type, asset_class, sub_asset_class,
branding, company, primary_index, mStarId, mstar_category, mstar_broad_category_group_id, buyable))
fund_charges_stmt.append((isin, min_initial_inv_amt, initial_charge, annual_management_charge, subscription_charge, redemption_charge))
performance_stmt.append((isin, pricing_date, currency, price, ytd, three_m, one_y, two_y, three_y, five_y))
for product_id in non_tradable_funds_stmt:
log_msg = 'Skipping non tradable product id: %s' % product_id
logger.info(log_msg)
logger.info('Executing insert sql')
cursor.executemany(fund_name_sql, fund_name_stmt)
cursor.executemany(mf_sql, mutual_fund_stmt)
cursor.executemany(charge_sql, fund_charges_stmt)
cursor.executemany(performance_sql, performance_stmt)
cursor.executemany(performance_sql_2, performance_stmt)
cursor.executemany(non_tradable_sql, non_tradable_funds_stmt)
logger.info('Commiting data into database')
conn.commit()
conn.close()
def extract_brandings(isin_map):
all_funds = list(isin_map.values())
# remove duplicates
brandings = set(fund['BrandingName'].strip() for fund in all_funds)
# filter ''
brandings = [brand for brand in brandings if brand.strip()]
return brandings
def insert_fund_branding(brandings):
|
def read_fnz_fund_list():
isin_map = read_from_ms_database()
brandings = extract_brandings(isin_map)
insert_fund_branding(brandings)
insert_mutual_funds(isin_map)
if __name__ == '__main__':
read_fnz_fund_list()
logger.info('Finished updating database using FNZ api') | logger.info('Inserting into fund_branding')
conn = get_fund_db_conn()
cursor = conn.cursor()
fund_branding_sql = "INSERT INTO fund_branding values(%s, '', '') ON DUPLICATE KEY UPDATE name = %s"
fund_branding_stmt = []
for branding in brandings:
fund_branding_stmt.append((branding, branding))
cursor.executemany(fund_branding_sql, fund_branding_stmt)
logger.info('Commiting data into database')
conn.commit()
conn.close() | identifier_body |
fetch_fnz_mf.py | import requests
import datetime
from dbConfig import *
import logging
FORMAT = "[f360 fetch_fnz_mf] %(asctime)s - [%(funcName)s()] %(message)s"
logging.basicConfig(format=FORMAT, level=runtime_cfg.LOGLEVEL)
logger = logging.getLogger(__name__)
def read_from_ms_database():
logger.info('Reading MS database for basic fund info')
conn = get_ms_db_conn()
try:
with conn.cursor(pymysql.cursors.DictCursor) as cursor:
sql = 'SELECT f.MStarID, f.ISIN, f.BrandingName, fcm.FundName, fcm.FundName_CN, f.InvestmentStrategy, ' \
' COALESCE(st.InvestmentStrategy_CHN, \'\') as InvestmentStrategy_CHN, ' \
' COALESCE(ftc.InvestmentStrategy_TCHN, \'\') as InvestmentStrategy_TCHN, ' \
' f.CategoryCode, f.BroadCategoryGroupID ' \
'from hkfundbasicinfo f ' \
' LEFT JOIN hkfundchineseinvestmentstrategy st on f.mstarId = st.mstarId, ' \
' hkfundchinesenamemapping fcm, hkfundtraditionalchinese ftc ' \
'where ' \
' f.mstarId = fcm.mstarId ' \
' and f.mstarId = ftc.mstarId '
cursor.execute(sql)
query_result = cursor.fetchall()
finally:
conn.close()
result = {}
#return dict((isin, {'mStarId': mStarId, 'strategy': strategy, 'mstar_category': mstar_category, 'branding': branding}) for isin, mStarId, strategy, mstar_category, branding in result)
for item in query_result:
result.setdefault(item['ISIN'], item)
return result
def get_fnz_funds():
logger.info('Getting FNZ Fund from ' + fnz_fund_api)
#url = 'https://distributionserviceofsu36.fnz.com/api/distribution/v3/funds?ProductCode='+arg_instcode
headers = {}
headers['Content-Type'] = 'application/json'
headers['X-ApplicationName'] = '1'
headers['X-UserContext'] = 'UserId=436476' # or UserId = 436477
headers['Accept'] = 'application/json'
req = requests.get(url=fnz_fund_api, headers=headers)
r = req.json()
if int(r['TotalNumberOfResults']) > 0:
return r['PageOfResults'] # from a list of dictionary
else:
return []
def insert_mutual_funds(isin_map):
logger.info('Inserting into mutual fund database')
conn = get_fund_db_conn()
cursor = conn.cursor()
fund_name_sql = "REPLACE INTO mstar_fund_name(mstar_id, name, name_cn) values(%s, %s, %s)"
mf_sql = "REPLACE INTO mutual_fund (name, product_id, isin, fnz_code, currency, inception_date, fund_size, investment_strategy, investment_strategy_cn, investment_strategy_tw, domicile, div_type, " \
"asset_class, sub_asset_class, branding, company_name, primary_index, mstar_id, mstar_category_code, mstar_broad_category_group_id, buyable) " \
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
charge_sql = "REPLACE INTO fund_charge (isin, initial_inv_amt, initial_charge, annual_charge, subscription_cost, redemption_cost) VALUES(%s, %s, %s, %s, %s, %s)"
performance_sql = "REPLACE INTO fnz_fund_performance_history(isin, pricing_date, currency, price, ytd, 3m, 1y, 2y, 3y, 5y) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
performance_sql_2 = "REPLACE INTO fnz_fund_performance(isin, pricing_date, currency, price, ytd, 3m, 1y, 2y, 3y, 5y) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
non_tradable_sql = 'UPDATE mutual_fund set buyable = 0 where product_id = %s'
all_funds = get_fnz_funds()
num_of_funds = len(all_funds)
logger.info('# of funds fetched from FNZ: ' + str(num_of_funds))
fund_name_stmt = []
mutual_fund_stmt = []
fund_charges_stmt = []
performance_stmt = []
non_tradable_funds_stmt = []
for row in all_funds:
sellable = row['Sellable']
if sellable != True:
product_id = row.get('ProductId', '')
non_tradable_funds_stmt.append((product_id))
continue
isin = row['FundIdentifiers'].get('Isin','')
if isin in isin_map:
branding = isin_map[isin]['BrandingName']
mStarId = isin_map[isin]['MStarID']
name = isin_map[isin]['FundName']
name_cn = isin_map[isin]['FundName_CN']
strategy = isin_map[isin]['InvestmentStrategy']
strategy_cn = '' if isin_map[isin]['InvestmentStrategy_CHN'] == 'NULL' else isin_map[isin]['InvestmentStrategy_CHN']
strategy_tw = '' if isin_map[isin]['InvestmentStrategy_TCHN'] == 'NULL' else isin_map[isin]['InvestmentStrategy_TCHN']
mstar_category = isin_map[isin]['CategoryCode']
mstar_broad_category_group_id = isin_map[isin]['BroadCategoryGroupID']
else:
if isin is not None and row['Sellable']:
log_msg = 'missing isin [%s], buyable [%s] description [%s] in morningStar' % (isin, row['Sellable'], row['Description'])
logger.info(log_msg)
continue
pricing_date = row['Price']['PricingDate']
price = "{0:.2f}".format(row['Price']['Value'].get('Value', 0.0))
fnz_code = row['FundIdentifiers'].get('ProductCode','')
#name = row.get('Name','')
product_id = row.get('ProductId','')
logger.info('Processing product with id %s' % product_id)
inception_date = row.get('LaunchDate','')
fund_size = row.get('FundSize', 0.0)
#strategy = row.get('Description','')
asset_class = row.get('AssetClass','')
sub_asset_class = row.get('AssetSubClass','')
buyable = row.get('Sellable','')
fund_manager_detail = row.get('FundManagerDetails', None)
if isinstance(fund_manager_detail, dict):
company = row['FundManagerDetails'].get('CompanyName', '')
else:
company = ''
primary_index_obj = row.get('PrimaryIndex', None)
if isinstance(primary_index_obj, dict):
primary_index = row['PrimaryIndex'].get('Name', '')
else:
primary_index = ''
domicile = row.get('Domicile','')
currency = row.get('Currency','')
div_type = row.get('DividendReinvestment','')
min_initial_inv_amt = str(row.get('MinInitialInvestmentAmount', ''))
charges = row.get('Charges', None)
if isinstance(charges, dict):
initial_charge = str(row['Charges']['InitialCharge'].get('Value', ''))
annual_management_charge = str(row['Charges']['AnualManagementCharge'].get('Value', ''))
subscription_charge = str(row['Charges']['SubScriptionCost'].get('Value', ''))
redemption_charge = str(row['Charges']['RedemptionCost'].get('Value', ''))
else:
initial_charge = ''
annual_management_charge = ''
subscription_charge = ''
redemption_charge = ''
#Performance
ytd = row['HistoricalPerformance'].get('YearToDate', 0.0)
three_m = row['HistoricalPerformance'].get('ThreeMonth', 0.0)
one_y = row['HistoricalPerformance'].get('OneYear', 0.0)
two_y = row['HistoricalPerformance'].get('TwoYear', 0.0)
three_y = row['HistoricalPerformance'].get('ThreeYear', 0.0)
five_y = row['HistoricalPerformance'].get('FiveYear', 0.0)
| two_y = '' if two_y is None else "{0:.5f}".format(two_y)
three_y = '' if three_y is None else "{0:.5f}".format(three_y)
five_y = '' if five_y is None else "{0:.5f}".format(five_y)
fund_name_stmt.append((mStarId, name, name_cn))
mutual_fund_stmt.append((name, product_id, isin, fnz_code, currency, inception_date, fund_size,
strategy, strategy_cn, strategy_tw, domicile, div_type, asset_class, sub_asset_class,
branding, company, primary_index, mStarId, mstar_category, mstar_broad_category_group_id, buyable))
fund_charges_stmt.append((isin, min_initial_inv_amt, initial_charge, annual_management_charge, subscription_charge, redemption_charge))
performance_stmt.append((isin, pricing_date, currency, price, ytd, three_m, one_y, two_y, three_y, five_y))
for product_id in non_tradable_funds_stmt:
log_msg = 'Skipping non tradable product id: %s' % product_id
logger.info(log_msg)
logger.info('Executing insert sql')
cursor.executemany(fund_name_sql, fund_name_stmt)
cursor.executemany(mf_sql, mutual_fund_stmt)
cursor.executemany(charge_sql, fund_charges_stmt)
cursor.executemany(performance_sql, performance_stmt)
cursor.executemany(performance_sql_2, performance_stmt)
cursor.executemany(non_tradable_sql, non_tradable_funds_stmt)
logger.info('Commiting data into database')
conn.commit()
conn.close()
def extract_brandings(isin_map):
all_funds = list(isin_map.values())
# remove duplicates
brandings = set(fund['BrandingName'].strip() for fund in all_funds)
# filter ''
brandings = [brand for brand in brandings if brand.strip()]
return brandings
def insert_fund_branding(brandings):
logger.info('Inserting into fund_branding')
conn = get_fund_db_conn()
cursor = conn.cursor()
fund_branding_sql = "INSERT INTO fund_branding values(%s, '', '') ON DUPLICATE KEY UPDATE name = %s"
fund_branding_stmt = []
for branding in brandings:
fund_branding_stmt.append((branding, branding))
cursor.executemany(fund_branding_sql, fund_branding_stmt)
logger.info('Commiting data into database')
conn.commit()
conn.close()
def read_fnz_fund_list():
isin_map = read_from_ms_database()
brandings = extract_brandings(isin_map)
insert_fund_branding(brandings)
insert_mutual_funds(isin_map)
if __name__ == '__main__':
read_fnz_fund_list()
logger.info('Finished updating database using FNZ api') | ytd = '' if ytd is None else "{0:.5f}".format(ytd)
three_m = '' if three_m is None else "{0:.5f}".format(three_m)
one_y = '' if one_y is None else "{0:.5f}".format(one_y)
| random_line_split |
fetch_fnz_mf.py | import requests
import datetime
from dbConfig import *
import logging
FORMAT = "[f360 fetch_fnz_mf] %(asctime)s - [%(funcName)s()] %(message)s"
logging.basicConfig(format=FORMAT, level=runtime_cfg.LOGLEVEL)
logger = logging.getLogger(__name__)
def read_from_ms_database():
logger.info('Reading MS database for basic fund info')
conn = get_ms_db_conn()
try:
with conn.cursor(pymysql.cursors.DictCursor) as cursor:
sql = 'SELECT f.MStarID, f.ISIN, f.BrandingName, fcm.FundName, fcm.FundName_CN, f.InvestmentStrategy, ' \
' COALESCE(st.InvestmentStrategy_CHN, \'\') as InvestmentStrategy_CHN, ' \
' COALESCE(ftc.InvestmentStrategy_TCHN, \'\') as InvestmentStrategy_TCHN, ' \
' f.CategoryCode, f.BroadCategoryGroupID ' \
'from hkfundbasicinfo f ' \
' LEFT JOIN hkfundchineseinvestmentstrategy st on f.mstarId = st.mstarId, ' \
' hkfundchinesenamemapping fcm, hkfundtraditionalchinese ftc ' \
'where ' \
' f.mstarId = fcm.mstarId ' \
' and f.mstarId = ftc.mstarId '
cursor.execute(sql)
query_result = cursor.fetchall()
finally:
conn.close()
result = {}
#return dict((isin, {'mStarId': mStarId, 'strategy': strategy, 'mstar_category': mstar_category, 'branding': branding}) for isin, mStarId, strategy, mstar_category, branding in result)
for item in query_result:
result.setdefault(item['ISIN'], item)
return result
def get_fnz_funds():
logger.info('Getting FNZ Fund from ' + fnz_fund_api)
#url = 'https://distributionserviceofsu36.fnz.com/api/distribution/v3/funds?ProductCode='+arg_instcode
headers = {}
headers['Content-Type'] = 'application/json'
headers['X-ApplicationName'] = '1'
headers['X-UserContext'] = 'UserId=436476' # or UserId = 436477
headers['Accept'] = 'application/json'
req = requests.get(url=fnz_fund_api, headers=headers)
r = req.json()
if int(r['TotalNumberOfResults']) > 0:
return r['PageOfResults'] # from a list of dictionary
else:
return []
def | (isin_map):
logger.info('Inserting into mutual fund database')
conn = get_fund_db_conn()
cursor = conn.cursor()
fund_name_sql = "REPLACE INTO mstar_fund_name(mstar_id, name, name_cn) values(%s, %s, %s)"
mf_sql = "REPLACE INTO mutual_fund (name, product_id, isin, fnz_code, currency, inception_date, fund_size, investment_strategy, investment_strategy_cn, investment_strategy_tw, domicile, div_type, " \
"asset_class, sub_asset_class, branding, company_name, primary_index, mstar_id, mstar_category_code, mstar_broad_category_group_id, buyable) " \
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
charge_sql = "REPLACE INTO fund_charge (isin, initial_inv_amt, initial_charge, annual_charge, subscription_cost, redemption_cost) VALUES(%s, %s, %s, %s, %s, %s)"
performance_sql = "REPLACE INTO fnz_fund_performance_history(isin, pricing_date, currency, price, ytd, 3m, 1y, 2y, 3y, 5y) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
performance_sql_2 = "REPLACE INTO fnz_fund_performance(isin, pricing_date, currency, price, ytd, 3m, 1y, 2y, 3y, 5y) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
non_tradable_sql = 'UPDATE mutual_fund set buyable = 0 where product_id = %s'
all_funds = get_fnz_funds()
num_of_funds = len(all_funds)
logger.info('# of funds fetched from FNZ: ' + str(num_of_funds))
fund_name_stmt = []
mutual_fund_stmt = []
fund_charges_stmt = []
performance_stmt = []
non_tradable_funds_stmt = []
for row in all_funds:
sellable = row['Sellable']
if sellable != True:
product_id = row.get('ProductId', '')
non_tradable_funds_stmt.append((product_id))
continue
isin = row['FundIdentifiers'].get('Isin','')
if isin in isin_map:
branding = isin_map[isin]['BrandingName']
mStarId = isin_map[isin]['MStarID']
name = isin_map[isin]['FundName']
name_cn = isin_map[isin]['FundName_CN']
strategy = isin_map[isin]['InvestmentStrategy']
strategy_cn = '' if isin_map[isin]['InvestmentStrategy_CHN'] == 'NULL' else isin_map[isin]['InvestmentStrategy_CHN']
strategy_tw = '' if isin_map[isin]['InvestmentStrategy_TCHN'] == 'NULL' else isin_map[isin]['InvestmentStrategy_TCHN']
mstar_category = isin_map[isin]['CategoryCode']
mstar_broad_category_group_id = isin_map[isin]['BroadCategoryGroupID']
else:
if isin is not None and row['Sellable']:
log_msg = 'missing isin [%s], buyable [%s] description [%s] in morningStar' % (isin, row['Sellable'], row['Description'])
logger.info(log_msg)
continue
pricing_date = row['Price']['PricingDate']
price = "{0:.2f}".format(row['Price']['Value'].get('Value', 0.0))
fnz_code = row['FundIdentifiers'].get('ProductCode','')
#name = row.get('Name','')
product_id = row.get('ProductId','')
logger.info('Processing product with id %s' % product_id)
inception_date = row.get('LaunchDate','')
fund_size = row.get('FundSize', 0.0)
#strategy = row.get('Description','')
asset_class = row.get('AssetClass','')
sub_asset_class = row.get('AssetSubClass','')
buyable = row.get('Sellable','')
fund_manager_detail = row.get('FundManagerDetails', None)
if isinstance(fund_manager_detail, dict):
company = row['FundManagerDetails'].get('CompanyName', '')
else:
company = ''
primary_index_obj = row.get('PrimaryIndex', None)
if isinstance(primary_index_obj, dict):
primary_index = row['PrimaryIndex'].get('Name', '')
else:
primary_index = ''
domicile = row.get('Domicile','')
currency = row.get('Currency','')
div_type = row.get('DividendReinvestment','')
min_initial_inv_amt = str(row.get('MinInitialInvestmentAmount', ''))
charges = row.get('Charges', None)
if isinstance(charges, dict):
initial_charge = str(row['Charges']['InitialCharge'].get('Value', ''))
annual_management_charge = str(row['Charges']['AnualManagementCharge'].get('Value', ''))
subscription_charge = str(row['Charges']['SubScriptionCost'].get('Value', ''))
redemption_charge = str(row['Charges']['RedemptionCost'].get('Value', ''))
else:
initial_charge = ''
annual_management_charge = ''
subscription_charge = ''
redemption_charge = ''
#Performance
ytd = row['HistoricalPerformance'].get('YearToDate', 0.0)
three_m = row['HistoricalPerformance'].get('ThreeMonth', 0.0)
one_y = row['HistoricalPerformance'].get('OneYear', 0.0)
two_y = row['HistoricalPerformance'].get('TwoYear', 0.0)
three_y = row['HistoricalPerformance'].get('ThreeYear', 0.0)
five_y = row['HistoricalPerformance'].get('FiveYear', 0.0)
ytd = '' if ytd is None else "{0:.5f}".format(ytd)
three_m = '' if three_m is None else "{0:.5f}".format(three_m)
one_y = '' if one_y is None else "{0:.5f}".format(one_y)
two_y = '' if two_y is None else "{0:.5f}".format(two_y)
three_y = '' if three_y is None else "{0:.5f}".format(three_y)
five_y = '' if five_y is None else "{0:.5f}".format(five_y)
fund_name_stmt.append((mStarId, name, name_cn))
mutual_fund_stmt.append((name, product_id, isin, fnz_code, currency, inception_date, fund_size,
strategy, strategy_cn, strategy_tw, domicile, div_type, asset_class, sub_asset_class,
branding, company, primary_index, mStarId, mstar_category, mstar_broad_category_group_id, buyable))
fund_charges_stmt.append((isin, min_initial_inv_amt, initial_charge, annual_management_charge, subscription_charge, redemption_charge))
performance_stmt.append((isin, pricing_date, currency, price, ytd, three_m, one_y, two_y, three_y, five_y))
for product_id in non_tradable_funds_stmt:
log_msg = 'Skipping non tradable product id: %s' % product_id
logger.info(log_msg)
logger.info('Executing insert sql')
cursor.executemany(fund_name_sql, fund_name_stmt)
cursor.executemany(mf_sql, mutual_fund_stmt)
cursor.executemany(charge_sql, fund_charges_stmt)
cursor.executemany(performance_sql, performance_stmt)
cursor.executemany(performance_sql_2, performance_stmt)
cursor.executemany(non_tradable_sql, non_tradable_funds_stmt)
logger.info('Commiting data into database')
conn.commit()
conn.close()
def extract_brandings(isin_map):
all_funds = list(isin_map.values())
# remove duplicates
brandings = set(fund['BrandingName'].strip() for fund in all_funds)
# filter ''
brandings = [brand for brand in brandings if brand.strip()]
return brandings
def insert_fund_branding(brandings):
logger.info('Inserting into fund_branding')
conn = get_fund_db_conn()
cursor = conn.cursor()
fund_branding_sql = "INSERT INTO fund_branding values(%s, '', '') ON DUPLICATE KEY UPDATE name = %s"
fund_branding_stmt = []
for branding in brandings:
fund_branding_stmt.append((branding, branding))
cursor.executemany(fund_branding_sql, fund_branding_stmt)
logger.info('Commiting data into database')
conn.commit()
conn.close()
def read_fnz_fund_list():
isin_map = read_from_ms_database()
brandings = extract_brandings(isin_map)
insert_fund_branding(brandings)
insert_mutual_funds(isin_map)
if __name__ == '__main__':
read_fnz_fund_list()
logger.info('Finished updating database using FNZ api') | insert_mutual_funds | identifier_name |
fetch_fnz_mf.py | import requests
import datetime
from dbConfig import *
import logging
FORMAT = "[f360 fetch_fnz_mf] %(asctime)s - [%(funcName)s()] %(message)s"
logging.basicConfig(format=FORMAT, level=runtime_cfg.LOGLEVEL)
logger = logging.getLogger(__name__)
def read_from_ms_database():
logger.info('Reading MS database for basic fund info')
conn = get_ms_db_conn()
try:
with conn.cursor(pymysql.cursors.DictCursor) as cursor:
sql = 'SELECT f.MStarID, f.ISIN, f.BrandingName, fcm.FundName, fcm.FundName_CN, f.InvestmentStrategy, ' \
' COALESCE(st.InvestmentStrategy_CHN, \'\') as InvestmentStrategy_CHN, ' \
' COALESCE(ftc.InvestmentStrategy_TCHN, \'\') as InvestmentStrategy_TCHN, ' \
' f.CategoryCode, f.BroadCategoryGroupID ' \
'from hkfundbasicinfo f ' \
' LEFT JOIN hkfundchineseinvestmentstrategy st on f.mstarId = st.mstarId, ' \
' hkfundchinesenamemapping fcm, hkfundtraditionalchinese ftc ' \
'where ' \
' f.mstarId = fcm.mstarId ' \
' and f.mstarId = ftc.mstarId '
cursor.execute(sql)
query_result = cursor.fetchall()
finally:
conn.close()
result = {}
#return dict((isin, {'mStarId': mStarId, 'strategy': strategy, 'mstar_category': mstar_category, 'branding': branding}) for isin, mStarId, strategy, mstar_category, branding in result)
for item in query_result:
result.setdefault(item['ISIN'], item)
return result
def get_fnz_funds():
logger.info('Getting FNZ Fund from ' + fnz_fund_api)
#url = 'https://distributionserviceofsu36.fnz.com/api/distribution/v3/funds?ProductCode='+arg_instcode
headers = {}
headers['Content-Type'] = 'application/json'
headers['X-ApplicationName'] = '1'
headers['X-UserContext'] = 'UserId=436476' # or UserId = 436477
headers['Accept'] = 'application/json'
req = requests.get(url=fnz_fund_api, headers=headers)
r = req.json()
if int(r['TotalNumberOfResults']) > 0:
|
else:
return []
def insert_mutual_funds(isin_map):
logger.info('Inserting into mutual fund database')
conn = get_fund_db_conn()
cursor = conn.cursor()
fund_name_sql = "REPLACE INTO mstar_fund_name(mstar_id, name, name_cn) values(%s, %s, %s)"
mf_sql = "REPLACE INTO mutual_fund (name, product_id, isin, fnz_code, currency, inception_date, fund_size, investment_strategy, investment_strategy_cn, investment_strategy_tw, domicile, div_type, " \
"asset_class, sub_asset_class, branding, company_name, primary_index, mstar_id, mstar_category_code, mstar_broad_category_group_id, buyable) " \
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
charge_sql = "REPLACE INTO fund_charge (isin, initial_inv_amt, initial_charge, annual_charge, subscription_cost, redemption_cost) VALUES(%s, %s, %s, %s, %s, %s)"
performance_sql = "REPLACE INTO fnz_fund_performance_history(isin, pricing_date, currency, price, ytd, 3m, 1y, 2y, 3y, 5y) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
performance_sql_2 = "REPLACE INTO fnz_fund_performance(isin, pricing_date, currency, price, ytd, 3m, 1y, 2y, 3y, 5y) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
non_tradable_sql = 'UPDATE mutual_fund set buyable = 0 where product_id = %s'
all_funds = get_fnz_funds()
num_of_funds = len(all_funds)
logger.info('# of funds fetched from FNZ: ' + str(num_of_funds))
fund_name_stmt = []
mutual_fund_stmt = []
fund_charges_stmt = []
performance_stmt = []
non_tradable_funds_stmt = []
for row in all_funds:
sellable = row['Sellable']
if sellable != True:
product_id = row.get('ProductId', '')
non_tradable_funds_stmt.append((product_id))
continue
isin = row['FundIdentifiers'].get('Isin','')
if isin in isin_map:
branding = isin_map[isin]['BrandingName']
mStarId = isin_map[isin]['MStarID']
name = isin_map[isin]['FundName']
name_cn = isin_map[isin]['FundName_CN']
strategy = isin_map[isin]['InvestmentStrategy']
strategy_cn = '' if isin_map[isin]['InvestmentStrategy_CHN'] == 'NULL' else isin_map[isin]['InvestmentStrategy_CHN']
strategy_tw = '' if isin_map[isin]['InvestmentStrategy_TCHN'] == 'NULL' else isin_map[isin]['InvestmentStrategy_TCHN']
mstar_category = isin_map[isin]['CategoryCode']
mstar_broad_category_group_id = isin_map[isin]['BroadCategoryGroupID']
else:
if isin is not None and row['Sellable']:
log_msg = 'missing isin [%s], buyable [%s] description [%s] in morningStar' % (isin, row['Sellable'], row['Description'])
logger.info(log_msg)
continue
pricing_date = row['Price']['PricingDate']
price = "{0:.2f}".format(row['Price']['Value'].get('Value', 0.0))
fnz_code = row['FundIdentifiers'].get('ProductCode','')
#name = row.get('Name','')
product_id = row.get('ProductId','')
logger.info('Processing product with id %s' % product_id)
inception_date = row.get('LaunchDate','')
fund_size = row.get('FundSize', 0.0)
#strategy = row.get('Description','')
asset_class = row.get('AssetClass','')
sub_asset_class = row.get('AssetSubClass','')
buyable = row.get('Sellable','')
fund_manager_detail = row.get('FundManagerDetails', None)
if isinstance(fund_manager_detail, dict):
company = row['FundManagerDetails'].get('CompanyName', '')
else:
company = ''
primary_index_obj = row.get('PrimaryIndex', None)
if isinstance(primary_index_obj, dict):
primary_index = row['PrimaryIndex'].get('Name', '')
else:
primary_index = ''
domicile = row.get('Domicile','')
currency = row.get('Currency','')
div_type = row.get('DividendReinvestment','')
min_initial_inv_amt = str(row.get('MinInitialInvestmentAmount', ''))
charges = row.get('Charges', None)
if isinstance(charges, dict):
initial_charge = str(row['Charges']['InitialCharge'].get('Value', ''))
annual_management_charge = str(row['Charges']['AnualManagementCharge'].get('Value', ''))
subscription_charge = str(row['Charges']['SubScriptionCost'].get('Value', ''))
redemption_charge = str(row['Charges']['RedemptionCost'].get('Value', ''))
else:
initial_charge = ''
annual_management_charge = ''
subscription_charge = ''
redemption_charge = ''
#Performance
ytd = row['HistoricalPerformance'].get('YearToDate', 0.0)
three_m = row['HistoricalPerformance'].get('ThreeMonth', 0.0)
one_y = row['HistoricalPerformance'].get('OneYear', 0.0)
two_y = row['HistoricalPerformance'].get('TwoYear', 0.0)
three_y = row['HistoricalPerformance'].get('ThreeYear', 0.0)
five_y = row['HistoricalPerformance'].get('FiveYear', 0.0)
ytd = '' if ytd is None else "{0:.5f}".format(ytd)
three_m = '' if three_m is None else "{0:.5f}".format(three_m)
one_y = '' if one_y is None else "{0:.5f}".format(one_y)
two_y = '' if two_y is None else "{0:.5f}".format(two_y)
three_y = '' if three_y is None else "{0:.5f}".format(three_y)
five_y = '' if five_y is None else "{0:.5f}".format(five_y)
fund_name_stmt.append((mStarId, name, name_cn))
mutual_fund_stmt.append((name, product_id, isin, fnz_code, currency, inception_date, fund_size,
strategy, strategy_cn, strategy_tw, domicile, div_type, asset_class, sub_asset_class,
branding, company, primary_index, mStarId, mstar_category, mstar_broad_category_group_id, buyable))
fund_charges_stmt.append((isin, min_initial_inv_amt, initial_charge, annual_management_charge, subscription_charge, redemption_charge))
performance_stmt.append((isin, pricing_date, currency, price, ytd, three_m, one_y, two_y, three_y, five_y))
for product_id in non_tradable_funds_stmt:
log_msg = 'Skipping non tradable product id: %s' % product_id
logger.info(log_msg)
logger.info('Executing insert sql')
cursor.executemany(fund_name_sql, fund_name_stmt)
cursor.executemany(mf_sql, mutual_fund_stmt)
cursor.executemany(charge_sql, fund_charges_stmt)
cursor.executemany(performance_sql, performance_stmt)
cursor.executemany(performance_sql_2, performance_stmt)
cursor.executemany(non_tradable_sql, non_tradable_funds_stmt)
logger.info('Commiting data into database')
conn.commit()
conn.close()
def extract_brandings(isin_map):
all_funds = list(isin_map.values())
# remove duplicates
brandings = set(fund['BrandingName'].strip() for fund in all_funds)
# filter ''
brandings = [brand for brand in brandings if brand.strip()]
return brandings
def insert_fund_branding(brandings):
logger.info('Inserting into fund_branding')
conn = get_fund_db_conn()
cursor = conn.cursor()
fund_branding_sql = "INSERT INTO fund_branding values(%s, '', '') ON DUPLICATE KEY UPDATE name = %s"
fund_branding_stmt = []
for branding in brandings:
fund_branding_stmt.append((branding, branding))
cursor.executemany(fund_branding_sql, fund_branding_stmt)
logger.info('Commiting data into database')
conn.commit()
conn.close()
def read_fnz_fund_list():
isin_map = read_from_ms_database()
brandings = extract_brandings(isin_map)
insert_fund_branding(brandings)
insert_mutual_funds(isin_map)
if __name__ == '__main__':
read_fnz_fund_list()
logger.info('Finished updating database using FNZ api') | return r['PageOfResults'] # from a list of dictionary
| conditional_block |
bot.py | import os
import asyncio
from urllib.request import urlopen
from datetime import datetime
from random import choice, randint
import time
import json
import re
import discord
from discord.ext import commands
import youtube_dl
import requests
bot = commands.Bot(command_prefix="$", intents=discord.Intents().all(),
description="This is yet another discord bot.")
greet_options = ["Hey", "Hi", "Greetings", "Hello"]
coin = ["https://i.imgur.com/csSP4ce.jpg", "https://i.imgur.com/NSrQtWx.jpg"]
games = ["Valorant", "Minecraft", "Paladins"]
# Silence useless bug reports messages
youtube_dl.utils.bug_reports_message = lambda: ""
ytdl_options = {'format': 'bestaudio/best',
'outtmpl': '%(title)s-%(id)s.%(ext)s', # output file format
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0'
}
ffmpeg_options = {
'options': '-vn'
}
ytdl = youtube_dl.YoutubeDL(ytdl_options)
## Functions
def youtube_search(*query):
query_string = "+".join(list(query))
html_content = urlopen("https://www.youtube.com/results?search_query=" + query_string)
return re.search(r"\"\/watch\?v=(\S{11})\"", html_content.read().decode())[1]
## General Commands
@bot.command(name="ping", help="Check the bot latency")
async def ping(ctx: commands.Context): | await message.edit(content="Latency = {}ms".format(round(bot.latency * 1000)))
@bot.command(name="say", help="Make the bot say something")
async def say(ctx: commands.Context, *words: str):
await ctx.send(" ".join(list(words)))
@bot.command(name="quote", help="Get a random quote")
async def quote(ctx: commands.Context):
response = requests.get("https://zenquotes.io/api/random")
json_data = json.loads(response.text)
await ctx.send("\"{}\"\t~ {}".format(json_data[0]["q"], json_data[0]["a"]))
@bot.command(name="toss", help="Toss a coin")
async def toss(ctx: commands.Context):
embed = discord.Embed(color=discord.Color.blue())
url = choice(coin)
embed.set_image(url=url)
await ctx.send(embed=embed)
@bot.command(name="info", help="View relevant info about the server")
async def info(ctx: commands.Context):
embed = discord.Embed(title="{}".format(ctx.guild.name),
timestamp=datetime.utcnow(),
color=discord.Color.blue())
embed.add_field(name="Server created at", value="{}".format(ctx.guild.created_at.strftime("%m-%d-%Y %H:%M")))
embed.add_field(name="Server Owner", value="{}".format(ctx.guild.owner))
embed.add_field(name="Server Region", value="{}".format(ctx.guild.region))
embed.add_field(name="Total Members", value="{}".format(ctx.guild.member_count))
embed.add_field(name="Server ID", value="{}".format(ctx.guild.id))
embed.set_thumbnail(url="{}".format(ctx.guild.icon_url))
await ctx.send(embed=embed)
class Greetings(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self._last_member = None
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member):
channel = member.guild.system_channel
if channel is not None:
await channel.send('Welcome {0.mention}.'.format(member))
@commands.command(name='hello', help='Say hello to the bot')
async def hello(self, ctx: commands.Context, *, member: discord.Member = None):
member = member or ctx.author
if self._last_member is None or self._last_member.id != member.id:
await ctx.send(choice(greet_options) + ' {0.name}!'.format(member))
else:
await ctx.send(choice(greet_options) + ' {0.name}... Hmm this feels familiar.'.format(member))
self._last_member = member
class Math(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="add", help="Add two numbers")
async def _add(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 + num2))
@commands.command(name="sub", help="Subtract two numbers")
async def _sub(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 - num2))
@commands.command(name="mul", help="Multiply two numbers")
async def _mul(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 * num2))
@commands.command(name="div", help="Divide two numbers")
async def _div(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 / num2))
@commands.command(name="mod", help="Remainder of two numbers")
async def _mod(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 % num2))
@commands.command(name="rand", help="Get a random number between two numbers")
async def _rand(self, ctx: commands.Context, num1: int, num2: int):
await ctx.send("{}".format(randint(num1, num2)))
class Search(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="youtube", help="Search youtube and get the result")
async def youtube(self, ctx: commands.Context, *query: str):
await ctx.send("https://www.youtube.com/watch?v=" + youtube_search(*query))
@commands.command(name="mal", help="Search an anime in My Anime List")
async def mal(self, ctx: commands.Context, *query):
query_string = "%20".join(list(query))
html_content = requests.get("https://myanimelist.net/search/all?q={}&cat=anime".format(query_string))
url = re.search(r"href=\"(https:\/\/myanimelist\.net\/anime\/[0-9]+\/[_a-zA-Z0-9\-]+?)\"", html_content.text)[1]
await ctx.send(url)
@commands.command(name="wiki", help="Get wikipedia search url")
async def wikipidea(self, ctx: commands.Context, *query: str):
await ctx.send("https://en.wikipedia.org/wiki/" + "_".join(list(query)))
@commands.command(name="google", help="Get google search url")
async def google(self, ctx: commands.Context, *query: str):
await ctx.send("https://www.google.com/search?q=" + "+".join(list(query)))
class Meme(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="template", help="Get a meme template")
async def meme_template(self, ctx: commands.Context):
response = requests.get("https://api.imgflip.com/get_memes")
json_data = json.loads(response.text)
memes_list = json_data["data"]["memes"]
index = randint(0, len(memes_list) - 1)
await ctx.send("{}".format(memes_list[index]["url"]))
@commands.command(name="meme", help="Get a random meme")
async def meme(self, ctx: commands.Context, arg: str=None):
response = requests.get("https://meme-api.herokuapp.com/gimme")
json_data = json.loads(response.text)
await ctx.send("{}".format(json_data["url"]))
@commands.command(name="gif", help="Get a random GIF or search one by query")
async def gif(self, ctx: commands.Context, *query):
url_prefix = "https://api.giphy.com/v1/gifs"
if query == ():
url = "{}/random?api_key={}&tag=&rating=r".format(url_prefix, os.environ["giphy_api_key"])
response = requests.get(url)
json_data = json.loads(response.text)
await ctx.send("{}".format(json_data["data"]["url"]))
else:
url = "{}/search?api_key={}&q={}&limit=25&rating=g&lang=en".format(url_prefix,
os.environ["giphy_api_key"],
"%20".join(list(query)))
response = requests.get(url)
json_data = json.loads(response.text)
await ctx.send("{}".format(json_data["data"][randint(0, 24)]["url"]))
## Music Player
class YTDLSource(discord.PCMVolumeTransformer):
def __init__(self, source, *, data, volume=0.75):
super().__init__(source, volume)
self.data = data
self.title = data.get("title")
self.url = data.get("url")
@classmethod
async def from_url(cls, url, *, loop=None, stream=False):
loop = loop or asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
if "entries" in data:
# take first item from a playlist
data = data["entries"][0]
filename = data["url"] if stream else ytdl.prepare_filename(data)
return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data)
class Music(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name='join', help='Add bot to the voice channel')
async def join(self, ctx: commands.Context):
if not ctx.message.author.voice:
await ctx.send("{} is not connected to a voice channel".format(ctx.message.author.name))
return
else:
channel = ctx.message.author.voice.channel
await channel.connect()
@commands.command(name='play', help='Play a song')
async def play(self, ctx: commands.Context, *query):
voice_client = ctx.message.guild.voice_client
if voice_client is not None:
if voice_client.is_playing():
await voice_client.stop()
url = "https://www.youtube.com/watch?v=" + youtube_search(*query)
async with ctx.typing():
player = await YTDLSource.from_url(url, loop=self.bot.loop)
voice_client.play(player, after=lambda e: print('Player error: {}'.format(e)) if e else None)
await ctx.send('Now playing: {}'.format(player.title))
else:
await ctx.send("Not connected to a voice channel.")
@commands.command(name='volume', help='Change the volume')
async def volume(self, ctx: commands.Context, volume: int):
voice_client = ctx.message.guild.voice_client
if voice_client is not None:
voice_client.source.volume = volume / 100
await ctx.send("Changed volume to {}%".format(volume))
else:
await ctx.send("Not connected to a voice channel.")
@commands.command(name='pause', help='Pause the song')
async def pause(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
if voice_client.is_playing():
await voice_client.pause()
else:
await ctx.send("The bot is not playing anything at the moment.")
@commands.command(name='resume', help='Resumes the song')
async def resume(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
if voice_client.is_paused():
await voice_client.resume()
elif voice_client.is_playing():
await ctx.send("The bot is already playing a song.")
else:
await ctx.send("The bot was not playing anything before this. Use play command.")
@commands.command(name='stop', help='Stops the song')
async def stop(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
if voice_client.is_playing():
await voice_client.stop()
else:
await ctx.send("The bot is not playing anything at the moment.")
@commands.command(name='leave', help='Disconnect bot from the voice channel')
async def leave(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
await voice_client.disconnect()
## Events
@bot.event
async def on_ready():
await bot.change_presence(activity = discord.Game(choice(games)))
print("We have logged in as {0.user}".format(bot))
guild_data = dict()
for guild in bot.guilds:
for channel in guild.text_channels :
if str(guild.id) == "839625432043225148":
if str(channel) == "bot-stats":
myChannel = channel
elif str(channel) == "bot-notification":
await channel.send('Robot Activated...')
await channel.send(file=discord.File('robot.png'))
guild_data[str(guild.name)] = (guild.member_count, guild.members)
for guild in guild_data:
await myChannel.send('Bot active in {}, Member Count : {}'.format(guild, guild_data[guild][0]))
for member in guild_data[guild][1]:
await myChannel.send('{}'.format(member))
@bot.listen()
async def on_message(message):
if message.author == bot.user:
return
if __name__ == "__main__":
bot.add_cog(Greetings(bot))
bot.add_cog(Math(bot))
bot.add_cog(Search(bot))
bot.add_cog(Meme(bot))
bot.add_cog(Music(bot))
bot.run(os.environ["token"]) | message = await ctx.send("Pinging...")
time.sleep(0.5) | random_line_split |
bot.py | import os
import asyncio
from urllib.request import urlopen
from datetime import datetime
from random import choice, randint
import time
import json
import re
import discord
from discord.ext import commands
import youtube_dl
import requests
bot = commands.Bot(command_prefix="$", intents=discord.Intents().all(),
description="This is yet another discord bot.")
greet_options = ["Hey", "Hi", "Greetings", "Hello"]
coin = ["https://i.imgur.com/csSP4ce.jpg", "https://i.imgur.com/NSrQtWx.jpg"]
games = ["Valorant", "Minecraft", "Paladins"]
# Silence useless bug reports messages
youtube_dl.utils.bug_reports_message = lambda: ""
ytdl_options = {'format': 'bestaudio/best',
'outtmpl': '%(title)s-%(id)s.%(ext)s', # output file format
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0'
}
ffmpeg_options = {
'options': '-vn'
}
ytdl = youtube_dl.YoutubeDL(ytdl_options)
## Functions
def youtube_search(*query):
query_string = "+".join(list(query))
html_content = urlopen("https://www.youtube.com/results?search_query=" + query_string)
return re.search(r"\"\/watch\?v=(\S{11})\"", html_content.read().decode())[1]
## General Commands
@bot.command(name="ping", help="Check the bot latency")
async def ping(ctx: commands.Context):
message = await ctx.send("Pinging...")
time.sleep(0.5)
await message.edit(content="Latency = {}ms".format(round(bot.latency * 1000)))
@bot.command(name="say", help="Make the bot say something")
async def say(ctx: commands.Context, *words: str):
await ctx.send(" ".join(list(words)))
@bot.command(name="quote", help="Get a random quote")
async def quote(ctx: commands.Context):
response = requests.get("https://zenquotes.io/api/random")
json_data = json.loads(response.text)
await ctx.send("\"{}\"\t~ {}".format(json_data[0]["q"], json_data[0]["a"]))
@bot.command(name="toss", help="Toss a coin")
async def toss(ctx: commands.Context):
embed = discord.Embed(color=discord.Color.blue())
url = choice(coin)
embed.set_image(url=url)
await ctx.send(embed=embed)
@bot.command(name="info", help="View relevant info about the server")
async def info(ctx: commands.Context):
embed = discord.Embed(title="{}".format(ctx.guild.name),
timestamp=datetime.utcnow(),
color=discord.Color.blue())
embed.add_field(name="Server created at", value="{}".format(ctx.guild.created_at.strftime("%m-%d-%Y %H:%M")))
embed.add_field(name="Server Owner", value="{}".format(ctx.guild.owner))
embed.add_field(name="Server Region", value="{}".format(ctx.guild.region))
embed.add_field(name="Total Members", value="{}".format(ctx.guild.member_count))
embed.add_field(name="Server ID", value="{}".format(ctx.guild.id))
embed.set_thumbnail(url="{}".format(ctx.guild.icon_url))
await ctx.send(embed=embed)
class Greetings(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self._last_member = None
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member):
channel = member.guild.system_channel
if channel is not None:
await channel.send('Welcome {0.mention}.'.format(member))
@commands.command(name='hello', help='Say hello to the bot')
async def hello(self, ctx: commands.Context, *, member: discord.Member = None):
member = member or ctx.author
if self._last_member is None or self._last_member.id != member.id:
await ctx.send(choice(greet_options) + ' {0.name}!'.format(member))
else:
await ctx.send(choice(greet_options) + ' {0.name}... Hmm this feels familiar.'.format(member))
self._last_member = member
class Math(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="add", help="Add two numbers")
async def _add(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 + num2))
@commands.command(name="sub", help="Subtract two numbers")
async def _sub(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 - num2))
@commands.command(name="mul", help="Multiply two numbers")
async def _mul(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 * num2))
@commands.command(name="div", help="Divide two numbers")
async def _div(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 / num2))
@commands.command(name="mod", help="Remainder of two numbers")
async def _mod(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 % num2))
@commands.command(name="rand", help="Get a random number between two numbers")
async def _rand(self, ctx: commands.Context, num1: int, num2: int):
await ctx.send("{}".format(randint(num1, num2)))
class Search(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="youtube", help="Search youtube and get the result")
async def youtube(self, ctx: commands.Context, *query: str):
await ctx.send("https://www.youtube.com/watch?v=" + youtube_search(*query))
@commands.command(name="mal", help="Search an anime in My Anime List")
async def mal(self, ctx: commands.Context, *query):
query_string = "%20".join(list(query))
html_content = requests.get("https://myanimelist.net/search/all?q={}&cat=anime".format(query_string))
url = re.search(r"href=\"(https:\/\/myanimelist\.net\/anime\/[0-9]+\/[_a-zA-Z0-9\-]+?)\"", html_content.text)[1]
await ctx.send(url)
@commands.command(name="wiki", help="Get wikipedia search url")
async def wikipidea(self, ctx: commands.Context, *query: str):
await ctx.send("https://en.wikipedia.org/wiki/" + "_".join(list(query)))
@commands.command(name="google", help="Get google search url")
async def google(self, ctx: commands.Context, *query: str):
await ctx.send("https://www.google.com/search?q=" + "+".join(list(query)))
class Meme(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="template", help="Get a meme template")
async def meme_template(self, ctx: commands.Context):
response = requests.get("https://api.imgflip.com/get_memes")
json_data = json.loads(response.text)
memes_list = json_data["data"]["memes"]
index = randint(0, len(memes_list) - 1)
await ctx.send("{}".format(memes_list[index]["url"]))
@commands.command(name="meme", help="Get a random meme")
async def meme(self, ctx: commands.Context, arg: str=None):
|
@commands.command(name="gif", help="Get a random GIF or search one by query")
async def gif(self, ctx: commands.Context, *query):
url_prefix = "https://api.giphy.com/v1/gifs"
if query == ():
url = "{}/random?api_key={}&tag=&rating=r".format(url_prefix, os.environ["giphy_api_key"])
response = requests.get(url)
json_data = json.loads(response.text)
await ctx.send("{}".format(json_data["data"]["url"]))
else:
url = "{}/search?api_key={}&q={}&limit=25&rating=g&lang=en".format(url_prefix,
os.environ["giphy_api_key"],
"%20".join(list(query)))
response = requests.get(url)
json_data = json.loads(response.text)
await ctx.send("{}".format(json_data["data"][randint(0, 24)]["url"]))
## Music Player
class YTDLSource(discord.PCMVolumeTransformer):
def __init__(self, source, *, data, volume=0.75):
super().__init__(source, volume)
self.data = data
self.title = data.get("title")
self.url = data.get("url")
@classmethod
async def from_url(cls, url, *, loop=None, stream=False):
loop = loop or asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
if "entries" in data:
# take first item from a playlist
data = data["entries"][0]
filename = data["url"] if stream else ytdl.prepare_filename(data)
return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data)
class Music(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name='join', help='Add bot to the voice channel')
async def join(self, ctx: commands.Context):
if not ctx.message.author.voice:
await ctx.send("{} is not connected to a voice channel".format(ctx.message.author.name))
return
else:
channel = ctx.message.author.voice.channel
await channel.connect()
@commands.command(name='play', help='Play a song')
async def play(self, ctx: commands.Context, *query):
voice_client = ctx.message.guild.voice_client
if voice_client is not None:
if voice_client.is_playing():
await voice_client.stop()
url = "https://www.youtube.com/watch?v=" + youtube_search(*query)
async with ctx.typing():
player = await YTDLSource.from_url(url, loop=self.bot.loop)
voice_client.play(player, after=lambda e: print('Player error: {}'.format(e)) if e else None)
await ctx.send('Now playing: {}'.format(player.title))
else:
await ctx.send("Not connected to a voice channel.")
@commands.command(name='volume', help='Change the volume')
async def volume(self, ctx: commands.Context, volume: int):
voice_client = ctx.message.guild.voice_client
if voice_client is not None:
voice_client.source.volume = volume / 100
await ctx.send("Changed volume to {}%".format(volume))
else:
await ctx.send("Not connected to a voice channel.")
@commands.command(name='pause', help='Pause the song')
async def pause(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
if voice_client.is_playing():
await voice_client.pause()
else:
await ctx.send("The bot is not playing anything at the moment.")
@commands.command(name='resume', help='Resumes the song')
async def resume(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
if voice_client.is_paused():
await voice_client.resume()
elif voice_client.is_playing():
await ctx.send("The bot is already playing a song.")
else:
await ctx.send("The bot was not playing anything before this. Use play command.")
@commands.command(name='stop', help='Stops the song')
async def stop(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
if voice_client.is_playing():
await voice_client.stop()
else:
await ctx.send("The bot is not playing anything at the moment.")
@commands.command(name='leave', help='Disconnect bot from the voice channel')
async def leave(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
await voice_client.disconnect()
## Events
@bot.event
async def on_ready():
await bot.change_presence(activity = discord.Game(choice(games)))
print("We have logged in as {0.user}".format(bot))
guild_data = dict()
for guild in bot.guilds:
for channel in guild.text_channels :
if str(guild.id) == "839625432043225148":
if str(channel) == "bot-stats":
myChannel = channel
elif str(channel) == "bot-notification":
await channel.send('Robot Activated...')
await channel.send(file=discord.File('robot.png'))
guild_data[str(guild.name)] = (guild.member_count, guild.members)
for guild in guild_data:
await myChannel.send('Bot active in {}, Member Count : {}'.format(guild, guild_data[guild][0]))
for member in guild_data[guild][1]:
await myChannel.send('{}'.format(member))
@bot.listen()
async def on_message(message):
if message.author == bot.user:
return
if __name__ == "__main__":
bot.add_cog(Greetings(bot))
bot.add_cog(Math(bot))
bot.add_cog(Search(bot))
bot.add_cog(Meme(bot))
bot.add_cog(Music(bot))
bot.run(os.environ["token"])
| response = requests.get("https://meme-api.herokuapp.com/gimme")
json_data = json.loads(response.text)
await ctx.send("{}".format(json_data["url"])) | identifier_body |
bot.py | import os
import asyncio
from urllib.request import urlopen
from datetime import datetime
from random import choice, randint
import time
import json
import re
import discord
from discord.ext import commands
import youtube_dl
import requests
bot = commands.Bot(command_prefix="$", intents=discord.Intents().all(),
description="This is yet another discord bot.")
greet_options = ["Hey", "Hi", "Greetings", "Hello"]
coin = ["https://i.imgur.com/csSP4ce.jpg", "https://i.imgur.com/NSrQtWx.jpg"]
games = ["Valorant", "Minecraft", "Paladins"]
# Silence useless bug reports messages
youtube_dl.utils.bug_reports_message = lambda: ""
ytdl_options = {'format': 'bestaudio/best',
'outtmpl': '%(title)s-%(id)s.%(ext)s', # output file format
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0'
}
ffmpeg_options = {
'options': '-vn'
}
ytdl = youtube_dl.YoutubeDL(ytdl_options)
## Functions
def youtube_search(*query):
query_string = "+".join(list(query))
html_content = urlopen("https://www.youtube.com/results?search_query=" + query_string)
return re.search(r"\"\/watch\?v=(\S{11})\"", html_content.read().decode())[1]
## General Commands
@bot.command(name="ping", help="Check the bot latency")
async def ping(ctx: commands.Context):
message = await ctx.send("Pinging...")
time.sleep(0.5)
await message.edit(content="Latency = {}ms".format(round(bot.latency * 1000)))
@bot.command(name="say", help="Make the bot say something")
async def say(ctx: commands.Context, *words: str):
await ctx.send(" ".join(list(words)))
@bot.command(name="quote", help="Get a random quote")
async def quote(ctx: commands.Context):
response = requests.get("https://zenquotes.io/api/random")
json_data = json.loads(response.text)
await ctx.send("\"{}\"\t~ {}".format(json_data[0]["q"], json_data[0]["a"]))
@bot.command(name="toss", help="Toss a coin")
async def toss(ctx: commands.Context):
embed = discord.Embed(color=discord.Color.blue())
url = choice(coin)
embed.set_image(url=url)
await ctx.send(embed=embed)
@bot.command(name="info", help="View relevant info about the server")
async def info(ctx: commands.Context):
embed = discord.Embed(title="{}".format(ctx.guild.name),
timestamp=datetime.utcnow(),
color=discord.Color.blue())
embed.add_field(name="Server created at", value="{}".format(ctx.guild.created_at.strftime("%m-%d-%Y %H:%M")))
embed.add_field(name="Server Owner", value="{}".format(ctx.guild.owner))
embed.add_field(name="Server Region", value="{}".format(ctx.guild.region))
embed.add_field(name="Total Members", value="{}".format(ctx.guild.member_count))
embed.add_field(name="Server ID", value="{}".format(ctx.guild.id))
embed.set_thumbnail(url="{}".format(ctx.guild.icon_url))
await ctx.send(embed=embed)
class Greetings(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self._last_member = None
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member):
channel = member.guild.system_channel
if channel is not None:
|
@commands.command(name='hello', help='Say hello to the bot')
async def hello(self, ctx: commands.Context, *, member: discord.Member = None):
member = member or ctx.author
if self._last_member is None or self._last_member.id != member.id:
await ctx.send(choice(greet_options) + ' {0.name}!'.format(member))
else:
await ctx.send(choice(greet_options) + ' {0.name}... Hmm this feels familiar.'.format(member))
self._last_member = member
class Math(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="add", help="Add two numbers")
async def _add(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 + num2))
@commands.command(name="sub", help="Subtract two numbers")
async def _sub(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 - num2))
@commands.command(name="mul", help="Multiply two numbers")
async def _mul(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 * num2))
@commands.command(name="div", help="Divide two numbers")
async def _div(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 / num2))
@commands.command(name="mod", help="Remainder of two numbers")
async def _mod(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 % num2))
@commands.command(name="rand", help="Get a random number between two numbers")
async def _rand(self, ctx: commands.Context, num1: int, num2: int):
await ctx.send("{}".format(randint(num1, num2)))
class Search(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="youtube", help="Search youtube and get the result")
async def youtube(self, ctx: commands.Context, *query: str):
await ctx.send("https://www.youtube.com/watch?v=" + youtube_search(*query))
@commands.command(name="mal", help="Search an anime in My Anime List")
async def mal(self, ctx: commands.Context, *query):
query_string = "%20".join(list(query))
html_content = requests.get("https://myanimelist.net/search/all?q={}&cat=anime".format(query_string))
url = re.search(r"href=\"(https:\/\/myanimelist\.net\/anime\/[0-9]+\/[_a-zA-Z0-9\-]+?)\"", html_content.text)[1]
await ctx.send(url)
@commands.command(name="wiki", help="Get wikipedia search url")
async def wikipidea(self, ctx: commands.Context, *query: str):
await ctx.send("https://en.wikipedia.org/wiki/" + "_".join(list(query)))
@commands.command(name="google", help="Get google search url")
async def google(self, ctx: commands.Context, *query: str):
await ctx.send("https://www.google.com/search?q=" + "+".join(list(query)))
class Meme(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="template", help="Get a meme template")
async def meme_template(self, ctx: commands.Context):
response = requests.get("https://api.imgflip.com/get_memes")
json_data = json.loads(response.text)
memes_list = json_data["data"]["memes"]
index = randint(0, len(memes_list) - 1)
await ctx.send("{}".format(memes_list[index]["url"]))
@commands.command(name="meme", help="Get a random meme")
async def meme(self, ctx: commands.Context, arg: str=None):
response = requests.get("https://meme-api.herokuapp.com/gimme")
json_data = json.loads(response.text)
await ctx.send("{}".format(json_data["url"]))
@commands.command(name="gif", help="Get a random GIF or search one by query")
async def gif(self, ctx: commands.Context, *query):
url_prefix = "https://api.giphy.com/v1/gifs"
if query == ():
url = "{}/random?api_key={}&tag=&rating=r".format(url_prefix, os.environ["giphy_api_key"])
response = requests.get(url)
json_data = json.loads(response.text)
await ctx.send("{}".format(json_data["data"]["url"]))
else:
url = "{}/search?api_key={}&q={}&limit=25&rating=g&lang=en".format(url_prefix,
os.environ["giphy_api_key"],
"%20".join(list(query)))
response = requests.get(url)
json_data = json.loads(response.text)
await ctx.send("{}".format(json_data["data"][randint(0, 24)]["url"]))
## Music Player
class YTDLSource(discord.PCMVolumeTransformer):
def __init__(self, source, *, data, volume=0.75):
super().__init__(source, volume)
self.data = data
self.title = data.get("title")
self.url = data.get("url")
@classmethod
async def from_url(cls, url, *, loop=None, stream=False):
loop = loop or asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
if "entries" in data:
# take first item from a playlist
data = data["entries"][0]
filename = data["url"] if stream else ytdl.prepare_filename(data)
return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data)
class Music(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name='join', help='Add bot to the voice channel')
async def join(self, ctx: commands.Context):
if not ctx.message.author.voice:
await ctx.send("{} is not connected to a voice channel".format(ctx.message.author.name))
return
else:
channel = ctx.message.author.voice.channel
await channel.connect()
@commands.command(name='play', help='Play a song')
async def play(self, ctx: commands.Context, *query):
voice_client = ctx.message.guild.voice_client
if voice_client is not None:
if voice_client.is_playing():
await voice_client.stop()
url = "https://www.youtube.com/watch?v=" + youtube_search(*query)
async with ctx.typing():
player = await YTDLSource.from_url(url, loop=self.bot.loop)
voice_client.play(player, after=lambda e: print('Player error: {}'.format(e)) if e else None)
await ctx.send('Now playing: {}'.format(player.title))
else:
await ctx.send("Not connected to a voice channel.")
@commands.command(name='volume', help='Change the volume')
async def volume(self, ctx: commands.Context, volume: int):
voice_client = ctx.message.guild.voice_client
if voice_client is not None:
voice_client.source.volume = volume / 100
await ctx.send("Changed volume to {}%".format(volume))
else:
await ctx.send("Not connected to a voice channel.")
@commands.command(name='pause', help='Pause the song')
async def pause(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
if voice_client.is_playing():
await voice_client.pause()
else:
await ctx.send("The bot is not playing anything at the moment.")
@commands.command(name='resume', help='Resumes the song')
async def resume(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
if voice_client.is_paused():
await voice_client.resume()
elif voice_client.is_playing():
await ctx.send("The bot is already playing a song.")
else:
await ctx.send("The bot was not playing anything before this. Use play command.")
@commands.command(name='stop', help='Stops the song')
async def stop(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
if voice_client.is_playing():
await voice_client.stop()
else:
await ctx.send("The bot is not playing anything at the moment.")
@commands.command(name='leave', help='Disconnect bot from the voice channel')
async def leave(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
await voice_client.disconnect()
## Events
@bot.event
async def on_ready():
await bot.change_presence(activity = discord.Game(choice(games)))
print("We have logged in as {0.user}".format(bot))
guild_data = dict()
for guild in bot.guilds:
for channel in guild.text_channels :
if str(guild.id) == "839625432043225148":
if str(channel) == "bot-stats":
myChannel = channel
elif str(channel) == "bot-notification":
await channel.send('Robot Activated...')
await channel.send(file=discord.File('robot.png'))
guild_data[str(guild.name)] = (guild.member_count, guild.members)
for guild in guild_data:
await myChannel.send('Bot active in {}, Member Count : {}'.format(guild, guild_data[guild][0]))
for member in guild_data[guild][1]:
await myChannel.send('{}'.format(member))
@bot.listen()
async def on_message(message):
if message.author == bot.user:
return
if __name__ == "__main__":
bot.add_cog(Greetings(bot))
bot.add_cog(Math(bot))
bot.add_cog(Search(bot))
bot.add_cog(Meme(bot))
bot.add_cog(Music(bot))
bot.run(os.environ["token"])
| await channel.send('Welcome {0.mention}.'.format(member)) | conditional_block |
bot.py | import os
import asyncio
from urllib.request import urlopen
from datetime import datetime
from random import choice, randint
import time
import json
import re
import discord
from discord.ext import commands
import youtube_dl
import requests
bot = commands.Bot(command_prefix="$", intents=discord.Intents().all(),
description="This is yet another discord bot.")
greet_options = ["Hey", "Hi", "Greetings", "Hello"]
coin = ["https://i.imgur.com/csSP4ce.jpg", "https://i.imgur.com/NSrQtWx.jpg"]
games = ["Valorant", "Minecraft", "Paladins"]
# Silence useless bug reports messages
youtube_dl.utils.bug_reports_message = lambda: ""
ytdl_options = {'format': 'bestaudio/best',
'outtmpl': '%(title)s-%(id)s.%(ext)s', # output file format
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0'
}
ffmpeg_options = {
'options': '-vn'
}
ytdl = youtube_dl.YoutubeDL(ytdl_options)
## Functions
def youtube_search(*query):
query_string = "+".join(list(query))
html_content = urlopen("https://www.youtube.com/results?search_query=" + query_string)
return re.search(r"\"\/watch\?v=(\S{11})\"", html_content.read().decode())[1]
## General Commands
@bot.command(name="ping", help="Check the bot latency")
async def ping(ctx: commands.Context):
message = await ctx.send("Pinging...")
time.sleep(0.5)
await message.edit(content="Latency = {}ms".format(round(bot.latency * 1000)))
@bot.command(name="say", help="Make the bot say something")
async def | (ctx: commands.Context, *words: str):
await ctx.send(" ".join(list(words)))
@bot.command(name="quote", help="Get a random quote")
async def quote(ctx: commands.Context):
response = requests.get("https://zenquotes.io/api/random")
json_data = json.loads(response.text)
await ctx.send("\"{}\"\t~ {}".format(json_data[0]["q"], json_data[0]["a"]))
@bot.command(name="toss", help="Toss a coin")
async def toss(ctx: commands.Context):
embed = discord.Embed(color=discord.Color.blue())
url = choice(coin)
embed.set_image(url=url)
await ctx.send(embed=embed)
@bot.command(name="info", help="View relevant info about the server")
async def info(ctx: commands.Context):
embed = discord.Embed(title="{}".format(ctx.guild.name),
timestamp=datetime.utcnow(),
color=discord.Color.blue())
embed.add_field(name="Server created at", value="{}".format(ctx.guild.created_at.strftime("%m-%d-%Y %H:%M")))
embed.add_field(name="Server Owner", value="{}".format(ctx.guild.owner))
embed.add_field(name="Server Region", value="{}".format(ctx.guild.region))
embed.add_field(name="Total Members", value="{}".format(ctx.guild.member_count))
embed.add_field(name="Server ID", value="{}".format(ctx.guild.id))
embed.set_thumbnail(url="{}".format(ctx.guild.icon_url))
await ctx.send(embed=embed)
class Greetings(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
self._last_member = None
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member):
channel = member.guild.system_channel
if channel is not None:
await channel.send('Welcome {0.mention}.'.format(member))
@commands.command(name='hello', help='Say hello to the bot')
async def hello(self, ctx: commands.Context, *, member: discord.Member = None):
member = member or ctx.author
if self._last_member is None or self._last_member.id != member.id:
await ctx.send(choice(greet_options) + ' {0.name}!'.format(member))
else:
await ctx.send(choice(greet_options) + ' {0.name}... Hmm this feels familiar.'.format(member))
self._last_member = member
class Math(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="add", help="Add two numbers")
async def _add(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 + num2))
@commands.command(name="sub", help="Subtract two numbers")
async def _sub(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 - num2))
@commands.command(name="mul", help="Multiply two numbers")
async def _mul(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 * num2))
@commands.command(name="div", help="Divide two numbers")
async def _div(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 / num2))
@commands.command(name="mod", help="Remainder of two numbers")
async def _mod(self, ctx: commands.Context, num1: float, num2: float):
await ctx.send("{}".format(num1 % num2))
@commands.command(name="rand", help="Get a random number between two numbers")
async def _rand(self, ctx: commands.Context, num1: int, num2: int):
await ctx.send("{}".format(randint(num1, num2)))
class Search(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="youtube", help="Search youtube and get the result")
async def youtube(self, ctx: commands.Context, *query: str):
await ctx.send("https://www.youtube.com/watch?v=" + youtube_search(*query))
@commands.command(name="mal", help="Search an anime in My Anime List")
async def mal(self, ctx: commands.Context, *query):
query_string = "%20".join(list(query))
html_content = requests.get("https://myanimelist.net/search/all?q={}&cat=anime".format(query_string))
url = re.search(r"href=\"(https:\/\/myanimelist\.net\/anime\/[0-9]+\/[_a-zA-Z0-9\-]+?)\"", html_content.text)[1]
await ctx.send(url)
@commands.command(name="wiki", help="Get wikipedia search url")
async def wikipidea(self, ctx: commands.Context, *query: str):
await ctx.send("https://en.wikipedia.org/wiki/" + "_".join(list(query)))
@commands.command(name="google", help="Get google search url")
async def google(self, ctx: commands.Context, *query: str):
await ctx.send("https://www.google.com/search?q=" + "+".join(list(query)))
class Meme(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name="template", help="Get a meme template")
async def meme_template(self, ctx: commands.Context):
response = requests.get("https://api.imgflip.com/get_memes")
json_data = json.loads(response.text)
memes_list = json_data["data"]["memes"]
index = randint(0, len(memes_list) - 1)
await ctx.send("{}".format(memes_list[index]["url"]))
@commands.command(name="meme", help="Get a random meme")
async def meme(self, ctx: commands.Context, arg: str=None):
response = requests.get("https://meme-api.herokuapp.com/gimme")
json_data = json.loads(response.text)
await ctx.send("{}".format(json_data["url"]))
@commands.command(name="gif", help="Get a random GIF or search one by query")
async def gif(self, ctx: commands.Context, *query):
url_prefix = "https://api.giphy.com/v1/gifs"
if query == ():
url = "{}/random?api_key={}&tag=&rating=r".format(url_prefix, os.environ["giphy_api_key"])
response = requests.get(url)
json_data = json.loads(response.text)
await ctx.send("{}".format(json_data["data"]["url"]))
else:
url = "{}/search?api_key={}&q={}&limit=25&rating=g&lang=en".format(url_prefix,
os.environ["giphy_api_key"],
"%20".join(list(query)))
response = requests.get(url)
json_data = json.loads(response.text)
await ctx.send("{}".format(json_data["data"][randint(0, 24)]["url"]))
## Music Player
class YTDLSource(discord.PCMVolumeTransformer):
def __init__(self, source, *, data, volume=0.75):
super().__init__(source, volume)
self.data = data
self.title = data.get("title")
self.url = data.get("url")
@classmethod
async def from_url(cls, url, *, loop=None, stream=False):
loop = loop or asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
if "entries" in data:
# take first item from a playlist
data = data["entries"][0]
filename = data["url"] if stream else ytdl.prepare_filename(data)
return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data)
class Music(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command(name='join', help='Add bot to the voice channel')
async def join(self, ctx: commands.Context):
if not ctx.message.author.voice:
await ctx.send("{} is not connected to a voice channel".format(ctx.message.author.name))
return
else:
channel = ctx.message.author.voice.channel
await channel.connect()
@commands.command(name='play', help='Play a song')
async def play(self, ctx: commands.Context, *query):
voice_client = ctx.message.guild.voice_client
if voice_client is not None:
if voice_client.is_playing():
await voice_client.stop()
url = "https://www.youtube.com/watch?v=" + youtube_search(*query)
async with ctx.typing():
player = await YTDLSource.from_url(url, loop=self.bot.loop)
voice_client.play(player, after=lambda e: print('Player error: {}'.format(e)) if e else None)
await ctx.send('Now playing: {}'.format(player.title))
else:
await ctx.send("Not connected to a voice channel.")
@commands.command(name='volume', help='Change the volume')
async def volume(self, ctx: commands.Context, volume: int):
voice_client = ctx.message.guild.voice_client
if voice_client is not None:
voice_client.source.volume = volume / 100
await ctx.send("Changed volume to {}%".format(volume))
else:
await ctx.send("Not connected to a voice channel.")
@commands.command(name='pause', help='Pause the song')
async def pause(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
if voice_client.is_playing():
await voice_client.pause()
else:
await ctx.send("The bot is not playing anything at the moment.")
@commands.command(name='resume', help='Resumes the song')
async def resume(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
if voice_client.is_paused():
await voice_client.resume()
elif voice_client.is_playing():
await ctx.send("The bot is already playing a song.")
else:
await ctx.send("The bot was not playing anything before this. Use play command.")
@commands.command(name='stop', help='Stops the song')
async def stop(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
if voice_client.is_playing():
await voice_client.stop()
else:
await ctx.send("The bot is not playing anything at the moment.")
@commands.command(name='leave', help='Disconnect bot from the voice channel')
async def leave(self, ctx: commands.Context):
voice_client = ctx.message.guild.voice_client
await voice_client.disconnect()
## Events
@bot.event
async def on_ready():
await bot.change_presence(activity = discord.Game(choice(games)))
print("We have logged in as {0.user}".format(bot))
guild_data = dict()
for guild in bot.guilds:
for channel in guild.text_channels :
if str(guild.id) == "839625432043225148":
if str(channel) == "bot-stats":
myChannel = channel
elif str(channel) == "bot-notification":
await channel.send('Robot Activated...')
await channel.send(file=discord.File('robot.png'))
guild_data[str(guild.name)] = (guild.member_count, guild.members)
for guild in guild_data:
await myChannel.send('Bot active in {}, Member Count : {}'.format(guild, guild_data[guild][0]))
for member in guild_data[guild][1]:
await myChannel.send('{}'.format(member))
@bot.listen()
async def on_message(message):
if message.author == bot.user:
return
if __name__ == "__main__":
bot.add_cog(Greetings(bot))
bot.add_cog(Math(bot))
bot.add_cog(Search(bot))
bot.add_cog(Meme(bot))
bot.add_cog(Music(bot))
bot.run(os.environ["token"])
| say | identifier_name |
mc-info.js | //
// Code for the Arachne Event Display
// Author: Nathaniel Tagg ntagg@otterbein.edu
//
// Licence: this code is free for non-commertial use. Note other licences may apply to 3rd-party code.
// Any use of this code must include attribution to Nathaniel Tagg at Otterbein University, but otherwise
// you're free to modify and use it as you like.
//
function overbar(a) { return "<span style='text-decoration: overline'>"+a+"</span>"; }
PdgCodes = [];
PdgCodes[ 1] = "d";
PdgCodes[ 2] = "u";
PdgCodes[ 3] = "s";
PdgCodes[ 4] = "c";
PdgCodes[ 5] = "b";
PdgCodes[ 6] = "t";
PdgCodes[ 7] = "b'";
PdgCodes[ 8] = "t'";
PdgCodes[11] = "e<sup>-</sup>"; PdgCodes[-11] = "e<sup>+</sup>";
PdgCodes[12] = "ν<sub>e</sub>"; PdgCodes[-12] = overbar("ν")+"<sub>e</sub>";
PdgCodes[13] = "μ<sup>-</sup>"; PdgCodes[-13] = "μ<sup>+</sup>";
PdgCodes[14] = "ν<sub>μ</sub>"; PdgCodes[-14] = overbar("ν")+"<sub>μ</sub>";
PdgCodes[15] = "τ<sup>-</sup>"; PdgCodes[-15] = "τ<sup>+</sup>";
PdgCodes[16] = "ν<sub>τ</sub>"; PdgCodes[-16] = overbar("ν")+"<sub>τ</sub>";
PdgCodes[21] = PdgCodes[-21] = "g";
PdgCodes[22] = PdgCodes[-22] = "γ";
PdgCodes[23] = PdgCodes[-23] = "Z<sup>0</sup>;";
PdgCodes[24] = "W<sup>+</sup>;"; PdgCodes[-24] = "W<sup>-</sup>;";
PdgCodes[211] = "π<sup>+</sup>"; PdgCodes[-211] = "π<sup>-</sup>";
PdgCodes[111] = "π<sup>0</sup>"; PdgCodes[-111] = overbar(PdgCodes[111]);
PdgCodes[321] = "K<sup>+</sup>"; PdgCodes[-321] = "K<sup>-</sup>";
PdgCodes[311] = "K<sup>0</sup>"; PdgCodes[-311] = overbar(PdgCodes[311]);
PdgCodes[310] = "K<sup>0</sup><sub>short</sub>"; PdgCodes[-310] = overbar(PdgCodes[310]);
PdgCodes[130] = "K<sup>0</sup><sub>long</sub>"; PdgCodes[-130] = overbar(PdgCodes[130]);
PdgCodes[221] = "η"; PdgCodes[-221] = overbar(PdgCodes[221]);
PdgCodes[2112] = "n"; PdgCodes[-2112] = overbar(PdgCodes[2112]);
PdgCodes[2212] = "p"; PdgCodes[-2212] = overbar(PdgCodes[2212]);
PdgCodes[3122] = "Λ"; PdgCodes[-3122] = overbar(PdgCodes[3122]);
PdgCodes[3112] = "Σ<sup>-</sup>";
PdgCodes[3222] = "Σ<sup>+</sup>";
PdgCodes[3212] = "Σ<sup>0</sup>";
PdgCodes[-3112] = overbar("Σ")+"<sup>+</sup>";
PdgCodes[-3222] = overbar("Σ")+"<sup>-</sup>";
PdgCodes[-3212] = overbar("Σ")+"<sup>0</sup>";
PdgCodes[3322] = "Ξ<sup>0</sup>"; PdgCodes[-3322] = overbar(PdgCodes[3322]);
PdgCodes[3312] = "Ξ<sup>-</sup>"; PdgCodes[-3312] = overbar(PdgCodes[3312]);
PdgCodes[3334] = "Ω<sup>-</sup>"; PdgCodes[-3334] = "Ω<sup>+</sup>";
ElementName = [];
ElementName[0 ] = "X";
ElementName[1 ] = "H";
ElementName[2 ] = "He";
ElementName[3 ] = "Li";
ElementName[4 ] = "Be";
ElementName[5 ] = "B";
ElementName[6 ] = "C";
ElementName[7 ] = "N";
ElementName[8 ] = "O";
ElementName[9 ] = "F";
ElementName[10 ] = "Ne";
ElementName[11 ] = "Na";
ElementName[12 ] = "Mg";
ElementName[13 ] = "Al";
ElementName[14 ] = "Si";
ElementName[15 ] = "P";
ElementName[16 ] = "S";
ElementName[17 ] = "Cl";
ElementName[18 ] = "Ar";
ElementName[19 ] = "K";
ElementName[20 ] = "Ca";
ElementName[21 ] = "Sc";
ElementName[22 ] = "Ti";
ElementName[23 ] = "V";
ElementName[24 ] = "Cr";
ElementName[25 ] = "Mn";
ElementName[26 ] = "Fe";
ElementName[27 ] = "Co";
ElementName[28 ] = "Ni";
ElementName[29 ] = "Cu";
ElementName[30 ] = "Zn";
ElementName[31 ] = "Ga";
ElementName[32 ] = "Ge";
ElementName[33 ] = "As";
ElementName[34 ] = "Se";
ElementName[35 ] = "Br";
ElementName[36 ] = "Kr";
ElementName[37 ] = "Rb";
ElementName[38 ] = "Sr";
ElementName[39 ] = "Y";
ElementName[40 ] = "Zr";
ElementName[41 ] = "Nb";
ElementName[42 ] = "Mo";
ElementName[43 ] = "Tc";
ElementName[44 ] = "Ru";
ElementName[45 ] = "Rh";
ElementName[46 ] = "Pd";
ElementName[47 ] = "Ag";
ElementName[48 ] = "Cd";
ElementName[49 ] = "In";
ElementName[50 ] = "Sn";
ElementName[51 ] = "Sb";
ElementName[52 ] = "Te";
ElementName[53 ] = "I";
ElementName[54 ] = "Xe";
ElementName[55 ] = "Cs";
ElementName[56 ] = "Ba";
ElementName[57 ] = "La";
ElementName[58 ] = "Ce";
ElementName[59 ] = "Pr";
ElementName[60 ] = "Nd";
ElementName[61 ] = "Pm";
ElementName[62 ] = "Sm";
ElementName[63 ] = "Eu";
ElementName[64 ] = "Gd";
ElementName[65 ] = "Tb";
ElementName[66 ] = "Dy";
ElementName[67 ] = "Ho";
ElementName[68 ] = "Er";
ElementName[69 ] = "Tm";
ElementName[70 ] = "Yb";
ElementName[71 ] = "Lu";
ElementName[72 ] = "Hf";
ElementName[73 ] = "Ta";
ElementName[74 ] = "W";
ElementName[75 ] = "Re";
ElementName[76 ] = "Os";
ElementName[77 ] = "Ir";
ElementName[78 ] = "Pt";
ElementName[79 ] = "Au";
ElementName[80 ] = "Hg";
ElementName[81 ] = "Tl";
ElementName[82 ] = "Pb";
ElementName[83 ] = "Bi";
ElementName[84 ] = "Po";
ElementName[85 ] = "At";
ElementName[86 ] = "Rn";
ElementName[87 ] = "Fr";
ElementName[88 ] = "Ra";
ElementName[89 ] = "Ac";
ElementName[90 ] = "Th";
ElementName[91 ] = "Pa";
ElementName[92 ] = "U";
ElementName[93 ] = "Np";
ElementName[94 ] = "Pu";
ElementName[95 ] = "Am";
ElementName[96 ] = "Cm";
ElementName[97 ] = "Bk";
ElementName[98 ] = "Cf";
ElementName[99 ] = "Es";
ElementName[100] = "Fm";
ElementName[101] = "Md";
ElementName[102] = "No";
ElementName[103] = "Lr";
ElementName[104] = "Rf";
ElementName[105] = "Db";
ElementName[106] = "Sg";
ElementName[107] = "Bh";
ElementName[108] = "Hs";
ElementName[109] = "Mt";
ChannelCode = [];
ChannelCode[0] = "No interaction";
ChannelCode[1] = "QE";
ChannelCode[2] = "Res";
ChannelCode[3] = "DIS";
ChannelCode[4] = "Coh π";
ChannelCode[5] = "AMNUGAMMA";
ChannelCode[6] = "Inv &\mu; decay";
ChannelCode[7] = "&\nu<sub>e</sub> elastic";
ChannelCode[8] = "Unknown";
CurrentCode = [];
CurrentCode[1] = "CC";
CurrentCode[2] = "NC";
function | (code)
{
///
/// Transform a PDG code into something printable.
///
var p = PdgCodes[code];
if(p) return p;
// googled from geant4 code, seems consistent with the picture.
// Nuclear codes are given as 10-digit numbers +-10LZZZAAAI.
//For a nucleus consisting of np protons and nn neutrons
// A = np + nn +nlambda and Z = np.
// L = nlambda
// I gives the isomer level, with I = 0 corresponding
// to the ground state and I >0 to excitations
if(code > 1000000000) {
var I = code%10;
var A = Math.floor(code/10)%1000;
var Z = Math.floor(code/10000)%1000;
var el = ElementName[Z];
if(el == undefined) el = Z;
var p = "<sup>"+A+"</sup>"+el;
if(I>0) p += "<sup>*</sup>";
return p;
}
console.log("Can't find lookup code for ",code);
return code;
}
function MCInfoDisplay( element )
{
// console.debug("MCInfoDisplay::ctor",element);
this.fElement = element;
$(".accordion",this.fElement).accordion({
collapsible: true
});
// gStateMachine.BindObj("mcChange",this,"Build");
gStateMachine.BindObj("gateChange",this,"Build");
}
MCInfoDisplay.prototype.Build = function()
{
// console.debug("MCInfoDisplay::Build()");
$(".mc-event-info",this.fElement).empty();
$(".accordion",this.fElement).empty();
$(".accordion",this.fElement).accordion("destroy");
var mc = gRecord.mc;
if(!mc) return;
$(".mc-event-info",this.fElement).html(
"MC event: "
+ mc.mc_run + "|"
+ mc.mc_subrun + "|"
+ mc.mc_spill
);
var h="";
var ints = mc.interactions;
for(var whichint=0;whichint<ints.length;whichint++) {
var inter = ints[whichint];
console.log(inter.index);
h += "<h3 interaction='" + inter.index+"'>";
h += "<a href='#' >Interaction " + inter.index + "</a></h3>";
h += "<div>";
//var incE = parseFloat($('incoming4p',inter).text().split(',')[3])/1000.;
var incE = inter.incomingE/1000;
h+="<span>" + incE.toFixed(3) + " GeV " + GetParticle(inter.incomingPDG) +"</span><br/>";
var channel = inter.channel;
var current = inter.current;
h+= "<span>" + CurrentCode[current] + "("+current+") / " + ChannelCode[channel] + "("+channel+")" + "</span>";
h += '<table border="0" class="mc-info" >';
var r1 = '<tr><td width="100px" class="mc-info-caption">';
var r2 = '</td><td class="mc-info-el">';
var r3 = '</td></tr>';
h+= r1 + "Target Nucleus" + r2 + "<sup>" + inter.targetA + "</sup>" + ElementName[inter.targetZ] + r3;
h+= r1 + "Target Nucleon" + r2 + "<sup>" + GetParticle(inter.tgtNucleon) + r3;
h+= r1 + "Process Type" + r2 + inter.processType + r3;
h+= r1 + "Inc Particle" + r2 + GetParticle(inter.incomingPDG) + r3;
h+= r1 + "Q<sup>2</sup>" + r2 + inter.QSquared/1e6 + " GeV<sup>2</sup>" + r3;
h+= r1 + "X" + r2 + inter.bjorkenX + r3;
h+= r1 + "Y" + r2 + inter.bjorkenY + r3;
h+= r1 + "W" + r2 + inter.W/1000 + r3;
vtx = inter.vtx;
h+= r1 + "Vertex (mm)" + r2 + "x:"+Math.round(vtx[0])+ "<br/>"
+ "y:"+Math.round(vtx[1])+ "<br/>"
+ "z:"+Math.round(vtx[2])
+ r3;
h += '</table>';
h += "Final State: <br/>";
var fss = inter.FSParticles;
h+= "<table border='0' class='mc-fs'>";
for(var j=0;j<fss.length;j++) {
var fs = fss[j];
var pdg = fs.Pdg;
var px = fs.Px;
var py = fs.Py
var pz = fs.Pz
var etot = fs.E;
var p2 = px*px + py*py + pz*pz;
var p = Math.sqrt(p2);
var m2 = etot*etot - p2;
var m = 0;
if(m2>0) m = Math.sqrt(m2);
var ke = etot - m;
h+="<tr>";
h+="<td class='mc-fs-particle'>"
+ GetParticle(pdg)
+ "</td><td class='mc-fs-energy'>"
+ '<div class="collapsible-title" revealed="false">'
+ "KE=" + ke.toFixed(1) + " MeV"
+ '</div>'
+ '<div class="collapsible">'
+ " p=" + p.toFixed(1) + " MeV/c<br/>"
+ " px=" + px.toFixed(1) + " MeV/c<br/>"
+ " py=" + py.toFixed(1) + " MeV/c<br/>"
+ " pz=" + pz.toFixed(1) + " MeV/c<br/>"
+ " E =" + etot.toFixed(1) + " MeV<br/>"
+ " θ: " + (Math.acos(pz/p)*180/Math.PI).toFixed(1) + "°<br/>"
+ " φ: " + (Math.atan2(py,px)*180/Math.PI).toFixed(1) + "°<br/>"
+ '</div>'
+ "</td>";
h+="</tr>";
}
h+= "</table>";
h += '</div>';
}
$(".accordion",this.fElement).html(h);
make_collapsibles(this.fElement);
console.log($(".accordion",this.fElement));
$(".accordion",this.fElement).accordion({
collapsible: true
})
.bind('accordionchange', function(event, ui) {
console.log("accordian selection: ",ui.newHeader.attr('interaction'));
gInteraction = ui.newHeader.attr('interaction');
gStateMachine.Trigger('selectedHitChange');
// alert("interaction " + $(ui.newHeader).attr('interaction')); // jQuery object, activated header
});
}
| GetParticle | identifier_name |
mc-info.js | //
// Code for the Arachne Event Display
// Author: Nathaniel Tagg ntagg@otterbein.edu
//
// Licence: this code is free for non-commertial use. Note other licences may apply to 3rd-party code.
// Any use of this code must include attribution to Nathaniel Tagg at Otterbein University, but otherwise
// you're free to modify and use it as you like.
//
function overbar(a) { return "<span style='text-decoration: overline'>"+a+"</span>"; }
PdgCodes = [];
PdgCodes[ 1] = "d";
PdgCodes[ 2] = "u";
PdgCodes[ 3] = "s";
PdgCodes[ 4] = "c";
PdgCodes[ 5] = "b";
PdgCodes[ 6] = "t";
PdgCodes[ 7] = "b'";
PdgCodes[ 8] = "t'";
PdgCodes[11] = "e<sup>-</sup>"; PdgCodes[-11] = "e<sup>+</sup>";
PdgCodes[12] = "ν<sub>e</sub>"; PdgCodes[-12] = overbar("ν")+"<sub>e</sub>";
PdgCodes[13] = "μ<sup>-</sup>"; PdgCodes[-13] = "μ<sup>+</sup>";
PdgCodes[14] = "ν<sub>μ</sub>"; PdgCodes[-14] = overbar("ν")+"<sub>μ</sub>";
PdgCodes[15] = "τ<sup>-</sup>"; PdgCodes[-15] = "τ<sup>+</sup>";
PdgCodes[16] = "ν<sub>τ</sub>"; PdgCodes[-16] = overbar("ν")+"<sub>τ</sub>";
PdgCodes[21] = PdgCodes[-21] = "g";
PdgCodes[22] = PdgCodes[-22] = "γ";
PdgCodes[23] = PdgCodes[-23] = "Z<sup>0</sup>;";
PdgCodes[24] = "W<sup>+</sup>;"; PdgCodes[-24] = "W<sup>-</sup>;";
PdgCodes[211] = "π<sup>+</sup>"; PdgCodes[-211] = "π<sup>-</sup>";
PdgCodes[111] = "π<sup>0</sup>"; PdgCodes[-111] = overbar(PdgCodes[111]);
PdgCodes[321] = "K<sup>+</sup>"; PdgCodes[-321] = "K<sup>-</sup>";
PdgCodes[311] = "K<sup>0</sup>"; PdgCodes[-311] = overbar(PdgCodes[311]);
PdgCodes[310] = "K<sup>0</sup><sub>short</sub>"; PdgCodes[-310] = overbar(PdgCodes[310]);
PdgCodes[130] = "K<sup>0</sup><sub>long</sub>"; PdgCodes[-130] = overbar(PdgCodes[130]);
PdgCodes[221] = "η"; PdgCodes[-221] = overbar(PdgCodes[221]);
PdgCodes[2112] = "n"; PdgCodes[-2112] = overbar(PdgCodes[2112]);
PdgCodes[2212] = "p"; PdgCodes[-2212] = overbar(PdgCodes[2212]);
PdgCodes[3122] = "Λ"; PdgCodes[-3122] = overbar(PdgCodes[3122]);
PdgCodes[3112] = "Σ<sup>-</sup>";
PdgCodes[3222] = "Σ<sup>+</sup>";
PdgCodes[3212] = "Σ<sup>0</sup>";
PdgCodes[-3112] = overbar("Σ")+"<sup>+</sup>";
PdgCodes[-3222] = overbar("Σ")+"<sup>-</sup>";
PdgCodes[-3212] = overbar("Σ")+"<sup>0</sup>";
PdgCodes[3322] = "Ξ<sup>0</sup>"; PdgCodes[-3322] = overbar(PdgCodes[3322]);
PdgCodes[3312] = "Ξ<sup>-</sup>"; PdgCodes[-3312] = overbar(PdgCodes[3312]);
PdgCodes[3334] = "Ω<sup>-</sup>"; PdgCodes[-3334] = "Ω<sup>+</sup>";
ElementName = [];
ElementName[0 ] = "X";
ElementName[1 ] = "H";
ElementName[2 ] = "He";
ElementName[3 ] = "Li";
ElementName[4 ] = "Be";
ElementName[5 ] = "B";
ElementName[6 ] = "C";
ElementName[7 ] = "N";
ElementName[8 ] = "O";
ElementName[9 ] = "F";
ElementName[10 ] = "Ne";
ElementName[11 ] = "Na";
ElementName[12 ] = "Mg";
ElementName[13 ] = "Al";
ElementName[14 ] = "Si";
ElementName[15 ] = "P";
ElementName[16 ] = "S";
ElementName[17 ] = "Cl";
ElementName[18 ] = "Ar";
ElementName[19 ] = "K";
ElementName[20 ] = "Ca";
ElementName[21 ] = "Sc";
ElementName[22 ] = "Ti";
ElementName[23 ] = "V";
ElementName[24 ] = "Cr";
ElementName[25 ] = "Mn";
ElementName[26 ] = "Fe";
ElementName[27 ] = "Co";
ElementName[28 ] = "Ni";
ElementName[29 ] = "Cu";
ElementName[30 ] = "Zn";
ElementName[31 ] = "Ga";
ElementName[32 ] = "Ge";
ElementName[33 ] = "As";
ElementName[34 ] = "Se";
ElementName[35 ] = "Br";
ElementName[36 ] = "Kr";
ElementName[37 ] = "Rb";
ElementName[38 ] = "Sr";
ElementName[39 ] = "Y";
ElementName[40 ] = "Zr";
ElementName[41 ] = "Nb";
ElementName[42 ] = "Mo";
ElementName[43 ] = "Tc";
ElementName[44 ] = "Ru";
ElementName[45 ] = "Rh";
ElementName[46 ] = "Pd";
ElementName[47 ] = "Ag";
ElementName[48 ] = "Cd";
ElementName[49 ] = "In";
ElementName[50 ] = "Sn";
ElementName[51 ] = "Sb";
ElementName[52 ] = "Te";
ElementName[53 ] = "I";
ElementName[54 ] = "Xe";
ElementName[55 ] = "Cs";
ElementName[56 ] = "Ba";
ElementName[57 ] = "La";
ElementName[58 ] = "Ce";
ElementName[59 ] = "Pr";
ElementName[60 ] = "Nd";
ElementName[61 ] = "Pm";
ElementName[62 ] = "Sm";
ElementName[63 ] = "Eu";
ElementName[64 ] = "Gd";
ElementName[65 ] = "Tb";
ElementName[66 ] = "Dy";
ElementName[67 ] = "Ho";
ElementName[68 ] = "Er";
ElementName[69 ] = "Tm";
ElementName[70 ] = "Yb";
ElementName[71 ] = "Lu";
ElementName[72 ] = "Hf";
ElementName[73 ] = "Ta";
ElementName[74 ] = "W";
ElementName[75 ] = "Re";
ElementName[76 ] = "Os";
ElementName[77 ] = "Ir";
ElementName[78 ] = "Pt";
ElementName[79 ] = "Au";
ElementName[80 ] = "Hg";
ElementName[81 ] = "Tl";
ElementName[82 ] = "Pb";
ElementName[83 ] = "Bi";
ElementName[84 ] = "Po";
ElementName[85 ] = "At";
ElementName[86 ] = "Rn";
ElementName[87 ] = "Fr";
ElementName[88 ] = "Ra";
ElementName[89 ] = "Ac";
ElementName[90 ] = "Th";
ElementName[91 ] = "Pa";
ElementName[92 ] = "U";
ElementName[93 ] = "Np";
ElementName[94 ] = "Pu";
ElementName[95 ] = "Am";
ElementName[96 ] = "Cm";
ElementName[97 ] = "Bk";
ElementName[98 ] = "Cf";
ElementName[99 ] = "Es";
ElementName[100] = "Fm";
ElementName[101] = "Md";
ElementName[102] = "No";
ElementName[103] = "Lr";
ElementName[104] = "Rf";
ElementName[105] = "Db";
ElementName[106] = "Sg";
ElementName[107] = "Bh";
ElementName[108] = "Hs";
ElementName[109] = "Mt";
ChannelCode = [];
ChannelCode[0] = "No interaction";
ChannelCode[1] = "QE";
ChannelCode[2] = "Res";
ChannelCode[3] = "DIS";
ChannelCode[4] = "Coh π";
ChannelCode[5] = "AMNUGAMMA";
ChannelCode[6] = "Inv &\mu; decay";
ChannelCode[7] = "&\nu<sub>e</sub> elastic";
ChannelCode[8] = "Unknown";
CurrentCode = [];
CurrentCode[1] = "CC";
CurrentCode[2] = "NC";
function GetParticle(code)
|
function MCInfoDisplay( element )
{
// console.debug("MCInfoDisplay::ctor",element);
this.fElement = element;
$(".accordion",this.fElement).accordion({
collapsible: true
});
// gStateMachine.BindObj("mcChange",this,"Build");
gStateMachine.BindObj("gateChange",this,"Build");
}
MCInfoDisplay.prototype.Build = function()
{
// console.debug("MCInfoDisplay::Build()");
$(".mc-event-info",this.fElement).empty();
$(".accordion",this.fElement).empty();
$(".accordion",this.fElement).accordion("destroy");
var mc = gRecord.mc;
if(!mc) return;
$(".mc-event-info",this.fElement).html(
"MC event: "
+ mc.mc_run + "|"
+ mc.mc_subrun + "|"
+ mc.mc_spill
);
var h="";
var ints = mc.interactions;
for(var whichint=0;whichint<ints.length;whichint++) {
var inter = ints[whichint];
console.log(inter.index);
h += "<h3 interaction='" + inter.index+"'>";
h += "<a href='#' >Interaction " + inter.index + "</a></h3>";
h += "<div>";
//var incE = parseFloat($('incoming4p',inter).text().split(',')[3])/1000.;
var incE = inter.incomingE/1000;
h+="<span>" + incE.toFixed(3) + " GeV " + GetParticle(inter.incomingPDG) +"</span><br/>";
var channel = inter.channel;
var current = inter.current;
h+= "<span>" + CurrentCode[current] + "("+current+") / " + ChannelCode[channel] + "("+channel+")" + "</span>";
h += '<table border="0" class="mc-info" >';
var r1 = '<tr><td width="100px" class="mc-info-caption">';
var r2 = '</td><td class="mc-info-el">';
var r3 = '</td></tr>';
h+= r1 + "Target Nucleus" + r2 + "<sup>" + inter.targetA + "</sup>" + ElementName[inter.targetZ] + r3;
h+= r1 + "Target Nucleon" + r2 + "<sup>" + GetParticle(inter.tgtNucleon) + r3;
h+= r1 + "Process Type" + r2 + inter.processType + r3;
h+= r1 + "Inc Particle" + r2 + GetParticle(inter.incomingPDG) + r3;
h+= r1 + "Q<sup>2</sup>" + r2 + inter.QSquared/1e6 + " GeV<sup>2</sup>" + r3;
h+= r1 + "X" + r2 + inter.bjorkenX + r3;
h+= r1 + "Y" + r2 + inter.bjorkenY + r3;
h+= r1 + "W" + r2 + inter.W/1000 + r3;
vtx = inter.vtx;
h+= r1 + "Vertex (mm)" + r2 + "x:"+Math.round(vtx[0])+ "<br/>"
+ "y:"+Math.round(vtx[1])+ "<br/>"
+ "z:"+Math.round(vtx[2])
+ r3;
h += '</table>';
h += "Final State: <br/>";
var fss = inter.FSParticles;
h+= "<table border='0' class='mc-fs'>";
for(var j=0;j<fss.length;j++) {
var fs = fss[j];
var pdg = fs.Pdg;
var px = fs.Px;
var py = fs.Py
var pz = fs.Pz
var etot = fs.E;
var p2 = px*px + py*py + pz*pz;
var p = Math.sqrt(p2);
var m2 = etot*etot - p2;
var m = 0;
if(m2>0) m = Math.sqrt(m2);
var ke = etot - m;
h+="<tr>";
h+="<td class='mc-fs-particle'>"
+ GetParticle(pdg)
+ "</td><td class='mc-fs-energy'>"
+ '<div class="collapsible-title" revealed="false">'
+ "KE=" + ke.toFixed(1) + " MeV"
+ '</div>'
+ '<div class="collapsible">'
+ " p=" + p.toFixed(1) + " MeV/c<br/>"
+ " px=" + px.toFixed(1) + " MeV/c<br/>"
+ " py=" + py.toFixed(1) + " MeV/c<br/>"
+ " pz=" + pz.toFixed(1) + " MeV/c<br/>"
+ " E =" + etot.toFixed(1) + " MeV<br/>"
+ " θ: " + (Math.acos(pz/p)*180/Math.PI).toFixed(1) + "°<br/>"
+ " φ: " + (Math.atan2(py,px)*180/Math.PI).toFixed(1) + "°<br/>"
+ '</div>'
+ "</td>";
h+="</tr>";
}
h+= "</table>";
h += '</div>';
}
$(".accordion",this.fElement).html(h);
make_collapsibles(this.fElement);
console.log($(".accordion",this.fElement));
$(".accordion",this.fElement).accordion({
collapsible: true
})
.bind('accordionchange', function(event, ui) {
console.log("accordian selection: ",ui.newHeader.attr('interaction'));
gInteraction = ui.newHeader.attr('interaction');
gStateMachine.Trigger('selectedHitChange');
// alert("interaction " + $(ui.newHeader).attr('interaction')); // jQuery object, activated header
});
}
| {
///
/// Transform a PDG code into something printable.
///
var p = PdgCodes[code];
if(p) return p;
// googled from geant4 code, seems consistent with the picture.
// Nuclear codes are given as 10-digit numbers +-10LZZZAAAI.
//For a nucleus consisting of np protons and nn neutrons
// A = np + nn +nlambda and Z = np.
// L = nlambda
// I gives the isomer level, with I = 0 corresponding
// to the ground state and I >0 to excitations
if(code > 1000000000) {
var I = code%10;
var A = Math.floor(code/10)%1000;
var Z = Math.floor(code/10000)%1000;
var el = ElementName[Z];
if(el == undefined) el = Z;
var p = "<sup>"+A+"</sup>"+el;
if(I>0) p += "<sup>*</sup>";
return p;
}
console.log("Can't find lookup code for ",code);
return code;
} | identifier_body |
mc-info.js | // | // Any use of this code must include attribution to Nathaniel Tagg at Otterbein University, but otherwise
// you're free to modify and use it as you like.
//
function overbar(a) { return "<span style='text-decoration: overline'>"+a+"</span>"; }
PdgCodes = [];
PdgCodes[ 1] = "d";
PdgCodes[ 2] = "u";
PdgCodes[ 3] = "s";
PdgCodes[ 4] = "c";
PdgCodes[ 5] = "b";
PdgCodes[ 6] = "t";
PdgCodes[ 7] = "b'";
PdgCodes[ 8] = "t'";
PdgCodes[11] = "e<sup>-</sup>"; PdgCodes[-11] = "e<sup>+</sup>";
PdgCodes[12] = "ν<sub>e</sub>"; PdgCodes[-12] = overbar("ν")+"<sub>e</sub>";
PdgCodes[13] = "μ<sup>-</sup>"; PdgCodes[-13] = "μ<sup>+</sup>";
PdgCodes[14] = "ν<sub>μ</sub>"; PdgCodes[-14] = overbar("ν")+"<sub>μ</sub>";
PdgCodes[15] = "τ<sup>-</sup>"; PdgCodes[-15] = "τ<sup>+</sup>";
PdgCodes[16] = "ν<sub>τ</sub>"; PdgCodes[-16] = overbar("ν")+"<sub>τ</sub>";
PdgCodes[21] = PdgCodes[-21] = "g";
PdgCodes[22] = PdgCodes[-22] = "γ";
PdgCodes[23] = PdgCodes[-23] = "Z<sup>0</sup>;";
PdgCodes[24] = "W<sup>+</sup>;"; PdgCodes[-24] = "W<sup>-</sup>;";
PdgCodes[211] = "π<sup>+</sup>"; PdgCodes[-211] = "π<sup>-</sup>";
PdgCodes[111] = "π<sup>0</sup>"; PdgCodes[-111] = overbar(PdgCodes[111]);
PdgCodes[321] = "K<sup>+</sup>"; PdgCodes[-321] = "K<sup>-</sup>";
PdgCodes[311] = "K<sup>0</sup>"; PdgCodes[-311] = overbar(PdgCodes[311]);
PdgCodes[310] = "K<sup>0</sup><sub>short</sub>"; PdgCodes[-310] = overbar(PdgCodes[310]);
PdgCodes[130] = "K<sup>0</sup><sub>long</sub>"; PdgCodes[-130] = overbar(PdgCodes[130]);
PdgCodes[221] = "η"; PdgCodes[-221] = overbar(PdgCodes[221]);
PdgCodes[2112] = "n"; PdgCodes[-2112] = overbar(PdgCodes[2112]);
PdgCodes[2212] = "p"; PdgCodes[-2212] = overbar(PdgCodes[2212]);
PdgCodes[3122] = "Λ"; PdgCodes[-3122] = overbar(PdgCodes[3122]);
PdgCodes[3112] = "Σ<sup>-</sup>";
PdgCodes[3222] = "Σ<sup>+</sup>";
PdgCodes[3212] = "Σ<sup>0</sup>";
PdgCodes[-3112] = overbar("Σ")+"<sup>+</sup>";
PdgCodes[-3222] = overbar("Σ")+"<sup>-</sup>";
PdgCodes[-3212] = overbar("Σ")+"<sup>0</sup>";
PdgCodes[3322] = "Ξ<sup>0</sup>"; PdgCodes[-3322] = overbar(PdgCodes[3322]);
PdgCodes[3312] = "Ξ<sup>-</sup>"; PdgCodes[-3312] = overbar(PdgCodes[3312]);
PdgCodes[3334] = "Ω<sup>-</sup>"; PdgCodes[-3334] = "Ω<sup>+</sup>";
ElementName = [];
ElementName[0 ] = "X";
ElementName[1 ] = "H";
ElementName[2 ] = "He";
ElementName[3 ] = "Li";
ElementName[4 ] = "Be";
ElementName[5 ] = "B";
ElementName[6 ] = "C";
ElementName[7 ] = "N";
ElementName[8 ] = "O";
ElementName[9 ] = "F";
ElementName[10 ] = "Ne";
ElementName[11 ] = "Na";
ElementName[12 ] = "Mg";
ElementName[13 ] = "Al";
ElementName[14 ] = "Si";
ElementName[15 ] = "P";
ElementName[16 ] = "S";
ElementName[17 ] = "Cl";
ElementName[18 ] = "Ar";
ElementName[19 ] = "K";
ElementName[20 ] = "Ca";
ElementName[21 ] = "Sc";
ElementName[22 ] = "Ti";
ElementName[23 ] = "V";
ElementName[24 ] = "Cr";
ElementName[25 ] = "Mn";
ElementName[26 ] = "Fe";
ElementName[27 ] = "Co";
ElementName[28 ] = "Ni";
ElementName[29 ] = "Cu";
ElementName[30 ] = "Zn";
ElementName[31 ] = "Ga";
ElementName[32 ] = "Ge";
ElementName[33 ] = "As";
ElementName[34 ] = "Se";
ElementName[35 ] = "Br";
ElementName[36 ] = "Kr";
ElementName[37 ] = "Rb";
ElementName[38 ] = "Sr";
ElementName[39 ] = "Y";
ElementName[40 ] = "Zr";
ElementName[41 ] = "Nb";
ElementName[42 ] = "Mo";
ElementName[43 ] = "Tc";
ElementName[44 ] = "Ru";
ElementName[45 ] = "Rh";
ElementName[46 ] = "Pd";
ElementName[47 ] = "Ag";
ElementName[48 ] = "Cd";
ElementName[49 ] = "In";
ElementName[50 ] = "Sn";
ElementName[51 ] = "Sb";
ElementName[52 ] = "Te";
ElementName[53 ] = "I";
ElementName[54 ] = "Xe";
ElementName[55 ] = "Cs";
ElementName[56 ] = "Ba";
ElementName[57 ] = "La";
ElementName[58 ] = "Ce";
ElementName[59 ] = "Pr";
ElementName[60 ] = "Nd";
ElementName[61 ] = "Pm";
ElementName[62 ] = "Sm";
ElementName[63 ] = "Eu";
ElementName[64 ] = "Gd";
ElementName[65 ] = "Tb";
ElementName[66 ] = "Dy";
ElementName[67 ] = "Ho";
ElementName[68 ] = "Er";
ElementName[69 ] = "Tm";
ElementName[70 ] = "Yb";
ElementName[71 ] = "Lu";
ElementName[72 ] = "Hf";
ElementName[73 ] = "Ta";
ElementName[74 ] = "W";
ElementName[75 ] = "Re";
ElementName[76 ] = "Os";
ElementName[77 ] = "Ir";
ElementName[78 ] = "Pt";
ElementName[79 ] = "Au";
ElementName[80 ] = "Hg";
ElementName[81 ] = "Tl";
ElementName[82 ] = "Pb";
ElementName[83 ] = "Bi";
ElementName[84 ] = "Po";
ElementName[85 ] = "At";
ElementName[86 ] = "Rn";
ElementName[87 ] = "Fr";
ElementName[88 ] = "Ra";
ElementName[89 ] = "Ac";
ElementName[90 ] = "Th";
ElementName[91 ] = "Pa";
ElementName[92 ] = "U";
ElementName[93 ] = "Np";
ElementName[94 ] = "Pu";
ElementName[95 ] = "Am";
ElementName[96 ] = "Cm";
ElementName[97 ] = "Bk";
ElementName[98 ] = "Cf";
ElementName[99 ] = "Es";
ElementName[100] = "Fm";
ElementName[101] = "Md";
ElementName[102] = "No";
ElementName[103] = "Lr";
ElementName[104] = "Rf";
ElementName[105] = "Db";
ElementName[106] = "Sg";
ElementName[107] = "Bh";
ElementName[108] = "Hs";
ElementName[109] = "Mt";
ChannelCode = [];
ChannelCode[0] = "No interaction";
ChannelCode[1] = "QE";
ChannelCode[2] = "Res";
ChannelCode[3] = "DIS";
ChannelCode[4] = "Coh π";
ChannelCode[5] = "AMNUGAMMA";
ChannelCode[6] = "Inv &\mu; decay";
ChannelCode[7] = "&\nu<sub>e</sub> elastic";
ChannelCode[8] = "Unknown";
CurrentCode = [];
CurrentCode[1] = "CC";
CurrentCode[2] = "NC";
function GetParticle(code)
{
///
/// Transform a PDG code into something printable.
///
var p = PdgCodes[code];
if(p) return p;
// googled from geant4 code, seems consistent with the picture.
// Nuclear codes are given as 10-digit numbers +-10LZZZAAAI.
//For a nucleus consisting of np protons and nn neutrons
// A = np + nn +nlambda and Z = np.
// L = nlambda
// I gives the isomer level, with I = 0 corresponding
// to the ground state and I >0 to excitations
if(code > 1000000000) {
var I = code%10;
var A = Math.floor(code/10)%1000;
var Z = Math.floor(code/10000)%1000;
var el = ElementName[Z];
if(el == undefined) el = Z;
var p = "<sup>"+A+"</sup>"+el;
if(I>0) p += "<sup>*</sup>";
return p;
}
console.log("Can't find lookup code for ",code);
return code;
}
function MCInfoDisplay( element )
{
// console.debug("MCInfoDisplay::ctor",element);
this.fElement = element;
$(".accordion",this.fElement).accordion({
collapsible: true
});
// gStateMachine.BindObj("mcChange",this,"Build");
gStateMachine.BindObj("gateChange",this,"Build");
}
MCInfoDisplay.prototype.Build = function()
{
// console.debug("MCInfoDisplay::Build()");
$(".mc-event-info",this.fElement).empty();
$(".accordion",this.fElement).empty();
$(".accordion",this.fElement).accordion("destroy");
var mc = gRecord.mc;
if(!mc) return;
$(".mc-event-info",this.fElement).html(
"MC event: "
+ mc.mc_run + "|"
+ mc.mc_subrun + "|"
+ mc.mc_spill
);
var h="";
var ints = mc.interactions;
for(var whichint=0;whichint<ints.length;whichint++) {
var inter = ints[whichint];
console.log(inter.index);
h += "<h3 interaction='" + inter.index+"'>";
h += "<a href='#' >Interaction " + inter.index + "</a></h3>";
h += "<div>";
//var incE = parseFloat($('incoming4p',inter).text().split(',')[3])/1000.;
var incE = inter.incomingE/1000;
h+="<span>" + incE.toFixed(3) + " GeV " + GetParticle(inter.incomingPDG) +"</span><br/>";
var channel = inter.channel;
var current = inter.current;
h+= "<span>" + CurrentCode[current] + "("+current+") / " + ChannelCode[channel] + "("+channel+")" + "</span>";
h += '<table border="0" class="mc-info" >';
var r1 = '<tr><td width="100px" class="mc-info-caption">';
var r2 = '</td><td class="mc-info-el">';
var r3 = '</td></tr>';
h+= r1 + "Target Nucleus" + r2 + "<sup>" + inter.targetA + "</sup>" + ElementName[inter.targetZ] + r3;
h+= r1 + "Target Nucleon" + r2 + "<sup>" + GetParticle(inter.tgtNucleon) + r3;
h+= r1 + "Process Type" + r2 + inter.processType + r3;
h+= r1 + "Inc Particle" + r2 + GetParticle(inter.incomingPDG) + r3;
h+= r1 + "Q<sup>2</sup>" + r2 + inter.QSquared/1e6 + " GeV<sup>2</sup>" + r3;
h+= r1 + "X" + r2 + inter.bjorkenX + r3;
h+= r1 + "Y" + r2 + inter.bjorkenY + r3;
h+= r1 + "W" + r2 + inter.W/1000 + r3;
vtx = inter.vtx;
h+= r1 + "Vertex (mm)" + r2 + "x:"+Math.round(vtx[0])+ "<br/>"
+ "y:"+Math.round(vtx[1])+ "<br/>"
+ "z:"+Math.round(vtx[2])
+ r3;
h += '</table>';
h += "Final State: <br/>";
var fss = inter.FSParticles;
h+= "<table border='0' class='mc-fs'>";
for(var j=0;j<fss.length;j++) {
var fs = fss[j];
var pdg = fs.Pdg;
var px = fs.Px;
var py = fs.Py
var pz = fs.Pz
var etot = fs.E;
var p2 = px*px + py*py + pz*pz;
var p = Math.sqrt(p2);
var m2 = etot*etot - p2;
var m = 0;
if(m2>0) m = Math.sqrt(m2);
var ke = etot - m;
h+="<tr>";
h+="<td class='mc-fs-particle'>"
+ GetParticle(pdg)
+ "</td><td class='mc-fs-energy'>"
+ '<div class="collapsible-title" revealed="false">'
+ "KE=" + ke.toFixed(1) + " MeV"
+ '</div>'
+ '<div class="collapsible">'
+ " p=" + p.toFixed(1) + " MeV/c<br/>"
+ " px=" + px.toFixed(1) + " MeV/c<br/>"
+ " py=" + py.toFixed(1) + " MeV/c<br/>"
+ " pz=" + pz.toFixed(1) + " MeV/c<br/>"
+ " E =" + etot.toFixed(1) + " MeV<br/>"
+ " θ: " + (Math.acos(pz/p)*180/Math.PI).toFixed(1) + "°<br/>"
+ " φ: " + (Math.atan2(py,px)*180/Math.PI).toFixed(1) + "°<br/>"
+ '</div>'
+ "</td>";
h+="</tr>";
}
h+= "</table>";
h += '</div>';
}
$(".accordion",this.fElement).html(h);
make_collapsibles(this.fElement);
console.log($(".accordion",this.fElement));
$(".accordion",this.fElement).accordion({
collapsible: true
})
.bind('accordionchange', function(event, ui) {
console.log("accordian selection: ",ui.newHeader.attr('interaction'));
gInteraction = ui.newHeader.attr('interaction');
gStateMachine.Trigger('selectedHitChange');
// alert("interaction " + $(ui.newHeader).attr('interaction')); // jQuery object, activated header
});
} | // Code for the Arachne Event Display
// Author: Nathaniel Tagg ntagg@otterbein.edu
//
// Licence: this code is free for non-commertial use. Note other licences may apply to 3rd-party code. | random_line_split |
action.rs | use crate::core::ValueType;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::fmt;
use std::ops::{Neg, Sub};
type SignalType = u8;
const BOUND: SignalType = SignalType::MAX;
const BOUND_FLOAT: f64 = BOUND as f64;
/// Action is basic type of Indicator's signals
///
/// It may be positive \(means *Buy* some amount\). It may be negative \(means *Sell* some amount\). Or there may be no signal at all.
///
/// You can convert `Action` to *analog* `i8` value using [`analog()`](Action::analog) method, where:
/// * `1` means *buy*;
/// * `-1` means *sell*;
/// * `0` means no signal.
///
/// You can convert `Action` to *digital* `Option<f64>` value using [`ratio()`](Action::ratio) method with internal value in range \[`-1.0`; `1.0`\], where:
/// * negative value means *sell* some portion;
/// * positive value means *buy* some potion;
/// * zero value means there is no distinct decision;
/// * [`None`](core::option::Option::None) means no signal.
#[derive(Clone, Copy, Eq, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum Action {
/// Buy signal
Buy(SignalType),
/// No signal
None,
/// Sell signal
Sell(SignalType),
}
impl Action {
/// Shortcut for *Buy All* signal
pub const BUY_ALL: Self = Self::Buy(BOUND);
/// Shortcut for *Sell All* signal
pub const SELL_ALL: Self = Self::Sell(BOUND);
/// Create instance from *analog* signal (which can be only `-1`, `0` or `1`)
///
/// Any positive number converts to `BUY_ALL`
///
/// Any negative number converts to `SELL_ALL`
///
/// Zero converts to None
#[must_use]
pub fn from_analog(value: i8) -> Self {
Self::from(value)
}
/// Converts value with the interval \[`-1.0`; `1.0`\]
#[must_use]
pub fn ratio(self) -> Option<ValueType> {
self.into()
}
/// Returns a sign (`1` or `-1`) of internal value if value exists and not zero.
///
/// Otherwise returns `0`.
#[must_use]
pub fn analog(self) -> i8 {
self.into()
}
/// Returns a sign of internal value if value exists
///
/// Otherwise returns None
#[must_use]
pub fn | (self) -> Option<i8> {
self.into()
}
/// Return an internal representation of the value if signal exists or None if it doesn't.
#[must_use]
pub const fn value(self) -> Option<SignalType> {
match self {
Self::None => None,
Self::Buy(v) | Self::Sell(v) => Some(v),
}
}
/// Checks if there is no signal
#[must_use]
pub const fn is_none(self) -> bool {
matches!(self, Self::None)
}
/// Checks if there is signal
#[must_use]
pub const fn is_some(self) -> bool {
!self.is_none()
}
}
impl PartialEq for Action {
fn eq(&self, other: &Self) -> bool {
match (*self, *other) {
(Self::None, Self::None)
| (Self::Buy(0), Self::Sell(0))
| (Self::Sell(0), Self::Buy(0)) => true,
(Self::Buy(a), Self::Buy(b)) | (Self::Sell(a), Self::Sell(b)) => a == b,
_ => false,
}
}
}
impl Default for Action {
fn default() -> Self {
Self::None
}
}
impl From<bool> for Action {
fn from(value: bool) -> Self {
if value {
Self::BUY_ALL
} else {
Self::None
}
}
}
impl From<i8> for Action {
fn from(value: i8) -> Self {
match value {
0 => Self::None,
v => {
if v > 0 {
Self::BUY_ALL
} else {
Self::SELL_ALL
}
}
}
}
}
impl From<Action> for i8 {
fn from(value: Action) -> Self {
match value {
Action::Buy(value) => (value > 0) as Self,
Action::None => 0,
Action::Sell(value) => -((value > 0) as Self),
}
}
}
impl From<Option<i8>> for Action {
fn from(value: Option<i8>) -> Self {
match value {
None => Self::None,
Some(v) => v.into(),
}
}
}
impl From<Action> for Option<i8> {
fn from(value: Action) -> Self {
match value {
Action::None => None,
_ => Some(value.into()),
}
}
}
#[inline]
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
fn from_normalized_f64_to_bounded(value: f64) -> SignalType {
debug_assert!((0.0..=1.0).contains(&value));
(value * BOUND_FLOAT).round() as SignalType
}
impl From<f64> for Action {
fn from(v: f64) -> Self {
if v.is_nan() {
return Self::None;
}
let normalized = v.max(-1.0).min(1.0);
let value = from_normalized_f64_to_bounded(normalized.abs());
if normalized.is_sign_negative() {
if value == BOUND {
Self::SELL_ALL
} else {
Self::Sell(value)
}
} else if value == BOUND {
Self::BUY_ALL
} else {
Self::Buy(value)
}
}
}
impl From<Option<f64>> for Action {
fn from(value: Option<f64>) -> Self {
match value {
None => Self::None,
Some(value) => value.into(),
}
}
}
impl From<f32> for Action {
#[allow(clippy::cast_possible_truncation)]
fn from(v: f32) -> Self {
Self::from(v as f64)
}
}
impl From<Option<f32>> for Action {
fn from(value: Option<f32>) -> Self {
match value {
None => Self::None,
Some(value) => value.into(),
}
}
}
impl From<Action> for Option<ValueType> {
fn from(value: Action) -> Self {
match value {
Action::None => None,
Action::Buy(value) => Some((value as ValueType) / (BOUND as ValueType)),
Action::Sell(value) => Some(-(value as ValueType) / (BOUND as ValueType)),
}
}
}
impl<T: Into<Self> + Copy> From<&T> for Action {
fn from(value: &T) -> Self {
(*value).into()
}
}
// impl<T: Borrow<Action>> From<T> for i8 {
// fn from(value: T) -> Self {
// //value.
// }
// }
impl Neg for Action {
type Output = Self;
fn neg(self) -> Self::Output {
match self {
Self::None => Self::None,
Self::Buy(value) => Self::Sell(value),
Self::Sell(value) => Self::Buy(value),
}
}
}
impl Sub for Action {
type Output = Self;
fn sub(self, rhs: Self) -> Self::Output {
match (self, rhs) {
(Self::None, Self::None) => Self::None,
(s, Self::None) => s,
(Self::None, s) => -s,
(Self::Buy(v1), Self::Buy(v2)) => {
if v1 >= v2 {
Self::Buy(v1 - v2)
} else {
Self::Sell(v2 - v1)
}
}
(Self::Sell(v1), Self::Sell(v2)) => {
if v1 >= v2 {
Self::Sell(v1 - v2)
} else {
Self::Buy(v2 - v1)
}
}
(s1, s2) => s1 - (-s2),
}
}
}
impl fmt::Debug for Action {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None => write!(f, "N"),
Self::Buy(value) => write!(f, "+{}", value),
Self::Sell(value) => write!(f, "-{}", value),
}
}
}
impl fmt::Display for Action {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None => write!(f, "N"),
Self::Buy(_) => write!(f, "+{:.2}", self.ratio().unwrap()),
Self::Sell(_) => write!(f, "-{:.2}", self.ratio().unwrap().abs()),
}
}
}
#[cfg(test)]
mod tests {
use super::{Action, BOUND};
use crate::core::ValueType;
use std::cmp::Ordering;
#[test]
fn test_action_ratio() {
assert_eq!(Some(1.0), Action::Buy(BOUND).ratio());
assert_eq!(Some(-1.0), Action::Sell(BOUND).ratio());
assert_eq!(Some(0.0), Action::Sell(0).ratio());
assert_eq!(Some(0.0), Action::Buy(0).ratio());
assert_eq!(Action::Sell(0), Action::Buy(0));
}
#[test]
fn test_action_from_float() {
let half_bound = if BOUND % 2 == 1 {
BOUND / 2 + 1
} else {
BOUND / 2
};
// f64
assert_eq!(Action::from(0.0_f64), Action::Buy(0));
assert_eq!(Action::from(-0.5_f64), Action::Sell(half_bound));
assert_eq!(Action::from(1.0_f64), Action::BUY_ALL);
assert_eq!(Action::from(-1.0_f64), Action::SELL_ALL);
assert_eq!(Action::from(2.0_f64), Action::BUY_ALL);
assert_eq!(Action::from(-2.0_f64), Action::SELL_ALL);
// f32
assert_eq!(Action::from(0.0_f32), Action::Buy(0));
assert_eq!(Action::from(-0.5_f32), Action::Sell(half_bound));
assert_eq!(Action::from(1.0_f32), Action::BUY_ALL);
assert_eq!(Action::from(-1.0_f32), Action::SELL_ALL);
assert_eq!(Action::from(2.0_f32), Action::BUY_ALL);
assert_eq!(Action::from(-2.0_f32), Action::SELL_ALL);
// other
assert_eq!(Action::from(1. / BOUND as ValueType), Action::Buy(1));
assert_eq!(Action::from(-1. / BOUND as ValueType), Action::Sell(1));
assert_eq!(Action::from(-2. / BOUND as ValueType), Action::Sell(2));
}
#[test]
fn test_action_from_into() {
(1..=BOUND).for_each(|x| {
let action = if x < BOUND {
Action::Buy(x)
} else {
Action::BUY_ALL
};
let ratio = action.ratio().unwrap();
let action2: Action = ratio.into();
assert!(ratio > 0.);
assert_eq!(
action,
ratio.into(),
"at index {} with action {:?} ratio {}, action#2 {:?}",
x,
action,
ratio,
action2,
);
let action = if x < BOUND {
Action::Sell(x)
} else {
Action::SELL_ALL
};
let ratio = action.ratio().unwrap();
let action2: Action = ratio.into();
assert!(ratio < 0.);
assert_eq!(
action,
ratio.into(),
"at index {} with action {:?} ratio {}, action#2 {:?}",
x,
action,
ratio,
action2,
);
});
}
#[test]
fn test_action_from_float_histogram() {
let half_value = Action::Buy(1).ratio().unwrap() / 2.0;
let delta = if cfg!(feature = "value_type_f32") {
1e-7
} else {
1e-15
};
println!("{}", delta);
(0..=BOUND).for_each(|x| {
let xx = x as ValueType;
assert_eq!(Action::Buy(x), (half_value * 2. * xx).into());
assert_eq!(Action::Sell(x), (-half_value * 2. * xx).into());
if x > 0 {
let y = x - 1;
assert_eq!(
Action::Buy(y),
(half_value * 2. * xx - half_value - delta).into()
);
assert_eq!(
Action::Sell(y),
(-(half_value * 2. * xx - half_value - delta)).into()
);
}
});
assert_eq!(Action::Buy(1), (half_value * 3. - delta).into());
assert_eq!(Action::Buy(2), (half_value * 3.).into());
}
#[test]
fn test_action_from_i8() {
(i8::MIN..=i8::MAX).for_each(|s| {
let action = Action::from(s);
match s.cmp(&0) {
Ordering::Greater => assert_eq!(action, Action::BUY_ALL),
Ordering::Less => assert_eq!(action, Action::SELL_ALL),
Ordering::Equal => assert_eq!(action, Action::None),
}
});
}
#[test]
fn test_action_from_i8_optional() {
(i8::MIN..=i8::MAX).for_each(|s| {
let action = Action::from(Some(s));
match s.cmp(&0) {
Ordering::Greater => assert_eq!(action, Action::BUY_ALL),
Ordering::Less => assert_eq!(action, Action::SELL_ALL),
Ordering::Equal => assert_eq!(action, Action::None),
}
});
}
#[test]
fn test_action_neg() {
(0..=BOUND).for_each(|x| {
let s = Action::Buy(x);
let b = Action::Sell(x);
assert_eq!(s, -b);
assert_eq!(-s, b);
});
}
#[test]
#[allow(clippy::eq_op)]
fn test_action_eq() {
assert_eq!(Action::None, Action::None);
assert_ne!(Action::Buy(0), Action::None);
assert_ne!(Action::Sell(0), Action::None);
assert_eq!(Action::Buy(0), Action::Buy(0));
assert_eq!(Action::Sell(0), Action::Sell(0));
assert_eq!(Action::Buy(0), Action::Sell(0));
assert_eq!(Action::Sell(0), Action::Buy(0));
assert_ne!(Action::Sell(2), Action::Buy(5));
assert_ne!(Action::Buy(2), Action::Sell(5));
assert_ne!(Action::Buy(2), Action::Buy(5));
assert_eq!(Action::Buy(5), Action::Buy(5));
assert_ne!(Action::Sell(2), Action::Sell(5));
assert_eq!(Action::Sell(5), Action::Sell(5));
}
}
| sign | identifier_name |
action.rs | use crate::core::ValueType;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::fmt;
use std::ops::{Neg, Sub};
type SignalType = u8;
const BOUND: SignalType = SignalType::MAX;
const BOUND_FLOAT: f64 = BOUND as f64;
/// Action is basic type of Indicator's signals
///
/// It may be positive \(means *Buy* some amount\). It may be negative \(means *Sell* some amount\). Or there may be no signal at all.
///
/// You can convert `Action` to *analog* `i8` value using [`analog()`](Action::analog) method, where:
/// * `1` means *buy*;
/// * `-1` means *sell*;
/// * `0` means no signal.
///
/// You can convert `Action` to *digital* `Option<f64>` value using [`ratio()`](Action::ratio) method with internal value in range \[`-1.0`; `1.0`\], where:
/// * negative value means *sell* some portion;
/// * positive value means *buy* some potion;
/// * zero value means there is no distinct decision;
/// * [`None`](core::option::Option::None) means no signal.
#[derive(Clone, Copy, Eq, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum Action {
/// Buy signal
Buy(SignalType),
/// No signal
None,
/// Sell signal
Sell(SignalType),
}
impl Action {
/// Shortcut for *Buy All* signal
pub const BUY_ALL: Self = Self::Buy(BOUND);
/// Shortcut for *Sell All* signal
pub const SELL_ALL: Self = Self::Sell(BOUND);
/// Create instance from *analog* signal (which can be only `-1`, `0` or `1`)
///
/// Any positive number converts to `BUY_ALL`
///
/// Any negative number converts to `SELL_ALL`
///
/// Zero converts to None
#[must_use]
pub fn from_analog(value: i8) -> Self {
Self::from(value)
}
/// Converts value with the interval \[`-1.0`; `1.0`\]
#[must_use]
pub fn ratio(self) -> Option<ValueType> {
self.into()
}
/// Returns a sign (`1` or `-1`) of internal value if value exists and not zero.
///
/// Otherwise returns `0`.
#[must_use]
pub fn analog(self) -> i8 {
self.into()
}
/// Returns a sign of internal value if value exists
///
/// Otherwise returns None
#[must_use]
pub fn sign(self) -> Option<i8> {
self.into()
}
/// Return an internal representation of the value if signal exists or None if it doesn't.
#[must_use]
pub const fn value(self) -> Option<SignalType> {
match self {
Self::None => None,
Self::Buy(v) | Self::Sell(v) => Some(v),
}
}
/// Checks if there is no signal
#[must_use]
pub const fn is_none(self) -> bool {
matches!(self, Self::None)
}
/// Checks if there is signal
#[must_use]
pub const fn is_some(self) -> bool {
!self.is_none()
}
}
impl PartialEq for Action {
fn eq(&self, other: &Self) -> bool {
match (*self, *other) {
(Self::None, Self::None)
| (Self::Buy(0), Self::Sell(0))
| (Self::Sell(0), Self::Buy(0)) => true,
(Self::Buy(a), Self::Buy(b)) | (Self::Sell(a), Self::Sell(b)) => a == b,
_ => false,
}
}
}
impl Default for Action {
fn default() -> Self {
Self::None
}
}
impl From<bool> for Action {
fn from(value: bool) -> Self {
if value {
Self::BUY_ALL
} else {
Self::None
}
}
}
impl From<i8> for Action {
fn from(value: i8) -> Self {
match value {
0 => Self::None,
v => {
if v > 0 {
Self::BUY_ALL
} else {
Self::SELL_ALL
}
}
}
}
}
impl From<Action> for i8 {
fn from(value: Action) -> Self {
match value {
Action::Buy(value) => (value > 0) as Self,
Action::None => 0,
Action::Sell(value) => -((value > 0) as Self),
}
}
}
| Some(v) => v.into(),
}
}
}
impl From<Action> for Option<i8> {
fn from(value: Action) -> Self {
match value {
Action::None => None,
_ => Some(value.into()),
}
}
}
#[inline]
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
fn from_normalized_f64_to_bounded(value: f64) -> SignalType {
debug_assert!((0.0..=1.0).contains(&value));
(value * BOUND_FLOAT).round() as SignalType
}
impl From<f64> for Action {
fn from(v: f64) -> Self {
if v.is_nan() {
return Self::None;
}
let normalized = v.max(-1.0).min(1.0);
let value = from_normalized_f64_to_bounded(normalized.abs());
if normalized.is_sign_negative() {
if value == BOUND {
Self::SELL_ALL
} else {
Self::Sell(value)
}
} else if value == BOUND {
Self::BUY_ALL
} else {
Self::Buy(value)
}
}
}
impl From<Option<f64>> for Action {
fn from(value: Option<f64>) -> Self {
match value {
None => Self::None,
Some(value) => value.into(),
}
}
}
impl From<f32> for Action {
#[allow(clippy::cast_possible_truncation)]
fn from(v: f32) -> Self {
Self::from(v as f64)
}
}
impl From<Option<f32>> for Action {
fn from(value: Option<f32>) -> Self {
match value {
None => Self::None,
Some(value) => value.into(),
}
}
}
impl From<Action> for Option<ValueType> {
fn from(value: Action) -> Self {
match value {
Action::None => None,
Action::Buy(value) => Some((value as ValueType) / (BOUND as ValueType)),
Action::Sell(value) => Some(-(value as ValueType) / (BOUND as ValueType)),
}
}
}
impl<T: Into<Self> + Copy> From<&T> for Action {
fn from(value: &T) -> Self {
(*value).into()
}
}
// impl<T: Borrow<Action>> From<T> for i8 {
// fn from(value: T) -> Self {
// //value.
// }
// }
impl Neg for Action {
type Output = Self;
fn neg(self) -> Self::Output {
match self {
Self::None => Self::None,
Self::Buy(value) => Self::Sell(value),
Self::Sell(value) => Self::Buy(value),
}
}
}
impl Sub for Action {
type Output = Self;
fn sub(self, rhs: Self) -> Self::Output {
match (self, rhs) {
(Self::None, Self::None) => Self::None,
(s, Self::None) => s,
(Self::None, s) => -s,
(Self::Buy(v1), Self::Buy(v2)) => {
if v1 >= v2 {
Self::Buy(v1 - v2)
} else {
Self::Sell(v2 - v1)
}
}
(Self::Sell(v1), Self::Sell(v2)) => {
if v1 >= v2 {
Self::Sell(v1 - v2)
} else {
Self::Buy(v2 - v1)
}
}
(s1, s2) => s1 - (-s2),
}
}
}
impl fmt::Debug for Action {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None => write!(f, "N"),
Self::Buy(value) => write!(f, "+{}", value),
Self::Sell(value) => write!(f, "-{}", value),
}
}
}
impl fmt::Display for Action {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None => write!(f, "N"),
Self::Buy(_) => write!(f, "+{:.2}", self.ratio().unwrap()),
Self::Sell(_) => write!(f, "-{:.2}", self.ratio().unwrap().abs()),
}
}
}
#[cfg(test)]
mod tests {
use super::{Action, BOUND};
use crate::core::ValueType;
use std::cmp::Ordering;
#[test]
fn test_action_ratio() {
assert_eq!(Some(1.0), Action::Buy(BOUND).ratio());
assert_eq!(Some(-1.0), Action::Sell(BOUND).ratio());
assert_eq!(Some(0.0), Action::Sell(0).ratio());
assert_eq!(Some(0.0), Action::Buy(0).ratio());
assert_eq!(Action::Sell(0), Action::Buy(0));
}
#[test]
fn test_action_from_float() {
let half_bound = if BOUND % 2 == 1 {
BOUND / 2 + 1
} else {
BOUND / 2
};
// f64
assert_eq!(Action::from(0.0_f64), Action::Buy(0));
assert_eq!(Action::from(-0.5_f64), Action::Sell(half_bound));
assert_eq!(Action::from(1.0_f64), Action::BUY_ALL);
assert_eq!(Action::from(-1.0_f64), Action::SELL_ALL);
assert_eq!(Action::from(2.0_f64), Action::BUY_ALL);
assert_eq!(Action::from(-2.0_f64), Action::SELL_ALL);
// f32
assert_eq!(Action::from(0.0_f32), Action::Buy(0));
assert_eq!(Action::from(-0.5_f32), Action::Sell(half_bound));
assert_eq!(Action::from(1.0_f32), Action::BUY_ALL);
assert_eq!(Action::from(-1.0_f32), Action::SELL_ALL);
assert_eq!(Action::from(2.0_f32), Action::BUY_ALL);
assert_eq!(Action::from(-2.0_f32), Action::SELL_ALL);
// other
assert_eq!(Action::from(1. / BOUND as ValueType), Action::Buy(1));
assert_eq!(Action::from(-1. / BOUND as ValueType), Action::Sell(1));
assert_eq!(Action::from(-2. / BOUND as ValueType), Action::Sell(2));
}
#[test]
fn test_action_from_into() {
(1..=BOUND).for_each(|x| {
let action = if x < BOUND {
Action::Buy(x)
} else {
Action::BUY_ALL
};
let ratio = action.ratio().unwrap();
let action2: Action = ratio.into();
assert!(ratio > 0.);
assert_eq!(
action,
ratio.into(),
"at index {} with action {:?} ratio {}, action#2 {:?}",
x,
action,
ratio,
action2,
);
let action = if x < BOUND {
Action::Sell(x)
} else {
Action::SELL_ALL
};
let ratio = action.ratio().unwrap();
let action2: Action = ratio.into();
assert!(ratio < 0.);
assert_eq!(
action,
ratio.into(),
"at index {} with action {:?} ratio {}, action#2 {:?}",
x,
action,
ratio,
action2,
);
});
}
#[test]
fn test_action_from_float_histogram() {
let half_value = Action::Buy(1).ratio().unwrap() / 2.0;
let delta = if cfg!(feature = "value_type_f32") {
1e-7
} else {
1e-15
};
println!("{}", delta);
(0..=BOUND).for_each(|x| {
let xx = x as ValueType;
assert_eq!(Action::Buy(x), (half_value * 2. * xx).into());
assert_eq!(Action::Sell(x), (-half_value * 2. * xx).into());
if x > 0 {
let y = x - 1;
assert_eq!(
Action::Buy(y),
(half_value * 2. * xx - half_value - delta).into()
);
assert_eq!(
Action::Sell(y),
(-(half_value * 2. * xx - half_value - delta)).into()
);
}
});
assert_eq!(Action::Buy(1), (half_value * 3. - delta).into());
assert_eq!(Action::Buy(2), (half_value * 3.).into());
}
#[test]
fn test_action_from_i8() {
(i8::MIN..=i8::MAX).for_each(|s| {
let action = Action::from(s);
match s.cmp(&0) {
Ordering::Greater => assert_eq!(action, Action::BUY_ALL),
Ordering::Less => assert_eq!(action, Action::SELL_ALL),
Ordering::Equal => assert_eq!(action, Action::None),
}
});
}
#[test]
fn test_action_from_i8_optional() {
(i8::MIN..=i8::MAX).for_each(|s| {
let action = Action::from(Some(s));
match s.cmp(&0) {
Ordering::Greater => assert_eq!(action, Action::BUY_ALL),
Ordering::Less => assert_eq!(action, Action::SELL_ALL),
Ordering::Equal => assert_eq!(action, Action::None),
}
});
}
#[test]
fn test_action_neg() {
(0..=BOUND).for_each(|x| {
let s = Action::Buy(x);
let b = Action::Sell(x);
assert_eq!(s, -b);
assert_eq!(-s, b);
});
}
#[test]
#[allow(clippy::eq_op)]
fn test_action_eq() {
assert_eq!(Action::None, Action::None);
assert_ne!(Action::Buy(0), Action::None);
assert_ne!(Action::Sell(0), Action::None);
assert_eq!(Action::Buy(0), Action::Buy(0));
assert_eq!(Action::Sell(0), Action::Sell(0));
assert_eq!(Action::Buy(0), Action::Sell(0));
assert_eq!(Action::Sell(0), Action::Buy(0));
assert_ne!(Action::Sell(2), Action::Buy(5));
assert_ne!(Action::Buy(2), Action::Sell(5));
assert_ne!(Action::Buy(2), Action::Buy(5));
assert_eq!(Action::Buy(5), Action::Buy(5));
assert_ne!(Action::Sell(2), Action::Sell(5));
assert_eq!(Action::Sell(5), Action::Sell(5));
}
} | impl From<Option<i8>> for Action {
fn from(value: Option<i8>) -> Self {
match value {
None => Self::None, | random_line_split |
action.rs | use crate::core::ValueType;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use std::fmt;
use std::ops::{Neg, Sub};
type SignalType = u8;
const BOUND: SignalType = SignalType::MAX;
const BOUND_FLOAT: f64 = BOUND as f64;
/// Action is basic type of Indicator's signals
///
/// It may be positive \(means *Buy* some amount\). It may be negative \(means *Sell* some amount\). Or there may be no signal at all.
///
/// You can convert `Action` to *analog* `i8` value using [`analog()`](Action::analog) method, where:
/// * `1` means *buy*;
/// * `-1` means *sell*;
/// * `0` means no signal.
///
/// You can convert `Action` to *digital* `Option<f64>` value using [`ratio()`](Action::ratio) method with internal value in range \[`-1.0`; `1.0`\], where:
/// * negative value means *sell* some portion;
/// * positive value means *buy* some potion;
/// * zero value means there is no distinct decision;
/// * [`None`](core::option::Option::None) means no signal.
// NOTE(review): `Ord`/`PartialOrd` are derived, so ordering follows variant
// declaration order (any `Buy` < `None` < any `Sell`), while `PartialEq` is
// implemented manually below (`Buy(0) == Sell(0)`). Confirm the derived
// `Eq`/`Ord` are meant to coexist with that custom equality.
#[derive(Clone, Copy, Eq, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum Action {
/// Buy signal
Buy(SignalType),
/// No signal
None,
/// Sell signal
Sell(SignalType),
}
impl Action {
/// Shortcut for *Buy All* signal
pub const BUY_ALL: Self = Self::Buy(BOUND);
/// Shortcut for *Sell All* signal
pub const SELL_ALL: Self = Self::Sell(BOUND);
/// Create instance from *analog* signal (which can be only `-1`, `0` or `1`)
///
/// Any positive number converts to `BUY_ALL`
///
/// Any negative number converts to `SELL_ALL`
///
/// Zero converts to None
#[must_use]
pub fn from_analog(value: i8) -> Self {
Self::from(value)
}
/// Converts value with the interval \[`-1.0`; `1.0`\]
///
/// Delegates to the `From<Action> for Option<ValueType>` conversion:
/// the internal amount divided by `BOUND`, negated for sells.
#[must_use]
pub fn ratio(self) -> Option<ValueType> {
self.into()
}
/// Returns a sign (`1` or `-1`) of internal value if value exists and not zero.
///
/// Otherwise returns `0`.
#[must_use]
pub fn analog(self) -> i8 {
self.into()
}
/// Returns a sign of internal value if value exists
///
/// Otherwise returns None
#[must_use]
pub fn sign(self) -> Option<i8> {
self.into()
}
/// Return an internal representation of the value if signal exists or None if it doesn't.
#[must_use]
pub const fn value(self) -> Option<SignalType> {
match self {
Self::None => None,
Self::Buy(v) | Self::Sell(v) => Some(v),
}
}
/// Checks if there is no signal
#[must_use]
pub const fn is_none(self) -> bool {
matches!(self, Self::None)
}
/// Checks if there is signal
#[must_use]
pub const fn is_some(self) -> bool {
!self.is_none()
}
}
// Custom equality: a zero-amount Buy and a zero-amount Sell are treated as
// interchangeable, so `Buy(0) == Sell(0)` even though the variants differ.
// `None` is never equal to either zero-amount signal.
impl PartialEq for Action {
fn eq(&self, other: &Self) -> bool {
match (*self, *other) {
// Identical no-signal values, or zero amounts of opposite sides.
(Self::None, Self::None)
| (Self::Buy(0), Self::Sell(0))
| (Self::Sell(0), Self::Buy(0)) => true,
// Same side: equal iff the amounts match.
(Self::Buy(a), Self::Buy(b)) | (Self::Sell(a), Self::Sell(b)) => a == b,
_ => false,
}
}
}
impl Default for Action {
fn default() -> Self {
Self::None
}
}
/// Boolean conversion: `true` is a full *buy* signal, `false` is no signal.
impl From<bool> for Action {
    fn from(value: bool) -> Self {
        match value {
            true => Self::BUY_ALL,
            false => Self::None,
        }
    }
}
/// Converts an *analog* value into an `Action`.
///
/// Zero maps to [`Action::None`]; any positive value to [`Action::BUY_ALL`];
/// any negative value to [`Action::SELL_ALL`].
impl From<i8> for Action {
    fn from(value: i8) -> Self {
        // `signum` collapses the input to -1, 0 or 1, replacing the
        // original nested match-plus-if with a single flat match.
        match value.signum() {
            0 => Self::None,
            1 => Self::BUY_ALL,
            _ => Self::SELL_ALL,
        }
    }
}
// Analog conversion: +1 for a non-zero Buy, -1 for a non-zero Sell, else 0.
impl From<Action> for i8 {
fn from(value: Action) -> Self {
match value {
// `(value > 0) as i8` yields 1 for a positive amount, 0 for Buy(0).
Action::Buy(value) => (value > 0) as Self,
Action::None => 0,
Action::Sell(value) => -((value > 0) as Self),
}
}
}
impl From<Option<i8>> for Action {
fn from(value: Option<i8>) -> Self |
}
impl From<Action> for Option<i8> {
fn from(value: Action) -> Self {
match value {
Action::None => None,
_ => Some(value.into()),
}
}
}
// Scales a normalized magnitude in [0.0; 1.0] onto the integer range
// [0; BOUND], rounding to the nearest step. Casts are safe after the
// debug assertion, hence the clippy suppressions.
#[inline]
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_sign_loss)]
fn from_normalized_f64_to_bounded(value: f64) -> SignalType {
debug_assert!((0.0..=1.0).contains(&value));
(value * BOUND_FLOAT).round() as SignalType
}
/// Converts a ratio into an `Action`.
///
/// `NaN` maps to [`Action::None`]; other values are clamped into
/// \[`-1.0`; `1.0`\], the magnitude is scaled onto `[0; BOUND]`, and the
/// sign selects the side.
impl From<f64> for Action {
    fn from(v: f64) -> Self {
        if v.is_nan() {
            return Self::None;
        }
        // Clamp into the valid ratio interval before scaling.
        let normalized = v.max(-1.0).min(1.0);
        let value = from_normalized_f64_to_bounded(normalized.abs());
        // The original special-cased `value == BOUND` to return the
        // BUY_ALL/SELL_ALL shortcuts, but those constants are exactly
        // `Buy(BOUND)`/`Sell(BOUND)`, so the extra branches were redundant.
        if normalized.is_sign_negative() {
            Self::Sell(value)
        } else {
            Self::Buy(value)
        }
    }
}
/// Lifts the `f64` conversion over `Option`: absent input means no signal.
impl From<Option<f64>> for Action {
    fn from(value: Option<f64>) -> Self {
        value.map_or(Self::None, Self::from)
    }
}
/// Converts an `f32` ratio by widening to `f64` first.
impl From<f32> for Action {
    fn from(v: f32) -> Self {
        // `f32 -> f64` is lossless, so use the checked `From` conversion
        // instead of an `as` cast (which needed a clippy suppression).
        Self::from(f64::from(v))
    }
}
/// Lifts the `f32` conversion over `Option`: absent input means no signal.
impl From<Option<f32>> for Action {
    fn from(value: Option<f32>) -> Self {
        value.map_or(Self::None, Self::from)
    }
}
// Ratio conversion: maps the internal amount onto [-1.0; 1.0] by dividing
// by BOUND; Buy is positive, Sell is negative, None yields no value.
impl From<Action> for Option<ValueType> {
fn from(value: Action) -> Self {
match value {
Action::None => None,
Action::Buy(value) => Some((value as ValueType) / (BOUND as ValueType)),
Action::Sell(value) => Some(-(value as ValueType) / (BOUND as ValueType)),
}
}
}
// Blanket conversion from references: any copyable value convertible into
// an `Action` also converts from a shared reference to it.
impl<T: Into<Self> + Copy> From<&T> for Action {
fn from(value: &T) -> Self {
(*value).into()
}
}
// impl<T: Borrow<Action>> From<T> for i8 {
// fn from(value: T) -> Self {
// //value.
// }
// }
/// Negation swaps the side of the signal while keeping the amount;
/// no-signal stays no-signal.
impl Neg for Action {
    type Output = Self;

    fn neg(self) -> Self::Output {
        match self {
            Self::Buy(amount) => Self::Sell(amount),
            Self::Sell(amount) => Self::Buy(amount),
            Self::None => Self::None,
        }
    }
}
// Subtraction of signals: for same-side operands the amounts are
// subtracted, flipping side when the result would underflow
// (e.g. Buy(5) - Buy(2) = Buy(3), Buy(2) - Buy(5) = Sell(3)).
impl Sub for Action {
type Output = Self;
fn sub(self, rhs: Self) -> Self::Output {
match (self, rhs) {
(Self::None, Self::None) => Self::None,
// Subtracting no-signal leaves the signal unchanged.
(s, Self::None) => s,
// No-signal minus a signal is its negation.
(Self::None, s) => -s,
(Self::Buy(v1), Self::Buy(v2)) => {
if v1 >= v2 {
Self::Buy(v1 - v2)
} else {
Self::Sell(v2 - v1)
}
}
(Self::Sell(v1), Self::Sell(v2)) => {
if v1 >= v2 {
Self::Sell(v1 - v2)
} else {
Self::Buy(v2 - v1)
}
}
// Mixed sides recurse with the right-hand side negated, so
// Buy(a) - Sell(b) evaluates as Buy(a) - Buy(b) = a - b.
// NOTE(review): arithmetically a - (-b) would be a + b; confirm
// this reduction is the intended semantics (it also avoids any
// overflow past BOUND). The tests below do not cover mixed Sub.
(s1, s2) => s1 - (-s2),
}
}
}
/// Compact debug form: `"N"`, `"+<amount>"` or `"-<amount>"`, showing the
/// raw internal amount rather than the ratio.
impl fmt::Debug for Action {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            Self::None => f.write_str("N"),
            Self::Buy(amount) => write!(f, "+{}", amount),
            Self::Sell(amount) => write!(f, "-{}", amount),
        }
    }
}
// Human-readable form: sign plus the ratio magnitude with two decimals,
// e.g. "+0.50" or "-0.25"; "N" for no signal.
impl fmt::Display for Action {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None => write!(f, "N"),
// `unwrap` is safe: `ratio()` returns Some for Buy/Sell variants.
Self::Buy(_) => write!(f, "+{:.2}", self.ratio().unwrap()),
Self::Sell(_) => write!(f, "-{:.2}", self.ratio().unwrap().abs()),
}
}
}
#[cfg(test)]
mod tests {
use super::{Action, BOUND};
use crate::core::ValueType;
use std::cmp::Ordering;
#[test]
fn test_action_ratio() {
assert_eq!(Some(1.0), Action::Buy(BOUND).ratio());
assert_eq!(Some(-1.0), Action::Sell(BOUND).ratio());
assert_eq!(Some(0.0), Action::Sell(0).ratio());
assert_eq!(Some(0.0), Action::Buy(0).ratio());
assert_eq!(Action::Sell(0), Action::Buy(0));
}
#[test]
fn test_action_from_float() {
let half_bound = if BOUND % 2 == 1 {
BOUND / 2 + 1
} else {
BOUND / 2
};
// f64
assert_eq!(Action::from(0.0_f64), Action::Buy(0));
assert_eq!(Action::from(-0.5_f64), Action::Sell(half_bound));
assert_eq!(Action::from(1.0_f64), Action::BUY_ALL);
assert_eq!(Action::from(-1.0_f64), Action::SELL_ALL);
assert_eq!(Action::from(2.0_f64), Action::BUY_ALL);
assert_eq!(Action::from(-2.0_f64), Action::SELL_ALL);
// f32
assert_eq!(Action::from(0.0_f32), Action::Buy(0));
assert_eq!(Action::from(-0.5_f32), Action::Sell(half_bound));
assert_eq!(Action::from(1.0_f32), Action::BUY_ALL);
assert_eq!(Action::from(-1.0_f32), Action::SELL_ALL);
assert_eq!(Action::from(2.0_f32), Action::BUY_ALL);
assert_eq!(Action::from(-2.0_f32), Action::SELL_ALL);
// other
assert_eq!(Action::from(1. / BOUND as ValueType), Action::Buy(1));
assert_eq!(Action::from(-1. / BOUND as ValueType), Action::Sell(1));
assert_eq!(Action::from(-2. / BOUND as ValueType), Action::Sell(2));
}
#[test]
fn test_action_from_into() {
(1..=BOUND).for_each(|x| {
let action = if x < BOUND {
Action::Buy(x)
} else {
Action::BUY_ALL
};
let ratio = action.ratio().unwrap();
let action2: Action = ratio.into();
assert!(ratio > 0.);
assert_eq!(
action,
ratio.into(),
"at index {} with action {:?} ratio {}, action#2 {:?}",
x,
action,
ratio,
action2,
);
let action = if x < BOUND {
Action::Sell(x)
} else {
Action::SELL_ALL
};
let ratio = action.ratio().unwrap();
let action2: Action = ratio.into();
assert!(ratio < 0.);
assert_eq!(
action,
ratio.into(),
"at index {} with action {:?} ratio {}, action#2 {:?}",
x,
action,
ratio,
action2,
);
});
}
#[test]
fn test_action_from_float_histogram() {
let half_value = Action::Buy(1).ratio().unwrap() / 2.0;
let delta = if cfg!(feature = "value_type_f32") {
1e-7
} else {
1e-15
};
println!("{}", delta);
(0..=BOUND).for_each(|x| {
let xx = x as ValueType;
assert_eq!(Action::Buy(x), (half_value * 2. * xx).into());
assert_eq!(Action::Sell(x), (-half_value * 2. * xx).into());
if x > 0 {
let y = x - 1;
assert_eq!(
Action::Buy(y),
(half_value * 2. * xx - half_value - delta).into()
);
assert_eq!(
Action::Sell(y),
(-(half_value * 2. * xx - half_value - delta)).into()
);
}
});
assert_eq!(Action::Buy(1), (half_value * 3. - delta).into());
assert_eq!(Action::Buy(2), (half_value * 3.).into());
}
#[test]
fn test_action_from_i8() {
(i8::MIN..=i8::MAX).for_each(|s| {
let action = Action::from(s);
match s.cmp(&0) {
Ordering::Greater => assert_eq!(action, Action::BUY_ALL),
Ordering::Less => assert_eq!(action, Action::SELL_ALL),
Ordering::Equal => assert_eq!(action, Action::None),
}
});
}
#[test]
fn test_action_from_i8_optional() {
(i8::MIN..=i8::MAX).for_each(|s| {
let action = Action::from(Some(s));
match s.cmp(&0) {
Ordering::Greater => assert_eq!(action, Action::BUY_ALL),
Ordering::Less => assert_eq!(action, Action::SELL_ALL),
Ordering::Equal => assert_eq!(action, Action::None),
}
});
}
#[test]
fn test_action_neg() {
(0..=BOUND).for_each(|x| {
let s = Action::Buy(x);
let b = Action::Sell(x);
assert_eq!(s, -b);
assert_eq!(-s, b);
});
}
#[test]
#[allow(clippy::eq_op)]
fn test_action_eq() {
assert_eq!(Action::None, Action::None);
assert_ne!(Action::Buy(0), Action::None);
assert_ne!(Action::Sell(0), Action::None);
assert_eq!(Action::Buy(0), Action::Buy(0));
assert_eq!(Action::Sell(0), Action::Sell(0));
assert_eq!(Action::Buy(0), Action::Sell(0));
assert_eq!(Action::Sell(0), Action::Buy(0));
assert_ne!(Action::Sell(2), Action::Buy(5));
assert_ne!(Action::Buy(2), Action::Sell(5));
assert_ne!(Action::Buy(2), Action::Buy(5));
assert_eq!(Action::Buy(5), Action::Buy(5));
assert_ne!(Action::Sell(2), Action::Sell(5));
assert_eq!(Action::Sell(5), Action::Sell(5));
}
}
| {
match value {
None => Self::None,
Some(v) => v.into(),
}
} | identifier_body |
util.go | package routes
import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"html/template"
	"math"
	"net/http"
	"net/url"
	"os"
	"runtime/debug"
	"strconv"
	"strings"
	"time"

	"doubleboiler/config"
	"doubleboiler/copy"
	"doubleboiler/flashes"
	"doubleboiler/heroicons"
	"doubleboiler/logger"
	"doubleboiler/models"
	"doubleboiler/util"

	"github.com/davidbanham/english_conjoin"
	"github.com/davidbanham/human_duration"
	kewpie "github.com/davidbanham/kewpie_go/v3"
	"github.com/davidbanham/notifications"
	uuid "github.com/satori/go.uuid"
)
// Tmpl exports the compiled templates
var Tmpl *template.Template
func init() {
templateFuncMap := template.FuncMap{
"hash": calcHash,
"despace": func(s string) string {
return strings.Replace(s, " ", "_", -1)
},
"ToLower": strings.ToLower,
"humanTime": func(t time.Time) string {
loc, err := time.LoadLocation("Australia/Sydney")
if err != nil {
loc, _ = time.LoadLocation("UTC")
}
return t.In(loc).Format(time.RFC822)
},
"humanDate": func(t time.Time) string {
loc, err := time.LoadLocation("Australia/Sydney")
if err != nil {
loc, _ = time.LoadLocation("UTC")
}
return t.In(loc).Format("02 Jan 2006")
},
"humanDayDate": func(t time.Time) string {
loc, err := time.LoadLocation("Australia/Sydney")
if err != nil {
loc, _ = time.LoadLocation("UTC")
}
return t.In(loc).Format("Mon 02 Jan 2006")
},
"isoTime": func(t time.Time) string {
return t.Format(time.RFC3339)
},
"stringToTime": func(d string) time.Time {
t, _ := time.Parse(time.RFC3339, d)
return t
},
"weekdayOffset": func(s string) int {
t, _ := time.Parse(time.RFC3339, s)
return int(t.Weekday())
},
"diff": func(a, b int) int {
return a - b
},
"breakMonths": func(nights []string) [][]string {
monthNights := [][]string{}
target := 0
monthNights = append(monthNights, []string{})
for i, n := range nights {
night, _ := time.Parse(time.RFC3339, n)
if i != 0 {
lastNight, _ := time.Parse(time.RFC3339, nights[i-1])
if night.Month() != lastNight.Month() {
monthNights = append(monthNights, []string{})
target += 1
}
}
monthNights[target] = append(monthNights[target], night.Format(time.RFC3339))
}
return monthNights
},
"dollarise": func(in int) string {
return util.Dollarise(in)
},
"dollarise_float": func(in float32) string {
return util.Dollarise(int(in))
},
"dollarise_int64": func(in int64) string {
return util.Dollarise(int(in))
},
"cents_to_dollars_int": func(in int) float64 {
return float64(in) / 100
},
"cents_to_dollars_int64": func(in int64) float64 {
return float64(in) / 100
},
"cents_to_dollars": func(in float32) float32 {
return in / 100
},
"csv": func(in []string) string {
return strings.Join(in, ",")
},
"ssv": func(in []string) string {
return strings.Join(in, "; ")
},
"dateonly": func(in time.Time) string {
return in.Format("2006-01-02")
},
"datetime": func(in time.Time) string {
return in.Format("Mon Jan 2 15:04:05 -0700 MST 2006")
},
"breakLines": func(in string) []string {
return strings.Split(in, "\n")
},
"breakOnAnd": func(in string) []string {
return strings.Split(in, " AND ")
},
"humanDuration": human_duration.String,
"nextPeriodStart": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return start.Add(dur)
},
"nextPeriodEnd": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return end.Add(dur)
},
"prevPeriodStart": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return start.Add(-dur)
},
"prevPeriodEnd": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return end.Add(-dur)
},
"contains": func(str []string, target string) bool {
for _, s := range str {
if s == target {
return true
}
}
return false
},
"unix_to_time": func(in int64) time.Time {
return time.Unix(in, 0)
},
"unrealDate": func(d time.Time) bool {
tooLong := time.Date(1950, time.January, 0, 0, 0, 0, 0, time.Local)
tooLate := time.Date(9000, time.January, 0, 0, 0, 0, 0, time.Local)
if d.Before(tooLong) {
return true
}
if d.After(tooLate) {
return true
}
return false
},
"add": func(i, j int) int {
return i + j
},
"firstFiveChars": util.FirstFiveChars,
"toUpper": strings.ToUpper,
"randID": func() string {
return util.FirstFiveChars(uuid.NewV4().String())
},
"auditActions": func(abbrev string) string {
mapping := map[string]string{
"I": "Created",
"U": "Updated",
"D": "Deleted", | }
return mapping[abbrev]
},
"loggedIn": isLoggedIn,
"userEmail": func(ctx context.Context) string {
return ctx.Value("user").(models.User).Email
},
"user": func(ctx context.Context) models.User {
return ctx.Value("user").(models.User)
},
"orgsFromContext": func(ctx context.Context) models.Organisations {
return orgsFromContext(ctx)
},
"flashes": flashesFromContext,
"activeOrgFromContext": func(ctx context.Context) models.Organisation {
return activeOrgFromContext(ctx)
},
"can": func(ctx context.Context, role string) bool {
org := activeOrgFromContext(ctx)
return can(ctx, org, role)
},
"csrf": func(ctx context.Context) string {
unconv := ctx.Value("user")
if unconv == nil {
return ""
}
user := unconv.(models.User)
return util.CalcToken(user.ID, "")
},
"isAppAdmin": isAppAdmin,
"chrome": func(ctx context.Context) bool {
if ctx == nil {
return true
}
val := ctx.Value("chrome")
if val == nil {
return true
}
return val.(bool)
},
"percentage": func(total, percentage int) int {
return int(float64(total) * float64(percentage) / 100)
},
"percentify": func(in float32) string {
return fmt.Sprintf("%.2f", in) + "%"
},
"thisYear": func() int {
return time.Now().Year()
},
"mod": func(i, j int) bool { return i%j == 0 },
"numDays": func(d time.Duration) int { return int(d / (24 * time.Hour)) },
"isProd": func() bool { return config.STAGE == "production" },
"isLocal": func() bool { return config.LOCAL },
"now": func() string {
return time.Now().Format("2006-01-02")
},
"nextWeekStart": func() string {
return util.NextDay(time.Now(), time.Monday).Format("2006-01-02")
},
"conjoinAnd": func(in []string) string {
return english_conjoin.ConjoinAnd(in)
},
"logoLink": func(ctx context.Context) string {
if !isLoggedIn(ctx) {
return "/"
}
unconv := ctx.Value("url")
if unconv == nil {
return "/"
}
url := unconv.(*url.URL)
if strings.Contains(url.Path, "/welcome") {
return "/"
}
return "/welcome"
},
"dict": func(values ...interface{}) (map[string]interface{}, error) {
if len(values)%2 != 0 {
return nil, errors.New("invalid dict call")
}
dict := make(map[string]interface{}, len(values)/2)
for i := 0; i < len(values); i += 2 {
key, ok := values[i].(string)
if !ok {
return nil, errors.New("dict keys must be strings")
}
dict[key] = values[i+1]
}
return dict, nil
},
"crumbs": func(values ...string) ([]Crumb, error) {
if len(values)%2 != 0 {
return nil, errors.New("invalid dict call")
}
crumbs := []Crumb{}
for i := 0; i < len(values); i += 2 {
crumbs = append(crumbs, Crumb{
Title: values[i],
Path: values[i+1],
})
}
return crumbs, nil
},
"noescape": func(str string) template.HTML {
return template.HTML(str)
},
"urlescape": func(input string) string {
return url.QueryEscape(input)
},
"heroIcon": func(name string) string {
return heroicons.Icons[name]
},
"uniq": func() string {
return uuid.NewV4().String()
},
"queryString": func(vals url.Values) template.URL {
return "?" + template.URL(vals.Encode())
},
"searchableEntities": func(ctx context.Context) []models.Searchable {
ret := []models.Searchable{}
org := activeOrgFromContext(ctx)
for _, entity := range models.Searchables {
if can(ctx, org, entity.RequiredRole.Name) {
ret = append(ret, entity)
}
}
return ret
},
"isOrgSettingsPage": func(ctx context.Context) bool {
currentURL := ctx.Value("url")
if v, ok := currentURL.(*url.URL); ok {
parts := strings.Split(v.Path, "/")
if len(parts) > 1 {
if parts[1] == "organisations" {
return true
}
}
}
return false
},
"selectorSafe": func(in string) string {
return strings.ReplaceAll(in, ".", "-")
},
}
Tmpl = template.Must(template.New("main").Funcs(templateFuncMap).ParseGlob(getPath() + "/*"))
}
// getPath locates the views directory relative to the working directory,
// probing the locations used by the server binary and by tests at various
// nesting depths. Returns "" when none exists.
//
// Uses os.Stat for the probe: the original called os.Open on each candidate
// and never closed the returned *os.File, leaking a descriptor per call.
func getPath() string {
	// Return strings (including trailing slashes) preserved exactly for
	// callers that concatenate glob patterns.
	candidates := []string{"../views/", "./views/", "../../../views", "../../views"}
	for _, p := range candidates {
		if _, err := os.Stat(p); err == nil {
			return p
		}
	}
	return ""
}
// checkFormInput verifies that every required form field is present and
// non-empty. On the first failure it sends a 400 response via errRes and
// returns false; otherwise it returns true.
func checkFormInput(required []string, form url.Values, w http.ResponseWriter, r *http.Request) bool {
	for _, field := range required {
		values, ok := form[field]
		if !ok || len(values) == 0 || values[0] == "" {
			errRes(w, r, 400, "Invalid "+field, nil)
			return false
		}
	}
	return true
}
// errorPageData carries the data rendered by the error.html template.
type errorPageData struct {
basePageData
Message string
Context context.Context
}
// Crumb is a single breadcrumb entry: a display title plus its link path.
type Crumb struct {
Title string
Path string
}
// errRes writes an error response (JSON or HTML depending on the Accept
// header), reports and logs the error, and first rolls back any database
// transaction stored on the request context under the "tx" key.
func errRes(w http.ResponseWriter, r *http.Request, code int, message string, err error) {
sendErr := func() {
// A closed HTTP/2 stream means the client is gone; nothing to send.
if err != nil && err.Error() == "http2: stream closed" {
return
}
reportableErr := err
if err == nil {
reportableErr = errors.New(strconv.Itoa(code) + " " + message)
}
// NOTE(review): reportableErr is only sent to ReportError when err is
// non-nil, so the synthesized error above is never reported -- confirm
// whether that is intended.
if err != nil {
config.ReportError(reportableErr)
}
// Append any client-safe detail the error chooses to expose.
if clientSafe, addendum := isClientSafe(err); clientSafe {
message += " " + addendum
}
logger.Log(r.Context(), logger.Warning, fmt.Sprintf("Sending Error Response: %+v, %+v, %+v, %+v", code, message, r.URL.String(), err))
if code == 500 {
logger.Log(r.Context(), logger.Error, err)
logger.Log(r.Context(), logger.Debug, string(debug.Stack()))
}
w.WriteHeader(code)
// JSON clients get a bare error object; everyone else gets the template.
if r.Header.Get("Accept") == "application/json" {
w.Write([]byte(fmt.Sprintf(`{"error": "%s"}`, message)))
return
}
ohshit := Tmpl.ExecuteTemplate(w, "error.html", errorPageData{
Message: message,
Context: r.Context(),
})
if ohshit != nil {
w.Write([]byte("Error rendering the error template. Oh dear."))
return
}
}
// Roll back an in-flight DB transaction (if any) before responding.
tx := r.Context().Value("tx")
switch v := tx.(type) {
case *sql.Tx:
rollbackErr := v.Rollback()
if rollbackErr != nil {
logger.Log(r.Context(), logger.Error, fmt.Sprintf("Error rolling back tx: %+v", rollbackErr))
}
default:
//fmt.Printf("DEBUG no transaction on error\n")
}
sendErr()
}
func shortDur(d time.Duration) string {
s := d.String()
if strings.HasSuffix(s, "m0s") {
s = s[:len(s)-2]
}
if strings.HasSuffix(s, "h0m") {
s = s[:len(s)-2]
}
return s
}
// redirToDefaultOrg redirects to organisation creation when the user has no
// organisations; otherwise it re-issues the current URL with the first
// organisation's id set as the organisationid query parameter.
func redirToDefaultOrg(w http.ResponseWriter, r *http.Request) {
	orgs := orgsFromContext(r.Context())
	if len(orgs.Data) == 0 {
		http.Redirect(w, r, "/create-organisation", http.StatusFound)
		return
	}
	query := r.URL.Query()
	query.Set("organisationid", orgs.Data[0].ID)
	r.URL.RawQuery = query.Encode()
	http.Redirect(w, r, r.URL.String(), http.StatusFound)
}
// parseFormDate parses an HTML date-input value (YYYY-MM-DD) into a time.Time.
func parseFormDate(input string) (time.Time, error) {
return time.Parse("2006-01-02", input)
}
// defaultedDatesFromQueryString reads "start" and "end" (YYYY-MM-DD) from the
// query string, defaulting the window to [today, today + numDaysFromNowDefault
// days]. When weekBoundary is set, the default window begins on the next
// Sunday instead of today.
func defaultedDatesFromQueryString(query url.Values, numDaysFromNowDefault int, weekBoundary bool) (startTime, endTime time.Time, err error) {
start := query.Get("start")
end := query.Get("end")
format := "2006-01-02"
begin := time.Now()
if weekBoundary {
begin = util.NextDay(begin, time.Sunday)
}
// Round-trip through the date format to truncate the defaults to midnight.
now := begin.Format(format)
then := begin.Add(24 * time.Duration(numDaysFromNowDefault) * time.Hour).Format(format)
startTime, _ = time.Parse(format, now)
endTime, _ = time.Parse(format, then)
// Explicit query values override the defaults; unparseable input is an error.
if start != "" {
parsed, err := time.Parse(format, start)
if err != nil {
return startTime, endTime, err
}
startTime = parsed
}
if end != "" {
parsed, err := time.Parse(format, end)
if err != nil {
return startTime, endTime, err
}
endTime = parsed
}
return startTime, endTime, nil
}
// deblank returns the input strings with empty entries removed. For an input
// of only empty strings the result is nil (append-to-nil semantics preserved).
func deblank(arr []string) []string {
	var kept []string
	for _, s := range arr {
		if s != "" {
			kept = append(kept, s)
		}
	}
	return kept
}
// sendEmailChangedNotification queues an "email changed" notification to both
// the new (target) and old addresses via the send-email task queue. Returns
// the first marshalling or publish error encountered.
func sendEmailChangedNotification(ctx context.Context, target, old string) error {
emailHTML, emailText := copy.EmailChangedEmail(target, old)
subject := fmt.Sprintf("%s email changed", config.NAME)
recipients := []string{target, old}
for _, recipient := range recipients {
mail := notifications.Email{
To: recipient,
From: config.SYSTEM_EMAIL,
ReplyTo: config.SUPPORT_EMAIL,
Text: emailText,
HTML: emailHTML,
Subject: subject,
}
task := kewpie.Task{}
if err := task.Marshal(mail); err != nil {
return err
}
if err := config.QUEUE.Publish(ctx, config.SEND_EMAIL_QUEUE_NAME, &task); err != nil {
return err
}
}
return nil
}
// dollarsToCents converts a decimal dollar string (e.g. "19.99") to integer
// cents, rounding to the nearest cent.
//
// The original computed int((dollars * 1000) / 10), truncating toward zero:
// binary floating point stores 19.99 as 19.9899999..., so "19.99" became
// 1998 cents. math.Round fixes that off-by-one-cent error.
func dollarsToCents(in string) (int, error) {
	dollars, err := strconv.ParseFloat(in, 64)
	if err != nil {
		return 0, err
	}
	return int(math.Round(dollars * 100)), nil
}
// redirToLogin bounces the client to the login page, preserving the
// originally-requested URL in the "next" query parameter so the login flow
// can return the user afterwards.
func redirToLogin(w http.ResponseWriter, r *http.Request) {
	params := url.Values{}
	params.Set("next", r.URL.String())
	http.Redirect(w, r, "/login?"+params.Encode(), 302)
}
// nextFlow builds the post-action redirect URL. It starts from defaultURL,
// lets the form's "next" value override the path and merge its query (a bare
// login path is ignored to avoid redirect loops), then applies the optional
// "flow" query parameter and "next_fragment" anchor.
func nextFlow(defaultURL string, form url.Values) string {
ret, _ := url.Parse(defaultURL)
next := form.Get("next")
if next != "" {
parsed, _ := url.Parse(next)
if parsed.Path != "login" && parsed.Path != "/login" {
ret.Path = parsed.Path
}
// Merge query params from "next" into the result one key at a time;
// re-reading ret.Query() each iteration keeps earlier merges.
for k, v := range parsed.Query() {
q := ret.Query()
q[k] = v
ret.RawQuery = q.Encode()
}
}
if form.Get("flow") != "" {
q := ret.Query()
q.Set("flow", form.Get("flow"))
ret.RawQuery = q.Encode()
}
if form.Get("next_fragment") != "" {
ret.Fragment = form.Get("next_fragment")
}
return ret.String()
}
// isClientSafe reports whether err opts into exposing a message to end
// users (by implementing ClientSafeMessage), returning that message when it
// does. A plain type assertion is used deliberately: wrapped errors are not
// unwrapped.
func isClientSafe(err error) (bool, string) {
	type clientSafe interface {
		ClientSafeMessage() string
	}
	if cse, ok := err.(clientSafe); ok {
		return true, cse.ClientSafeMessage()
	}
	return false, ""
}
// isAppAdmin reports whether the context holds a logged-in user whose
// application-wide Admin flag is set.
func isAppAdmin(ctx context.Context) bool {
	// Short-circuit keeps the type assertion safe when nobody is logged in.
	return isLoggedIn(ctx) && ctx.Value("user").(models.User).Admin
}
// userFromContext returns the authenticated user from the request context,
// or a zero-value User when nobody is logged in.
func userFromContext(ctx context.Context) models.User {
if !isLoggedIn(ctx) {
return models.User{}
}
return ctx.Value("user").(models.User)
}
// orgUserFromContext finds the OrganisationUser linking the current user to
// the given organisation, or a zero value when no such link is in context.
func orgUserFromContext(ctx context.Context, org models.Organisation) models.OrganisationUser {
if v, ok := ctx.Value("organisation_users").(models.OrganisationUsers); ok {
for _, ou := range v.Data {
if ou.OrganisationID == org.ID {
return ou
}
}
}
return models.OrganisationUser{}
}
func flashesFromContext(ctx context.Context) flashes.Flashes {
if ctx == nil {
return flashes.Flashes{}
}
unconv := ctx.Value("flashes")
unconvUser := ctx.Value("user")
if unconv == nil && unconvUser == nil {
return flashes.Flashes{}
}
f := flashes.Flashes{}
if unconv != nil {
f = unconv.(flashes.Flashes)
}
if unconvUser != nil {
user := unconvUser.(models.User)
for _, flash := range user.Flashes {
if !flash.Sticky {
user.DeleteFlash(ctx, flash)
}
}
f = append(f, user.Flashes...)
}
return f
} | "T": "Truncated", | random_line_split |
util.go | package routes
import (
"context"
"database/sql"
"doubleboiler/config"
"doubleboiler/copy"
"doubleboiler/flashes"
"doubleboiler/heroicons"
"doubleboiler/logger"
"doubleboiler/models"
"doubleboiler/util"
"errors"
"fmt"
"html/template"
"net/http"
"net/url"
"os"
"runtime/debug"
"strconv"
"strings"
"time"
"github.com/davidbanham/english_conjoin"
"github.com/davidbanham/human_duration"
kewpie "github.com/davidbanham/kewpie_go/v3"
"github.com/davidbanham/notifications"
uuid "github.com/satori/go.uuid"
)
// Tmpl exports the compiled templates
var Tmpl *template.Template
func init() {
templateFuncMap := template.FuncMap{
"hash": calcHash,
"despace": func(s string) string {
return strings.Replace(s, " ", "_", -1)
},
"ToLower": strings.ToLower,
"humanTime": func(t time.Time) string {
loc, err := time.LoadLocation("Australia/Sydney")
if err != nil {
loc, _ = time.LoadLocation("UTC")
}
return t.In(loc).Format(time.RFC822)
},
"humanDate": func(t time.Time) string {
loc, err := time.LoadLocation("Australia/Sydney")
if err != nil {
loc, _ = time.LoadLocation("UTC")
}
return t.In(loc).Format("02 Jan 2006")
},
"humanDayDate": func(t time.Time) string {
loc, err := time.LoadLocation("Australia/Sydney")
if err != nil {
loc, _ = time.LoadLocation("UTC")
}
return t.In(loc).Format("Mon 02 Jan 2006")
},
"isoTime": func(t time.Time) string {
return t.Format(time.RFC3339)
},
"stringToTime": func(d string) time.Time {
t, _ := time.Parse(time.RFC3339, d)
return t
},
"weekdayOffset": func(s string) int {
t, _ := time.Parse(time.RFC3339, s)
return int(t.Weekday())
},
"diff": func(a, b int) int {
return a - b
},
"breakMonths": func(nights []string) [][]string {
monthNights := [][]string{}
target := 0
monthNights = append(monthNights, []string{})
for i, n := range nights {
night, _ := time.Parse(time.RFC3339, n)
if i != 0 {
lastNight, _ := time.Parse(time.RFC3339, nights[i-1])
if night.Month() != lastNight.Month() {
monthNights = append(monthNights, []string{})
target += 1
}
}
monthNights[target] = append(monthNights[target], night.Format(time.RFC3339))
}
return monthNights
},
"dollarise": func(in int) string {
return util.Dollarise(in)
},
"dollarise_float": func(in float32) string {
return util.Dollarise(int(in))
},
"dollarise_int64": func(in int64) string {
return util.Dollarise(int(in))
},
"cents_to_dollars_int": func(in int) float64 {
return float64(in) / 100
},
"cents_to_dollars_int64": func(in int64) float64 {
return float64(in) / 100
},
"cents_to_dollars": func(in float32) float32 {
return in / 100
},
"csv": func(in []string) string {
return strings.Join(in, ",")
},
"ssv": func(in []string) string {
return strings.Join(in, "; ")
},
"dateonly": func(in time.Time) string {
return in.Format("2006-01-02")
},
"datetime": func(in time.Time) string {
return in.Format("Mon Jan 2 15:04:05 -0700 MST 2006")
},
"breakLines": func(in string) []string {
return strings.Split(in, "\n")
},
"breakOnAnd": func(in string) []string {
return strings.Split(in, " AND ")
},
"humanDuration": human_duration.String,
"nextPeriodStart": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return start.Add(dur)
},
"nextPeriodEnd": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return end.Add(dur)
},
"prevPeriodStart": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return start.Add(-dur)
},
"prevPeriodEnd": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return end.Add(-dur)
},
"contains": func(str []string, target string) bool {
for _, s := range str {
if s == target {
return true
}
}
return false
},
"unix_to_time": func(in int64) time.Time {
return time.Unix(in, 0)
},
"unrealDate": func(d time.Time) bool {
tooLong := time.Date(1950, time.January, 0, 0, 0, 0, 0, time.Local)
tooLate := time.Date(9000, time.January, 0, 0, 0, 0, 0, time.Local)
if d.Before(tooLong) {
return true
}
if d.After(tooLate) {
return true
}
return false
},
"add": func(i, j int) int {
return i + j
},
"firstFiveChars": util.FirstFiveChars,
"toUpper": strings.ToUpper,
"randID": func() string {
return util.FirstFiveChars(uuid.NewV4().String())
},
"auditActions": func(abbrev string) string {
mapping := map[string]string{
"I": "Created",
"U": "Updated",
"D": "Deleted",
"T": "Truncated",
}
return mapping[abbrev]
},
"loggedIn": isLoggedIn,
"userEmail": func(ctx context.Context) string {
return ctx.Value("user").(models.User).Email
},
"user": func(ctx context.Context) models.User {
return ctx.Value("user").(models.User)
},
"orgsFromContext": func(ctx context.Context) models.Organisations {
return orgsFromContext(ctx)
},
"flashes": flashesFromContext,
"activeOrgFromContext": func(ctx context.Context) models.Organisation {
return activeOrgFromContext(ctx)
},
"can": func(ctx context.Context, role string) bool {
org := activeOrgFromContext(ctx)
return can(ctx, org, role)
},
"csrf": func(ctx context.Context) string {
unconv := ctx.Value("user")
if unconv == nil {
return ""
}
user := unconv.(models.User)
return util.CalcToken(user.ID, "")
},
"isAppAdmin": isAppAdmin,
"chrome": func(ctx context.Context) bool {
if ctx == nil {
return true
}
val := ctx.Value("chrome")
if val == nil {
return true
}
return val.(bool)
},
"percentage": func(total, percentage int) int {
return int(float64(total) * float64(percentage) / 100)
},
"percentify": func(in float32) string {
return fmt.Sprintf("%.2f", in) + "%"
},
"thisYear": func() int {
return time.Now().Year()
},
"mod": func(i, j int) bool { return i%j == 0 },
"numDays": func(d time.Duration) int { return int(d / (24 * time.Hour)) },
"isProd": func() bool { return config.STAGE == "production" },
"isLocal": func() bool { return config.LOCAL },
"now": func() string {
return time.Now().Format("2006-01-02")
},
"nextWeekStart": func() string {
return util.NextDay(time.Now(), time.Monday).Format("2006-01-02")
},
"conjoinAnd": func(in []string) string {
return english_conjoin.ConjoinAnd(in)
},
"logoLink": func(ctx context.Context) string {
if !isLoggedIn(ctx) {
return "/"
}
unconv := ctx.Value("url")
if unconv == nil {
return "/"
}
url := unconv.(*url.URL)
if strings.Contains(url.Path, "/welcome") {
return "/"
}
return "/welcome"
},
"dict": func(values ...interface{}) (map[string]interface{}, error) {
if len(values)%2 != 0 {
return nil, errors.New("invalid dict call")
}
dict := make(map[string]interface{}, len(values)/2)
for i := 0; i < len(values); i += 2 {
key, ok := values[i].(string)
if !ok {
return nil, errors.New("dict keys must be strings")
}
dict[key] = values[i+1]
}
return dict, nil
},
"crumbs": func(values ...string) ([]Crumb, error) {
if len(values)%2 != 0 {
return nil, errors.New("invalid dict call")
}
crumbs := []Crumb{}
for i := 0; i < len(values); i += 2 {
crumbs = append(crumbs, Crumb{
Title: values[i],
Path: values[i+1],
})
}
return crumbs, nil
},
"noescape": func(str string) template.HTML {
return template.HTML(str)
},
"urlescape": func(input string) string {
return url.QueryEscape(input)
},
"heroIcon": func(name string) string {
return heroicons.Icons[name]
},
"uniq": func() string {
return uuid.NewV4().String()
},
"queryString": func(vals url.Values) template.URL {
return "?" + template.URL(vals.Encode())
},
"searchableEntities": func(ctx context.Context) []models.Searchable {
ret := []models.Searchable{}
org := activeOrgFromContext(ctx)
for _, entity := range models.Searchables {
if can(ctx, org, entity.RequiredRole.Name) {
ret = append(ret, entity)
}
}
return ret
},
"isOrgSettingsPage": func(ctx context.Context) bool {
currentURL := ctx.Value("url")
if v, ok := currentURL.(*url.URL); ok {
parts := strings.Split(v.Path, "/")
if len(parts) > 1 {
if parts[1] == "organisations" {
return true
}
}
}
return false
},
"selectorSafe": func(in string) string {
return strings.ReplaceAll(in, ".", "-")
},
}
Tmpl = template.Must(template.New("main").Funcs(templateFuncMap).ParseGlob(getPath() + "/*"))
}
func getPath() string {
if _, err := os.Open("../views"); err == nil {
return "../views/"
}
if _, err := os.Open("./views"); err == nil {
return "./views/"
}
if _, err := os.Open("../../../views"); err == nil {
return "../../../views"
}
if _, err := os.Open("../../views"); err == nil {
return "../../views"
}
return ""
}
func checkFormInput(required []string, form url.Values, w http.ResponseWriter, r *http.Request) bool {
for _, val := range required {
if len(form[val]) < 1 {
errRes(w, r, 400, "Invalid "+val, nil)
return false
}
if form[val][0] == "" {
errRes(w, r, 400, "Invalid "+val, nil)
return false
}
}
return true
}
type errorPageData struct {
basePageData
Message string
Context context.Context
}
type Crumb struct {
Title string
Path string
}
func errRes(w http.ResponseWriter, r *http.Request, code int, message string, err error) {
sendErr := func() {
if err != nil && err.Error() == "http2: stream closed" {
return
}
reportableErr := err
if err == nil {
reportableErr = errors.New(strconv.Itoa(code) + " " + message)
}
if err != nil {
config.ReportError(reportableErr)
}
if clientSafe, addendum := isClientSafe(err); clientSafe {
message += " " + addendum
}
logger.Log(r.Context(), logger.Warning, fmt.Sprintf("Sending Error Response: %+v, %+v, %+v, %+v", code, message, r.URL.String(), err))
if code == 500 {
logger.Log(r.Context(), logger.Error, err)
logger.Log(r.Context(), logger.Debug, string(debug.Stack()))
}
w.WriteHeader(code)
if r.Header.Get("Accept") == "application/json" {
w.Write([]byte(fmt.Sprintf(`{"error": "%s"}`, message)))
return
}
ohshit := Tmpl.ExecuteTemplate(w, "error.html", errorPageData{
Message: message,
Context: r.Context(),
})
if ohshit != nil {
w.Write([]byte("Error rendering the error template. Oh dear."))
return
}
}
tx := r.Context().Value("tx")
switch v := tx.(type) {
case *sql.Tx:
rollbackErr := v.Rollback()
if rollbackErr != nil {
logger.Log(r.Context(), logger.Error, fmt.Sprintf("Error rolling back tx: %+v", rollbackErr))
}
default:
//fmt.Printf("DEBUG no transaction on error\n")
}
sendErr()
}
func shortDur(d time.Duration) string {
s := d.String()
if strings.HasSuffix(s, "m0s") {
s = s[:len(s)-2]
}
if strings.HasSuffix(s, "h0m") {
s = s[:len(s)-2]
}
return s
}
func redirToDefaultOrg(w http.ResponseWriter, r *http.Request) |
func parseFormDate(input string) (time.Time, error) {
return time.Parse("2006-01-02", input)
}
func defaultedDatesFromQueryString(query url.Values, numDaysFromNowDefault int, weekBoundary bool) (startTime, endTime time.Time, err error) {
start := query.Get("start")
end := query.Get("end")
format := "2006-01-02"
begin := time.Now()
if weekBoundary {
begin = util.NextDay(begin, time.Sunday)
}
now := begin.Format(format)
then := begin.Add(24 * time.Duration(numDaysFromNowDefault) * time.Hour).Format(format)
startTime, _ = time.Parse(format, now)
endTime, _ = time.Parse(format, then)
if start != "" {
parsed, err := time.Parse(format, start)
if err != nil {
return startTime, endTime, err
}
startTime = parsed
}
if end != "" {
parsed, err := time.Parse(format, end)
if err != nil {
return startTime, endTime, err
}
endTime = parsed
}
return startTime, endTime, nil
}
func deblank(arr []string) (deblanked []string) {
for _, v := range arr {
if v != "" {
deblanked = append(deblanked, v)
}
}
return
}
func sendEmailChangedNotification(ctx context.Context, target, old string) error {
emailHTML, emailText := copy.EmailChangedEmail(target, old)
subject := fmt.Sprintf("%s email changed", config.NAME)
recipients := []string{target, old}
for _, recipient := range recipients {
mail := notifications.Email{
To: recipient,
From: config.SYSTEM_EMAIL,
ReplyTo: config.SUPPORT_EMAIL,
Text: emailText,
HTML: emailHTML,
Subject: subject,
}
task := kewpie.Task{}
if err := task.Marshal(mail); err != nil {
return err
}
if err := config.QUEUE.Publish(ctx, config.SEND_EMAIL_QUEUE_NAME, &task); err != nil {
return err
}
}
return nil
}
func dollarsToCents(in string) (int, error) {
dollars, err := strconv.ParseFloat(in, 64)
return int((dollars * 1000) / 10), err
}
func redirToLogin(w http.ResponseWriter, r *http.Request) {
values := url.Values{
"next": []string{r.URL.String()},
}
http.Redirect(w, r, "/login?"+values.Encode(), 302)
return
}
func nextFlow(defaultURL string, form url.Values) string {
ret, _ := url.Parse(defaultURL)
next := form.Get("next")
if next != "" {
parsed, _ := url.Parse(next)
if parsed.Path != "login" && parsed.Path != "/login" {
ret.Path = parsed.Path
}
for k, v := range parsed.Query() {
q := ret.Query()
q[k] = v
ret.RawQuery = q.Encode()
}
}
if form.Get("flow") != "" {
q := ret.Query()
q.Set("flow", form.Get("flow"))
ret.RawQuery = q.Encode()
}
if form.Get("next_fragment") != "" {
ret.Fragment = form.Get("next_fragment")
}
return ret.String()
}
func isClientSafe(err error) (bool, string) {
type clientSafe interface {
ClientSafeMessage() string
}
cse, ok := err.(clientSafe)
if ok {
return ok, cse.ClientSafeMessage()
} else {
return false, ""
}
}
func isAppAdmin(ctx context.Context) bool {
if !isLoggedIn(ctx) {
return false
}
return ctx.Value("user").(models.User).Admin
}
func userFromContext(ctx context.Context) models.User {
if !isLoggedIn(ctx) {
return models.User{}
}
return ctx.Value("user").(models.User)
}
func orgUserFromContext(ctx context.Context, org models.Organisation) models.OrganisationUser {
if v, ok := ctx.Value("organisation_users").(models.OrganisationUsers); ok {
for _, ou := range v.Data {
if ou.OrganisationID == org.ID {
return ou
}
}
}
return models.OrganisationUser{}
}
func flashesFromContext(ctx context.Context) flashes.Flashes {
if ctx == nil {
return flashes.Flashes{}
}
unconv := ctx.Value("flashes")
unconvUser := ctx.Value("user")
if unconv == nil && unconvUser == nil {
return flashes.Flashes{}
}
f := flashes.Flashes{}
if unconv != nil {
f = unconv.(flashes.Flashes)
}
if unconvUser != nil {
user := unconvUser.(models.User)
for _, flash := range user.Flashes {
if !flash.Sticky {
user.DeleteFlash(ctx, flash)
}
}
f = append(f, user.Flashes...)
}
return f
}
| {
orgs := orgsFromContext(r.Context())
if len(orgs.Data) < 1 {
http.Redirect(w, r, "/create-organisation", http.StatusFound)
return
} else {
query := r.URL.Query()
query.Set("organisationid", orgs.Data[0].ID)
r.URL.RawQuery = query.Encode()
}
http.Redirect(w, r, r.URL.String(), http.StatusFound)
} | identifier_body |
util.go | package routes
import (
"context"
"database/sql"
"doubleboiler/config"
"doubleboiler/copy"
"doubleboiler/flashes"
"doubleboiler/heroicons"
"doubleboiler/logger"
"doubleboiler/models"
"doubleboiler/util"
"errors"
"fmt"
"html/template"
"net/http"
"net/url"
"os"
"runtime/debug"
"strconv"
"strings"
"time"
"github.com/davidbanham/english_conjoin"
"github.com/davidbanham/human_duration"
kewpie "github.com/davidbanham/kewpie_go/v3"
"github.com/davidbanham/notifications"
uuid "github.com/satori/go.uuid"
)
// Tmpl exports the compiled templates
var Tmpl *template.Template
func init() {
templateFuncMap := template.FuncMap{
"hash": calcHash,
"despace": func(s string) string {
return strings.Replace(s, " ", "_", -1)
},
"ToLower": strings.ToLower,
"humanTime": func(t time.Time) string {
loc, err := time.LoadLocation("Australia/Sydney")
if err != nil {
loc, _ = time.LoadLocation("UTC")
}
return t.In(loc).Format(time.RFC822)
},
"humanDate": func(t time.Time) string {
loc, err := time.LoadLocation("Australia/Sydney")
if err != nil {
loc, _ = time.LoadLocation("UTC")
}
return t.In(loc).Format("02 Jan 2006")
},
"humanDayDate": func(t time.Time) string {
loc, err := time.LoadLocation("Australia/Sydney")
if err != nil {
loc, _ = time.LoadLocation("UTC")
}
return t.In(loc).Format("Mon 02 Jan 2006")
},
"isoTime": func(t time.Time) string {
return t.Format(time.RFC3339)
},
"stringToTime": func(d string) time.Time {
t, _ := time.Parse(time.RFC3339, d)
return t
},
"weekdayOffset": func(s string) int {
t, _ := time.Parse(time.RFC3339, s)
return int(t.Weekday())
},
"diff": func(a, b int) int {
return a - b
},
"breakMonths": func(nights []string) [][]string {
monthNights := [][]string{}
target := 0
monthNights = append(monthNights, []string{})
for i, n := range nights {
night, _ := time.Parse(time.RFC3339, n)
if i != 0 {
lastNight, _ := time.Parse(time.RFC3339, nights[i-1])
if night.Month() != lastNight.Month() {
monthNights = append(monthNights, []string{})
target += 1
}
}
monthNights[target] = append(monthNights[target], night.Format(time.RFC3339))
}
return monthNights
},
"dollarise": func(in int) string {
return util.Dollarise(in)
},
"dollarise_float": func(in float32) string {
return util.Dollarise(int(in))
},
"dollarise_int64": func(in int64) string {
return util.Dollarise(int(in))
},
"cents_to_dollars_int": func(in int) float64 {
return float64(in) / 100
},
"cents_to_dollars_int64": func(in int64) float64 {
return float64(in) / 100
},
"cents_to_dollars": func(in float32) float32 {
return in / 100
},
"csv": func(in []string) string {
return strings.Join(in, ",")
},
"ssv": func(in []string) string {
return strings.Join(in, "; ")
},
"dateonly": func(in time.Time) string {
return in.Format("2006-01-02")
},
"datetime": func(in time.Time) string {
return in.Format("Mon Jan 2 15:04:05 -0700 MST 2006")
},
"breakLines": func(in string) []string {
return strings.Split(in, "\n")
},
"breakOnAnd": func(in string) []string {
return strings.Split(in, " AND ")
},
"humanDuration": human_duration.String,
"nextPeriodStart": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return start.Add(dur)
},
"nextPeriodEnd": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return end.Add(dur)
},
"prevPeriodStart": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return start.Add(-dur)
},
"prevPeriodEnd": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return end.Add(-dur)
},
"contains": func(str []string, target string) bool {
for _, s := range str {
if s == target {
return true
}
}
return false
},
"unix_to_time": func(in int64) time.Time {
return time.Unix(in, 0)
},
"unrealDate": func(d time.Time) bool {
tooLong := time.Date(1950, time.January, 0, 0, 0, 0, 0, time.Local)
tooLate := time.Date(9000, time.January, 0, 0, 0, 0, 0, time.Local)
if d.Before(tooLong) {
return true
}
if d.After(tooLate) {
return true
}
return false
},
"add": func(i, j int) int {
return i + j
},
"firstFiveChars": util.FirstFiveChars,
"toUpper": strings.ToUpper,
"randID": func() string {
return util.FirstFiveChars(uuid.NewV4().String())
},
"auditActions": func(abbrev string) string {
mapping := map[string]string{
"I": "Created",
"U": "Updated",
"D": "Deleted",
"T": "Truncated",
}
return mapping[abbrev]
},
"loggedIn": isLoggedIn,
"userEmail": func(ctx context.Context) string {
return ctx.Value("user").(models.User).Email
},
"user": func(ctx context.Context) models.User {
return ctx.Value("user").(models.User)
},
"orgsFromContext": func(ctx context.Context) models.Organisations {
return orgsFromContext(ctx)
},
"flashes": flashesFromContext,
"activeOrgFromContext": func(ctx context.Context) models.Organisation {
return activeOrgFromContext(ctx)
},
"can": func(ctx context.Context, role string) bool {
org := activeOrgFromContext(ctx)
return can(ctx, org, role)
},
"csrf": func(ctx context.Context) string {
unconv := ctx.Value("user")
if unconv == nil {
return ""
}
user := unconv.(models.User)
return util.CalcToken(user.ID, "")
},
"isAppAdmin": isAppAdmin,
"chrome": func(ctx context.Context) bool {
if ctx == nil {
return true
}
val := ctx.Value("chrome")
if val == nil {
return true
}
return val.(bool)
},
"percentage": func(total, percentage int) int {
return int(float64(total) * float64(percentage) / 100)
},
"percentify": func(in float32) string {
return fmt.Sprintf("%.2f", in) + "%"
},
"thisYear": func() int {
return time.Now().Year()
},
"mod": func(i, j int) bool { return i%j == 0 },
"numDays": func(d time.Duration) int { return int(d / (24 * time.Hour)) },
"isProd": func() bool { return config.STAGE == "production" },
"isLocal": func() bool { return config.LOCAL },
"now": func() string {
return time.Now().Format("2006-01-02")
},
"nextWeekStart": func() string {
return util.NextDay(time.Now(), time.Monday).Format("2006-01-02")
},
"conjoinAnd": func(in []string) string {
return english_conjoin.ConjoinAnd(in)
},
"logoLink": func(ctx context.Context) string {
if !isLoggedIn(ctx) {
return "/"
}
unconv := ctx.Value("url")
if unconv == nil {
return "/"
}
url := unconv.(*url.URL)
if strings.Contains(url.Path, "/welcome") {
return "/"
}
return "/welcome"
},
"dict": func(values ...interface{}) (map[string]interface{}, error) {
if len(values)%2 != 0 {
return nil, errors.New("invalid dict call")
}
dict := make(map[string]interface{}, len(values)/2)
for i := 0; i < len(values); i += 2 {
key, ok := values[i].(string)
if !ok {
return nil, errors.New("dict keys must be strings")
}
dict[key] = values[i+1]
}
return dict, nil
},
"crumbs": func(values ...string) ([]Crumb, error) {
if len(values)%2 != 0 {
return nil, errors.New("invalid dict call")
}
crumbs := []Crumb{}
for i := 0; i < len(values); i += 2 {
crumbs = append(crumbs, Crumb{
Title: values[i],
Path: values[i+1],
})
}
return crumbs, nil
},
"noescape": func(str string) template.HTML {
return template.HTML(str)
},
"urlescape": func(input string) string {
return url.QueryEscape(input)
},
"heroIcon": func(name string) string {
return heroicons.Icons[name]
},
"uniq": func() string {
return uuid.NewV4().String()
},
"queryString": func(vals url.Values) template.URL {
return "?" + template.URL(vals.Encode())
},
"searchableEntities": func(ctx context.Context) []models.Searchable {
ret := []models.Searchable{}
org := activeOrgFromContext(ctx)
for _, entity := range models.Searchables {
if can(ctx, org, entity.RequiredRole.Name) {
ret = append(ret, entity)
}
}
return ret
},
"isOrgSettingsPage": func(ctx context.Context) bool {
currentURL := ctx.Value("url")
if v, ok := currentURL.(*url.URL); ok {
parts := strings.Split(v.Path, "/")
if len(parts) > 1 {
if parts[1] == "organisations" {
return true
}
}
}
return false
},
"selectorSafe": func(in string) string {
return strings.ReplaceAll(in, ".", "-")
},
}
Tmpl = template.Must(template.New("main").Funcs(templateFuncMap).ParseGlob(getPath() + "/*"))
}
func getPath() string {
if _, err := os.Open("../views"); err == nil {
return "../views/"
}
if _, err := os.Open("./views"); err == nil {
return "./views/"
}
if _, err := os.Open("../../../views"); err == nil {
return "../../../views"
}
if _, err := os.Open("../../views"); err == nil {
return "../../views"
}
return ""
}
func checkFormInput(required []string, form url.Values, w http.ResponseWriter, r *http.Request) bool {
for _, val := range required {
if len(form[val]) < 1 {
errRes(w, r, 400, "Invalid "+val, nil)
return false
}
if form[val][0] == "" {
errRes(w, r, 400, "Invalid "+val, nil)
return false
}
}
return true
}
// errorPageData feeds the error.html template: the user-facing message
// plus the request context (used by the base layout for chrome/user lookups).
type errorPageData struct {
	basePageData
	Message string
	Context context.Context
}
// Crumb is a single breadcrumb entry (display title plus link path),
// built by the "crumbs" template helper from alternating title/path args.
type Crumb struct {
	Title string
	Path  string
}
// errRes writes an error response to the client and rolls back any SQL
// transaction stored on the request context under "tx".
//
// The body is JSON when the client sent Accept: application/json,
// otherwise the error.html template. 500s additionally log the error and
// a stack trace.
// NOTE(review): when err is nil a synthetic reportableErr is built but
// never passed to config.ReportError (that call is gated on err != nil) —
// presumably intentional so only real errors are reported; confirm.
// NOTE(review): the JSON branch interpolates message into the body without
// escaping; safe only while messages never contain quotes — verify.
func errRes(w http.ResponseWriter, r *http.Request, code int, message string, err error) {
	sendErr := func() {
		// The peer hung up; nothing useful can be written.
		if err != nil && err.Error() == "http2: stream closed" {
			return
		}
		reportableErr := err
		if err == nil {
			reportableErr = errors.New(strconv.Itoa(code) + " " + message)
		}
		if err != nil {
			config.ReportError(reportableErr)
		}
		// Client-safe errors may contribute extra user-facing detail.
		if clientSafe, addendum := isClientSafe(err); clientSafe {
			message += " " + addendum
		}
		logger.Log(r.Context(), logger.Warning, fmt.Sprintf("Sending Error Response: %+v, %+v, %+v, %+v", code, message, r.URL.String(), err))
		if code == 500 {
			logger.Log(r.Context(), logger.Error, err)
			logger.Log(r.Context(), logger.Debug, string(debug.Stack()))
		}
		// Headers are committed here; template failure below cannot change the code.
		w.WriteHeader(code)
		if r.Header.Get("Accept") == "application/json" {
			w.Write([]byte(fmt.Sprintf(`{"error": "%s"}`, message)))
			return
		}
		ohshit := Tmpl.ExecuteTemplate(w, "error.html", errorPageData{
			Message: message,
			Context: r.Context(),
		})
		if ohshit != nil {
			w.Write([]byte("Error rendering the error template. Oh dear."))
			return
		}
	}
	// Roll back the request's transaction (if any) before responding.
	tx := r.Context().Value("tx")
	switch v := tx.(type) {
	case *sql.Tx:
		rollbackErr := v.Rollback()
		if rollbackErr != nil {
			logger.Log(r.Context(), logger.Error, fmt.Sprintf("Error rolling back tx: %+v", rollbackErr))
		}
	default:
		//fmt.Printf("DEBUG no transaction on error\n")
	}
	sendErr()
}
func shortDur(d time.Duration) string {
s := d.String()
if strings.HasSuffix(s, "m0s") {
s = s[:len(s)-2]
}
if strings.HasSuffix(s, "h0m") {
s = s[:len(s)-2]
}
return s
}
func redirToDefaultOrg(w http.ResponseWriter, r *http.Request) {
orgs := orgsFromContext(r.Context())
if len(orgs.Data) < 1 {
http.Redirect(w, r, "/create-organisation", http.StatusFound)
return
} else {
query := r.URL.Query()
query.Set("organisationid", orgs.Data[0].ID)
r.URL.RawQuery = query.Encode()
}
http.Redirect(w, r, r.URL.String(), http.StatusFound)
}
func parseFormDate(input string) (time.Time, error) {
return time.Parse("2006-01-02", input)
}
// defaultedDatesFromQueryString returns the [start, end] date range from
// the "start"/"end" query parameters, falling back to a default window of
// numDaysFromNowDefault days beginning today (or at the next Sunday when
// weekBoundary is set).
//
// Dates are handled at day granularity ("2006-01-02", parsed as UTC).
// A malformed explicit parameter returns the defaults alongside the error.
//
// Fix: the default end date is now computed with AddDate rather than
// adding 24h per day; duration arithmetic can land on the wrong calendar
// day when the span crosses a DST transition in the local timezone.
func defaultedDatesFromQueryString(query url.Values, numDaysFromNowDefault int, weekBoundary bool) (startTime, endTime time.Time, err error) {
	const format = "2006-01-02"

	begin := time.Now()
	if weekBoundary {
		begin = util.NextDay(begin, time.Sunday)
	}

	// Round-trip through the date-only format to truncate to midnight UTC.
	startTime, _ = time.Parse(format, begin.Format(format))
	endTime, _ = time.Parse(format, begin.AddDate(0, 0, numDaysFromNowDefault).Format(format))

	if start := query.Get("start"); start != "" {
		parsed, perr := time.Parse(format, start)
		if perr != nil {
			return startTime, endTime, perr
		}
		startTime = parsed
	}
	if end := query.Get("end"); end != "" {
		parsed, perr := time.Parse(format, end)
		if perr != nil {
			return startTime, endTime, perr
		}
		endTime = parsed
	}
	return startTime, endTime, nil
}
// deblank returns a copy of arr with all empty strings removed.
// A nil result is returned when no non-empty entries exist.
func deblank(arr []string) (deblanked []string) {
	for _, item := range arr {
		if len(item) == 0 {
			continue
		}
		deblanked = append(deblanked, item)
	}
	return deblanked
}
func sendEmailChangedNotification(ctx context.Context, target, old string) error {
emailHTML, emailText := copy.EmailChangedEmail(target, old)
subject := fmt.Sprintf("%s email changed", config.NAME)
recipients := []string{target, old}
for _, recipient := range recipients {
mail := notifications.Email{
To: recipient,
From: config.SYSTEM_EMAIL,
ReplyTo: config.SUPPORT_EMAIL,
Text: emailText,
HTML: emailHTML,
Subject: subject,
}
task := kewpie.Task{}
if err := task.Marshal(mail); err != nil {
return err
}
if err := config.QUEUE.Publish(ctx, config.SEND_EMAIL_QUEUE_NAME, &task); err != nil {
return err
}
}
return nil
}
func dollarsToCents(in string) (int, error) {
dollars, err := strconv.ParseFloat(in, 64)
return int((dollars * 1000) / 10), err
}
func | (w http.ResponseWriter, r *http.Request) {
values := url.Values{
"next": []string{r.URL.String()},
}
http.Redirect(w, r, "/login?"+values.Encode(), 302)
return
}
func nextFlow(defaultURL string, form url.Values) string {
ret, _ := url.Parse(defaultURL)
next := form.Get("next")
if next != "" {
parsed, _ := url.Parse(next)
if parsed.Path != "login" && parsed.Path != "/login" {
ret.Path = parsed.Path
}
for k, v := range parsed.Query() {
q := ret.Query()
q[k] = v
ret.RawQuery = q.Encode()
}
}
if form.Get("flow") != "" {
q := ret.Query()
q.Set("flow", form.Get("flow"))
ret.RawQuery = q.Encode()
}
if form.Get("next_fragment") != "" {
ret.Fragment = form.Get("next_fragment")
}
return ret.String()
}
func isClientSafe(err error) (bool, string) {
type clientSafe interface {
ClientSafeMessage() string
}
cse, ok := err.(clientSafe)
if ok {
return ok, cse.ClientSafeMessage()
} else {
return false, ""
}
}
func isAppAdmin(ctx context.Context) bool {
if !isLoggedIn(ctx) {
return false
}
return ctx.Value("user").(models.User).Admin
}
// userFromContext returns the logged-in user stored on ctx under "user",
// or the zero-value models.User when nobody is logged in.
func userFromContext(ctx context.Context) models.User {
	if !isLoggedIn(ctx) {
		return models.User{}
	}
	return ctx.Value("user").(models.User)
}
// orgUserFromContext returns the OrganisationUser membership linking the
// context's user to the given organisation, or a zero value when no such
// membership exists (or no memberships are stored on the context).
func orgUserFromContext(ctx context.Context, org models.Organisation) models.OrganisationUser {
	if v, ok := ctx.Value("organisation_users").(models.OrganisationUsers); ok {
		for _, ou := range v.Data {
			if ou.OrganisationID == org.ID {
				return ou
			}
		}
	}
	return models.OrganisationUser{}
}
// flashesFromContext collects flash messages for rendering: any stored
// directly on the context under "flashes" plus those attached to the
// logged-in user. As a side effect, non-sticky user flashes are deleted
// so they display only once.
func flashesFromContext(ctx context.Context) flashes.Flashes {
	if ctx == nil {
		return flashes.Flashes{}
	}
	unconv := ctx.Value("flashes")
	unconvUser := ctx.Value("user")
	if unconv == nil && unconvUser == nil {
		return flashes.Flashes{}
	}
	f := flashes.Flashes{}
	if unconv != nil {
		f = unconv.(flashes.Flashes)
	}
	if unconvUser != nil {
		user := unconvUser.(models.User)
		for _, flash := range user.Flashes {
			if !flash.Sticky {
				// One-shot flashes are consumed on read.
				user.DeleteFlash(ctx, flash)
			}
		}
		f = append(f, user.Flashes...)
	}
	return f
}
| redirToLogin | identifier_name |
util.go | package routes
import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"html/template"
	"math"
	"net/http"
	"net/url"
	"os"
	"runtime/debug"
	"strconv"
	"strings"
	"time"

	"doubleboiler/config"
	"doubleboiler/copy"
	"doubleboiler/flashes"
	"doubleboiler/heroicons"
	"doubleboiler/logger"
	"doubleboiler/models"
	"doubleboiler/util"

	"github.com/davidbanham/english_conjoin"
	"github.com/davidbanham/human_duration"
	kewpie "github.com/davidbanham/kewpie_go/v3"
	"github.com/davidbanham/notifications"
	uuid "github.com/satori/go.uuid"
)
// Tmpl exports the compiled templates
var Tmpl *template.Template
func init() {
templateFuncMap := template.FuncMap{
"hash": calcHash,
"despace": func(s string) string {
return strings.Replace(s, " ", "_", -1)
},
"ToLower": strings.ToLower,
"humanTime": func(t time.Time) string {
loc, err := time.LoadLocation("Australia/Sydney")
if err != nil {
loc, _ = time.LoadLocation("UTC")
}
return t.In(loc).Format(time.RFC822)
},
"humanDate": func(t time.Time) string {
loc, err := time.LoadLocation("Australia/Sydney")
if err != nil {
loc, _ = time.LoadLocation("UTC")
}
return t.In(loc).Format("02 Jan 2006")
},
"humanDayDate": func(t time.Time) string {
loc, err := time.LoadLocation("Australia/Sydney")
if err != nil {
loc, _ = time.LoadLocation("UTC")
}
return t.In(loc).Format("Mon 02 Jan 2006")
},
"isoTime": func(t time.Time) string {
return t.Format(time.RFC3339)
},
"stringToTime": func(d string) time.Time {
t, _ := time.Parse(time.RFC3339, d)
return t
},
"weekdayOffset": func(s string) int {
t, _ := time.Parse(time.RFC3339, s)
return int(t.Weekday())
},
"diff": func(a, b int) int {
return a - b
},
"breakMonths": func(nights []string) [][]string {
monthNights := [][]string{}
target := 0
monthNights = append(monthNights, []string{})
for i, n := range nights {
night, _ := time.Parse(time.RFC3339, n)
if i != 0 {
lastNight, _ := time.Parse(time.RFC3339, nights[i-1])
if night.Month() != lastNight.Month() {
monthNights = append(monthNights, []string{})
target += 1
}
}
monthNights[target] = append(monthNights[target], night.Format(time.RFC3339))
}
return monthNights
},
"dollarise": func(in int) string {
return util.Dollarise(in)
},
"dollarise_float": func(in float32) string {
return util.Dollarise(int(in))
},
"dollarise_int64": func(in int64) string {
return util.Dollarise(int(in))
},
"cents_to_dollars_int": func(in int) float64 {
return float64(in) / 100
},
"cents_to_dollars_int64": func(in int64) float64 {
return float64(in) / 100
},
"cents_to_dollars": func(in float32) float32 {
return in / 100
},
"csv": func(in []string) string {
return strings.Join(in, ",")
},
"ssv": func(in []string) string {
return strings.Join(in, "; ")
},
"dateonly": func(in time.Time) string {
return in.Format("2006-01-02")
},
"datetime": func(in time.Time) string {
return in.Format("Mon Jan 2 15:04:05 -0700 MST 2006")
},
"breakLines": func(in string) []string {
return strings.Split(in, "\n")
},
"breakOnAnd": func(in string) []string {
return strings.Split(in, " AND ")
},
"humanDuration": human_duration.String,
"nextPeriodStart": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return start.Add(dur)
},
"nextPeriodEnd": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return end.Add(dur)
},
"prevPeriodStart": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return start.Add(-dur)
},
"prevPeriodEnd": func(start, end time.Time) time.Time {
dur := end.Sub(start) + (24 * time.Hour)
return end.Add(-dur)
},
"contains": func(str []string, target string) bool {
for _, s := range str {
if s == target {
return true
}
}
return false
},
"unix_to_time": func(in int64) time.Time {
return time.Unix(in, 0)
},
"unrealDate": func(d time.Time) bool {
tooLong := time.Date(1950, time.January, 0, 0, 0, 0, 0, time.Local)
tooLate := time.Date(9000, time.January, 0, 0, 0, 0, 0, time.Local)
if d.Before(tooLong) {
return true
}
if d.After(tooLate) {
return true
}
return false
},
"add": func(i, j int) int {
return i + j
},
"firstFiveChars": util.FirstFiveChars,
"toUpper": strings.ToUpper,
"randID": func() string {
return util.FirstFiveChars(uuid.NewV4().String())
},
"auditActions": func(abbrev string) string {
mapping := map[string]string{
"I": "Created",
"U": "Updated",
"D": "Deleted",
"T": "Truncated",
}
return mapping[abbrev]
},
"loggedIn": isLoggedIn,
"userEmail": func(ctx context.Context) string {
return ctx.Value("user").(models.User).Email
},
"user": func(ctx context.Context) models.User {
return ctx.Value("user").(models.User)
},
"orgsFromContext": func(ctx context.Context) models.Organisations {
return orgsFromContext(ctx)
},
"flashes": flashesFromContext,
"activeOrgFromContext": func(ctx context.Context) models.Organisation {
return activeOrgFromContext(ctx)
},
"can": func(ctx context.Context, role string) bool {
org := activeOrgFromContext(ctx)
return can(ctx, org, role)
},
"csrf": func(ctx context.Context) string {
unconv := ctx.Value("user")
if unconv == nil {
return ""
}
user := unconv.(models.User)
return util.CalcToken(user.ID, "")
},
"isAppAdmin": isAppAdmin,
"chrome": func(ctx context.Context) bool {
if ctx == nil {
return true
}
val := ctx.Value("chrome")
if val == nil {
return true
}
return val.(bool)
},
"percentage": func(total, percentage int) int {
return int(float64(total) * float64(percentage) / 100)
},
"percentify": func(in float32) string {
return fmt.Sprintf("%.2f", in) + "%"
},
"thisYear": func() int {
return time.Now().Year()
},
"mod": func(i, j int) bool { return i%j == 0 },
"numDays": func(d time.Duration) int { return int(d / (24 * time.Hour)) },
"isProd": func() bool { return config.STAGE == "production" },
"isLocal": func() bool { return config.LOCAL },
"now": func() string {
return time.Now().Format("2006-01-02")
},
"nextWeekStart": func() string {
return util.NextDay(time.Now(), time.Monday).Format("2006-01-02")
},
"conjoinAnd": func(in []string) string {
return english_conjoin.ConjoinAnd(in)
},
"logoLink": func(ctx context.Context) string {
if !isLoggedIn(ctx) {
return "/"
}
unconv := ctx.Value("url")
if unconv == nil {
return "/"
}
url := unconv.(*url.URL)
if strings.Contains(url.Path, "/welcome") {
return "/"
}
return "/welcome"
},
"dict": func(values ...interface{}) (map[string]interface{}, error) {
if len(values)%2 != 0 {
return nil, errors.New("invalid dict call")
}
dict := make(map[string]interface{}, len(values)/2)
for i := 0; i < len(values); i += 2 {
key, ok := values[i].(string)
if !ok {
return nil, errors.New("dict keys must be strings")
}
dict[key] = values[i+1]
}
return dict, nil
},
"crumbs": func(values ...string) ([]Crumb, error) {
if len(values)%2 != 0 {
return nil, errors.New("invalid dict call")
}
crumbs := []Crumb{}
for i := 0; i < len(values); i += 2 {
crumbs = append(crumbs, Crumb{
Title: values[i],
Path: values[i+1],
})
}
return crumbs, nil
},
"noescape": func(str string) template.HTML {
return template.HTML(str)
},
"urlescape": func(input string) string {
return url.QueryEscape(input)
},
"heroIcon": func(name string) string {
return heroicons.Icons[name]
},
"uniq": func() string {
return uuid.NewV4().String()
},
"queryString": func(vals url.Values) template.URL {
return "?" + template.URL(vals.Encode())
},
"searchableEntities": func(ctx context.Context) []models.Searchable {
ret := []models.Searchable{}
org := activeOrgFromContext(ctx)
for _, entity := range models.Searchables {
if can(ctx, org, entity.RequiredRole.Name) {
ret = append(ret, entity)
}
}
return ret
},
"isOrgSettingsPage": func(ctx context.Context) bool {
currentURL := ctx.Value("url")
if v, ok := currentURL.(*url.URL); ok {
parts := strings.Split(v.Path, "/")
if len(parts) > 1 {
if parts[1] == "organisations" {
return true
}
}
}
return false
},
"selectorSafe": func(in string) string {
return strings.ReplaceAll(in, ".", "-")
},
}
Tmpl = template.Must(template.New("main").Funcs(templateFuncMap).ParseGlob(getPath() + "/*"))
}
// getPath locates the templates directory relative to the working
// directory, which differs depending on whether the binary runs from the
// project root, a subdirectory, or a test. Returns "" when no candidate
// exists, in which case the caller's ParseGlob fails loudly at startup.
//
// Fix: probes use os.Stat instead of os.Open, so no file handles are
// leaked by the existence checks. Return values are kept byte-identical
// to the historical ones (including the inconsistent trailing slashes)
// for compatibility with callers that concatenate paths.
func getPath() string {
	candidates := []struct{ probe, ret string }{
		{"../views", "../views/"},
		{"./views", "./views/"},
		{"../../../views", "../../../views"},
		{"../../views", "../../views"},
	}
	for _, c := range candidates {
		if _, err := os.Stat(c.probe); err == nil {
			return c.ret
		}
	}
	return ""
}
// checkFormInput verifies that every field named in required is present
// and non-empty in form. On the first missing or blank field it writes a
// 400 response via errRes and returns false; returns true when all pass.
func checkFormInput(required []string, form url.Values, w http.ResponseWriter, r *http.Request) bool {
	for _, field := range required {
		if len(form[field]) < 1 || form[field][0] == "" {
			errRes(w, r, 400, "Invalid "+field, nil)
			return false
		}
	}
	return true
}
type errorPageData struct {
basePageData
Message string
Context context.Context
}
type Crumb struct {
Title string
Path string
}
func errRes(w http.ResponseWriter, r *http.Request, code int, message string, err error) {
sendErr := func() {
if err != nil && err.Error() == "http2: stream closed" {
return
}
reportableErr := err
if err == nil {
reportableErr = errors.New(strconv.Itoa(code) + " " + message)
}
if err != nil {
config.ReportError(reportableErr)
}
if clientSafe, addendum := isClientSafe(err); clientSafe {
message += " " + addendum
}
logger.Log(r.Context(), logger.Warning, fmt.Sprintf("Sending Error Response: %+v, %+v, %+v, %+v", code, message, r.URL.String(), err))
if code == 500 {
logger.Log(r.Context(), logger.Error, err)
logger.Log(r.Context(), logger.Debug, string(debug.Stack()))
}
w.WriteHeader(code)
if r.Header.Get("Accept") == "application/json" {
w.Write([]byte(fmt.Sprintf(`{"error": "%s"}`, message)))
return
}
ohshit := Tmpl.ExecuteTemplate(w, "error.html", errorPageData{
Message: message,
Context: r.Context(),
})
if ohshit != nil {
w.Write([]byte("Error rendering the error template. Oh dear."))
return
}
}
tx := r.Context().Value("tx")
switch v := tx.(type) {
case *sql.Tx:
rollbackErr := v.Rollback()
if rollbackErr != nil {
logger.Log(r.Context(), logger.Error, fmt.Sprintf("Error rolling back tx: %+v", rollbackErr))
}
default:
//fmt.Printf("DEBUG no transaction on error\n")
}
sendErr()
}
func shortDur(d time.Duration) string {
s := d.String()
if strings.HasSuffix(s, "m0s") {
s = s[:len(s)-2]
}
if strings.HasSuffix(s, "h0m") {
s = s[:len(s)-2]
}
return s
}
// redirToDefaultOrg redirects to the same URL with the user's first
// organisation selected via the "organisationid" query parameter, or to
// the organisation-creation page when they belong to none yet.
func redirToDefaultOrg(w http.ResponseWriter, r *http.Request) {
	orgs := orgsFromContext(r.Context())
	if len(orgs.Data) == 0 {
		http.Redirect(w, r, "/create-organisation", http.StatusFound)
		return
	}
	q := r.URL.Query()
	q.Set("organisationid", orgs.Data[0].ID)
	r.URL.RawQuery = q.Encode()
	http.Redirect(w, r, r.URL.String(), http.StatusFound)
}
func parseFormDate(input string) (time.Time, error) {
return time.Parse("2006-01-02", input)
}
func defaultedDatesFromQueryString(query url.Values, numDaysFromNowDefault int, weekBoundary bool) (startTime, endTime time.Time, err error) {
start := query.Get("start")
end := query.Get("end")
format := "2006-01-02"
begin := time.Now()
if weekBoundary {
begin = util.NextDay(begin, time.Sunday)
}
now := begin.Format(format)
then := begin.Add(24 * time.Duration(numDaysFromNowDefault) * time.Hour).Format(format)
startTime, _ = time.Parse(format, now)
endTime, _ = time.Parse(format, then)
if start != "" {
parsed, err := time.Parse(format, start)
if err != nil {
return startTime, endTime, err
}
startTime = parsed
}
if end != "" {
parsed, err := time.Parse(format, end)
if err != nil {
return startTime, endTime, err
}
endTime = parsed
}
return startTime, endTime, nil
}
func deblank(arr []string) (deblanked []string) {
for _, v := range arr {
if v != "" {
deblanked = append(deblanked, v)
}
}
return
}
func sendEmailChangedNotification(ctx context.Context, target, old string) error {
emailHTML, emailText := copy.EmailChangedEmail(target, old)
subject := fmt.Sprintf("%s email changed", config.NAME)
recipients := []string{target, old}
for _, recipient := range recipients {
mail := notifications.Email{
To: recipient,
From: config.SYSTEM_EMAIL,
ReplyTo: config.SUPPORT_EMAIL,
Text: emailText,
HTML: emailHTML,
Subject: subject,
}
task := kewpie.Task{}
if err := task.Marshal(mail); err != nil {
return err
}
if err := config.QUEUE.Publish(ctx, config.SEND_EMAIL_QUEUE_NAME, &task); err != nil {
return err
}
}
return nil
}
func dollarsToCents(in string) (int, error) {
dollars, err := strconv.ParseFloat(in, 64)
return int((dollars * 1000) / 10), err
}
func redirToLogin(w http.ResponseWriter, r *http.Request) {
values := url.Values{
"next": []string{r.URL.String()},
}
http.Redirect(w, r, "/login?"+values.Encode(), 302)
return
}
func nextFlow(defaultURL string, form url.Values) string {
ret, _ := url.Parse(defaultURL)
next := form.Get("next")
if next != "" {
parsed, _ := url.Parse(next)
if parsed.Path != "login" && parsed.Path != "/login" {
ret.Path = parsed.Path
}
for k, v := range parsed.Query() {
q := ret.Query()
q[k] = v
ret.RawQuery = q.Encode()
}
}
if form.Get("flow") != "" {
q := ret.Query()
q.Set("flow", form.Get("flow"))
ret.RawQuery = q.Encode()
}
if form.Get("next_fragment") != "" {
ret.Fragment = form.Get("next_fragment")
}
return ret.String()
}
func isClientSafe(err error) (bool, string) {
type clientSafe interface {
ClientSafeMessage() string
}
cse, ok := err.(clientSafe)
if ok {
return ok, cse.ClientSafeMessage()
} else {
return false, ""
}
}
func isAppAdmin(ctx context.Context) bool {
if !isLoggedIn(ctx) {
return false
}
return ctx.Value("user").(models.User).Admin
}
func userFromContext(ctx context.Context) models.User {
if !isLoggedIn(ctx) {
return models.User{}
}
return ctx.Value("user").(models.User)
}
func orgUserFromContext(ctx context.Context, org models.Organisation) models.OrganisationUser {
if v, ok := ctx.Value("organisation_users").(models.OrganisationUsers); ok {
for _, ou := range v.Data {
if ou.OrganisationID == org.ID {
return ou
}
}
}
return models.OrganisationUser{}
}
func flashesFromContext(ctx context.Context) flashes.Flashes {
if ctx == nil {
return flashes.Flashes{}
}
unconv := ctx.Value("flashes")
unconvUser := ctx.Value("user")
if unconv == nil && unconvUser == nil {
return flashes.Flashes{}
}
f := flashes.Flashes{}
if unconv != nil {
f = unconv.(flashes.Flashes)
}
if unconvUser != nil {
user := unconvUser.(models.User)
for _, flash := range user.Flashes {
if !flash.Sticky |
}
f = append(f, user.Flashes...)
}
return f
}
| {
user.DeleteFlash(ctx, flash)
} | conditional_block |
docker-build-server.go | package main
import (
"fmt"
"net/http"
"os"
"bytes"
"strings"
"time"
"io/ioutil"
"github.com/dimiro1/banner"
"flag"
"gopkg.in/src-d/go-git.v4"
. "gopkg.in/src-d/go-git.v4/_examples"
"gopkg.in/src-d/go-git.v4/plumbing"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/fileutils"
"path"
"path/filepath"
"io"
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
"bufio"
"math/rand"
"log"
"github.com/DataDog/datadog-go/statsd"
)
type GithubUrl struct {
GithubDomain string
GithubRepo string
}
func main() {
isEnabled := true
isColorEnabled := true
fmt.Println("Started Confluent Docker Build Server ...")
// print bob ascii
bob_bytes, err := ioutil.ReadFile("resources/bob.txt") // just pass the file name
if err != nil {
fmt.Print(err)
}
bob_str := string(bob_bytes)
banner.Init(os.Stdout, isEnabled, isColorEnabled, bytes.NewBufferString(bob_str))
fmt.Println("")
fmt.Println("Confluent Docker Build Server started ... accepting requests")
// print bob the cat
cat_bytes, err := ioutil.ReadFile("resources/cat.txt") // just pass the file name
if err != nil {
fmt.Print(err)
}
cat_str := string(cat_bytes)
banner.Init(os.Stdout, isEnabled, isColorEnabled, bytes.NewBufferString(cat_str))
http.HandleFunc("/", DockerConfluentBuildServer)
http.ListenAndServe(":8080", nil)
}
func DockerConfluentBuildServer(w http.ResponseWriter, r *http.Request) {
url_param := r.URL.Path[1:]
if !strings.Contains(url_param, "github.com") {
fmt.Println("Invalid Github Repo!")
return
}
fmt.Println("Lightning fast docker build for ", url_param)
// v2/github.com/mohnishbasha/hello-world/manifests/latest
if strings.Contains(url_param, "v2") {
url_param_1 := strings.ReplaceAll(url_param, "v2/", "")
url_param_2 := strings.ReplaceAll(url_param_1, "/manifests/latest", "")
Info(url_param_1)
Info(url_param_2)
url_param = url_param_2
}
statsd, err := statsd.New("127.0.0.1:8125")
if err != nil {
log.Fatal(err)
}
clone_n_dockerbuild(url_param, statsd)
}
func clone_n_dockerbuild(git_url string, statsd *statsd.Client) (types.ImageBuildResponse) {
domain_name := "bob.run"
git_parts := strings.SplitN(git_url, "/", -1)
fmt.Printf("\nSlice 1: %s", git_parts)
dockerImageTag := domain_name + "/" + git_url
fmt.Printf("DockerImageTag: %s", dockerImageTag)
cloneDirStr := fmt.Sprintf("%s%d%s", "clone-dir/",rand.Int(),git_url);
cloneUrlStr := fmt.Sprintf("%s%d%s", "clone-url/",rand.Int(),git_url);
shaStr := fmt.Sprintf("%s%d%s", "sha/",rand.Int(),git_url);
cloneDirPtr := flag.String(cloneDirStr, "clone-dir/" + git_url, "Directory to clone")
cloneUrlPtr := flag.String(cloneUrlStr, "https://" + git_url, "URL to clone")
shaPtr := flag.String(shaStr, "", "sha to clone")
flag.Parse()
cloneOptions := git.CloneOptions{
URL: *cloneUrlPtr,
ReferenceName: plumbing.ReferenceName("refs/heads/master"),
SingleBranch: true,
Progress: os.Stdout,
Tags: git.NoTags,
}
repo, err := git.PlainClone(*cloneDirPtr, false, &cloneOptions)
if err != nil {
os.RemoveAll("clone-dir")
}
CheckIfError(err)
reference, err := repo.Head()
CheckIfError(err)
Info("Cloned! Head at %s", reference)
workTree, err := repo.Worktree()
CheckIfError(err)
err = workTree.Reset(&git.ResetOptions{
Commit: plumbing.NewHash(*shaPtr),
Mode: git.HardReset,
})
CheckIfError(err)
Info("Hard reseted to %s", *shaPtr)
status, err := workTree.Status()
CheckIfError(err)
Info("Status after reset: %s", status)
repo.Storer.Index()
srcPath, err1 := filepath.Abs("clone-dir/" + git_url)
if err1 != nil {
fmt.Errorf("error1: '%s'", err1.Error())
}
// dockerfilePath, err2 := filepath.Abs("clone-dir/" + git_url + "/Dockerfile")
// if err2 != nil {
// fmt.Errorf("error2: '%s'", err2.Error())
// }
dockerfilePath := "Dockerfile"
fmt.Printf("srcPath=%q, dockerfilePath=%q\n", srcPath, dockerfilePath)
Info("Tar file paths: %s %s", srcPath, dockerfilePath)
defer timeTrack(time.Now(), "dockerbuild-time", statsd, dockerImageTag)
tarReader, err := CreateTarStream(srcPath, dockerfilePath)
if err != nil {
fmt.Errorf("error creating docker tar stream: '%s'", err.Error())
}
Info("Created tar stream ....")
os.Setenv("DOCKER_BUILDKIT", "1")
// initialize docker client & background context
c := ensureDockerClient()
netCtx := context.Background()
Info("dockerfilepath: '%s'", dockerfilePath)
Info("dockerImageTag: '%s'", dockerImageTag)
// set build options for docker build
opts := types.ImageBuildOptions{
Tags: []string{dockerImageTag + ":latest"},
Dockerfile: dockerfilePath,
}
// invoke docker build
buildResp, err := c.ImageBuild(netCtx,
tarReader, opts)
if err != nil {
fmt.Errorf("error creating docker build image: '%s'", err.Error())
}
fmt.Printf("OSType=%q\n", buildResp.OSType)
// docker image details
bodyReader := bufio.NewReader(buildResp.Body)
for {
line, _, err := bodyReader.ReadLine()
fmt.Printf("build: %q\n", string(line))
if err == io.EOF {
break
} else if err != nil {
fmt.Errorf("error read docker build image: '%s'", err.Error())
}
}
fmt.Println("Image available: ", dockerImageTag)
os.RemoveAll(srcPath)
response, err := ioutil.ReadAll(buildResp.Body)
if err != nil {
fmt.Errorf(err.Error())
}
buildResp.Body.Close()
if string(response) != "body" {
fmt.Errorf("expected Body to contain 'body' string, got %s", response)
}
return buildResp
}
// generate a tar stream for ImageBuild API
// https://docs.docker.com/engine/api/v1.40/#operation/ImageBuild
func CreateTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) {
excludes, err := parseDockerIgnore(srcPath)
if err != nil {
return nil, err
}
//excludes := []string{"."}
includes := []string{"."}
// If .dockerignore mentions .dockerignore or the Dockerfile
// then make sure we send both files over to the daemon
// because Dockerfile is, obviously, needed no matter what, and
// .dockerignore is needed to know if either one needs to be
// removed. The deamon will remove them for us, if needed, after it
// parses the Dockerfile.
//
// https://github.com/docker/docker/issues/8330
//
forceIncludeFiles := []string{".dockerignore", dockerfilePath}
for _, includeFile := range forceIncludeFiles {
if includeFile == "" {
continue
}
keepThem, err := fileutils.Matches(includeFile, excludes)
if err != nil {
return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err)
}
if keepThem {
Info(includeFile)
includes = append(includes, includeFile)
}
}
if err := validateDockerContextDirectory(srcPath, excludes); err != nil {
return nil, err
}
tarOpts := &archive.TarOptions{
ExcludePatterns: excludes,
IncludeFiles: includes,
Compression: archive.Uncompressed,
NoLchown: true,
}
return archive.TarWithOptions(srcPath, tarOpts)
}
// validateContextDirectory checks if all the contents of the directory
// can be read and returns an error if some files can't be read.
// Symlinks which point to non-existing files don't trigger an error
func validateDockerContextDirectory(srcPath string, excludes []string) error {
return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
// skip this directory/file if it's not in the path, it won't get added to the context
if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil {
return err
} else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
return err
} else if skip {
if f.IsDir() {
return filepath.SkipDir
}
return nil
}
if err != nil {
if os.IsPermission(err) {
return fmt.Errorf("can't stat '%s'", filePath)
}
if os.IsNotExist(err) {
return nil
}
return err
}
// skip checking if symlinks point to non-existing files, such symlinks can be useful
// also skip named pipes, because they hanging on open
if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
return nil
}
if !f.IsDir() |
return nil
})
}
func parseDockerIgnore(root string) ([]string, error) {
var excludes []string
ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore"))
if err != nil && !os.IsNotExist(err) {
return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err)
}
excludes = strings.Split(string(ignore), "\n")
return excludes, nil
}
func ensureDockerClient() *client.Client {
c, err := client.NewEnvClient()
if err != nil {
fmt.Errorf("DOCKER_HOST not set?: %v", err)
}
return c
}
func timeTrack(start time.Time, name string, statsd *statsd.Client, dockerImageTag string) {
elapsed := time.Since(start)
Info("%s took %s", name, elapsed)
//statsd, err := statsd.New("127.0.0.1:8125")
//if err != nil {
// log.Fatal(err)
//}
statsd.Gauge("btc-dockerbuild." + name, float64(elapsed.Milliseconds()), []string{"Owner:tools","role:dockerbuildserver-hack","environment:dev","imageTag:"+dockerImageTag,"buildkit:true"}, 1)
time.Sleep(1 * time.Second)
}
| {
currentFile, err := os.Open(filePath)
if err != nil && os.IsPermission(err) {
return fmt.Errorf("no permission to read from '%s'", filePath)
}
currentFile.Close()
} | conditional_block |
docker-build-server.go | package main
import (
"fmt"
"net/http"
"os"
"bytes"
"strings"
"time"
"io/ioutil"
"github.com/dimiro1/banner"
"flag"
"gopkg.in/src-d/go-git.v4"
. "gopkg.in/src-d/go-git.v4/_examples"
"gopkg.in/src-d/go-git.v4/plumbing"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/fileutils"
"path"
"path/filepath"
"io"
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
"bufio"
"math/rand"
"log"
"github.com/DataDog/datadog-go/statsd"
)
type GithubUrl struct {
GithubDomain string
GithubRepo string
}
func main() {
isEnabled := true
isColorEnabled := true
fmt.Println("Started Confluent Docker Build Server ...")
// print bob ascii
bob_bytes, err := ioutil.ReadFile("resources/bob.txt") // just pass the file name
if err != nil { | bob_str := string(bob_bytes)
banner.Init(os.Stdout, isEnabled, isColorEnabled, bytes.NewBufferString(bob_str))
fmt.Println("")
fmt.Println("Confluent Docker Build Server started ... accepting requests")
// print bob the cat
cat_bytes, err := ioutil.ReadFile("resources/cat.txt") // just pass the file name
if err != nil {
fmt.Print(err)
}
cat_str := string(cat_bytes)
banner.Init(os.Stdout, isEnabled, isColorEnabled, bytes.NewBufferString(cat_str))
http.HandleFunc("/", DockerConfluentBuildServer)
http.ListenAndServe(":8080", nil)
}
func DockerConfluentBuildServer(w http.ResponseWriter, r *http.Request) {
url_param := r.URL.Path[1:]
if !strings.Contains(url_param, "github.com") {
fmt.Println("Invalid Github Repo!")
return
}
fmt.Println("Lightning fast docker build for ", url_param)
// v2/github.com/mohnishbasha/hello-world/manifests/latest
if strings.Contains(url_param, "v2") {
url_param_1 := strings.ReplaceAll(url_param, "v2/", "")
url_param_2 := strings.ReplaceAll(url_param_1, "/manifests/latest", "")
Info(url_param_1)
Info(url_param_2)
url_param = url_param_2
}
statsd, err := statsd.New("127.0.0.1:8125")
if err != nil {
log.Fatal(err)
}
clone_n_dockerbuild(url_param, statsd)
}
func clone_n_dockerbuild(git_url string, statsd *statsd.Client) (types.ImageBuildResponse) {
domain_name := "bob.run"
git_parts := strings.SplitN(git_url, "/", -1)
fmt.Printf("\nSlice 1: %s", git_parts)
dockerImageTag := domain_name + "/" + git_url
fmt.Printf("DockerImageTag: %s", dockerImageTag)
cloneDirStr := fmt.Sprintf("%s%d%s", "clone-dir/",rand.Int(),git_url);
cloneUrlStr := fmt.Sprintf("%s%d%s", "clone-url/",rand.Int(),git_url);
shaStr := fmt.Sprintf("%s%d%s", "sha/",rand.Int(),git_url);
cloneDirPtr := flag.String(cloneDirStr, "clone-dir/" + git_url, "Directory to clone")
cloneUrlPtr := flag.String(cloneUrlStr, "https://" + git_url, "URL to clone")
shaPtr := flag.String(shaStr, "", "sha to clone")
flag.Parse()
cloneOptions := git.CloneOptions{
URL: *cloneUrlPtr,
ReferenceName: plumbing.ReferenceName("refs/heads/master"),
SingleBranch: true,
Progress: os.Stdout,
Tags: git.NoTags,
}
repo, err := git.PlainClone(*cloneDirPtr, false, &cloneOptions)
if err != nil {
os.RemoveAll("clone-dir")
}
CheckIfError(err)
reference, err := repo.Head()
CheckIfError(err)
Info("Cloned! Head at %s", reference)
workTree, err := repo.Worktree()
CheckIfError(err)
err = workTree.Reset(&git.ResetOptions{
Commit: plumbing.NewHash(*shaPtr),
Mode: git.HardReset,
})
CheckIfError(err)
Info("Hard reseted to %s", *shaPtr)
status, err := workTree.Status()
CheckIfError(err)
Info("Status after reset: %s", status)
repo.Storer.Index()
srcPath, err1 := filepath.Abs("clone-dir/" + git_url)
if err1 != nil {
fmt.Errorf("error1: '%s'", err1.Error())
}
// dockerfilePath, err2 := filepath.Abs("clone-dir/" + git_url + "/Dockerfile")
// if err2 != nil {
// fmt.Errorf("error2: '%s'", err2.Error())
// }
dockerfilePath := "Dockerfile"
fmt.Printf("srcPath=%q, dockerfilePath=%q\n", srcPath, dockerfilePath)
Info("Tar file paths: %s %s", srcPath, dockerfilePath)
defer timeTrack(time.Now(), "dockerbuild-time", statsd, dockerImageTag)
tarReader, err := CreateTarStream(srcPath, dockerfilePath)
if err != nil {
fmt.Errorf("error creating docker tar stream: '%s'", err.Error())
}
Info("Created tar stream ....")
os.Setenv("DOCKER_BUILDKIT", "1")
// initialize docker client & background context
c := ensureDockerClient()
netCtx := context.Background()
Info("dockerfilepath: '%s'", dockerfilePath)
Info("dockerImageTag: '%s'", dockerImageTag)
// set build options for docker build
opts := types.ImageBuildOptions{
Tags: []string{dockerImageTag + ":latest"},
Dockerfile: dockerfilePath,
}
// invoke docker build
buildResp, err := c.ImageBuild(netCtx,
tarReader, opts)
if err != nil {
fmt.Errorf("error creating docker build image: '%s'", err.Error())
}
fmt.Printf("OSType=%q\n", buildResp.OSType)
// docker image details
bodyReader := bufio.NewReader(buildResp.Body)
for {
line, _, err := bodyReader.ReadLine()
fmt.Printf("build: %q\n", string(line))
if err == io.EOF {
break
} else if err != nil {
fmt.Errorf("error read docker build image: '%s'", err.Error())
}
}
fmt.Println("Image available: ", dockerImageTag)
os.RemoveAll(srcPath)
response, err := ioutil.ReadAll(buildResp.Body)
if err != nil {
fmt.Errorf(err.Error())
}
buildResp.Body.Close()
if string(response) != "body" {
fmt.Errorf("expected Body to contain 'body' string, got %s", response)
}
return buildResp
}
// generate a tar stream for ImageBuild API
// https://docs.docker.com/engine/api/v1.40/#operation/ImageBuild
func CreateTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) {
excludes, err := parseDockerIgnore(srcPath)
if err != nil {
return nil, err
}
//excludes := []string{"."}
includes := []string{"."}
// If .dockerignore mentions .dockerignore or the Dockerfile
// then make sure we send both files over to the daemon
// because Dockerfile is, obviously, needed no matter what, and
// .dockerignore is needed to know if either one needs to be
// removed. The deamon will remove them for us, if needed, after it
// parses the Dockerfile.
//
// https://github.com/docker/docker/issues/8330
//
forceIncludeFiles := []string{".dockerignore", dockerfilePath}
for _, includeFile := range forceIncludeFiles {
if includeFile == "" {
continue
}
keepThem, err := fileutils.Matches(includeFile, excludes)
if err != nil {
return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err)
}
if keepThem {
Info(includeFile)
includes = append(includes, includeFile)
}
}
if err := validateDockerContextDirectory(srcPath, excludes); err != nil {
return nil, err
}
tarOpts := &archive.TarOptions{
ExcludePatterns: excludes,
IncludeFiles: includes,
Compression: archive.Uncompressed,
NoLchown: true,
}
return archive.TarWithOptions(srcPath, tarOpts)
}
// validateContextDirectory checks if all the contents of the directory
// can be read and returns an error if some files can't be read.
// Symlinks which point to non-existing files don't trigger an error
func validateDockerContextDirectory(srcPath string, excludes []string) error {
return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
// skip this directory/file if it's not in the path, it won't get added to the context
if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil {
return err
} else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
return err
} else if skip {
if f.IsDir() {
return filepath.SkipDir
}
return nil
}
if err != nil {
if os.IsPermission(err) {
return fmt.Errorf("can't stat '%s'", filePath)
}
if os.IsNotExist(err) {
return nil
}
return err
}
// skip checking if symlinks point to non-existing files, such symlinks can be useful
// also skip named pipes, because they hanging on open
if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
return nil
}
if !f.IsDir() {
currentFile, err := os.Open(filePath)
if err != nil && os.IsPermission(err) {
return fmt.Errorf("no permission to read from '%s'", filePath)
}
currentFile.Close()
}
return nil
})
}
func parseDockerIgnore(root string) ([]string, error) {
var excludes []string
ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore"))
if err != nil && !os.IsNotExist(err) {
return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err)
}
excludes = strings.Split(string(ignore), "\n")
return excludes, nil
}
func ensureDockerClient() *client.Client {
c, err := client.NewEnvClient()
if err != nil {
fmt.Errorf("DOCKER_HOST not set?: %v", err)
}
return c
}
func timeTrack(start time.Time, name string, statsd *statsd.Client, dockerImageTag string) {
elapsed := time.Since(start)
Info("%s took %s", name, elapsed)
//statsd, err := statsd.New("127.0.0.1:8125")
//if err != nil {
// log.Fatal(err)
//}
statsd.Gauge("btc-dockerbuild." + name, float64(elapsed.Milliseconds()), []string{"Owner:tools","role:dockerbuildserver-hack","environment:dev","imageTag:"+dockerImageTag,"buildkit:true"}, 1)
time.Sleep(1 * time.Second)
} | fmt.Print(err)
} | random_line_split |
docker-build-server.go | package main
import (
"fmt"
"net/http"
"os"
"bytes"
"strings"
"time"
"io/ioutil"
"github.com/dimiro1/banner"
"flag"
"gopkg.in/src-d/go-git.v4"
. "gopkg.in/src-d/go-git.v4/_examples"
"gopkg.in/src-d/go-git.v4/plumbing"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/fileutils"
"path"
"path/filepath"
"io"
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
"bufio"
"math/rand"
"log"
"github.com/DataDog/datadog-go/statsd"
)
type GithubUrl struct {
GithubDomain string
GithubRepo string
}
func main() {
isEnabled := true
isColorEnabled := true
fmt.Println("Started Confluent Docker Build Server ...")
// print bob ascii
bob_bytes, err := ioutil.ReadFile("resources/bob.txt") // just pass the file name
if err != nil {
fmt.Print(err)
}
bob_str := string(bob_bytes)
banner.Init(os.Stdout, isEnabled, isColorEnabled, bytes.NewBufferString(bob_str))
fmt.Println("")
fmt.Println("Confluent Docker Build Server started ... accepting requests")
// print bob the cat
cat_bytes, err := ioutil.ReadFile("resources/cat.txt") // just pass the file name
if err != nil {
fmt.Print(err)
}
cat_str := string(cat_bytes)
banner.Init(os.Stdout, isEnabled, isColorEnabled, bytes.NewBufferString(cat_str))
http.HandleFunc("/", DockerConfluentBuildServer)
http.ListenAndServe(":8080", nil)
}
func DockerConfluentBuildServer(w http.ResponseWriter, r *http.Request) |
func clone_n_dockerbuild(git_url string, statsd *statsd.Client) (types.ImageBuildResponse) {
domain_name := "bob.run"
git_parts := strings.SplitN(git_url, "/", -1)
fmt.Printf("\nSlice 1: %s", git_parts)
dockerImageTag := domain_name + "/" + git_url
fmt.Printf("DockerImageTag: %s", dockerImageTag)
cloneDirStr := fmt.Sprintf("%s%d%s", "clone-dir/",rand.Int(),git_url);
cloneUrlStr := fmt.Sprintf("%s%d%s", "clone-url/",rand.Int(),git_url);
shaStr := fmt.Sprintf("%s%d%s", "sha/",rand.Int(),git_url);
cloneDirPtr := flag.String(cloneDirStr, "clone-dir/" + git_url, "Directory to clone")
cloneUrlPtr := flag.String(cloneUrlStr, "https://" + git_url, "URL to clone")
shaPtr := flag.String(shaStr, "", "sha to clone")
flag.Parse()
cloneOptions := git.CloneOptions{
URL: *cloneUrlPtr,
ReferenceName: plumbing.ReferenceName("refs/heads/master"),
SingleBranch: true,
Progress: os.Stdout,
Tags: git.NoTags,
}
repo, err := git.PlainClone(*cloneDirPtr, false, &cloneOptions)
if err != nil {
os.RemoveAll("clone-dir")
}
CheckIfError(err)
reference, err := repo.Head()
CheckIfError(err)
Info("Cloned! Head at %s", reference)
workTree, err := repo.Worktree()
CheckIfError(err)
err = workTree.Reset(&git.ResetOptions{
Commit: plumbing.NewHash(*shaPtr),
Mode: git.HardReset,
})
CheckIfError(err)
Info("Hard reseted to %s", *shaPtr)
status, err := workTree.Status()
CheckIfError(err)
Info("Status after reset: %s", status)
repo.Storer.Index()
srcPath, err1 := filepath.Abs("clone-dir/" + git_url)
if err1 != nil {
fmt.Errorf("error1: '%s'", err1.Error())
}
// dockerfilePath, err2 := filepath.Abs("clone-dir/" + git_url + "/Dockerfile")
// if err2 != nil {
// fmt.Errorf("error2: '%s'", err2.Error())
// }
dockerfilePath := "Dockerfile"
fmt.Printf("srcPath=%q, dockerfilePath=%q\n", srcPath, dockerfilePath)
Info("Tar file paths: %s %s", srcPath, dockerfilePath)
defer timeTrack(time.Now(), "dockerbuild-time", statsd, dockerImageTag)
tarReader, err := CreateTarStream(srcPath, dockerfilePath)
if err != nil {
fmt.Errorf("error creating docker tar stream: '%s'", err.Error())
}
Info("Created tar stream ....")
os.Setenv("DOCKER_BUILDKIT", "1")
// initialize docker client & background context
c := ensureDockerClient()
netCtx := context.Background()
Info("dockerfilepath: '%s'", dockerfilePath)
Info("dockerImageTag: '%s'", dockerImageTag)
// set build options for docker build
opts := types.ImageBuildOptions{
Tags: []string{dockerImageTag + ":latest"},
Dockerfile: dockerfilePath,
}
// invoke docker build
buildResp, err := c.ImageBuild(netCtx,
tarReader, opts)
if err != nil {
fmt.Errorf("error creating docker build image: '%s'", err.Error())
}
fmt.Printf("OSType=%q\n", buildResp.OSType)
// docker image details
bodyReader := bufio.NewReader(buildResp.Body)
for {
line, _, err := bodyReader.ReadLine()
fmt.Printf("build: %q\n", string(line))
if err == io.EOF {
break
} else if err != nil {
fmt.Errorf("error read docker build image: '%s'", err.Error())
}
}
fmt.Println("Image available: ", dockerImageTag)
os.RemoveAll(srcPath)
response, err := ioutil.ReadAll(buildResp.Body)
if err != nil {
fmt.Errorf(err.Error())
}
buildResp.Body.Close()
if string(response) != "body" {
fmt.Errorf("expected Body to contain 'body' string, got %s", response)
}
return buildResp
}
// generate a tar stream for ImageBuild API
// https://docs.docker.com/engine/api/v1.40/#operation/ImageBuild
func CreateTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) {
excludes, err := parseDockerIgnore(srcPath)
if err != nil {
return nil, err
}
//excludes := []string{"."}
includes := []string{"."}
// If .dockerignore mentions .dockerignore or the Dockerfile
// then make sure we send both files over to the daemon
// because Dockerfile is, obviously, needed no matter what, and
// .dockerignore is needed to know if either one needs to be
// removed. The deamon will remove them for us, if needed, after it
// parses the Dockerfile.
//
// https://github.com/docker/docker/issues/8330
//
forceIncludeFiles := []string{".dockerignore", dockerfilePath}
for _, includeFile := range forceIncludeFiles {
if includeFile == "" {
continue
}
keepThem, err := fileutils.Matches(includeFile, excludes)
if err != nil {
return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err)
}
if keepThem {
Info(includeFile)
includes = append(includes, includeFile)
}
}
if err := validateDockerContextDirectory(srcPath, excludes); err != nil {
return nil, err
}
tarOpts := &archive.TarOptions{
ExcludePatterns: excludes,
IncludeFiles: includes,
Compression: archive.Uncompressed,
NoLchown: true,
}
return archive.TarWithOptions(srcPath, tarOpts)
}
// validateContextDirectory checks if all the contents of the directory
// can be read and returns an error if some files can't be read.
// Symlinks which point to non-existing files don't trigger an error
func validateDockerContextDirectory(srcPath string, excludes []string) error {
return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
// skip this directory/file if it's not in the path, it won't get added to the context
if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil {
return err
} else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
return err
} else if skip {
if f.IsDir() {
return filepath.SkipDir
}
return nil
}
if err != nil {
if os.IsPermission(err) {
return fmt.Errorf("can't stat '%s'", filePath)
}
if os.IsNotExist(err) {
return nil
}
return err
}
// skip checking if symlinks point to non-existing files, such symlinks can be useful
// also skip named pipes, because they hanging on open
if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
return nil
}
if !f.IsDir() {
currentFile, err := os.Open(filePath)
if err != nil && os.IsPermission(err) {
return fmt.Errorf("no permission to read from '%s'", filePath)
}
currentFile.Close()
}
return nil
})
}
func parseDockerIgnore(root string) ([]string, error) {
var excludes []string
ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore"))
if err != nil && !os.IsNotExist(err) {
return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err)
}
excludes = strings.Split(string(ignore), "\n")
return excludes, nil
}
func ensureDockerClient() *client.Client {
c, err := client.NewEnvClient()
if err != nil {
fmt.Errorf("DOCKER_HOST not set?: %v", err)
}
return c
}
func timeTrack(start time.Time, name string, statsd *statsd.Client, dockerImageTag string) {
elapsed := time.Since(start)
Info("%s took %s", name, elapsed)
//statsd, err := statsd.New("127.0.0.1:8125")
//if err != nil {
// log.Fatal(err)
//}
statsd.Gauge("btc-dockerbuild." + name, float64(elapsed.Milliseconds()), []string{"Owner:tools","role:dockerbuildserver-hack","environment:dev","imageTag:"+dockerImageTag,"buildkit:true"}, 1)
time.Sleep(1 * time.Second)
}
| {
url_param := r.URL.Path[1:]
if !strings.Contains(url_param, "github.com") {
fmt.Println("Invalid Github Repo!")
return
}
fmt.Println("Lightning fast docker build for ", url_param)
// v2/github.com/mohnishbasha/hello-world/manifests/latest
if strings.Contains(url_param, "v2") {
url_param_1 := strings.ReplaceAll(url_param, "v2/", "")
url_param_2 := strings.ReplaceAll(url_param_1, "/manifests/latest", "")
Info(url_param_1)
Info(url_param_2)
url_param = url_param_2
}
statsd, err := statsd.New("127.0.0.1:8125")
if err != nil {
log.Fatal(err)
}
clone_n_dockerbuild(url_param, statsd)
} | identifier_body |
docker-build-server.go | package main
import (
"fmt"
"net/http"
"os"
"bytes"
"strings"
"time"
"io/ioutil"
"github.com/dimiro1/banner"
"flag"
"gopkg.in/src-d/go-git.v4"
. "gopkg.in/src-d/go-git.v4/_examples"
"gopkg.in/src-d/go-git.v4/plumbing"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/fileutils"
"path"
"path/filepath"
"io"
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
"golang.org/x/net/context"
"bufio"
"math/rand"
"log"
"github.com/DataDog/datadog-go/statsd"
)
type GithubUrl struct {
GithubDomain string
GithubRepo string
}
func main() {
isEnabled := true
isColorEnabled := true
fmt.Println("Started Confluent Docker Build Server ...")
// print bob ascii
bob_bytes, err := ioutil.ReadFile("resources/bob.txt") // just pass the file name
if err != nil {
fmt.Print(err)
}
bob_str := string(bob_bytes)
banner.Init(os.Stdout, isEnabled, isColorEnabled, bytes.NewBufferString(bob_str))
fmt.Println("")
fmt.Println("Confluent Docker Build Server started ... accepting requests")
// print bob the cat
cat_bytes, err := ioutil.ReadFile("resources/cat.txt") // just pass the file name
if err != nil {
fmt.Print(err)
}
cat_str := string(cat_bytes)
banner.Init(os.Stdout, isEnabled, isColorEnabled, bytes.NewBufferString(cat_str))
http.HandleFunc("/", DockerConfluentBuildServer)
http.ListenAndServe(":8080", nil)
}
func DockerConfluentBuildServer(w http.ResponseWriter, r *http.Request) {
url_param := r.URL.Path[1:]
if !strings.Contains(url_param, "github.com") {
fmt.Println("Invalid Github Repo!")
return
}
fmt.Println("Lightning fast docker build for ", url_param)
// v2/github.com/mohnishbasha/hello-world/manifests/latest
if strings.Contains(url_param, "v2") {
url_param_1 := strings.ReplaceAll(url_param, "v2/", "")
url_param_2 := strings.ReplaceAll(url_param_1, "/manifests/latest", "")
Info(url_param_1)
Info(url_param_2)
url_param = url_param_2
}
statsd, err := statsd.New("127.0.0.1:8125")
if err != nil {
log.Fatal(err)
}
clone_n_dockerbuild(url_param, statsd)
}
func clone_n_dockerbuild(git_url string, statsd *statsd.Client) (types.ImageBuildResponse) {
domain_name := "bob.run"
git_parts := strings.SplitN(git_url, "/", -1)
fmt.Printf("\nSlice 1: %s", git_parts)
dockerImageTag := domain_name + "/" + git_url
fmt.Printf("DockerImageTag: %s", dockerImageTag)
cloneDirStr := fmt.Sprintf("%s%d%s", "clone-dir/",rand.Int(),git_url);
cloneUrlStr := fmt.Sprintf("%s%d%s", "clone-url/",rand.Int(),git_url);
shaStr := fmt.Sprintf("%s%d%s", "sha/",rand.Int(),git_url);
cloneDirPtr := flag.String(cloneDirStr, "clone-dir/" + git_url, "Directory to clone")
cloneUrlPtr := flag.String(cloneUrlStr, "https://" + git_url, "URL to clone")
shaPtr := flag.String(shaStr, "", "sha to clone")
flag.Parse()
cloneOptions := git.CloneOptions{
URL: *cloneUrlPtr,
ReferenceName: plumbing.ReferenceName("refs/heads/master"),
SingleBranch: true,
Progress: os.Stdout,
Tags: git.NoTags,
}
repo, err := git.PlainClone(*cloneDirPtr, false, &cloneOptions)
if err != nil {
os.RemoveAll("clone-dir")
}
CheckIfError(err)
reference, err := repo.Head()
CheckIfError(err)
Info("Cloned! Head at %s", reference)
workTree, err := repo.Worktree()
CheckIfError(err)
err = workTree.Reset(&git.ResetOptions{
Commit: plumbing.NewHash(*shaPtr),
Mode: git.HardReset,
})
CheckIfError(err)
Info("Hard reseted to %s", *shaPtr)
status, err := workTree.Status()
CheckIfError(err)
Info("Status after reset: %s", status)
repo.Storer.Index()
srcPath, err1 := filepath.Abs("clone-dir/" + git_url)
if err1 != nil {
fmt.Errorf("error1: '%s'", err1.Error())
}
// dockerfilePath, err2 := filepath.Abs("clone-dir/" + git_url + "/Dockerfile")
// if err2 != nil {
// fmt.Errorf("error2: '%s'", err2.Error())
// }
dockerfilePath := "Dockerfile"
fmt.Printf("srcPath=%q, dockerfilePath=%q\n", srcPath, dockerfilePath)
Info("Tar file paths: %s %s", srcPath, dockerfilePath)
defer timeTrack(time.Now(), "dockerbuild-time", statsd, dockerImageTag)
tarReader, err := CreateTarStream(srcPath, dockerfilePath)
if err != nil {
fmt.Errorf("error creating docker tar stream: '%s'", err.Error())
}
Info("Created tar stream ....")
os.Setenv("DOCKER_BUILDKIT", "1")
// initialize docker client & background context
c := ensureDockerClient()
netCtx := context.Background()
Info("dockerfilepath: '%s'", dockerfilePath)
Info("dockerImageTag: '%s'", dockerImageTag)
// set build options for docker build
opts := types.ImageBuildOptions{
Tags: []string{dockerImageTag + ":latest"},
Dockerfile: dockerfilePath,
}
// invoke docker build
buildResp, err := c.ImageBuild(netCtx,
tarReader, opts)
if err != nil {
fmt.Errorf("error creating docker build image: '%s'", err.Error())
}
fmt.Printf("OSType=%q\n", buildResp.OSType)
// docker image details
bodyReader := bufio.NewReader(buildResp.Body)
for {
line, _, err := bodyReader.ReadLine()
fmt.Printf("build: %q\n", string(line))
if err == io.EOF {
break
} else if err != nil {
fmt.Errorf("error read docker build image: '%s'", err.Error())
}
}
fmt.Println("Image available: ", dockerImageTag)
os.RemoveAll(srcPath)
response, err := ioutil.ReadAll(buildResp.Body)
if err != nil {
fmt.Errorf(err.Error())
}
buildResp.Body.Close()
if string(response) != "body" {
fmt.Errorf("expected Body to contain 'body' string, got %s", response)
}
return buildResp
}
// generate a tar stream for ImageBuild API
// https://docs.docker.com/engine/api/v1.40/#operation/ImageBuild
func CreateTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) {
excludes, err := parseDockerIgnore(srcPath)
if err != nil {
return nil, err
}
//excludes := []string{"."}
includes := []string{"."}
// If .dockerignore mentions .dockerignore or the Dockerfile
// then make sure we send both files over to the daemon
// because Dockerfile is, obviously, needed no matter what, and
// .dockerignore is needed to know if either one needs to be
// removed. The deamon will remove them for us, if needed, after it
// parses the Dockerfile.
//
// https://github.com/docker/docker/issues/8330
//
forceIncludeFiles := []string{".dockerignore", dockerfilePath}
for _, includeFile := range forceIncludeFiles {
if includeFile == "" {
continue
}
keepThem, err := fileutils.Matches(includeFile, excludes)
if err != nil {
return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err)
}
if keepThem {
Info(includeFile)
includes = append(includes, includeFile)
}
}
if err := validateDockerContextDirectory(srcPath, excludes); err != nil {
return nil, err
}
tarOpts := &archive.TarOptions{
ExcludePatterns: excludes,
IncludeFiles: includes,
Compression: archive.Uncompressed,
NoLchown: true,
}
return archive.TarWithOptions(srcPath, tarOpts)
}
// validateContextDirectory checks if all the contents of the directory
// can be read and returns an error if some files can't be read.
// Symlinks which point to non-existing files don't trigger an error
func | (srcPath string, excludes []string) error {
return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
// skip this directory/file if it's not in the path, it won't get added to the context
if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil {
return err
} else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil {
return err
} else if skip {
if f.IsDir() {
return filepath.SkipDir
}
return nil
}
if err != nil {
if os.IsPermission(err) {
return fmt.Errorf("can't stat '%s'", filePath)
}
if os.IsNotExist(err) {
return nil
}
return err
}
// skip checking if symlinks point to non-existing files, such symlinks can be useful
// also skip named pipes, because they hanging on open
if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
return nil
}
if !f.IsDir() {
currentFile, err := os.Open(filePath)
if err != nil && os.IsPermission(err) {
return fmt.Errorf("no permission to read from '%s'", filePath)
}
currentFile.Close()
}
return nil
})
}
func parseDockerIgnore(root string) ([]string, error) {
var excludes []string
ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore"))
if err != nil && !os.IsNotExist(err) {
return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err)
}
excludes = strings.Split(string(ignore), "\n")
return excludes, nil
}
func ensureDockerClient() *client.Client {
c, err := client.NewEnvClient()
if err != nil {
fmt.Errorf("DOCKER_HOST not set?: %v", err)
}
return c
}
func timeTrack(start time.Time, name string, statsd *statsd.Client, dockerImageTag string) {
elapsed := time.Since(start)
Info("%s took %s", name, elapsed)
//statsd, err := statsd.New("127.0.0.1:8125")
//if err != nil {
// log.Fatal(err)
//}
statsd.Gauge("btc-dockerbuild." + name, float64(elapsed.Milliseconds()), []string{"Owner:tools","role:dockerbuildserver-hack","environment:dev","imageTag:"+dockerImageTag,"buildkit:true"}, 1)
time.Sleep(1 * time.Second)
}
| validateDockerContextDirectory | identifier_name |
input_without_IP_group1min.py | import csv
import os.path
import collections
import tldextract
import itertools
from urllib.parse import urlsplit
from urllib.parse import urlparse
from datetime import datetime
import timeit
import math
PATH_WEB_LOG_FILE = '/media/moojokeubuntu/AA6259046258D6A3/AnotherDrive/Project/weblog/data/weblog-20180312_2/12/%s'
PATH_SAVE_LOG = '/home/moojokeubuntu/KU/4_2/RealProject/webpattern/Input'
def readFile():
list_folder_name = ["09%s",
"10%s",
"11%s"]
list_file_name =["/web-20180312%s"]
list_minute_name = ["00%s",
"01%s",
"02%s",
"03%s",
"04%s",
"05%s",
"06%s",
"07%s",
"08%s",
"09%s",
"10%s",
"11%s",
"12%s",
"13%s",
"14%s",
"15%s",
"16%s",
"17%s",
"18%s",
"19%s",
"20%s",
"21%s",
"22%s",
"23%s",
"24%s",
"25%s",
"26%s",
"27%s",
"28%s",
"29%s",
"30%s",
"31%s",
"32%s",
"33%s",
"34%s",
"35%s",
"36%s",
"37%s",
"38%s",
"39%s",
"40%s",
"41%s",
"42%s",
"43%s",
"44%s",
"45%s",
"46%s",
"47%s",
"48%s",
"49%s",
"50%s",
"51%s",
"52%s",
"53%s",
"54%s",
"55%s",
"56%s",
"57%s",
"58%s",
"59%s"]
list_sub_name = [".0.txt",".1.txt",".2.txt",".3.txt",".4.txt",".5.txt"]
list_data = []
list_save = []
list_unique_len = []
for folder in list_folder_name :
for file_name in list_file_name :
for minute in list_minute_name :
for sub in list_sub_name :
#print(PATH_WEB_LOG_FILE%folder%file_name%folder%minute%sub)
with open (PATH_WEB_LOG_FILE%(folder%(file_name%(folder%(minute%(sub))))),'r',encoding='utf-8', errors='ignore') as file:
for line in file:
line_split = line.split(" ")
row_data = {}
row_data['Request'] = line_split[15]
time = file_name%(folder%(minute%(sub)))
time_after = time[5:-4]
realtime = time_after.split(".txt")[0]
row_data['time'] = time_after
row_data['domain'] = get_domainname(line_split[16])
row_data['IP'] = line_split[10]
print(time_after)
print(row_data['Request'])
if row_data['Request'] == 'GET':
#if row_data['Request'] == 'GET' or row_data['Request'] == 'HTTPS' :
if '.' in row_data['domain'] or row_data['domain'] == '-' :
pass
else :
list_data.append(row_data)
#print(list_data)
group_domain(list_data)
def get_domainname(domainname):
if 'http://' not in domainname and 'https://' not in domainname and 'ftp://' not in domainname:
domainname = 'http://' + domainname
subdomain,domain,suf = tldextract.extract(domainname)
if domain in 'edgesuite' :
subdomain,domain,suf = tldextract.extract(subdomain)
elif subdomain == 'ads' :
domain = '-'
elif domain == '':
domain = '-'
return domain
def group_domain(list_data) :
dmain = {}
dmain["facebook"] = [('fbcdn'),('facebook'),('fb')]
dmain['apple'] = [('apple'),('mail')]
dmain['ku'] = [('ku'),('kasetsart')]
dmain["line"] = [('naver'),('line-apps'),('line'),('line-cdn'),('linetv')]
dmain["baidu"] = [('baidu')]
dmain['avast'] = [('avast')]
dmain['microsoft'] = [('microsoft')]
dmain["google"] = [('google'),('google'),('google-analytics')]
dmain["steam"] = [('steamcontent'),('steamstatic'),('steampowered')]
dmain["youtube"] = [('googlevideo'),('youtube'),('youtu'),('youtube-nocookie')]
dmain["garena"] = [('garenanow')]
dmain['icould'] = [('icloud')]
dmain["twitter"] = [('twimg'),('twitter')]
dmain['instagram'] = [('instagram')]
dmain['adobe'] = [('adobe')]
dmain["wechat"] = [('wechat'),('qq')]
dmain["sanook"] = [('isanook'),('fsanook'),('sanook')]
dmain['kapook'] = [('kapook')]
dmain['shopee'] = [('shopee')]
dmain['videe'] = [('videe')]
dmain['live'] = [('live')]
dmain['outlook'] = [('outlook')]
dmain['android'] = [('android')]
dmain["pantip"] = [('pantip'),('ptcdn'),('mikelab')]
dmain['teamviewer'] = [('teamviewer')]
dmain["msn"] = [('msn'),('s-msn')]
dmain["windowsupdate"] = [('windowsupdate')]
dmain["springserve"] = [('springserve')]
dmain["chula"] = [('chula')]
dmain['lkqd'] = [('lkqd')]
dmain['ptvcdn'] = [('ptvcdn')]
dmain['gstatic'] = [('gstatic'),('gstatic')]
dmain['googlesyndication'] = [('googlesyndication')]
dmain['akami'] = [('akami')]
dmain['manager'] = [('manager')]
dmain['adobe'] = [('adobe'),('adobesc')]
dmain['mozilla'] = [('mozilla'),('mozillamessaging')]
dmain['firefox'] = [('firefoxplugin'),('firefoxusercontent'),('firefox')]
dmain['aniview'] = [('aniview'),('ani-view')]
dmain['addthis'] = [('addthis'),('addthisedge')]
dmain['tapjoy'] = [('tapjoy'),('tapjoyads')]
list_domain = []
list_not_group = []
list_save_cvs = []
for i in range(len(list_data)) :
for d in dmain :
for word in dmain[d] :
if '.'+str(word)+'.' in '.'+list_data[i]['domain']+'.' :
list_data[i]['domain'] = d
list_data = sorted(list_data, key=getKey)
print(list_data)
#sequence_gather(list_data)
check_user(list_data)
def check_user(list_data):
|
def getKey(item):
return item['IP']
def cal_time(data):
time_split = data.split(".")
hour = time_split[0][-4:-2]
minute = time_split[0][-2:]
second = time_split[1]
time = (int(hour)*3600)+(int(minute)*60)+(int(second)*10)
return time
def sequence_gather(list_data):
list_IP = []
list_sequence = {}
list_link2 = sorted(list_data, key=lambda x:(x['IP'],x['time'],x['domain']))
list_link = sorted(list_data, key=lambda x:(x['IP'],x['domain'],x['time']))
print(list_link)
link = [list_link[0]]
for i in range(1,len(list_link)):
if link[-1]['domain'] == list_link[i]['domain']:
if math.fabs(cal_time(link[-1]['time'])-cal_time(list_link[i]['time'])) > 60:
link.append(list_link[i])
else:
link.append(list_link[i])
link = sorted(link, key=lambda x:(x['IP'],x['time'],x['domain']))
compath = os.path.join(PATH_SAVE_LOG, 'input_without_ip_test.txt')
with open(compath,'a') as f:
for i in list_link:
f.writelines(str(i)+'\n')
compath2 = os.path.join(PATH_SAVE_LOG, 'input_without_ip_test2.txt')
with open(compath2,'a') as f:
for i in list_link2:
f.writelines(str(i)+'\n')
compath3 = os.path.join(PATH_SAVE_LOG, 'input_without_ip_test3.txt')
with open(compath3,'a') as f:
for i in link:
f.writelines(str(i)+'\n')
list_link2=[]
list_link=[]
print("///////////////////////////////////////////")
for i in range(len(link)-1) :
print(i)
print(link[i]['domain'])
if(link[i]['IP'] == link[i+1]['IP']):
print(link[i]['IP'])
if(link[i]['domain'] != link[i+1]['domain']):
tempIP = str(link[i]['IP'])
tempdomain = link[i]['domain']
if tempIP not in list_sequence.keys():
list_sequence.update({tempIP:[tempdomain]})
else :
list_sequence[tempIP].append(tempdomain)
print(len(list_sequence))
compath = os.path.join(PATH_SAVE_LOG, 'input_without_ip.txt')
count = 0
with open(compath,'a') as f:
for key in list_sequence:
temp = ''
for value in range(len(list_sequence[key])):
temp += list_sequence[key][value]+','
count +=1
temp = temp[:-1]
print(count)
f.writelines(key+" "+temp+'\n')
if __name__ == '__main__':
start = timeit.default_timer()
readFile()
#check_user([{'time': '201803120900.2', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803121000.2', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803121000.5', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803121100.2', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803120900.3', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'sa'}])
#cal_time('201803120900.2')
stop = timeit.default_timer()
print(stop - start)
#readAllurl() | list_new_IP = []
list_link2 = sorted(list_data, key=lambda x:(x['IP'],x['time'],x['domain']))
link = [list_link2[0]]
for i in range(len(list_link2)) :
if '_' in link[-1]['IP'] :
IP = link[-1]['IP'].split("_")[0]
print(IP)
if IP == list_link2[i]['IP']:
list_link2[i]['IP'] = link[-1]['IP']
if math.fabs(cal_time(link[-1]['time'])-cal_time(list_link2[i]['time'])) > 1800:
list_link2[i]["IP"] = list_link2[i]["IP"]+"_"+str(i)
link.append(list_link2[i])
else :
link.append(list_link2[i])
else :
link.append(list_link2[i])
else :
if link[-1]['IP'] == list_link2[i]['IP']:
if math.fabs(cal_time(link[-1]['time'])-cal_time(list_link2[i]['time'])) > 1800:
list_link2[i]["IP"] = list_link2[i]["IP"]+"_"+str(i)
link.append(list_link2[i])
else :
link.append(list_link2[i])
else :
link.append(list_link2[i])
#print(link)
sequence_gather(link) | identifier_body |
input_without_IP_group1min.py | import csv
import os.path
import collections
import tldextract
import itertools
from urllib.parse import urlsplit
from urllib.parse import urlparse
from datetime import datetime
import timeit
import math
PATH_WEB_LOG_FILE = '/media/moojokeubuntu/AA6259046258D6A3/AnotherDrive/Project/weblog/data/weblog-20180312_2/12/%s'
PATH_SAVE_LOG = '/home/moojokeubuntu/KU/4_2/RealProject/webpattern/Input'
def readFile():
list_folder_name = ["09%s",
"10%s",
"11%s"]
list_file_name =["/web-20180312%s"]
list_minute_name = ["00%s",
"01%s",
"02%s",
"03%s",
"04%s",
"05%s",
"06%s",
"07%s",
"08%s",
"09%s",
"10%s",
"11%s",
"12%s",
"13%s",
"14%s",
"15%s",
"16%s",
"17%s",
"18%s",
"19%s",
"20%s",
"21%s",
"22%s",
"23%s",
"24%s",
"25%s",
"26%s",
"27%s",
"28%s",
"29%s",
"30%s",
"31%s",
"32%s",
"33%s",
"34%s",
"35%s",
"36%s",
"37%s",
"38%s",
"39%s",
"40%s",
"41%s",
"42%s",
"43%s",
"44%s",
"45%s",
"46%s",
"47%s",
"48%s",
"49%s",
"50%s",
"51%s",
"52%s",
"53%s",
"54%s",
"55%s",
"56%s",
"57%s",
"58%s",
"59%s"]
list_sub_name = [".0.txt",".1.txt",".2.txt",".3.txt",".4.txt",".5.txt"]
list_data = []
list_save = []
list_unique_len = []
for folder in list_folder_name :
for file_name in list_file_name :
for minute in list_minute_name :
for sub in list_sub_name : | for line in file:
line_split = line.split(" ")
row_data = {}
row_data['Request'] = line_split[15]
time = file_name%(folder%(minute%(sub)))
time_after = time[5:-4]
realtime = time_after.split(".txt")[0]
row_data['time'] = time_after
row_data['domain'] = get_domainname(line_split[16])
row_data['IP'] = line_split[10]
print(time_after)
print(row_data['Request'])
if row_data['Request'] == 'GET':
#if row_data['Request'] == 'GET' or row_data['Request'] == 'HTTPS' :
if '.' in row_data['domain'] or row_data['domain'] == '-' :
pass
else :
list_data.append(row_data)
#print(list_data)
group_domain(list_data)
def get_domainname(domainname):
if 'http://' not in domainname and 'https://' not in domainname and 'ftp://' not in domainname:
domainname = 'http://' + domainname
subdomain,domain,suf = tldextract.extract(domainname)
if domain in 'edgesuite' :
subdomain,domain,suf = tldextract.extract(subdomain)
elif subdomain == 'ads' :
domain = '-'
elif domain == '':
domain = '-'
return domain
def group_domain(list_data) :
dmain = {}
dmain["facebook"] = [('fbcdn'),('facebook'),('fb')]
dmain['apple'] = [('apple'),('mail')]
dmain['ku'] = [('ku'),('kasetsart')]
dmain["line"] = [('naver'),('line-apps'),('line'),('line-cdn'),('linetv')]
dmain["baidu"] = [('baidu')]
dmain['avast'] = [('avast')]
dmain['microsoft'] = [('microsoft')]
dmain["google"] = [('google'),('google'),('google-analytics')]
dmain["steam"] = [('steamcontent'),('steamstatic'),('steampowered')]
dmain["youtube"] = [('googlevideo'),('youtube'),('youtu'),('youtube-nocookie')]
dmain["garena"] = [('garenanow')]
dmain['icould'] = [('icloud')]
dmain["twitter"] = [('twimg'),('twitter')]
dmain['instagram'] = [('instagram')]
dmain['adobe'] = [('adobe')]
dmain["wechat"] = [('wechat'),('qq')]
dmain["sanook"] = [('isanook'),('fsanook'),('sanook')]
dmain['kapook'] = [('kapook')]
dmain['shopee'] = [('shopee')]
dmain['videe'] = [('videe')]
dmain['live'] = [('live')]
dmain['outlook'] = [('outlook')]
dmain['android'] = [('android')]
dmain["pantip"] = [('pantip'),('ptcdn'),('mikelab')]
dmain['teamviewer'] = [('teamviewer')]
dmain["msn"] = [('msn'),('s-msn')]
dmain["windowsupdate"] = [('windowsupdate')]
dmain["springserve"] = [('springserve')]
dmain["chula"] = [('chula')]
dmain['lkqd'] = [('lkqd')]
dmain['ptvcdn'] = [('ptvcdn')]
dmain['gstatic'] = [('gstatic'),('gstatic')]
dmain['googlesyndication'] = [('googlesyndication')]
dmain['akami'] = [('akami')]
dmain['manager'] = [('manager')]
dmain['adobe'] = [('adobe'),('adobesc')]
dmain['mozilla'] = [('mozilla'),('mozillamessaging')]
dmain['firefox'] = [('firefoxplugin'),('firefoxusercontent'),('firefox')]
dmain['aniview'] = [('aniview'),('ani-view')]
dmain['addthis'] = [('addthis'),('addthisedge')]
dmain['tapjoy'] = [('tapjoy'),('tapjoyads')]
list_domain = []
list_not_group = []
list_save_cvs = []
for i in range(len(list_data)) :
for d in dmain :
for word in dmain[d] :
if '.'+str(word)+'.' in '.'+list_data[i]['domain']+'.' :
list_data[i]['domain'] = d
list_data = sorted(list_data, key=getKey)
print(list_data)
#sequence_gather(list_data)
check_user(list_data)
def check_user(list_data):
list_new_IP = []
list_link2 = sorted(list_data, key=lambda x:(x['IP'],x['time'],x['domain']))
link = [list_link2[0]]
for i in range(len(list_link2)) :
if '_' in link[-1]['IP'] :
IP = link[-1]['IP'].split("_")[0]
print(IP)
if IP == list_link2[i]['IP']:
list_link2[i]['IP'] = link[-1]['IP']
if math.fabs(cal_time(link[-1]['time'])-cal_time(list_link2[i]['time'])) > 1800:
list_link2[i]["IP"] = list_link2[i]["IP"]+"_"+str(i)
link.append(list_link2[i])
else :
link.append(list_link2[i])
else :
link.append(list_link2[i])
else :
if link[-1]['IP'] == list_link2[i]['IP']:
if math.fabs(cal_time(link[-1]['time'])-cal_time(list_link2[i]['time'])) > 1800:
list_link2[i]["IP"] = list_link2[i]["IP"]+"_"+str(i)
link.append(list_link2[i])
else :
link.append(list_link2[i])
else :
link.append(list_link2[i])
#print(link)
sequence_gather(link)
def getKey(item):
return item['IP']
def cal_time(data):
time_split = data.split(".")
hour = time_split[0][-4:-2]
minute = time_split[0][-2:]
second = time_split[1]
time = (int(hour)*3600)+(int(minute)*60)+(int(second)*10)
return time
def sequence_gather(list_data):
list_IP = []
list_sequence = {}
list_link2 = sorted(list_data, key=lambda x:(x['IP'],x['time'],x['domain']))
list_link = sorted(list_data, key=lambda x:(x['IP'],x['domain'],x['time']))
print(list_link)
link = [list_link[0]]
for i in range(1,len(list_link)):
if link[-1]['domain'] == list_link[i]['domain']:
if math.fabs(cal_time(link[-1]['time'])-cal_time(list_link[i]['time'])) > 60:
link.append(list_link[i])
else:
link.append(list_link[i])
link = sorted(link, key=lambda x:(x['IP'],x['time'],x['domain']))
compath = os.path.join(PATH_SAVE_LOG, 'input_without_ip_test.txt')
with open(compath,'a') as f:
for i in list_link:
f.writelines(str(i)+'\n')
compath2 = os.path.join(PATH_SAVE_LOG, 'input_without_ip_test2.txt')
with open(compath2,'a') as f:
for i in list_link2:
f.writelines(str(i)+'\n')
compath3 = os.path.join(PATH_SAVE_LOG, 'input_without_ip_test3.txt')
with open(compath3,'a') as f:
for i in link:
f.writelines(str(i)+'\n')
list_link2=[]
list_link=[]
print("///////////////////////////////////////////")
for i in range(len(link)-1) :
print(i)
print(link[i]['domain'])
if(link[i]['IP'] == link[i+1]['IP']):
print(link[i]['IP'])
if(link[i]['domain'] != link[i+1]['domain']):
tempIP = str(link[i]['IP'])
tempdomain = link[i]['domain']
if tempIP not in list_sequence.keys():
list_sequence.update({tempIP:[tempdomain]})
else :
list_sequence[tempIP].append(tempdomain)
print(len(list_sequence))
compath = os.path.join(PATH_SAVE_LOG, 'input_without_ip.txt')
count = 0
with open(compath,'a') as f:
for key in list_sequence:
temp = ''
for value in range(len(list_sequence[key])):
temp += list_sequence[key][value]+','
count +=1
temp = temp[:-1]
print(count)
f.writelines(key+" "+temp+'\n')
if __name__ == '__main__':
start = timeit.default_timer()
readFile()
#check_user([{'time': '201803120900.2', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803121000.2', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803121000.5', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803121100.2', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803120900.3', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'sa'}])
#cal_time('201803120900.2')
stop = timeit.default_timer()
print(stop - start)
#readAllurl() | #print(PATH_WEB_LOG_FILE%folder%file_name%folder%minute%sub)
with open (PATH_WEB_LOG_FILE%(folder%(file_name%(folder%(minute%(sub))))),'r',encoding='utf-8', errors='ignore') as file: | random_line_split |
input_without_IP_group1min.py | import csv
import os.path
import collections
import tldextract
import itertools
from urllib.parse import urlsplit
from urllib.parse import urlparse
from datetime import datetime
import timeit
import math
PATH_WEB_LOG_FILE = '/media/moojokeubuntu/AA6259046258D6A3/AnotherDrive/Project/weblog/data/weblog-20180312_2/12/%s'
PATH_SAVE_LOG = '/home/moojokeubuntu/KU/4_2/RealProject/webpattern/Input'
def readFile():
list_folder_name = ["09%s",
"10%s",
"11%s"]
list_file_name =["/web-20180312%s"]
list_minute_name = ["00%s",
"01%s",
"02%s",
"03%s",
"04%s",
"05%s",
"06%s",
"07%s",
"08%s",
"09%s",
"10%s",
"11%s",
"12%s",
"13%s",
"14%s",
"15%s",
"16%s",
"17%s",
"18%s",
"19%s",
"20%s",
"21%s",
"22%s",
"23%s",
"24%s",
"25%s",
"26%s",
"27%s",
"28%s",
"29%s",
"30%s",
"31%s",
"32%s",
"33%s",
"34%s",
"35%s",
"36%s",
"37%s",
"38%s",
"39%s",
"40%s",
"41%s",
"42%s",
"43%s",
"44%s",
"45%s",
"46%s",
"47%s",
"48%s",
"49%s",
"50%s",
"51%s",
"52%s",
"53%s",
"54%s",
"55%s",
"56%s",
"57%s",
"58%s",
"59%s"]
list_sub_name = [".0.txt",".1.txt",".2.txt",".3.txt",".4.txt",".5.txt"]
list_data = []
list_save = []
list_unique_len = []
for folder in list_folder_name :
for file_name in list_file_name :
for minute in list_minute_name :
for sub in list_sub_name :
#print(PATH_WEB_LOG_FILE%folder%file_name%folder%minute%sub)
with open (PATH_WEB_LOG_FILE%(folder%(file_name%(folder%(minute%(sub))))),'r',encoding='utf-8', errors='ignore') as file:
for line in file:
line_split = line.split(" ")
row_data = {}
row_data['Request'] = line_split[15]
time = file_name%(folder%(minute%(sub)))
time_after = time[5:-4]
realtime = time_after.split(".txt")[0]
row_data['time'] = time_after
row_data['domain'] = get_domainname(line_split[16])
row_data['IP'] = line_split[10]
print(time_after)
print(row_data['Request'])
if row_data['Request'] == 'GET':
#if row_data['Request'] == 'GET' or row_data['Request'] == 'HTTPS' :
if '.' in row_data['domain'] or row_data['domain'] == '-' :
pass
else :
list_data.append(row_data)
#print(list_data)
group_domain(list_data)
def get_domainname(domainname):
if 'http://' not in domainname and 'https://' not in domainname and 'ftp://' not in domainname:
domainname = 'http://' + domainname
subdomain,domain,suf = tldextract.extract(domainname)
if domain in 'edgesuite' :
subdomain,domain,suf = tldextract.extract(subdomain)
elif subdomain == 'ads' :
domain = '-'
elif domain == '':
domain = '-'
return domain
def group_domain(list_data) :
dmain = {}
dmain["facebook"] = [('fbcdn'),('facebook'),('fb')]
dmain['apple'] = [('apple'),('mail')]
dmain['ku'] = [('ku'),('kasetsart')]
dmain["line"] = [('naver'),('line-apps'),('line'),('line-cdn'),('linetv')]
dmain["baidu"] = [('baidu')]
dmain['avast'] = [('avast')]
dmain['microsoft'] = [('microsoft')]
dmain["google"] = [('google'),('google'),('google-analytics')]
dmain["steam"] = [('steamcontent'),('steamstatic'),('steampowered')]
dmain["youtube"] = [('googlevideo'),('youtube'),('youtu'),('youtube-nocookie')]
dmain["garena"] = [('garenanow')]
dmain['icould'] = [('icloud')]
dmain["twitter"] = [('twimg'),('twitter')]
dmain['instagram'] = [('instagram')]
dmain['adobe'] = [('adobe')]
dmain["wechat"] = [('wechat'),('qq')]
dmain["sanook"] = [('isanook'),('fsanook'),('sanook')]
dmain['kapook'] = [('kapook')]
dmain['shopee'] = [('shopee')]
dmain['videe'] = [('videe')]
dmain['live'] = [('live')]
dmain['outlook'] = [('outlook')]
dmain['android'] = [('android')]
dmain["pantip"] = [('pantip'),('ptcdn'),('mikelab')]
dmain['teamviewer'] = [('teamviewer')]
dmain["msn"] = [('msn'),('s-msn')]
dmain["windowsupdate"] = [('windowsupdate')]
dmain["springserve"] = [('springserve')]
dmain["chula"] = [('chula')]
dmain['lkqd'] = [('lkqd')]
dmain['ptvcdn'] = [('ptvcdn')]
dmain['gstatic'] = [('gstatic'),('gstatic')]
dmain['googlesyndication'] = [('googlesyndication')]
dmain['akami'] = [('akami')]
dmain['manager'] = [('manager')]
dmain['adobe'] = [('adobe'),('adobesc')]
dmain['mozilla'] = [('mozilla'),('mozillamessaging')]
dmain['firefox'] = [('firefoxplugin'),('firefoxusercontent'),('firefox')]
dmain['aniview'] = [('aniview'),('ani-view')]
dmain['addthis'] = [('addthis'),('addthisedge')]
dmain['tapjoy'] = [('tapjoy'),('tapjoyads')]
list_domain = []
list_not_group = []
list_save_cvs = []
for i in range(len(list_data)) :
for d in dmain :
for word in dmain[d] :
if '.'+str(word)+'.' in '.'+list_data[i]['domain']+'.' :
list_data[i]['domain'] = d
list_data = sorted(list_data, key=getKey)
print(list_data)
#sequence_gather(list_data)
check_user(list_data)
def check_user(list_data):
list_new_IP = []
list_link2 = sorted(list_data, key=lambda x:(x['IP'],x['time'],x['domain']))
link = [list_link2[0]]
for i in range(len(list_link2)) :
if '_' in link[-1]['IP'] :
IP = link[-1]['IP'].split("_")[0]
print(IP)
if IP == list_link2[i]['IP']:
list_link2[i]['IP'] = link[-1]['IP']
if math.fabs(cal_time(link[-1]['time'])-cal_time(list_link2[i]['time'])) > 1800:
list_link2[i]["IP"] = list_link2[i]["IP"]+"_"+str(i)
link.append(list_link2[i])
else :
link.append(list_link2[i])
else :
link.append(list_link2[i])
else :
if link[-1]['IP'] == list_link2[i]['IP']:
if math.fabs(cal_time(link[-1]['time'])-cal_time(list_link2[i]['time'])) > 1800:
list_link2[i]["IP"] = list_link2[i]["IP"]+"_"+str(i)
link.append(list_link2[i])
else :
link.append(list_link2[i])
else :
link.append(list_link2[i])
#print(link)
sequence_gather(link)
def getKey(item):
return item['IP']
def cal_time(data):
time_split = data.split(".")
hour = time_split[0][-4:-2]
minute = time_split[0][-2:]
second = time_split[1]
time = (int(hour)*3600)+(int(minute)*60)+(int(second)*10)
return time
def | (list_data):
list_IP = []
list_sequence = {}
list_link2 = sorted(list_data, key=lambda x:(x['IP'],x['time'],x['domain']))
list_link = sorted(list_data, key=lambda x:(x['IP'],x['domain'],x['time']))
print(list_link)
link = [list_link[0]]
for i in range(1,len(list_link)):
if link[-1]['domain'] == list_link[i]['domain']:
if math.fabs(cal_time(link[-1]['time'])-cal_time(list_link[i]['time'])) > 60:
link.append(list_link[i])
else:
link.append(list_link[i])
link = sorted(link, key=lambda x:(x['IP'],x['time'],x['domain']))
compath = os.path.join(PATH_SAVE_LOG, 'input_without_ip_test.txt')
with open(compath,'a') as f:
for i in list_link:
f.writelines(str(i)+'\n')
compath2 = os.path.join(PATH_SAVE_LOG, 'input_without_ip_test2.txt')
with open(compath2,'a') as f:
for i in list_link2:
f.writelines(str(i)+'\n')
compath3 = os.path.join(PATH_SAVE_LOG, 'input_without_ip_test3.txt')
with open(compath3,'a') as f:
for i in link:
f.writelines(str(i)+'\n')
list_link2=[]
list_link=[]
print("///////////////////////////////////////////")
for i in range(len(link)-1) :
print(i)
print(link[i]['domain'])
if(link[i]['IP'] == link[i+1]['IP']):
print(link[i]['IP'])
if(link[i]['domain'] != link[i+1]['domain']):
tempIP = str(link[i]['IP'])
tempdomain = link[i]['domain']
if tempIP not in list_sequence.keys():
list_sequence.update({tempIP:[tempdomain]})
else :
list_sequence[tempIP].append(tempdomain)
print(len(list_sequence))
compath = os.path.join(PATH_SAVE_LOG, 'input_without_ip.txt')
count = 0
with open(compath,'a') as f:
for key in list_sequence:
temp = ''
for value in range(len(list_sequence[key])):
temp += list_sequence[key][value]+','
count +=1
temp = temp[:-1]
print(count)
f.writelines(key+" "+temp+'\n')
if __name__ == '__main__':
start = timeit.default_timer()
readFile()
#check_user([{'time': '201803120900.2', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803121000.2', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803121000.5', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803121100.2', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803120900.3', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'sa'}])
#cal_time('201803120900.2')
stop = timeit.default_timer()
print(stop - start)
#readAllurl() | sequence_gather | identifier_name |
input_without_IP_group1min.py | import csv
import os.path
import collections
import tldextract
import itertools
from urllib.parse import urlsplit
from urllib.parse import urlparse
from datetime import datetime
import timeit
import math
PATH_WEB_LOG_FILE = '/media/moojokeubuntu/AA6259046258D6A3/AnotherDrive/Project/weblog/data/weblog-20180312_2/12/%s'
PATH_SAVE_LOG = '/home/moojokeubuntu/KU/4_2/RealProject/webpattern/Input'
def readFile():
list_folder_name = ["09%s",
"10%s",
"11%s"]
list_file_name =["/web-20180312%s"]
list_minute_name = ["00%s",
"01%s",
"02%s",
"03%s",
"04%s",
"05%s",
"06%s",
"07%s",
"08%s",
"09%s",
"10%s",
"11%s",
"12%s",
"13%s",
"14%s",
"15%s",
"16%s",
"17%s",
"18%s",
"19%s",
"20%s",
"21%s",
"22%s",
"23%s",
"24%s",
"25%s",
"26%s",
"27%s",
"28%s",
"29%s",
"30%s",
"31%s",
"32%s",
"33%s",
"34%s",
"35%s",
"36%s",
"37%s",
"38%s",
"39%s",
"40%s",
"41%s",
"42%s",
"43%s",
"44%s",
"45%s",
"46%s",
"47%s",
"48%s",
"49%s",
"50%s",
"51%s",
"52%s",
"53%s",
"54%s",
"55%s",
"56%s",
"57%s",
"58%s",
"59%s"]
list_sub_name = [".0.txt",".1.txt",".2.txt",".3.txt",".4.txt",".5.txt"]
list_data = []
list_save = []
list_unique_len = []
for folder in list_folder_name :
for file_name in list_file_name :
for minute in list_minute_name :
for sub in list_sub_name :
#print(PATH_WEB_LOG_FILE%folder%file_name%folder%minute%sub)
|
#print(list_data)
group_domain(list_data)
def get_domainname(domainname):
    """Extract the registered domain name from a URL or bare host string.

    Returns '-' for ad hosts and for inputs where no domain could be
    extracted, so callers can filter those records out.
    """
    # tldextract parses more reliably with a scheme present.  The original
    # used a substring test ('http://' not in domainname), which misfired
    # on strings embedding a scheme later on; test the prefix instead.
    if not domainname.startswith(('http://', 'https://', 'ftp://')):
        domainname = 'http://' + domainname
    subdomain, domain, suf = tldextract.extract(domainname)
    if domain == 'edgesuite':
        # Akamai edgesuite hosts carry the real site name in the subdomain.
        # The original tested `domain in 'edgesuite'` (substring of the
        # literal), which also matched the empty string and fragments like
        # 'edge'; exact comparison is what was intended.
        subdomain, domain, suf = tldextract.extract(subdomain)
    elif subdomain == 'ads':
        domain = '-'
    elif domain == '':
        domain = '-'
    return domain
def group_domain(list_data) :
    """Rewrite each record's domain to a canonical service-group name,
    sort the records by IP, and hand them on to check_user().

    A record matches a group when one of the group's words equals its
    domain; the comparison wraps both sides in dots, so it is an
    exact-word match, as in the original code.  Fixes over the original:
      * dmain['adobe'] was assigned twice (the first assignment was dead
        code) -- merged into one entry;
      * duplicated words inside the 'google' and 'gstatic' lists removed;
      * unused locals (list_domain, list_not_group, list_save_cvs) removed;
      * matching stops at the first matching group -- safe because the
        wrapped-word test is exact and a rewritten domain can only
        re-match its own group name.
    """
    dmain = {}
    dmain["facebook"] = ['fbcdn', 'facebook', 'fb']
    dmain['apple'] = ['apple', 'mail']
    dmain['ku'] = ['ku', 'kasetsart']
    dmain["line"] = ['naver', 'line-apps', 'line', 'line-cdn', 'linetv']
    dmain["baidu"] = ['baidu']
    dmain['avast'] = ['avast']
    dmain['microsoft'] = ['microsoft']
    dmain["google"] = ['google', 'google-analytics']
    dmain["steam"] = ['steamcontent', 'steamstatic', 'steampowered']
    dmain["youtube"] = ['googlevideo', 'youtube', 'youtu', 'youtube-nocookie']
    dmain["garena"] = ['garenanow']
    dmain['icould'] = ['icloud']
    dmain["twitter"] = ['twimg', 'twitter']
    dmain['instagram'] = ['instagram']
    dmain['adobe'] = ['adobe', 'adobesc']
    dmain["wechat"] = ['wechat', 'qq']
    dmain["sanook"] = ['isanook', 'fsanook', 'sanook']
    dmain['kapook'] = ['kapook']
    dmain['shopee'] = ['shopee']
    dmain['videe'] = ['videe']
    dmain['live'] = ['live']
    dmain['outlook'] = ['outlook']
    dmain['android'] = ['android']
    dmain["pantip"] = ['pantip', 'ptcdn', 'mikelab']
    dmain['teamviewer'] = ['teamviewer']
    dmain["msn"] = ['msn', 's-msn']
    dmain["windowsupdate"] = ['windowsupdate']
    dmain["springserve"] = ['springserve']
    dmain["chula"] = ['chula']
    dmain['lkqd'] = ['lkqd']
    dmain['ptvcdn'] = ['ptvcdn']
    dmain['gstatic'] = ['gstatic']
    dmain['googlesyndication'] = ['googlesyndication']
    dmain['akami'] = ['akami']
    dmain['manager'] = ['manager']
    dmain['mozilla'] = ['mozilla', 'mozillamessaging']
    dmain['firefox'] = ['firefoxplugin', 'firefoxusercontent', 'firefox']
    dmain['aniview'] = ['aniview', 'ani-view']
    dmain['addthis'] = ['addthis', 'addthisedge']
    dmain['tapjoy'] = ['tapjoy', 'tapjoyads']
    for record in list_data:
        wrapped = '.' + record['domain'] + '.'
        for group, words in dmain.items():
            if any('.' + word + '.' in wrapped for word in words):
                record['domain'] = group
                break
    list_data = sorted(list_data, key=getKey)
    print(list_data)
    #sequence_gather(list_data)
    check_user(list_data)
def check_user(list_data):
    # Split one IP into several pseudo-users ("<IP>_<i>") whenever two
    # consecutive requests from the same IP are more than 1800 s apart,
    # on the assumption that a long gap means a different session.
    # The resulting records are passed on to sequence_gather().
    list_new_IP = []  # NOTE(review): never used -- left as-is
    list_link2 = sorted(list_data, key=lambda x:(x['IP'],x['time'],x['domain']))
    link = [list_link2[0]]
    # NOTE(review): element 0 is seeded above AND appended again by the
    # first loop iteration (range starts at 0), so it appears twice.
    for i in range(len(list_link2)) :
        if '_' in link[-1]['IP'] :
            # Previous record already belongs to a split pseudo-user:
            # compare against the base IP (the part before '_').
            IP = link[-1]['IP'].split("_")[0]
            print(IP)
            if IP == list_link2[i]['IP']:
                # Same physical IP: inherit the pseudo-user id, then
                # split again if the time gap exceeds 30 minutes.
                list_link2[i]['IP'] = link[-1]['IP']
                if math.fabs(cal_time(link[-1]['time'])-cal_time(list_link2[i]['time'])) > 1800:
                    list_link2[i]["IP"] = list_link2[i]["IP"]+"_"+str(i)
                    link.append(list_link2[i])
                else :
                    link.append(list_link2[i])
            else :
                link.append(list_link2[i])
        else :
            if link[-1]['IP'] == list_link2[i]['IP']:
                # Same IP as previous record: start a new pseudo-user on
                # a gap longer than 1800 s, otherwise keep the IP as-is.
                if math.fabs(cal_time(link[-1]['time'])-cal_time(list_link2[i]['time'])) > 1800:
                    list_link2[i]["IP"] = list_link2[i]["IP"]+"_"+str(i)
                    link.append(list_link2[i])
                else :
                    link.append(list_link2[i])
            else :
                link.append(list_link2[i])
    #print(link)
    sequence_gather(link)
def getKey(item):
    """Sort-key helper: orders a web-log record by its source IP string."""
    ip = item['IP']
    return ip
def cal_time(data):
    """Convert a 'YYYYMMDDhhmm.d' stamp into seconds within the day.

    The digit after the dot is a tenth-of-a-minute slot; it is scaled by
    10 into seconds, exactly as the original implementation did.
    """
    stamp, _, tenth = data.partition(".")
    hours = int(stamp[-4:-2])
    minutes = int(stamp[-2:])
    return hours * 3600 + minutes * 60 + int(tenth) * 10
def sequence_gather(list_data):
    # Build, per pseudo-user IP, the ordered sequence of domain
    # transitions, and append them to input_without_ip.txt as
    # "<IP> dom1,dom2,...".  Also dumps three intermediate sorted views
    # of the records to *_test*.txt files for debugging.
    list_IP = []        # NOTE(review): unused -- left as-is
    list_sequence = {}  # IP -> ordered list of visited domains
    list_link2 = sorted(list_data, key=lambda x:(x['IP'],x['time'],x['domain']))
    list_link = sorted(list_data, key=lambda x:(x['IP'],x['domain'],x['time']))
    print(list_link)
    link = [list_link[0]]
    # NOTE(review): both branches of the inner if append the record, so
    # the 60-second comparison has no effect -- `link` ends up as a copy
    # of list_link (with element 0 duplicated is avoided since range
    # starts at 1).
    for i in range(1,len(list_link)):
        if link[-1]['domain'] == list_link[i]['domain']:
            if math.fabs(cal_time(link[-1]['time'])-cal_time(list_link[i]['time'])) > 60:
                link.append(list_link[i])
            else:
                link.append(list_link[i])
        else:
            link.append(list_link[i])
    link = sorted(link, key=lambda x:(x['IP'],x['time'],x['domain']))
    # Debug dump: records sorted by (IP, domain, time).
    compath = os.path.join(PATH_SAVE_LOG, 'input_without_ip_test.txt')
    with open(compath,'a') as f:
        for i in list_link:
            f.writelines(str(i)+'\n')
    # Debug dump: records sorted by (IP, time, domain).
    compath2 = os.path.join(PATH_SAVE_LOG, 'input_without_ip_test2.txt')
    with open(compath2,'a') as f:
        for i in list_link2:
            f.writelines(str(i)+'\n')
    # Debug dump: the filtered/re-sorted working list.
    compath3 = os.path.join(PATH_SAVE_LOG, 'input_without_ip_test3.txt')
    with open(compath3,'a') as f:
        for i in link:
            f.writelines(str(i)+'\n')
    list_link2=[]
    list_link=[]
    print("///////////////////////////////////////////")
    # Record a domain transition whenever the same pseudo-user requests a
    # DIFFERENT domain next; each user's final record is never emitted.
    for i in range(len(link)-1) :
        print(i)
        print(link[i]['domain'])
        if(link[i]['IP'] == link[i+1]['IP']):
            print(link[i]['IP'])
            if(link[i]['domain'] != link[i+1]['domain']):
                tempIP = str(link[i]['IP'])
                tempdomain = link[i]['domain']
                if tempIP not in list_sequence.keys():
                    list_sequence.update({tempIP:[tempdomain]})
                else :
                    list_sequence[tempIP].append(tempdomain)
    print(len(list_sequence))
    # Append one "<IP> dom1,dom2,..." line per pseudo-user.
    compath = os.path.join(PATH_SAVE_LOG, 'input_without_ip.txt')
    count = 0
    with open(compath,'a') as f:
        for key in list_sequence:
            temp = ''
            for value in range(len(list_sequence[key])):
                temp += list_sequence[key][value]+','
                count +=1
            # Drop the trailing comma.
            temp = temp[:-1]
            print(count)
            f.writelines(key+" "+temp+'\n')
if __name__ == '__main__':
    # Time one full pipeline run (readFile -> group_domain -> check_user
    # -> sequence_gather) and print the elapsed seconds.
    start = timeit.default_timer()
    readFile()
    #check_user([{'time': '201803120900.2', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803121000.2', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803121000.5', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803121100.2', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'ku'},{'time': '201803120900.3', 'IP': '1.0.213.87', 'Request': 'GET', 'domain': 'sa'}])
    #cal_time('201803120900.2')
    stop = timeit.default_timer()
    print(stop - start)
#readAllurl() | with open (PATH_WEB_LOG_FILE%(folder%(file_name%(folder%(minute%(sub))))),'r',encoding='utf-8', errors='ignore') as file:
for line in file:
line_split = line.split(" ")
row_data = {}
row_data['Request'] = line_split[15]
time = file_name%(folder%(minute%(sub)))
time_after = time[5:-4]
realtime = time_after.split(".txt")[0]
row_data['time'] = time_after
row_data['domain'] = get_domainname(line_split[16])
row_data['IP'] = line_split[10]
print(time_after)
print(row_data['Request'])
if row_data['Request'] == 'GET':
#if row_data['Request'] == 'GET' or row_data['Request'] == 'HTTPS' :
if '.' in row_data['domain'] or row_data['domain'] == '-' :
pass
else :
list_data.append(row_data) | conditional_block |
ObsLightGitManager.py | #
# Copyright 2011-2012, Intel Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
'''
Created on 23 May 2012
@author: Ronan Le Martret
@author: Florent Vennetier
'''
import os
import time
import ObsLightErr
from ObsLightObject import ObsLightObject
from ObsLightSubprocess import SubprocessCrt
from ObsLightUtils import isNonEmptyString
class ObsLightGitManager(ObsLightObject):
'''
Manage the internal Git repository used to generate patches on packages.
'''
def __init__(self, projectChroot):
    # `projectChroot` provides access to the chroot jail in which all
    # git work trees and git directories live.
    ObsLightObject.__init__(self)
    self.__chroot = projectChroot
    self.__mySubprocessCrt = SubprocessCrt()
    # Tag placed on the repository right after the initial import; used
    # as the baseline when generating patches (see resetToPrep).
    self.initialTag = "initial-prep"
def __subprocess(self, command=None, stdout=False, noOutPut=False):
    # Thin wrapper around SubprocessCrt.execSubprocess for a single command.
    return self.__mySubprocessCrt.execSubprocess(command, stdout=stdout, noOutPut=noOutPut)
def __listSubprocess(self, command=None):
    """
    Run each command of `command` in sequence; return the first
    non-zero (failing) status, or the last status when all succeed
    (0 for an empty list).

    The original returned only the LAST command's status, so an early
    failure followed by a successful command was silently masked from
    callers that compare the result against 0 (e.g. initGitWatch).
    """
    res = 0
    for c in command:
        res = self.__mySubprocessCrt.execSubprocess(c)
        if res != 0:
            # Fail fast: do not run the remaining commands on error.
            return res
    return res
def ___execPipeSubprocess(self, command, command2):
    # Run `command` with its stdout piped into `command2` (cmd | cmd2).
    return self.__mySubprocessCrt.execPipeSubprocess(command, command2)
def prepareGitCommand(self, workTree, subcommand, gitDir):
    """
    Build a git command line rooted in the project chroot.

    `workTree` and `gitDir` are chroot-relative paths; both are made
    absolute by prepending the chroot directory, then `subcommand` is
    appended.  Output example:
      git --git-dir=<gitDir> --work-tree=<workTree> <subcommand>
    """
    chrootRoot = self.__chroot.getDirectory()
    prefix = "git --git-dir=%s --work-tree=%s " % (chrootRoot + gitDir,
                                                   chrootRoot + workTree)
    return prefix + subcommand
def makeArchiveGitSubcommand(self, prefix, revision=u"HEAD", outputFilePath=None):
    """
    Build a 'git archive' subcommand string.

    Archive entries are prefixed with `prefix`/ and taken from
    `revision`.  When `outputFilePath` is None no -o option is added,
    so git writes the (tar) archive to stdout.
    """
    subcommand = "archive --prefix=%s/ %s " % (prefix, revision)
    if outputFilePath is not None:
        subcommand += " -o %s" % outputFilePath
    return subcommand
def checkGitUserConfig(self, workTree, gitDir):
    """
    Git complains if you don't set 'user.name' and 'user.email' config
    parameters. This method checks if they are set, and in case they
    aren't, set them.
    """
    # Placeholder identity written when the repository has no user config.
    confParams = {"user.email": "obslight@example.com", "user.name": "OBS Light"}
    for param, value in confParams.iteritems():
        # 'git config <param>' with no value prints the current setting.
        cmd = self.prepareGitCommand(workTree, "config " + param, gitDir)
        res = self.__subprocess(cmd, stdout=True, noOutPut=True)
        self.logger.debug("Git parameter '%s': '%s'" % (param, res))
        if not isNonEmptyString(res):
            # Parameter unset (or lookup failed): write the placeholder.
            self.logger.debug(" -> Setting it to '%s'" % (value))
            cmd2 = self.prepareGitCommand(workTree,
                                          'config %s "%s"' % (param, value),
                                          gitDir)
            res2 = self.__subprocess(cmd2)
            if res2 != 0:
                # Non-fatal: warn and carry on.
                msg = 'Failed to set git parameter "%s", next git operation may fail!'
                self.logger.warning(msg % param)
def execMakeArchiveGitSubcommand(self,
                                 packagePath,
                                 outputFilePath,
                                 prefix,
                                 packageCurrentGitDirectory):
    # Export the package work tree as an archive at `outputFilePath`
    # (chroot-relative).  For .tar.gz outputs, a plain .tar archive is
    # produced first and gzip-compressed afterwards.  Returns the status
    # of the last command run (0 on success).
    absOutputFilePath = self.__chroot.getDirectory()
    # TODO: make something more generic (gz, bz2, xz...)
    if outputFilePath.endswith(".tar.gz"):
        # git archive does not know .tar.gz,
        # we have to compress the file afterwards
        absOutputFilePath += outputFilePath[:-len('.gz')]
    else:
        absOutputFilePath += outputFilePath
    archiveSubCommand = self.makeArchiveGitSubcommand(prefix,
                                                      outputFilePath=absOutputFilePath)
    command = self.prepareGitCommand(packagePath,
                                     archiveSubCommand,
                                     packageCurrentGitDirectory)
    res = self.__subprocess(command)
    if res != 0:
        return res
    if outputFilePath.endswith(".tar.gz"):
        # Without '-f' user will be prompted if .gz file already exists
        command = "gzip -f %s" % absOutputFilePath
        res = self.__subprocess(command)
    return res
def findEmptyDirectory(self, package):
    # git ignores empty directories so we must save them into a file.
    # The paths (relative to the package build directory) of all empty
    # directories are written, one per line, to .emptyDirectory.
    projectPath = self.__chroot.getDirectory() + package.getChrootBuildDirectory()
    res = []
    for root, dirs, files in os.walk(projectPath):
        if len(dirs) == 0 and len(files) == 0:
            # Strip the build-directory prefix to get a relative path.
            res.append(root.replace(projectPath + "/", ""))
    # TODO: move this file to BUILD/
    with open(projectPath + "/.emptyDirectory", 'w') as f:
        for d in res:
            f.write(d + "\n")
def initGitWatch(self, path, package):
    '''
    Initialize a Git repository in the specified path, and 'git add' everything.

    Raises ObsLightChRootError when `path` is None, when 'git init'
    fails, or when the initial add/commit/tag sequence fails.
    '''
    if path is None:
        raise ObsLightErr.ObsLightChRootError("Path is not defined in initGitWatch.")
    absPath = self.__chroot.getDirectory() + path
    pkgCurGitDir = package.getCurrentGitDirectory()
    # Ensure we have access rights on the directory
    res = self.__chroot.allowAccessToObslightGroup(os.path.dirname(pkgCurGitDir),
                                                   absolutePath=False)
    # Record empty directories (git cannot track them) before the first commit.
    self.findEmptyDirectory(package)
    timeString = time.strftime("%Y-%m-%d_%Hh%Mm%Ss")
    comment = '\"auto commit first commit %s\"' % timeString
    # Create .gitignore file.
    self.initGitignore(path, package)
    # NOTE(review): `res` here is still the access-rights status from
    # above; it is only checked after the intermediate steps ran.
    if res != 0:
        msg = "Failed to give access rights on '%s'. Git repository creation may fail."
        self.logger.warn(msg % os.path.dirname(pkgCurGitDir))
    res = self.__subprocess(self.prepareGitCommand(path, "init ", pkgCurGitDir))
    if res != 0:
        msg = "Creation of the git repository for %s failed. See the log for more information."
        raise ObsLightErr.ObsLightChRootError(msg % package.getName())
    # git refuses to commit without user.name/user.email; make sure they exist.
    self.checkGitUserConfig(path, pkgCurGitDir)
    command = []
    command.append(self.prepareGitCommand(path, "add " + absPath + "/\*", pkgCurGitDir))
    command.append(self.prepareGitCommand(path, "commit -a -m %s" % comment, pkgCurGitDir))
    command.append(self.prepareGitCommand(path, "tag %s" % self.initialTag , pkgCurGitDir))
    res = self.__listSubprocess(command=command)
    if res != 0:
        msg = "Initialization of the git repository for %s failed. "
        msg += "See the log for more information."
        raise ObsLightErr.ObsLightChRootError(msg % package.getName())
def | (self, path, package):
pkgCurGitDir = package.getCurrentGitDirectory()
res = self.__listSubprocess([self.prepareGitCommand(path, "checkout %s" % self.initialTag , pkgCurGitDir)])
return res
def initGitignore(self, path, package):
    """
    Seed the package's .gitignore (appending, never truncating) with the
    build list files and with .gitignore itself, so they never show up
    as untracked files.
    """
    ignoredEntries = ("debugfiles.list\n",
                      "debuglinks.list\n",
                      "debugsources.list\n",
                      ".gitignore\n")
    gitignorePath = self.__chroot.getDirectory() + path + "/.gitignore"
    with open(gitignorePath, 'a') as gitignoreFile:
        for entry in ignoredEntries:
            gitignoreFile.write(entry)
    # f.write("*.in\n")
def ignoreGitWatch(self,
                   package,
                   path=None,
                   commitComment="first build commit"):
    '''
    Add all Git untracked files of `path` to .gitignore
    and commit.

    Returns the status of the final 'git commit' command.
    '''
    if path is None:
        raise ObsLightErr.ObsLightChRootError("path is not defined in initGitWatch.")
    absPath = self.__chroot.getDirectory() + path
    timeString = time.strftime("%Y-%m-%d_%Hh%Mm%Ss")
    comment = '\"auto commit %s %s\"' % (commitComment, timeString)
    # 'git status -u -s' lists untracked files one per line ("?? <path>").
    command = self.prepareGitCommand(path, u"status -u -s ", package.getCurrentGitDirectory())
    #| sed -e 's/^[ \t]*//' " + u"| cut -d' ' -f2 >> %s/.gitignore" % absPath, package.getCurrentGitDirectory()
    res = self.__subprocess(command=command, stdout=True)
    # some packages modify their file rights, so we have to ensure
    # this file is writable
    self.__subprocess("sudo chmod -f a+w %s %s/.gitignore" % (absPath, absPath))
    with open(absPath + "/.gitignore", 'a') as f:
        # Only parse string output; presumably an int result signals a
        # subprocess failure -- TODO confirm against SubprocessCrt.
        if type(res) is not type(int()):
            for line in res.split("\n"):
                if len(line) > 0:
                    # Drop the status column, keep the file path.
                    line = " ".join(line.strip(" ").split(" ")[1:])
                    f.write(line + "\n")
    return self.__subprocess(self.prepareGitCommand(path,
                                                    u"commit -a -m %s" % comment,
                                                    package.getCurrentGitDirectory()))
def getCommitTag(self, path, package):
    '''
    Return the hash of the last Git commit of `package`, or None when
    no "commit" line is found in the log output.
    '''
    command = self.prepareGitCommand(path,
                                     " log HEAD --pretty=short -n 1 " ,
                                     package.getCurrentGitDirectory())
    result = self.__subprocess(command=command, stdout=True)
    for line in result.split("\n"):
        if line.startswith("commit "):
            # The original used str.strip("commit"), which strips ANY of
            # the characters c/o/m/i/t from both ends and could truncate
            # hashes starting or ending with those hex digits; slice the
            # fixed prefix off instead.
            return line[len("commit"):].strip()
    return None
def getListCommitTag(self, path, package):
    """Backward-compatible alias: delegates to getCommitTagList()."""
    return self.getCommitTagList(path, package)
def getCommitTagList(self, path, package):
    '''
    Return up to the 20 most recent commit hashes of `package`, as a
    list of (hash, "Comment") tuples, most recent first.
    '''
    command = self.prepareGitCommand(path,
                                     " log HEAD --pretty=short -n 20 ",
                                     package.getCurrentGitDirectory())
    logOutput = self.__subprocess(command=command, stdout=True)
    result = []
    for line in logOutput.split("\n"):
        if line.startswith("commit "):
            # The original used str.strip("commit "), which strips ANY of
            # the characters c/o/m/i/t/space from both ends and could
            # truncate the hash itself; remove the fixed prefix instead.
            result.append((line[len("commit "):].strip(), "Comment"))
    return result
def commitGit(self, mess, package):
    # Stage everything under the package build directory, create a
    # timestamped "auto commit" containing `mess`, then store the new
    # commit hash on the package via setSecondCommit().
    packagePath = package.getChrootBuildDirectory()
    command = []
    if packagePath is None:
        raise ObsLightErr.ObsLightChRootError("path is not defined in commitGit for .")
    timeString = time.strftime("%Y-%m-%d_%Hh%Mm%Ss")
    comment = '\"auto commit %s %s\"' % (mess, timeString)
    path = self.__chroot.getDirectory() + packagePath
    command.append(self.prepareGitCommand(packagePath,
                                          " add %s/\* " % (path),
                                          package.getCurrentGitDirectory()))
    command.append(self.prepareGitCommand(packagePath,
                                          " commit -a -m %s" % comment,
                                          package.getCurrentGitDirectory()))
    self.__listSubprocess(command=command)
    tag2 = self.getCommitTag(packagePath, package)
    package.setSecondCommit(tag2)
def createPatch(self, package, packagePath, tag1, tag2, patch):
    """
    Write the binary-safe diff between `tag1` and `tag2` of `package`
    into <packaging directory>/<patch>.  Always returns 0.
    """
    command = self.prepareGitCommand(packagePath,
                                     "diff -p -a --binary %s %s " % (tag1, tag2),
                                     package.getCurrentGitDirectory())
    res = self.__subprocess(command=command, stdout=True)
    pathPackagePackaging = package.getPackagingDirectiory()
    # The original opened the file with mode "w'" (stray quote in the
    # mode string); use plain write mode.
    with open(pathPackagePackaging + "/" + patch, "w") as f:
        f.write(res)
    return 0
def cloneGitpackage(url, path):
    """Clone the Git repository at `url` into `path`; returns the
    subprocess status."""
    cloneCommand = "git clone %s %s" % (url, path)
    return SubprocessCrt().execSubprocess(cloneCommand)
def updateGitpackage(path):
    """Run 'git pull' for the repository checked out at `path`; returns
    the subprocess status."""
    pullCommand = "git --git-dir=%s pull" % os.path.join(path, ".git")
    return SubprocessCrt().execSubprocess(pullCommand)
def commitGitpackage(path, message):
    """Commit all pending changes of the repository at `path`, using
    `message` as the commit message; returns the subprocess status."""
    commitCommand = ("git --work-tree=%s --git-dir=%s commit -a -m \"%s\""
                     % (path, os.path.join(path, ".git"), message))
    return SubprocessCrt().execSubprocess(commitCommand)
| resetToPrep | identifier_name |
ObsLightGitManager.py | #
# Copyright 2011-2012, Intel Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
'''
Created on 23 May 2012
@author: Ronan Le Martret
@author: Florent Vennetier
'''
import os
import time
import ObsLightErr
from ObsLightObject import ObsLightObject
from ObsLightSubprocess import SubprocessCrt
from ObsLightUtils import isNonEmptyString
class ObsLightGitManager(ObsLightObject):
'''
Manage the internal Git repository used to generate patches on packages.
'''
def __init__(self, projectChroot):
ObsLightObject.__init__(self)
self.__chroot = projectChroot
self.__mySubprocessCrt = SubprocessCrt()
self.initialTag = "initial-prep"
def __subprocess(self, command=None, stdout=False, noOutPut=False):
return self.__mySubprocessCrt.execSubprocess(command, stdout=stdout, noOutPut=noOutPut)
def __listSubprocess(self, command=None):
for c in command:
res = self.__mySubprocessCrt.execSubprocess(c)
return res
def ___execPipeSubprocess(self, command, command2):
|
def prepareGitCommand(self, workTree, subcommand, gitDir):
"""
Construct a Git command-line, setting its working tree to `workTree`,
and git directory to `gitDir`, and then appends `subcommand`.
Output example:
git --git-dir=<gitDir> --work-tree=<workTree> <subcommand>
"""
absWorkTree = self.__chroot.getDirectory() + workTree
absGitDir = self.__chroot.getDirectory() + gitDir
command = "git --git-dir=%s --work-tree=%s " % (absGitDir, absWorkTree)
command += subcommand
return command
def makeArchiveGitSubcommand(self, prefix, revision=u"HEAD", outputFilePath=None):
"""
Construct a Git 'archive' subcommand with auto-detected format.
If outputFilePath is None, format will be tar, and output will
be stdout.
"""
command = "archive --prefix=%s/ %s "
command = command % (prefix, revision)
if outputFilePath is not None:
command += " -o %s" % outputFilePath
return command
def checkGitUserConfig(self, workTree, gitDir):
"""
Git complains if you don't set 'user.name' and 'user.email' config
parameters. This method checks if they are set, and in case they
aren't, set them.
"""
confParams = {"user.email": "obslight@example.com", "user.name": "OBS Light"}
for param, value in confParams.iteritems():
cmd = self.prepareGitCommand(workTree, "config " + param, gitDir)
res = self.__subprocess(cmd, stdout=True, noOutPut=True)
self.logger.debug("Git parameter '%s': '%s'" % (param, res))
if not isNonEmptyString(res):
self.logger.debug(" -> Setting it to '%s'" % (value))
cmd2 = self.prepareGitCommand(workTree,
'config %s "%s"' % (param, value),
gitDir)
res2 = self.__subprocess(cmd2)
if res2 != 0:
msg = 'Failed to set git parameter "%s", next git operation may fail!'
self.logger.warning(msg % param)
def execMakeArchiveGitSubcommand(self,
packagePath,
outputFilePath,
prefix,
packageCurrentGitDirectory):
absOutputFilePath = self.__chroot.getDirectory()
# TODO: make something more generic (gz, bz2, xz...)
if outputFilePath.endswith(".tar.gz"):
# git archive does not know .tar.gz,
# we have to compress the file afterwards
absOutputFilePath += outputFilePath[:-len('.gz')]
else:
absOutputFilePath += outputFilePath
archiveSubCommand = self.makeArchiveGitSubcommand(prefix,
outputFilePath=absOutputFilePath)
command = self.prepareGitCommand(packagePath,
archiveSubCommand,
packageCurrentGitDirectory)
res = self.__subprocess(command)
if res != 0:
return res
if outputFilePath.endswith(".tar.gz"):
# Without '-f' user will be prompted if .gz file already exists
command = "gzip -f %s" % absOutputFilePath
res = self.__subprocess(command)
return res
def findEmptyDirectory(self, package):
# git ignores empty directories so we must save them into a file.
projectPath = self.__chroot.getDirectory() + package.getChrootBuildDirectory()
res = []
for root, dirs, files in os.walk(projectPath):
if len(dirs) == 0 and len(files) == 0:
res.append(root.replace(projectPath + "/", ""))
# TODO: move this file to BUILD/
with open(projectPath + "/.emptyDirectory", 'w') as f:
for d in res:
f.write(d + "\n")
def initGitWatch(self, path, package):
'''
Initialize a Git repository in the specified path, and 'git add' everything.
'''
if path is None:
raise ObsLightErr.ObsLightChRootError("Path is not defined in initGitWatch.")
absPath = self.__chroot.getDirectory() + path
pkgCurGitDir = package.getCurrentGitDirectory()
# Ensure we have access rights on the directory
res = self.__chroot.allowAccessToObslightGroup(os.path.dirname(pkgCurGitDir),
absolutePath=False)
self.findEmptyDirectory(package)
timeString = time.strftime("%Y-%m-%d_%Hh%Mm%Ss")
comment = '\"auto commit first commit %s\"' % timeString
# Create .gitignore file.
self.initGitignore(path, package)
if res != 0:
msg = "Failed to give access rights on '%s'. Git repository creation may fail."
self.logger.warn(msg % os.path.dirname(pkgCurGitDir))
res = self.__subprocess(self.prepareGitCommand(path, "init ", pkgCurGitDir))
if res != 0:
msg = "Creation of the git repository for %s failed. See the log for more information."
raise ObsLightErr.ObsLightChRootError(msg % package.getName())
self.checkGitUserConfig(path, pkgCurGitDir)
command = []
command.append(self.prepareGitCommand(path, "add " + absPath + "/\*", pkgCurGitDir))
command.append(self.prepareGitCommand(path, "commit -a -m %s" % comment, pkgCurGitDir))
command.append(self.prepareGitCommand(path, "tag %s" % self.initialTag , pkgCurGitDir))
res = self.__listSubprocess(command=command)
if res != 0:
msg = "Initialization of the git repository for %s failed. "
msg += "See the log for more information."
raise ObsLightErr.ObsLightChRootError(msg % package.getName())
def resetToPrep(self, path, package):
pkgCurGitDir = package.getCurrentGitDirectory()
res = self.__listSubprocess([self.prepareGitCommand(path, "checkout %s" % self.initialTag , pkgCurGitDir)])
return res
def initGitignore(self, path, package):
absPath = self.__chroot.getDirectory() + path
with open(absPath + "/.gitignore", 'a') as f:
f.write("debugfiles.list\n")
f.write("debuglinks.list\n")
f.write("debugsources.list\n")
f.write(".gitignore\n")
# f.write("*.in\n")
def ignoreGitWatch(self,
package,
path=None,
commitComment="first build commit"):
'''
Add all Git untracked files of `path` to .gitignore
and commit.
'''
if path is None:
raise ObsLightErr.ObsLightChRootError("path is not defined in initGitWatch.")
absPath = self.__chroot.getDirectory() + path
timeString = time.strftime("%Y-%m-%d_%Hh%Mm%Ss")
comment = '\"auto commit %s %s\"' % (commitComment, timeString)
command = self.prepareGitCommand(path, u"status -u -s ", package.getCurrentGitDirectory())
#| sed -e 's/^[ \t]*//' " + u"| cut -d' ' -f2 >> %s/.gitignore" % absPath, package.getCurrentGitDirectory()
res = self.__subprocess(command=command, stdout=True)
# some packages modify their file rights, so we have to ensure
# this file is writable
self.__subprocess("sudo chmod -f a+w %s %s/.gitignore" % (absPath, absPath))
with open(absPath + "/.gitignore", 'a') as f:
if type(res) is not type(int()):
for line in res.split("\n"):
if len(line) > 0:
line = " ".join(line.strip(" ").split(" ")[1:])
f.write(line + "\n")
return self.__subprocess(self.prepareGitCommand(path,
u"commit -a -m %s" % comment,
package.getCurrentGitDirectory()))
def getCommitTag(self, path, package):
    '''
    Return the hash of the last Git commit of `package`, or None when
    no "commit" line is found in the log output.
    '''
    command = self.prepareGitCommand(path,
                                     " log HEAD --pretty=short -n 1 " ,
                                     package.getCurrentGitDirectory())
    result = self.__subprocess(command=command, stdout=True)
    for line in result.split("\n"):
        if line.startswith("commit "):
            # The original used str.strip("commit"), which strips ANY of
            # the characters c/o/m/i/t from both ends and could truncate
            # hashes starting or ending with those hex digits; slice the
            # fixed prefix off instead.
            return line[len("commit"):].strip()
    return None
def getListCommitTag(self, path, package):
return self.getCommitTagList(path, package)
def getCommitTagList(self, path, package):
    '''
    Return up to the 20 most recent commit hashes of `package`, as a
    list of (hash, "Comment") tuples, most recent first.
    '''
    command = self.prepareGitCommand(path,
                                     " log HEAD --pretty=short -n 20 ",
                                     package.getCurrentGitDirectory())
    logOutput = self.__subprocess(command=command, stdout=True)
    result = []
    for line in logOutput.split("\n"):
        if line.startswith("commit "):
            # The original used str.strip("commit "), which strips ANY of
            # the characters c/o/m/i/t/space from both ends and could
            # truncate the hash itself; remove the fixed prefix instead.
            result.append((line[len("commit "):].strip(), "Comment"))
    return result
def commitGit(self, mess, package):
packagePath = package.getChrootBuildDirectory()
command = []
if packagePath is None:
raise ObsLightErr.ObsLightChRootError("path is not defined in commitGit for .")
timeString = time.strftime("%Y-%m-%d_%Hh%Mm%Ss")
comment = '\"auto commit %s %s\"' % (mess, timeString)
path = self.__chroot.getDirectory() + packagePath
command.append(self.prepareGitCommand(packagePath,
" add %s/\* " % (path),
package.getCurrentGitDirectory()))
command.append(self.prepareGitCommand(packagePath,
" commit -a -m %s" % comment,
package.getCurrentGitDirectory()))
self.__listSubprocess(command=command)
tag2 = self.getCommitTag(packagePath, package)
package.setSecondCommit(tag2)
def createPatch(self, package, packagePath, tag1, tag2, patch):
    """
    Write the binary-safe diff between `tag1` and `tag2` of `package`
    into <packaging directory>/<patch>.  Always returns 0.
    """
    command = self.prepareGitCommand(packagePath,
                                     "diff -p -a --binary %s %s " % (tag1, tag2),
                                     package.getCurrentGitDirectory())
    res = self.__subprocess(command=command, stdout=True)
    pathPackagePackaging = package.getPackagingDirectiory()
    # The original opened the file with mode "w'" (stray quote in the
    # mode string); use plain write mode.
    with open(pathPackagePackaging + "/" + patch, "w") as f:
        f.write(res)
    return 0
def cloneGitpackage(url, path):
cmd = "git clone %s %s" % (url, path)
aSubprocessCrt = SubprocessCrt()
return aSubprocessCrt.execSubprocess(cmd)
def updateGitpackage(path):
cmd = "git --git-dir=%s pull" % os.path.join(path, ".git")
aSubprocessCrt = SubprocessCrt()
return aSubprocessCrt.execSubprocess(cmd)
def commitGitpackage(path, message):
cmd = "git --work-tree=%s --git-dir=%s commit -a -m \"%s\"" % (path,
os.path.join(path, ".git"),
message)
aSubprocessCrt = SubprocessCrt()
return aSubprocessCrt.execSubprocess(cmd)
| return self.__mySubprocessCrt.execPipeSubprocess(command, command2) | identifier_body |
ObsLightGitManager.py | #
# Copyright 2011-2012, Intel Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
'''
Created on 23 May 2012
@author: Ronan Le Martret
@author: Florent Vennetier
'''
import os
import time
import ObsLightErr
from ObsLightObject import ObsLightObject
from ObsLightSubprocess import SubprocessCrt
from ObsLightUtils import isNonEmptyString
class ObsLightGitManager(ObsLightObject):
'''
Manage the internal Git repository used to generate patches on packages.
'''
def __init__(self, projectChroot):
ObsLightObject.__init__(self)
self.__chroot = projectChroot
self.__mySubprocessCrt = SubprocessCrt()
self.initialTag = "initial-prep"
def __subprocess(self, command=None, stdout=False, noOutPut=False):
return self.__mySubprocessCrt.execSubprocess(command, stdout=stdout, noOutPut=noOutPut)
def __listSubprocess(self, command=None):
for c in command:
res = self.__mySubprocessCrt.execSubprocess(c)
return res
def ___execPipeSubprocess(self, command, command2):
return self.__mySubprocessCrt.execPipeSubprocess(command, command2)
def prepareGitCommand(self, workTree, subcommand, gitDir):
"""
Construct a Git command-line, setting its working tree to `workTree`,
and git directory to `gitDir`, and then appends `subcommand`.
Output example:
git --git-dir=<gitDir> --work-tree=<workTree> <subcommand>
"""
absWorkTree = self.__chroot.getDirectory() + workTree
absGitDir = self.__chroot.getDirectory() + gitDir
command = "git --git-dir=%s --work-tree=%s " % (absGitDir, absWorkTree)
command += subcommand
return command
def makeArchiveGitSubcommand(self, prefix, revision=u"HEAD", outputFilePath=None):
"""
Construct a Git 'archive' subcommand with auto-detected format.
If outputFilePath is None, format will be tar, and output will
be stdout.
"""
command = "archive --prefix=%s/ %s "
command = command % (prefix, revision)
if outputFilePath is not None:
command += " -o %s" % outputFilePath
return command
def checkGitUserConfig(self, workTree, gitDir):
"""
Git complains if you don't set 'user.name' and 'user.email' config
parameters. This method checks if they are set, and in case they
aren't, set them.
"""
confParams = {"user.email": "obslight@example.com", "user.name": "OBS Light"}
for param, value in confParams.iteritems():
cmd = self.prepareGitCommand(workTree, "config " + param, gitDir)
res = self.__subprocess(cmd, stdout=True, noOutPut=True)
self.logger.debug("Git parameter '%s': '%s'" % (param, res))
if not isNonEmptyString(res):
self.logger.debug(" -> Setting it to '%s'" % (value))
cmd2 = self.prepareGitCommand(workTree,
'config %s "%s"' % (param, value),
gitDir)
res2 = self.__subprocess(cmd2)
if res2 != 0:
msg = 'Failed to set git parameter "%s", next git operation may fail!'
self.logger.warning(msg % param)
def execMakeArchiveGitSubcommand(self,
packagePath,
outputFilePath,
prefix,
packageCurrentGitDirectory):
absOutputFilePath = self.__chroot.getDirectory()
# TODO: make something more generic (gz, bz2, xz...)
if outputFilePath.endswith(".tar.gz"):
# git archive does not know .tar.gz,
# we have to compress the file afterwards
absOutputFilePath += outputFilePath[:-len('.gz')]
else:
absOutputFilePath += outputFilePath
archiveSubCommand = self.makeArchiveGitSubcommand(prefix,
outputFilePath=absOutputFilePath)
command = self.prepareGitCommand(packagePath,
archiveSubCommand,
packageCurrentGitDirectory)
res = self.__subprocess(command)
if res != 0:
return res
if outputFilePath.endswith(".tar.gz"):
# Without '-f' user will be prompted if .gz file already exists
command = "gzip -f %s" % absOutputFilePath
res = self.__subprocess(command)
return res
def findEmptyDirectory(self, package):
# git ignores empty directories so we must save them into a file.
projectPath = self.__chroot.getDirectory() + package.getChrootBuildDirectory()
res = []
for root, dirs, files in os.walk(projectPath):
if len(dirs) == 0 and len(files) == 0:
res.append(root.replace(projectPath + "/", ""))
# TODO: move this file to BUILD/
with open(projectPath + "/.emptyDirectory", 'w') as f:
for d in res:
f.write(d + "\n")
def initGitWatch(self, path, package):
'''
Initialize a Git repository in the specified path, and 'git add' everything.
'''
if path is None:
raise ObsLightErr.ObsLightChRootError("Path is not defined in initGitWatch.")
absPath = self.__chroot.getDirectory() + path
pkgCurGitDir = package.getCurrentGitDirectory()
# Ensure we have access rights on the directory
res = self.__chroot.allowAccessToObslightGroup(os.path.dirname(pkgCurGitDir),
absolutePath=False)
self.findEmptyDirectory(package)
timeString = time.strftime("%Y-%m-%d_%Hh%Mm%Ss")
comment = '\"auto commit first commit %s\"' % timeString
# Create .gitignore file.
self.initGitignore(path, package)
if res != 0:
msg = "Failed to give access rights on '%s'. Git repository creation may fail."
self.logger.warn(msg % os.path.dirname(pkgCurGitDir))
res = self.__subprocess(self.prepareGitCommand(path, "init ", pkgCurGitDir))
if res != 0:
msg = "Creation of the git repository for %s failed. See the log for more information."
raise ObsLightErr.ObsLightChRootError(msg % package.getName())
self.checkGitUserConfig(path, pkgCurGitDir)
command = []
command.append(self.prepareGitCommand(path, "add " + absPath + "/\*", pkgCurGitDir))
command.append(self.prepareGitCommand(path, "commit -a -m %s" % comment, pkgCurGitDir))
command.append(self.prepareGitCommand(path, "tag %s" % self.initialTag , pkgCurGitDir))
res = self.__listSubprocess(command=command)
if res != 0:
|
def resetToPrep(self, path, package):
pkgCurGitDir = package.getCurrentGitDirectory()
res = self.__listSubprocess([self.prepareGitCommand(path, "checkout %s" % self.initialTag , pkgCurGitDir)])
return res
def initGitignore(self, path, package):
absPath = self.__chroot.getDirectory() + path
with open(absPath + "/.gitignore", 'a') as f:
f.write("debugfiles.list\n")
f.write("debuglinks.list\n")
f.write("debugsources.list\n")
f.write(".gitignore\n")
# f.write("*.in\n")
def ignoreGitWatch(self,
package,
path=None,
commitComment="first build commit"):
'''
Add all Git untracked files of `path` to .gitignore
and commit.
'''
if path is None:
raise ObsLightErr.ObsLightChRootError("path is not defined in initGitWatch.")
absPath = self.__chroot.getDirectory() + path
timeString = time.strftime("%Y-%m-%d_%Hh%Mm%Ss")
comment = '\"auto commit %s %s\"' % (commitComment, timeString)
command = self.prepareGitCommand(path, u"status -u -s ", package.getCurrentGitDirectory())
#| sed -e 's/^[ \t]*//' " + u"| cut -d' ' -f2 >> %s/.gitignore" % absPath, package.getCurrentGitDirectory()
res = self.__subprocess(command=command, stdout=True)
# some packages modify their file rights, so we have to ensure
# this file is writable
self.__subprocess("sudo chmod -f a+w %s %s/.gitignore" % (absPath, absPath))
with open(absPath + "/.gitignore", 'a') as f:
if type(res) is not type(int()):
for line in res.split("\n"):
if len(line) > 0:
line = " ".join(line.strip(" ").split(" ")[1:])
f.write(line + "\n")
return self.__subprocess(self.prepareGitCommand(path,
u"commit -a -m %s" % comment,
package.getCurrentGitDirectory()))
def getCommitTag(self, path, package):
'''
Get the last Git commit hash.
'''
command = self.prepareGitCommand(path,
" log HEAD --pretty=short -n 1 " ,
package.getCurrentGitDirectory())
result = self.__subprocess(command=command, stdout=True)
for line in result.split("\n"):
if line.startswith("commit "):
res = line.strip("commit").strip().rstrip("\n")
return res
def getListCommitTag(self, path, package):
return self.getCommitTagList(path, package)
def getCommitTagList(self, path, package):
'''
Get the last Git commit hash.
'''
command = self.prepareGitCommand(path,
" log HEAD --pretty=short -n 20 ",
package.getCurrentGitDirectory())
result_tmp = self.__subprocess(command=command, stdout=True)
result = []
for line in result_tmp.split("\n"):
if line.startswith("commit "):
res = line.strip("commit ").rstrip("\n")
result.append((res, "Comment"))
return result
def commitGit(self, mess, package):
packagePath = package.getChrootBuildDirectory()
command = []
if packagePath is None:
raise ObsLightErr.ObsLightChRootError("path is not defined in commitGit for .")
timeString = time.strftime("%Y-%m-%d_%Hh%Mm%Ss")
comment = '\"auto commit %s %s\"' % (mess, timeString)
path = self.__chroot.getDirectory() + packagePath
command.append(self.prepareGitCommand(packagePath,
" add %s/\* " % (path),
package.getCurrentGitDirectory()))
command.append(self.prepareGitCommand(packagePath,
" commit -a -m %s" % comment,
package.getCurrentGitDirectory()))
self.__listSubprocess(command=command)
tag2 = self.getCommitTag(packagePath, package)
package.setSecondCommit(tag2)
def createPatch(self, package, packagePath, tag1, tag2, patch):
command = self.prepareGitCommand(packagePath,
"diff -p -a --binary %s %s " % (tag1, tag2),
package.getCurrentGitDirectory())
res = self.__subprocess(command=command, stdout=True)
pathPackagePackaging = package.getPackagingDirectiory()
with open(pathPackagePackaging + "/" + patch, "w'") as f:
f.write(res)
return 0
def cloneGitpackage(url, path):
cmd = "git clone %s %s" % (url, path)
aSubprocessCrt = SubprocessCrt()
return aSubprocessCrt.execSubprocess(cmd)
def updateGitpackage(path):
cmd = "git --git-dir=%s pull" % os.path.join(path, ".git")
aSubprocessCrt = SubprocessCrt()
return aSubprocessCrt.execSubprocess(cmd)
def commitGitpackage(path, message):
cmd = "git --work-tree=%s --git-dir=%s commit -a -m \"%s\"" % (path,
os.path.join(path, ".git"),
message)
aSubprocessCrt = SubprocessCrt()
return aSubprocessCrt.execSubprocess(cmd)
| msg = "Initialization of the git repository for %s failed. "
msg += "See the log for more information."
raise ObsLightErr.ObsLightChRootError(msg % package.getName()) | conditional_block |
ObsLightGitManager.py | #
# Copyright 2011-2012, Intel Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
'''
Created on 23 May 2012
@author: Ronan Le Martret
@author: Florent Vennetier
'''
import os
import time
import ObsLightErr
from ObsLightObject import ObsLightObject
from ObsLightSubprocess import SubprocessCrt
from ObsLightUtils import isNonEmptyString
class ObsLightGitManager(ObsLightObject):
'''
Manage the internal Git repository used to generate patches on packages.
'''
def __init__(self, projectChroot):
ObsLightObject.__init__(self)
self.__chroot = projectChroot
self.__mySubprocessCrt = SubprocessCrt()
self.initialTag = "initial-prep"
def __subprocess(self, command=None, stdout=False, noOutPut=False):
return self.__mySubprocessCrt.execSubprocess(command, stdout=stdout, noOutPut=noOutPut)
def __listSubprocess(self, command=None):
for c in command:
res = self.__mySubprocessCrt.execSubprocess(c)
return res
def ___execPipeSubprocess(self, command, command2):
return self.__mySubprocessCrt.execPipeSubprocess(command, command2)
def prepareGitCommand(self, workTree, subcommand, gitDir):
"""
Construct a Git command-line, setting its working tree to `workTree`,
and git directory to `gitDir`, and then appends `subcommand`.
Output example:
git --git-dir=<gitDir> --work-tree=<workTree> <subcommand>
"""
absWorkTree = self.__chroot.getDirectory() + workTree
absGitDir = self.__chroot.getDirectory() + gitDir
command = "git --git-dir=%s --work-tree=%s " % (absGitDir, absWorkTree)
command += subcommand
return command
def makeArchiveGitSubcommand(self, prefix, revision=u"HEAD", outputFilePath=None):
"""
Construct a Git 'archive' subcommand with auto-detected format.
If outputFilePath is None, format will be tar, and output will
be stdout.
"""
command = "archive --prefix=%s/ %s "
command = command % (prefix, revision)
if outputFilePath is not None:
command += " -o %s" % outputFilePath
return command
def checkGitUserConfig(self, workTree, gitDir):
"""
Git complains if you don't set 'user.name' and 'user.email' config
parameters. This method checks if they are set, and in case they
aren't, set them.
"""
confParams = {"user.email": "obslight@example.com", "user.name": "OBS Light"} | if not isNonEmptyString(res):
self.logger.debug(" -> Setting it to '%s'" % (value))
cmd2 = self.prepareGitCommand(workTree,
'config %s "%s"' % (param, value),
gitDir)
res2 = self.__subprocess(cmd2)
if res2 != 0:
msg = 'Failed to set git parameter "%s", next git operation may fail!'
self.logger.warning(msg % param)
def execMakeArchiveGitSubcommand(self,
packagePath,
outputFilePath,
prefix,
packageCurrentGitDirectory):
absOutputFilePath = self.__chroot.getDirectory()
# TODO: make something more generic (gz, bz2, xz...)
if outputFilePath.endswith(".tar.gz"):
# git archive does not know .tar.gz,
# we have to compress the file afterwards
absOutputFilePath += outputFilePath[:-len('.gz')]
else:
absOutputFilePath += outputFilePath
archiveSubCommand = self.makeArchiveGitSubcommand(prefix,
outputFilePath=absOutputFilePath)
command = self.prepareGitCommand(packagePath,
archiveSubCommand,
packageCurrentGitDirectory)
res = self.__subprocess(command)
if res != 0:
return res
if outputFilePath.endswith(".tar.gz"):
# Without '-f' user will be prompted if .gz file already exists
command = "gzip -f %s" % absOutputFilePath
res = self.__subprocess(command)
return res
def findEmptyDirectory(self, package):
# git ignores empty directories so we must save them into a file.
projectPath = self.__chroot.getDirectory() + package.getChrootBuildDirectory()
res = []
for root, dirs, files in os.walk(projectPath):
if len(dirs) == 0 and len(files) == 0:
res.append(root.replace(projectPath + "/", ""))
# TODO: move this file to BUILD/
with open(projectPath + "/.emptyDirectory", 'w') as f:
for d in res:
f.write(d + "\n")
def initGitWatch(self, path, package):
'''
Initialize a Git repository in the specified path, and 'git add' everything.
'''
if path is None:
raise ObsLightErr.ObsLightChRootError("Path is not defined in initGitWatch.")
absPath = self.__chroot.getDirectory() + path
pkgCurGitDir = package.getCurrentGitDirectory()
# Ensure we have access rights on the directory
res = self.__chroot.allowAccessToObslightGroup(os.path.dirname(pkgCurGitDir),
absolutePath=False)
self.findEmptyDirectory(package)
timeString = time.strftime("%Y-%m-%d_%Hh%Mm%Ss")
comment = '\"auto commit first commit %s\"' % timeString
# Create .gitignore file.
self.initGitignore(path, package)
if res != 0:
msg = "Failed to give access rights on '%s'. Git repository creation may fail."
self.logger.warn(msg % os.path.dirname(pkgCurGitDir))
res = self.__subprocess(self.prepareGitCommand(path, "init ", pkgCurGitDir))
if res != 0:
msg = "Creation of the git repository for %s failed. See the log for more information."
raise ObsLightErr.ObsLightChRootError(msg % package.getName())
self.checkGitUserConfig(path, pkgCurGitDir)
command = []
command.append(self.prepareGitCommand(path, "add " + absPath + "/\*", pkgCurGitDir))
command.append(self.prepareGitCommand(path, "commit -a -m %s" % comment, pkgCurGitDir))
command.append(self.prepareGitCommand(path, "tag %s" % self.initialTag , pkgCurGitDir))
res = self.__listSubprocess(command=command)
if res != 0:
msg = "Initialization of the git repository for %s failed. "
msg += "See the log for more information."
raise ObsLightErr.ObsLightChRootError(msg % package.getName())
def resetToPrep(self, path, package):
pkgCurGitDir = package.getCurrentGitDirectory()
res = self.__listSubprocess([self.prepareGitCommand(path, "checkout %s" % self.initialTag , pkgCurGitDir)])
return res
def initGitignore(self, path, package):
absPath = self.__chroot.getDirectory() + path
with open(absPath + "/.gitignore", 'a') as f:
f.write("debugfiles.list\n")
f.write("debuglinks.list\n")
f.write("debugsources.list\n")
f.write(".gitignore\n")
# f.write("*.in\n")
def ignoreGitWatch(self,
package,
path=None,
commitComment="first build commit"):
'''
Add all Git untracked files of `path` to .gitignore
and commit.
'''
if path is None:
raise ObsLightErr.ObsLightChRootError("path is not defined in initGitWatch.")
absPath = self.__chroot.getDirectory() + path
timeString = time.strftime("%Y-%m-%d_%Hh%Mm%Ss")
comment = '\"auto commit %s %s\"' % (commitComment, timeString)
command = self.prepareGitCommand(path, u"status -u -s ", package.getCurrentGitDirectory())
#| sed -e 's/^[ \t]*//' " + u"| cut -d' ' -f2 >> %s/.gitignore" % absPath, package.getCurrentGitDirectory()
res = self.__subprocess(command=command, stdout=True)
# some packages modify their file rights, so we have to ensure
# this file is writable
self.__subprocess("sudo chmod -f a+w %s %s/.gitignore" % (absPath, absPath))
with open(absPath + "/.gitignore", 'a') as f:
if type(res) is not type(int()):
for line in res.split("\n"):
if len(line) > 0:
line = " ".join(line.strip(" ").split(" ")[1:])
f.write(line + "\n")
return self.__subprocess(self.prepareGitCommand(path,
u"commit -a -m %s" % comment,
package.getCurrentGitDirectory()))
def getCommitTag(self, path, package):
'''
Get the last Git commit hash.
'''
command = self.prepareGitCommand(path,
" log HEAD --pretty=short -n 1 " ,
package.getCurrentGitDirectory())
result = self.__subprocess(command=command, stdout=True)
for line in result.split("\n"):
if line.startswith("commit "):
res = line.strip("commit").strip().rstrip("\n")
return res
def getListCommitTag(self, path, package):
return self.getCommitTagList(path, package)
def getCommitTagList(self, path, package):
'''
Get the last Git commit hash.
'''
command = self.prepareGitCommand(path,
" log HEAD --pretty=short -n 20 ",
package.getCurrentGitDirectory())
result_tmp = self.__subprocess(command=command, stdout=True)
result = []
for line in result_tmp.split("\n"):
if line.startswith("commit "):
res = line.strip("commit ").rstrip("\n")
result.append((res, "Comment"))
return result
def commitGit(self, mess, package):
packagePath = package.getChrootBuildDirectory()
command = []
if packagePath is None:
raise ObsLightErr.ObsLightChRootError("path is not defined in commitGit for .")
timeString = time.strftime("%Y-%m-%d_%Hh%Mm%Ss")
comment = '\"auto commit %s %s\"' % (mess, timeString)
path = self.__chroot.getDirectory() + packagePath
command.append(self.prepareGitCommand(packagePath,
" add %s/\* " % (path),
package.getCurrentGitDirectory()))
command.append(self.prepareGitCommand(packagePath,
" commit -a -m %s" % comment,
package.getCurrentGitDirectory()))
self.__listSubprocess(command=command)
tag2 = self.getCommitTag(packagePath, package)
package.setSecondCommit(tag2)
def createPatch(self, package, packagePath, tag1, tag2, patch):
command = self.prepareGitCommand(packagePath,
"diff -p -a --binary %s %s " % (tag1, tag2),
package.getCurrentGitDirectory())
res = self.__subprocess(command=command, stdout=True)
pathPackagePackaging = package.getPackagingDirectiory()
with open(pathPackagePackaging + "/" + patch, "w'") as f:
f.write(res)
return 0
def cloneGitpackage(url, path):
cmd = "git clone %s %s" % (url, path)
aSubprocessCrt = SubprocessCrt()
return aSubprocessCrt.execSubprocess(cmd)
def updateGitpackage(path):
cmd = "git --git-dir=%s pull" % os.path.join(path, ".git")
aSubprocessCrt = SubprocessCrt()
return aSubprocessCrt.execSubprocess(cmd)
def commitGitpackage(path, message):
cmd = "git --work-tree=%s --git-dir=%s commit -a -m \"%s\"" % (path,
os.path.join(path, ".git"),
message)
aSubprocessCrt = SubprocessCrt()
return aSubprocessCrt.execSubprocess(cmd) | for param, value in confParams.iteritems():
cmd = self.prepareGitCommand(workTree, "config " + param, gitDir)
res = self.__subprocess(cmd, stdout=True, noOutPut=True)
self.logger.debug("Git parameter '%s': '%s'" % (param, res)) | random_line_split |
mongo.go | /*
* Copyright 2018 Intel Corporation, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db
import (
"encoding/json"
"sort"
"golang.org/x/net/context"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/config"
pkgerrors "github.com/pkg/errors"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// MongoCollection defines the a subset of MongoDB operations
// Note: This interface is defined mainly for mock testing
type MongoCollection interface {
InsertOne(ctx context.Context, document interface{},
opts ...*options.InsertOneOptions) (*mongo.InsertOneResult, error)
FindOne(ctx context.Context, filter interface{},
opts ...*options.FindOneOptions) *mongo.SingleResult
FindOneAndUpdate(ctx context.Context, filter interface{},
update interface{}, opts ...*options.FindOneAndUpdateOptions) *mongo.SingleResult
DeleteOne(ctx context.Context, filter interface{},
opts ...*options.DeleteOptions) (*mongo.DeleteResult, error)
DeleteMany(ctx context.Context, filter interface{},
opts ...*options.DeleteOptions) (*mongo.DeleteResult, error)
Find(ctx context.Context, filter interface{},
opts ...*options.FindOptions) (*mongo.Cursor, error)
UpdateOne(ctx context.Context, filter interface{}, update interface{},
opts ...*options.UpdateOptions) (*mongo.UpdateResult, error)
CountDocuments(ctx context.Context, filter interface{},
opts ...*options.CountOptions) (int64, error)
}
// MongoStore is an implementation of the db.Store interface
type MongoStore struct {
db *mongo.Database
}
// This exists only for allowing us to mock the collection object
// for testing purposes
var getCollection = func(coll string, m *MongoStore) MongoCollection {
return m.db.Collection(coll)
}
// This exists only for allowing us to mock the DecodeBytes function
// Mainly because we cannot construct a SingleResult struct from our
// tests. All fields in that struct are private.
var decodeBytes = func(sr *mongo.SingleResult) (bson.Raw, error) {
return sr.DecodeBytes()
}
// These exists only for allowing us to mock the cursor.Next function
// Mainly because we cannot construct a mongo.Cursor struct from our
// tests. All fields in that struct are private and there is no public
// constructor method.
var cursorNext = func(ctx context.Context, cursor *mongo.Cursor) bool {
return cursor.Next(ctx)
}
var cursorClose = func(ctx context.Context, cursor *mongo.Cursor) error {
return cursor.Close(ctx)
}
// NewMongoStore initializes a Mongo Database with the name provided
// If a database with that name exists, it will be returned
func NewMongoStore(name string, store *mongo.Database) (Store, error) {
if store == nil {
ip := "mongodb://" + config.GetConfiguration().DatabaseIP + ":27017"
clientOptions := options.Client()
clientOptions.ApplyURI(ip)
mongoClient, err := mongo.NewClient(clientOptions)
if err != nil {
return nil, err
}
err = mongoClient.Connect(context.Background())
if err != nil {
return nil, err
}
store = mongoClient.Database(name)
}
return &MongoStore{
db: store,
}, nil
}
// HealthCheck verifies if the database is up and running
func (m *MongoStore) HealthCheck() error {
_, err := decodeBytes(m.db.RunCommand(context.Background(), bson.D{{"serverStatus", 1}}))
if err != nil {
return pkgerrors.Wrap(err, "Error getting server status")
}
return nil
}
// validateParams checks to see if any parameters are empty
func (m *MongoStore) validateParams(args ...interface{}) bool {
for _, v := range args {
val, ok := v.(string)
if ok {
if val == "" {
return false
}
} else {
if v == nil {
return false
}
}
}
return true
}
// Unmarshal implements an unmarshaler for bson data that
// is produced from the mongo database
func (m *MongoStore) Unmarshal(inp []byte, out interface{}) error {
err := bson.Unmarshal(inp, out)
if err != nil {
return pkgerrors.Wrap(err, "Unmarshaling bson")
}
return nil
}
func (m *MongoStore) findFilter(key Key) (primitive.M, error) {
var bsonMap bson.M
st, err := json.Marshal(key)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &bsonMap)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
filter := bson.M{
"$and": []bson.M{bsonMap},
}
return filter, nil
}
func (m *MongoStore) findFilterWithKey(key Key) (primitive.M, error) {
var bsonMap bson.M
var bsonMapFinal bson.M
st, err := json.Marshal(key)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &bsonMap)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
bsonMapFinal = make(bson.M)
for k, v := range bsonMap {
if v == "" {
if _, ok := bsonMapFinal["key"]; !ok {
// add type of key to filter
s, err := m.createKeyField(key)
if err != nil {
return primitive.M{}, err
}
bsonMapFinal["key"] = s
}
} else {
bsonMapFinal[k] = v
}
}
filter := bson.M{
"$and": []bson.M{bsonMapFinal},
}
return filter, nil
}
func (m *MongoStore) updateFilter(key interface{}) (primitive.M, error) {
var n map[string]string
st, err := json.Marshal(key)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &n)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
} | filter := bson.M{
"$set": p,
}
return filter, nil
}
func (m *MongoStore) createKeyField(key interface{}) (string, error) {
var n map[string]string
st, err := json.Marshal(key)
if err != nil {
return "", pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &n)
if err != nil {
return "", pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
var keys []string
for k := range n {
keys = append(keys, k)
}
sort.Strings(keys)
s := "{"
for _, k := range keys {
s = s + k + ","
}
s = s + "}"
return s, nil
}
// Insert is used to insert/add element to a document
func (m *MongoStore) Insert(coll string, key Key, query interface{}, tag string, data interface{}) error {
if data == nil || !m.validateParams(coll, key, tag) {
return pkgerrors.New("No Data to store")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilter(key)
if err != nil {
return err
}
// Create and add key tag
s, err := m.createKeyField(key)
if err != nil {
return err
}
_, err = decodeBytes(
c.FindOneAndUpdate(
ctx,
filter,
bson.D{
{"$set", bson.D{
{tag, data},
{"key", s},
}},
},
options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After)))
if err != nil {
return pkgerrors.Errorf("Error updating master table: %s", err.Error())
}
if query == nil {
return nil
}
// Update to add Query fields
update, err := m.updateFilter(query)
if err != nil {
return err
}
_, err = c.UpdateOne(
ctx,
filter,
update)
if err != nil {
return pkgerrors.Errorf("Error updating Query fields: %s", err.Error())
}
return nil
}
// Find method returns the data stored for this key and for this particular tag
func (m *MongoStore) Find(coll string, key Key, tag string) ([][]byte, error) {
//result, err := m.findInternal(coll, key, tag, "")
//return result, err
if !m.validateParams(coll, key, tag) {
return nil, pkgerrors.New("Mandatory fields are missing")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilterWithKey(key)
if err != nil {
return nil, err
}
// Find only the field requested
projection := bson.D{
{tag, 1},
{"_id", 0},
}
cursor, err := c.Find(context.Background(), filter, options.Find().SetProjection(projection))
if err != nil {
return nil, pkgerrors.Errorf("Error finding element: %s", err.Error())
}
defer cursorClose(ctx, cursor)
var data []byte
var result [][]byte
for cursorNext(ctx, cursor) {
d := cursor.Current
switch d.Lookup(tag).Type {
case bson.TypeString:
data = []byte(d.Lookup(tag).StringValue())
default:
r, err := d.LookupErr(tag)
if err != nil {
// Throw error if not found
pkgerrors.New("Unable to read data ")
}
data = r.Value
}
result = append(result, data)
}
return result, nil
}
// RemoveAll method to removes all the documet matching key
func (m *MongoStore) RemoveAll(coll string, key Key) error {
if !m.validateParams(coll, key) {
return pkgerrors.New("Mandatory fields are missing")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilterWithKey(key)
if err != nil {
return err
}
_, err = c.DeleteMany(ctx, filter)
if err != nil {
return pkgerrors.Errorf("Error Deleting from database: %s", err.Error())
}
return nil
}
// Remove method to remove the documet by key if no child references
func (m *MongoStore) Remove(coll string, key Key) error {
if !m.validateParams(coll, key) {
return pkgerrors.New("Mandatory fields are missing")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilter(key)
if err != nil {
return err
}
count, err := c.CountDocuments(context.Background(), filter)
if err != nil {
return pkgerrors.Errorf("Error finding: %s", err.Error())
}
if count > 1 {
return pkgerrors.Errorf("Can't delete parent without deleting child references first")
}
_, err = c.DeleteOne(ctx, filter)
if err != nil {
return pkgerrors.Errorf("Error Deleting from database: %s", err.Error())
}
return nil
}
// RemoveTag is used to remove an element from a document
func (m *MongoStore) RemoveTag(coll string, key Key, tag string) error {
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilter(key)
if err != nil {
return err
}
_, err = decodeBytes(
c.FindOneAndUpdate(
ctx,
filter,
bson.D{
{"$unset", bson.D{
{tag, ""},
}},
},
options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After)))
if err != nil {
return pkgerrors.Errorf("Error removing tag: %s", err.Error())
}
return nil
} | p := make(bson.M, len(n))
for k, v := range n {
p[k] = v
} | random_line_split |
mongo.go | /*
* Copyright 2018 Intel Corporation, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db
import (
"encoding/json"
"sort"
"golang.org/x/net/context"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/config"
pkgerrors "github.com/pkg/errors"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// MongoCollection defines the a subset of MongoDB operations
// Note: This interface is defined mainly for mock testing
type MongoCollection interface {
InsertOne(ctx context.Context, document interface{},
opts ...*options.InsertOneOptions) (*mongo.InsertOneResult, error)
FindOne(ctx context.Context, filter interface{},
opts ...*options.FindOneOptions) *mongo.SingleResult
FindOneAndUpdate(ctx context.Context, filter interface{},
update interface{}, opts ...*options.FindOneAndUpdateOptions) *mongo.SingleResult
DeleteOne(ctx context.Context, filter interface{},
opts ...*options.DeleteOptions) (*mongo.DeleteResult, error)
DeleteMany(ctx context.Context, filter interface{},
opts ...*options.DeleteOptions) (*mongo.DeleteResult, error)
Find(ctx context.Context, filter interface{},
opts ...*options.FindOptions) (*mongo.Cursor, error)
UpdateOne(ctx context.Context, filter interface{}, update interface{},
opts ...*options.UpdateOptions) (*mongo.UpdateResult, error)
CountDocuments(ctx context.Context, filter interface{},
opts ...*options.CountOptions) (int64, error)
}
// MongoStore is an implementation of the db.Store interface
type MongoStore struct {
db *mongo.Database
}
// This exists only for allowing us to mock the collection object
// for testing purposes
var getCollection = func(coll string, m *MongoStore) MongoCollection {
return m.db.Collection(coll)
}
// This exists only for allowing us to mock the DecodeBytes function
// Mainly because we cannot construct a SingleResult struct from our
// tests. All fields in that struct are private.
var decodeBytes = func(sr *mongo.SingleResult) (bson.Raw, error) {
return sr.DecodeBytes()
}
// These exists only for allowing us to mock the cursor.Next function
// Mainly because we cannot construct a mongo.Cursor struct from our
// tests. All fields in that struct are private and there is no public
// constructor method.
var cursorNext = func(ctx context.Context, cursor *mongo.Cursor) bool {
return cursor.Next(ctx)
}
var cursorClose = func(ctx context.Context, cursor *mongo.Cursor) error {
return cursor.Close(ctx)
}
// NewMongoStore initializes a Mongo Database with the name provided
// If a database with that name exists, it will be returned
func NewMongoStore(name string, store *mongo.Database) (Store, error) {
if store == nil {
ip := "mongodb://" + config.GetConfiguration().DatabaseIP + ":27017"
clientOptions := options.Client()
clientOptions.ApplyURI(ip)
mongoClient, err := mongo.NewClient(clientOptions)
if err != nil {
return nil, err
}
err = mongoClient.Connect(context.Background())
if err != nil {
return nil, err
}
store = mongoClient.Database(name)
}
return &MongoStore{
db: store,
}, nil
}
// HealthCheck verifies if the database is up and running
func (m *MongoStore) HealthCheck() error {
_, err := decodeBytes(m.db.RunCommand(context.Background(), bson.D{{"serverStatus", 1}}))
if err != nil {
return pkgerrors.Wrap(err, "Error getting server status")
}
return nil
}
// validateParams checks to see if any parameters are empty
func (m *MongoStore) validateParams(args ...interface{}) bool |
// Unmarshal implements an unmarshaler for bson data that
// is produced from the mongo database
func (m *MongoStore) Unmarshal(inp []byte, out interface{}) error {
err := bson.Unmarshal(inp, out)
if err != nil {
return pkgerrors.Wrap(err, "Unmarshaling bson")
}
return nil
}
func (m *MongoStore) findFilter(key Key) (primitive.M, error) {
var bsonMap bson.M
st, err := json.Marshal(key)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &bsonMap)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
filter := bson.M{
"$and": []bson.M{bsonMap},
}
return filter, nil
}
func (m *MongoStore) findFilterWithKey(key Key) (primitive.M, error) {
var bsonMap bson.M
var bsonMapFinal bson.M
st, err := json.Marshal(key)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &bsonMap)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
bsonMapFinal = make(bson.M)
for k, v := range bsonMap {
if v == "" {
if _, ok := bsonMapFinal["key"]; !ok {
// add type of key to filter
s, err := m.createKeyField(key)
if err != nil {
return primitive.M{}, err
}
bsonMapFinal["key"] = s
}
} else {
bsonMapFinal[k] = v
}
}
filter := bson.M{
"$and": []bson.M{bsonMapFinal},
}
return filter, nil
}
func (m *MongoStore) updateFilter(key interface{}) (primitive.M, error) {
var n map[string]string
st, err := json.Marshal(key)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &n)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
p := make(bson.M, len(n))
for k, v := range n {
p[k] = v
}
filter := bson.M{
"$set": p,
}
return filter, nil
}
func (m *MongoStore) createKeyField(key interface{}) (string, error) {
var n map[string]string
st, err := json.Marshal(key)
if err != nil {
return "", pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &n)
if err != nil {
return "", pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
var keys []string
for k := range n {
keys = append(keys, k)
}
sort.Strings(keys)
s := "{"
for _, k := range keys {
s = s + k + ","
}
s = s + "}"
return s, nil
}
// Insert is used to insert/add element to a document
func (m *MongoStore) Insert(coll string, key Key, query interface{}, tag string, data interface{}) error {
if data == nil || !m.validateParams(coll, key, tag) {
return pkgerrors.New("No Data to store")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilter(key)
if err != nil {
return err
}
// Create and add key tag
s, err := m.createKeyField(key)
if err != nil {
return err
}
_, err = decodeBytes(
c.FindOneAndUpdate(
ctx,
filter,
bson.D{
{"$set", bson.D{
{tag, data},
{"key", s},
}},
},
options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After)))
if err != nil {
return pkgerrors.Errorf("Error updating master table: %s", err.Error())
}
if query == nil {
return nil
}
// Update to add Query fields
update, err := m.updateFilter(query)
if err != nil {
return err
}
_, err = c.UpdateOne(
ctx,
filter,
update)
if err != nil {
return pkgerrors.Errorf("Error updating Query fields: %s", err.Error())
}
return nil
}
// Find method returns the data stored for this key and for this particular tag
func (m *MongoStore) Find(coll string, key Key, tag string) ([][]byte, error) {
//result, err := m.findInternal(coll, key, tag, "")
//return result, err
if !m.validateParams(coll, key, tag) {
return nil, pkgerrors.New("Mandatory fields are missing")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilterWithKey(key)
if err != nil {
return nil, err
}
// Find only the field requested
projection := bson.D{
{tag, 1},
{"_id", 0},
}
cursor, err := c.Find(context.Background(), filter, options.Find().SetProjection(projection))
if err != nil {
return nil, pkgerrors.Errorf("Error finding element: %s", err.Error())
}
defer cursorClose(ctx, cursor)
var data []byte
var result [][]byte
for cursorNext(ctx, cursor) {
d := cursor.Current
switch d.Lookup(tag).Type {
case bson.TypeString:
data = []byte(d.Lookup(tag).StringValue())
default:
r, err := d.LookupErr(tag)
if err != nil {
// Throw error if not found
pkgerrors.New("Unable to read data ")
}
data = r.Value
}
result = append(result, data)
}
return result, nil
}
// RemoveAll method to removes all the documet matching key
func (m *MongoStore) RemoveAll(coll string, key Key) error {
if !m.validateParams(coll, key) {
return pkgerrors.New("Mandatory fields are missing")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilterWithKey(key)
if err != nil {
return err
}
_, err = c.DeleteMany(ctx, filter)
if err != nil {
return pkgerrors.Errorf("Error Deleting from database: %s", err.Error())
}
return nil
}
// Remove method to remove the documet by key if no child references
func (m *MongoStore) Remove(coll string, key Key) error {
if !m.validateParams(coll, key) {
return pkgerrors.New("Mandatory fields are missing")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilter(key)
if err != nil {
return err
}
count, err := c.CountDocuments(context.Background(), filter)
if err != nil {
return pkgerrors.Errorf("Error finding: %s", err.Error())
}
if count > 1 {
return pkgerrors.Errorf("Can't delete parent without deleting child references first")
}
_, err = c.DeleteOne(ctx, filter)
if err != nil {
return pkgerrors.Errorf("Error Deleting from database: %s", err.Error())
}
return nil
}
// RemoveTag is used to remove an element from a document
func (m *MongoStore) RemoveTag(coll string, key Key, tag string) error {
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilter(key)
if err != nil {
return err
}
_, err = decodeBytes(
c.FindOneAndUpdate(
ctx,
filter,
bson.D{
{"$unset", bson.D{
{tag, ""},
}},
},
options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After)))
if err != nil {
return pkgerrors.Errorf("Error removing tag: %s", err.Error())
}
return nil
}
| {
for _, v := range args {
val, ok := v.(string)
if ok {
if val == "" {
return false
}
} else {
if v == nil {
return false
}
}
}
return true
} | identifier_body |
mongo.go | /*
* Copyright 2018 Intel Corporation, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db
import (
"encoding/json"
"sort"
"golang.org/x/net/context"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/config"
pkgerrors "github.com/pkg/errors"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// MongoCollection defines the a subset of MongoDB operations
// Note: This interface is defined mainly for mock testing
type MongoCollection interface {
InsertOne(ctx context.Context, document interface{},
opts ...*options.InsertOneOptions) (*mongo.InsertOneResult, error)
FindOne(ctx context.Context, filter interface{},
opts ...*options.FindOneOptions) *mongo.SingleResult
FindOneAndUpdate(ctx context.Context, filter interface{},
update interface{}, opts ...*options.FindOneAndUpdateOptions) *mongo.SingleResult
DeleteOne(ctx context.Context, filter interface{},
opts ...*options.DeleteOptions) (*mongo.DeleteResult, error)
DeleteMany(ctx context.Context, filter interface{},
opts ...*options.DeleteOptions) (*mongo.DeleteResult, error)
Find(ctx context.Context, filter interface{},
opts ...*options.FindOptions) (*mongo.Cursor, error)
UpdateOne(ctx context.Context, filter interface{}, update interface{},
opts ...*options.UpdateOptions) (*mongo.UpdateResult, error)
CountDocuments(ctx context.Context, filter interface{},
opts ...*options.CountOptions) (int64, error)
}
// MongoStore is an implementation of the db.Store interface
type MongoStore struct {
db *mongo.Database
}
// This exists only for allowing us to mock the collection object
// for testing purposes
var getCollection = func(coll string, m *MongoStore) MongoCollection {
return m.db.Collection(coll)
}
// This exists only for allowing us to mock the DecodeBytes function
// Mainly because we cannot construct a SingleResult struct from our
// tests. All fields in that struct are private.
var decodeBytes = func(sr *mongo.SingleResult) (bson.Raw, error) {
return sr.DecodeBytes()
}
// These exists only for allowing us to mock the cursor.Next function
// Mainly because we cannot construct a mongo.Cursor struct from our
// tests. All fields in that struct are private and there is no public
// constructor method.
var cursorNext = func(ctx context.Context, cursor *mongo.Cursor) bool {
return cursor.Next(ctx)
}
var cursorClose = func(ctx context.Context, cursor *mongo.Cursor) error {
return cursor.Close(ctx)
}
// NewMongoStore initializes a Mongo Database with the name provided
// If a database with that name exists, it will be returned
func NewMongoStore(name string, store *mongo.Database) (Store, error) {
if store == nil {
ip := "mongodb://" + config.GetConfiguration().DatabaseIP + ":27017"
clientOptions := options.Client()
clientOptions.ApplyURI(ip)
mongoClient, err := mongo.NewClient(clientOptions)
if err != nil {
return nil, err
}
err = mongoClient.Connect(context.Background())
if err != nil {
return nil, err
}
store = mongoClient.Database(name)
}
return &MongoStore{
db: store,
}, nil
}
// HealthCheck verifies if the database is up and running
func (m *MongoStore) HealthCheck() error {
_, err := decodeBytes(m.db.RunCommand(context.Background(), bson.D{{"serverStatus", 1}}))
if err != nil {
return pkgerrors.Wrap(err, "Error getting server status")
}
return nil
}
// validateParams checks to see if any parameters are empty
func (m *MongoStore) validateParams(args ...interface{}) bool {
for _, v := range args {
val, ok := v.(string)
if ok {
if val == "" {
return false
}
} else {
if v == nil {
return false
}
}
}
return true
}
// Unmarshal implements an unmarshaler for bson data that
// is produced from the mongo database
func (m *MongoStore) Unmarshal(inp []byte, out interface{}) error {
err := bson.Unmarshal(inp, out)
if err != nil {
return pkgerrors.Wrap(err, "Unmarshaling bson")
}
return nil
}
func (m *MongoStore) | (key Key) (primitive.M, error) {
var bsonMap bson.M
st, err := json.Marshal(key)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &bsonMap)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
filter := bson.M{
"$and": []bson.M{bsonMap},
}
return filter, nil
}
func (m *MongoStore) findFilterWithKey(key Key) (primitive.M, error) {
var bsonMap bson.M
var bsonMapFinal bson.M
st, err := json.Marshal(key)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &bsonMap)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
bsonMapFinal = make(bson.M)
for k, v := range bsonMap {
if v == "" {
if _, ok := bsonMapFinal["key"]; !ok {
// add type of key to filter
s, err := m.createKeyField(key)
if err != nil {
return primitive.M{}, err
}
bsonMapFinal["key"] = s
}
} else {
bsonMapFinal[k] = v
}
}
filter := bson.M{
"$and": []bson.M{bsonMapFinal},
}
return filter, nil
}
func (m *MongoStore) updateFilter(key interface{}) (primitive.M, error) {
var n map[string]string
st, err := json.Marshal(key)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &n)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
p := make(bson.M, len(n))
for k, v := range n {
p[k] = v
}
filter := bson.M{
"$set": p,
}
return filter, nil
}
func (m *MongoStore) createKeyField(key interface{}) (string, error) {
var n map[string]string
st, err := json.Marshal(key)
if err != nil {
return "", pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &n)
if err != nil {
return "", pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
var keys []string
for k := range n {
keys = append(keys, k)
}
sort.Strings(keys)
s := "{"
for _, k := range keys {
s = s + k + ","
}
s = s + "}"
return s, nil
}
// Insert is used to insert/add element to a document
func (m *MongoStore) Insert(coll string, key Key, query interface{}, tag string, data interface{}) error {
if data == nil || !m.validateParams(coll, key, tag) {
return pkgerrors.New("No Data to store")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilter(key)
if err != nil {
return err
}
// Create and add key tag
s, err := m.createKeyField(key)
if err != nil {
return err
}
_, err = decodeBytes(
c.FindOneAndUpdate(
ctx,
filter,
bson.D{
{"$set", bson.D{
{tag, data},
{"key", s},
}},
},
options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After)))
if err != nil {
return pkgerrors.Errorf("Error updating master table: %s", err.Error())
}
if query == nil {
return nil
}
// Update to add Query fields
update, err := m.updateFilter(query)
if err != nil {
return err
}
_, err = c.UpdateOne(
ctx,
filter,
update)
if err != nil {
return pkgerrors.Errorf("Error updating Query fields: %s", err.Error())
}
return nil
}
// Find method returns the data stored for this key and for this particular tag
func (m *MongoStore) Find(coll string, key Key, tag string) ([][]byte, error) {
//result, err := m.findInternal(coll, key, tag, "")
//return result, err
if !m.validateParams(coll, key, tag) {
return nil, pkgerrors.New("Mandatory fields are missing")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilterWithKey(key)
if err != nil {
return nil, err
}
// Find only the field requested
projection := bson.D{
{tag, 1},
{"_id", 0},
}
cursor, err := c.Find(context.Background(), filter, options.Find().SetProjection(projection))
if err != nil {
return nil, pkgerrors.Errorf("Error finding element: %s", err.Error())
}
defer cursorClose(ctx, cursor)
var data []byte
var result [][]byte
for cursorNext(ctx, cursor) {
d := cursor.Current
switch d.Lookup(tag).Type {
case bson.TypeString:
data = []byte(d.Lookup(tag).StringValue())
default:
r, err := d.LookupErr(tag)
if err != nil {
// Throw error if not found
pkgerrors.New("Unable to read data ")
}
data = r.Value
}
result = append(result, data)
}
return result, nil
}
// RemoveAll method to removes all the documet matching key
func (m *MongoStore) RemoveAll(coll string, key Key) error {
if !m.validateParams(coll, key) {
return pkgerrors.New("Mandatory fields are missing")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilterWithKey(key)
if err != nil {
return err
}
_, err = c.DeleteMany(ctx, filter)
if err != nil {
return pkgerrors.Errorf("Error Deleting from database: %s", err.Error())
}
return nil
}
// Remove method to remove the documet by key if no child references
func (m *MongoStore) Remove(coll string, key Key) error {
if !m.validateParams(coll, key) {
return pkgerrors.New("Mandatory fields are missing")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilter(key)
if err != nil {
return err
}
count, err := c.CountDocuments(context.Background(), filter)
if err != nil {
return pkgerrors.Errorf("Error finding: %s", err.Error())
}
if count > 1 {
return pkgerrors.Errorf("Can't delete parent without deleting child references first")
}
_, err = c.DeleteOne(ctx, filter)
if err != nil {
return pkgerrors.Errorf("Error Deleting from database: %s", err.Error())
}
return nil
}
// RemoveTag is used to remove an element from a document
func (m *MongoStore) RemoveTag(coll string, key Key, tag string) error {
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilter(key)
if err != nil {
return err
}
_, err = decodeBytes(
c.FindOneAndUpdate(
ctx,
filter,
bson.D{
{"$unset", bson.D{
{tag, ""},
}},
},
options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After)))
if err != nil {
return pkgerrors.Errorf("Error removing tag: %s", err.Error())
}
return nil
}
| findFilter | identifier_name |
mongo.go | /*
* Copyright 2018 Intel Corporation, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db
import (
"encoding/json"
"sort"
"golang.org/x/net/context"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/config"
pkgerrors "github.com/pkg/errors"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// MongoCollection defines the a subset of MongoDB operations
// Note: This interface is defined mainly for mock testing
type MongoCollection interface {
InsertOne(ctx context.Context, document interface{},
opts ...*options.InsertOneOptions) (*mongo.InsertOneResult, error)
FindOne(ctx context.Context, filter interface{},
opts ...*options.FindOneOptions) *mongo.SingleResult
FindOneAndUpdate(ctx context.Context, filter interface{},
update interface{}, opts ...*options.FindOneAndUpdateOptions) *mongo.SingleResult
DeleteOne(ctx context.Context, filter interface{},
opts ...*options.DeleteOptions) (*mongo.DeleteResult, error)
DeleteMany(ctx context.Context, filter interface{},
opts ...*options.DeleteOptions) (*mongo.DeleteResult, error)
Find(ctx context.Context, filter interface{},
opts ...*options.FindOptions) (*mongo.Cursor, error)
UpdateOne(ctx context.Context, filter interface{}, update interface{},
opts ...*options.UpdateOptions) (*mongo.UpdateResult, error)
CountDocuments(ctx context.Context, filter interface{},
opts ...*options.CountOptions) (int64, error)
}
// MongoStore is an implementation of the db.Store interface
type MongoStore struct {
db *mongo.Database
}
// This exists only for allowing us to mock the collection object
// for testing purposes
var getCollection = func(coll string, m *MongoStore) MongoCollection {
return m.db.Collection(coll)
}
// This exists only for allowing us to mock the DecodeBytes function
// Mainly because we cannot construct a SingleResult struct from our
// tests. All fields in that struct are private.
var decodeBytes = func(sr *mongo.SingleResult) (bson.Raw, error) {
return sr.DecodeBytes()
}
// These exists only for allowing us to mock the cursor.Next function
// Mainly because we cannot construct a mongo.Cursor struct from our
// tests. All fields in that struct are private and there is no public
// constructor method.
var cursorNext = func(ctx context.Context, cursor *mongo.Cursor) bool {
return cursor.Next(ctx)
}
var cursorClose = func(ctx context.Context, cursor *mongo.Cursor) error {
return cursor.Close(ctx)
}
// NewMongoStore initializes a Mongo Database with the name provided
// If a database with that name exists, it will be returned
func NewMongoStore(name string, store *mongo.Database) (Store, error) {
if store == nil {
ip := "mongodb://" + config.GetConfiguration().DatabaseIP + ":27017"
clientOptions := options.Client()
clientOptions.ApplyURI(ip)
mongoClient, err := mongo.NewClient(clientOptions)
if err != nil {
return nil, err
}
err = mongoClient.Connect(context.Background())
if err != nil {
return nil, err
}
store = mongoClient.Database(name)
}
return &MongoStore{
db: store,
}, nil
}
// HealthCheck verifies if the database is up and running
func (m *MongoStore) HealthCheck() error {
_, err := decodeBytes(m.db.RunCommand(context.Background(), bson.D{{"serverStatus", 1}}))
if err != nil {
return pkgerrors.Wrap(err, "Error getting server status")
}
return nil
}
// validateParams checks to see if any parameters are empty
func (m *MongoStore) validateParams(args ...interface{}) bool {
for _, v := range args {
val, ok := v.(string)
if ok {
if val == "" {
return false
}
} else {
if v == nil {
return false
}
}
}
return true
}
// Unmarshal implements an unmarshaler for bson data that
// is produced from the mongo database
func (m *MongoStore) Unmarshal(inp []byte, out interface{}) error {
err := bson.Unmarshal(inp, out)
if err != nil {
return pkgerrors.Wrap(err, "Unmarshaling bson")
}
return nil
}
func (m *MongoStore) findFilter(key Key) (primitive.M, error) {
var bsonMap bson.M
st, err := json.Marshal(key)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &bsonMap)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
filter := bson.M{
"$and": []bson.M{bsonMap},
}
return filter, nil
}
func (m *MongoStore) findFilterWithKey(key Key) (primitive.M, error) {
var bsonMap bson.M
var bsonMapFinal bson.M
st, err := json.Marshal(key)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &bsonMap)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
bsonMapFinal = make(bson.M)
for k, v := range bsonMap {
if v == "" {
if _, ok := bsonMapFinal["key"]; !ok {
// add type of key to filter
s, err := m.createKeyField(key)
if err != nil {
return primitive.M{}, err
}
bsonMapFinal["key"] = s
}
} else {
bsonMapFinal[k] = v
}
}
filter := bson.M{
"$and": []bson.M{bsonMapFinal},
}
return filter, nil
}
func (m *MongoStore) updateFilter(key interface{}) (primitive.M, error) {
var n map[string]string
st, err := json.Marshal(key)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &n)
if err != nil {
return primitive.M{}, pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
p := make(bson.M, len(n))
for k, v := range n {
p[k] = v
}
filter := bson.M{
"$set": p,
}
return filter, nil
}
func (m *MongoStore) createKeyField(key interface{}) (string, error) {
var n map[string]string
st, err := json.Marshal(key)
if err != nil {
return "", pkgerrors.Errorf("Error Marshalling key: %s", err.Error())
}
err = json.Unmarshal([]byte(st), &n)
if err != nil {
return "", pkgerrors.Errorf("Error Unmarshalling key to Bson Map: %s", err.Error())
}
var keys []string
for k := range n {
keys = append(keys, k)
}
sort.Strings(keys)
s := "{"
for _, k := range keys |
s = s + "}"
return s, nil
}
// Insert is used to insert/add element to a document
func (m *MongoStore) Insert(coll string, key Key, query interface{}, tag string, data interface{}) error {
if data == nil || !m.validateParams(coll, key, tag) {
return pkgerrors.New("No Data to store")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilter(key)
if err != nil {
return err
}
// Create and add key tag
s, err := m.createKeyField(key)
if err != nil {
return err
}
_, err = decodeBytes(
c.FindOneAndUpdate(
ctx,
filter,
bson.D{
{"$set", bson.D{
{tag, data},
{"key", s},
}},
},
options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After)))
if err != nil {
return pkgerrors.Errorf("Error updating master table: %s", err.Error())
}
if query == nil {
return nil
}
// Update to add Query fields
update, err := m.updateFilter(query)
if err != nil {
return err
}
_, err = c.UpdateOne(
ctx,
filter,
update)
if err != nil {
return pkgerrors.Errorf("Error updating Query fields: %s", err.Error())
}
return nil
}
// Find method returns the data stored for this key and for this particular tag
func (m *MongoStore) Find(coll string, key Key, tag string) ([][]byte, error) {
//result, err := m.findInternal(coll, key, tag, "")
//return result, err
if !m.validateParams(coll, key, tag) {
return nil, pkgerrors.New("Mandatory fields are missing")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilterWithKey(key)
if err != nil {
return nil, err
}
// Find only the field requested
projection := bson.D{
{tag, 1},
{"_id", 0},
}
cursor, err := c.Find(context.Background(), filter, options.Find().SetProjection(projection))
if err != nil {
return nil, pkgerrors.Errorf("Error finding element: %s", err.Error())
}
defer cursorClose(ctx, cursor)
var data []byte
var result [][]byte
for cursorNext(ctx, cursor) {
d := cursor.Current
switch d.Lookup(tag).Type {
case bson.TypeString:
data = []byte(d.Lookup(tag).StringValue())
default:
r, err := d.LookupErr(tag)
if err != nil {
// Throw error if not found
pkgerrors.New("Unable to read data ")
}
data = r.Value
}
result = append(result, data)
}
return result, nil
}
// RemoveAll method to removes all the documet matching key
func (m *MongoStore) RemoveAll(coll string, key Key) error {
if !m.validateParams(coll, key) {
return pkgerrors.New("Mandatory fields are missing")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilterWithKey(key)
if err != nil {
return err
}
_, err = c.DeleteMany(ctx, filter)
if err != nil {
return pkgerrors.Errorf("Error Deleting from database: %s", err.Error())
}
return nil
}
// Remove method to remove the documet by key if no child references
func (m *MongoStore) Remove(coll string, key Key) error {
if !m.validateParams(coll, key) {
return pkgerrors.New("Mandatory fields are missing")
}
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilter(key)
if err != nil {
return err
}
count, err := c.CountDocuments(context.Background(), filter)
if err != nil {
return pkgerrors.Errorf("Error finding: %s", err.Error())
}
if count > 1 {
return pkgerrors.Errorf("Can't delete parent without deleting child references first")
}
_, err = c.DeleteOne(ctx, filter)
if err != nil {
return pkgerrors.Errorf("Error Deleting from database: %s", err.Error())
}
return nil
}
// RemoveTag is used to remove an element from a document
func (m *MongoStore) RemoveTag(coll string, key Key, tag string) error {
c := getCollection(coll, m)
ctx := context.Background()
filter, err := m.findFilter(key)
if err != nil {
return err
}
_, err = decodeBytes(
c.FindOneAndUpdate(
ctx,
filter,
bson.D{
{"$unset", bson.D{
{tag, ""},
}},
},
options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After)))
if err != nil {
return pkgerrors.Errorf("Error removing tag: %s", err.Error())
}
return nil
}
| {
s = s + k + ","
} | conditional_block |
conftest.py | # -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import re
import functools
from os import environ
from random import choice
from string import ascii_uppercase
import pytest
_del_test_prefix = functools.partial(re.compile(r'^[Tt][Ee][Ss][Tt]_?').sub, '')
# local imports in the fixtures to make this file loadable in e.g. client tests
pytest_plugins = ('tests.ruciopytest.artifacts_plugin', )
def pytest_configure(config):
config.addinivalue_line('markers', 'dirty: marks test as dirty, i.e. tests are leaving structures behind')
config.addinivalue_line('markers', 'noparallel(reason, groups): marks test being unable to run in parallel to other tests' )
if config.pluginmanager.hasplugin("xdist"):
from .ruciopytest import xdist_noparallel_scheduler
config.pluginmanager.register(xdist_noparallel_scheduler)
def pytest_make_parametrize_id(config, val, argname):
if argname == 'file_config_mock':
cfg = {}
for section, option, value in val['overrides']:
cfg.setdefault(section, {})[option] = value
return argname + str(cfg)
if argname == 'core_config_mock':
cfg = {}
for section, option, value in val['table_content']:
cfg.setdefault(section, {})[option] = value
return argname + str(cfg)
# return None to let pytest handle the formatting
return None
@pytest.fixture(scope='session')
def session_scope_prefix():
"""
Generate a name prefix to be shared by objects created during this pytest session
"""
return ''.join(choice(ascii_uppercase) for _ in range(6)) + '-'
@pytest.fixture(scope='module')
def module_scope_prefix(request, session_scope_prefix):
"""
Generate a name prefix to be shared by objects created during this pytest module
Relies on pytest's builtin fixture "request"
https://docs.pytest.org/en/6.2.x/reference.html#std-fixture-request
"""
return session_scope_prefix + _del_test_prefix(request.module.__name__.split('.')[-1]) + '-'
@pytest.fixture(scope='class')
def class_scope_prefix(request, module_scope_prefix):
if not request.cls:
return module_scope_prefix
return module_scope_prefix + _del_test_prefix(request.cls.__name__) + '-'
@pytest.fixture(scope='function')
def function_scope_prefix(request, class_scope_prefix):
return class_scope_prefix + _del_test_prefix(request.node.originalname) + '-'
@pytest.fixture(scope='session')
def vo():
if environ.get('SUITE', 'remote_dbs') != 'client':
# Server test, we can use short VO via DB for internal tests
from rucio.tests.common_server import get_vo
return get_vo()
else:
# Client-only test, only use config with no DB config
from rucio.tests.common import get_long_vo
return get_long_vo()
@pytest.fixture(scope='session')
def second_vo():
from rucio.common.config import config_get_bool
from rucio.core.vo import vo_exists, add_vo
multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
if not multi_vo:
pytest.skip('multi_vo mode is not enabled. Running multi_vo tests in single_vo mode would result in failures.')
new_vo = 'new'
if not vo_exists(vo=new_vo):
add_vo(vo=new_vo, description='Test', email='rucio@email.com')
return new_vo
@pytest.fixture(scope='session')
def long_vo():
from rucio.tests.common import get_long_vo
return get_long_vo()
@pytest.fixture(scope='module')
def account_client():
from rucio.client.accountclient import AccountClient
return AccountClient()
@pytest.fixture(scope='module')
def replica_client():
from rucio.client.replicaclient import ReplicaClient
return ReplicaClient()
@pytest.fixture(scope='module')
def rucio_client():
from rucio.client import Client
return Client()
@pytest.fixture(scope='module')
def did_client():
from rucio.client.didclient import DIDClient
return DIDClient()
@pytest.fixture(scope='module')
def rse_client():
from rucio.client.rseclient import RSEClient |
@pytest.fixture(scope='module')
def scope_client():
from rucio.client.scopeclient import ScopeClient
return ScopeClient()
@pytest.fixture(scope='module')
def dirac_client():
from rucio.client.diracclient import DiracClient
return DiracClient()
@pytest.fixture
def rest_client():
from rucio.tests.common import print_response
from flask.testing import FlaskClient
from rucio.web.rest.flaskapi.v1.main import application
class WrappedFlaskClient(FlaskClient):
def __init__(self, *args, **kwargs):
super(WrappedFlaskClient, self).__init__(*args, **kwargs)
def open(self, path='/', *args, **kwargs):
print(kwargs.get('method', 'GET'), path)
response = super(WrappedFlaskClient, self).open(path, *args, **kwargs)
try:
print_response(response)
except Exception:
traceback.print_exc()
return response
_testing = application.testing
application.testing = True
application.test_client_class = WrappedFlaskClient
with application.test_client() as client:
yield client
application.test_client_class = None
application.testing = _testing
@pytest.fixture
def auth_token(rest_client, long_vo):
from rucio.tests.common import vohdr, headers, loginhdr
auth_response = rest_client.get('/auth/userpass', headers=headers(loginhdr('root', 'ddmlab', 'secret'), vohdr(long_vo)))
assert auth_response.status_code == 200
token = auth_response.headers.get('X-Rucio-Auth-Token')
assert token
return str(token)
@pytest.fixture(scope='module')
def mock_scope(vo):
from rucio.common.types import InternalScope
return InternalScope('mock', vo=vo)
@pytest.fixture(scope='module')
def test_scope(vo):
from rucio.common.types import InternalScope
return InternalScope('test', vo=vo)
@pytest.fixture(scope='module')
def root_account(vo):
from rucio.common.types import InternalAccount
return InternalAccount('root', vo=vo)
@pytest.fixture(scope='module')
def jdoe_account(vo):
from rucio.common.types import InternalAccount
return InternalAccount('jdoe', vo=vo)
@pytest.fixture
def random_account(vo):
import random
import string
from rucio.common.types import InternalAccount
from rucio.core.account import add_account, del_account
from rucio.db.sqla import models
from rucio.db.sqla.constants import AccountType
from rucio.tests.common_server import cleanup_db_deps
account = InternalAccount(''.join(random.choice(string.ascii_uppercase) for _ in range(10)), vo=vo)
add_account(account=account, type_=AccountType.USER, email=f'{account.external}@email.com')
yield account
cleanup_db_deps(model=models.Account, select_rows_stmt=models.Account.account == account)
del_account(account)
@pytest.fixture(scope="module")
def containerized_rses(rucio_client):
"""
Detects if containerized rses for xrootd & ssh are available in the testing environment.
:return: A list of (rse_name, rse_id) tuples.
"""
from rucio.common.exception import InvalidRSEExpression
rses = []
try:
xrd_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_xrd=True')]
xrd_rses = [rucio_client.get_rse(rse) for rse in xrd_rses]
xrd_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in xrd_rses if "xrd" in rse_obj['rse'].lower()]
xrd_containerized_rses.sort()
rses.extend(xrd_containerized_rses)
ssh_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_ssh=True')]
ssh_rses = [rucio_client.get_rse(rse) for rse in ssh_rses]
ssh_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in ssh_rses if "ssh" in rse_obj['rse'].lower()]
ssh_containerized_rses.sort()
rses.extend(ssh_containerized_rses)
except InvalidRSEExpression as invalid_rse_expression:
print("{ex}. Note that containerized RSEs will not be available in non-containerized test environments"
.format(ex=invalid_rse_expression))
traceback.print_exc()
return rses
@pytest.fixture
def rse_factory(request, vo, function_scope_prefix):
from .temp_factories import TemporaryRSEFactory
session = None
if 'db_session' in request.fixturenames:
session = request.getfixturevalue('db_session')
with TemporaryRSEFactory(vo=vo, name_prefix=function_scope_prefix, db_session=session) as factory:
yield factory
@pytest.fixture(scope="class")
def rse_factory_unittest(request, vo, class_scope_prefix):
"""
unittest classes can get access to rse_factory fixture via this fixture
"""
from .temp_factories import TemporaryRSEFactory
with TemporaryRSEFactory(vo=vo, name_prefix=class_scope_prefix) as factory:
request.cls.rse_factory = factory
yield factory
@pytest.fixture
def did_factory(request, vo, mock_scope, function_scope_prefix, file_factory, root_account):
from .temp_factories import TemporaryDidFactory
session = None
if 'db_session' in request.fixturenames:
session = request.getfixturevalue('db_session')
with TemporaryDidFactory(vo=vo, default_scope=mock_scope, name_prefix=function_scope_prefix, file_factory=file_factory,
default_account=root_account, db_session=session) as factory:
yield factory
@pytest.fixture
def file_factory(tmp_path_factory):
from .temp_factories import TemporaryFileFactory
with TemporaryFileFactory(pytest_path_factory=tmp_path_factory) as factory:
yield factory
@pytest.fixture
def scope_factory():
from rucio.common.utils import generate_uuid
from rucio.core.scope import add_scope
from rucio.common.types import InternalAccount, InternalScope
def create_scopes(vos, account_name=None):
scope_uuid = str(generate_uuid()).lower()[:16]
scope_name = 'shr_%s' % scope_uuid
created_scopes = []
for vo in vos:
scope = InternalScope(scope_name, vo=vo)
add_scope(scope, InternalAccount(account_name if account_name else 'root', vo=vo))
created_scopes.append(scope)
return scope_name, created_scopes
return create_scopes
class _TagFactory:
def __init__(self, prefix):
self.prefix = prefix
self.index = 0
def new_tag(self):
self.index += 1
return f'{self.prefix}-{self.index}'
@pytest.fixture
def tag_factory(function_scope_prefix):
return _TagFactory(prefix=f'{function_scope_prefix}{"".join(choice(ascii_uppercase) for _ in range(6))}'.replace('_', '-'))
@pytest.fixture(scope='class')
def tag_factory_class(class_scope_prefix):
return _TagFactory(prefix=f'{class_scope_prefix}{"".join(choice(ascii_uppercase) for _ in range(6))}'.replace('_', '-'))
@pytest.fixture
def db_session():
from rucio.db.sqla import session
db_session = session.get_session()
yield db_session
db_session.commit()
db_session.close()
def __get_fixture_param(request):
fixture_param = getattr(request, "param", None)
if not fixture_param:
# Parametrize support is incomplete for legacy unittest test cases
# Manually retrieve the parameters from the list of marks:
mark = next(iter(filter(lambda m: m.name == 'parametrize', request.instance.pytestmark)), None)
if mark:
fixture_param = mark.args[1][0]
return fixture_param
def __create_in_memory_db_table(name, *columns, **kwargs):
"""
Create an in-memory temporary table using the sqlite memory driver.
Make sqlalchemy aware of that table by registering it via a
declarative base.
"""
import datetime
from sqlalchemy import Column, DateTime, CheckConstraint
from sqlalchemy.pool import StaticPool
from sqlalchemy.schema import Table
from sqlalchemy.orm import registry
from rucio.db.sqla.models import ModelBase
from rucio.db.sqla.session import get_maker, create_engine
engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, poolclass=StaticPool)
# Create a class which inherits from ModelBase. This will allow us to use the rucio-specific methods like .save()
DeclarativeObj = type('DeclarativeObj{}'.format(name), (ModelBase,), {})
# Create a new declarative base and map the previously created object into the base
mapper_registry = registry()
InMemoryBase = mapper_registry.generate_base(name='InMemoryBase{}'.format(name))
table_args = tuple(columns) + tuple(kwargs.get('table_args', ())) + (
Column("created_at", DateTime, default=datetime.datetime.utcnow),
Column("updated_at", DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow),
CheckConstraint('CREATED_AT IS NOT NULL', name=name.upper() + '_CREATED_NN'),
CheckConstraint('UPDATED_AT IS NOT NULL', name=name.upper() + '_UPDATED_NN'),
)
table = Table(
name,
InMemoryBase.metadata,
*table_args
)
mapper_registry.map_imperatively(DeclarativeObj, table)
# Performa actual creation of the in-memory table
InMemoryBase.metadata.create_all(engine)
# Register the new table with the associated engine into the sqlalchemy sessionmaker
# In theory, this code must be protected by rucio.db.scla.session._LOCK, but this code will be executed
# during test case initialization, so there is no risk here to have concurrent calls from within the
# same process
senssionmaker = get_maker()
senssionmaker.kw.setdefault('binds', {}).update({DeclarativeObj: engine})
return DeclarativeObj
@pytest.fixture
def message_mock():
"""
Fixture which overrides the Message table with a private instance
"""
from unittest import mock
from sqlalchemy import Column
from rucio.common.utils import generate_uuid
from rucio.db.sqla.models import String, PrimaryKeyConstraint, CheckConstraint, Text, Index, GUID
InMemoryMessage = __create_in_memory_db_table(
'message_' + generate_uuid(),
Column('id', GUID(), default=generate_uuid),
Column('event_type', String(256)),
Column('payload', String(4000)),
Column('payload_nolimit', Text),
Column('services', String(256)),
table_args=(PrimaryKeyConstraint('id', name='MESSAGES_ID_PK'),
CheckConstraint('EVENT_TYPE IS NOT NULL', name='MESSAGES_EVENT_TYPE_NN'),
CheckConstraint('PAYLOAD IS NOT NULL', name='MESSAGES_PAYLOAD_NN'),
Index('MESSAGES_SERVICES_IDX', 'services', 'event_type'))
)
with mock.patch('rucio.core.message.Message', new=InMemoryMessage):
yield
@pytest.fixture
def core_config_mock(request):
"""
Fixture to allow having per-test core.config tables without affecting the other parallel tests.
This override works only in tests which use core function calls directly, not in the ones working
via the API, because the normal config table is not touched and the rucio instance answering API
calls is not aware of this mock.
This fixture acts by creating a new copy of the "config" sql table using the :memory: sqlite engine.
Accesses to the "models.Config" table are then redirected to this temporary table via mock.patch().
"""
from unittest import mock
from sqlalchemy import Column
from rucio.common.utils import generate_uuid
from rucio.db.sqla.models import String, PrimaryKeyConstraint
from rucio.db.sqla.session import get_session
# Get the fixture parameters
table_content = []
params = __get_fixture_param(request)
if params:
table_content = params.get("table_content", table_content)
InMemoryConfig = __create_in_memory_db_table(
'configs_' + generate_uuid(),
Column('section', String(128)),
Column('opt', String(128)),
Column('value', String(4000)),
table_args=(PrimaryKeyConstraint('section', 'opt', name='CONFIGS_PK'),),
)
# Fill the table with the requested mock data
session = get_session()()
for section, option, value in (table_content or []):
InMemoryConfig(section=section, opt=option, value=value).save(flush=True, session=session)
session.commit()
with mock.patch('rucio.core.config.models.Config', new=InMemoryConfig):
yield
@pytest.fixture
def file_config_mock(request):
"""
Fixture which allows to have an isolated in-memory configuration file instance which
is not persisted after exiting the fixture.
This override works only in tests which use config calls directly, not in the ones working
via the API, as the server config is not changed.
"""
from unittest import mock
from rucio.common.config import Config, config_set, config_has_section, config_add_section
# Get the fixture parameters
overrides = []
params = __get_fixture_param(request)
if params:
overrides = params.get("overrides", overrides)
parser = Config().parser
with mock.patch('rucio.common.config.get_config', side_effect=lambda: parser):
for section, option, value in (overrides or []):
if not config_has_section(section):
config_add_section(section)
config_set(section, option, value)
yield
@pytest.fixture
def caches_mock(request):
"""
Fixture which overrides the different internal caches with in-memory ones for the duration
of a particular test.
This override works only in tests which use core function calls directly, not in the ones
working via API.
The fixture acts by by mock.patch the REGION object in the provided list of modules to mock.
"""
from unittest import mock
from contextlib import ExitStack
from dogpile.cache import make_region
caches_to_mock = []
expiration_time = 600
params = __get_fixture_param(request)
if params:
caches_to_mock = params.get("caches_to_mock", caches_to_mock)
expiration_time = params.get("expiration_time", expiration_time)
with ExitStack() as stack:
mocked_caches = []
for module in caches_to_mock:
region = make_region().configure('dogpile.cache.memory', expiration_time=expiration_time)
stack.enter_context(mock.patch(module, new=region))
mocked_caches.append(region)
yield mocked_caches
@pytest.fixture
def metrics_mock():
"""
Overrides the prometheus metric registry and allows to verify if the desired
prometheus metrics were correctly recorded.
"""
from unittest import mock
from prometheus_client import CollectorRegistry, values
with mock.patch('rucio.core.monitor.REGISTRY', new=CollectorRegistry()) as registry, \
mock.patch('rucio.core.monitor.COUNTERS', new={}), \
mock.patch('rucio.core.monitor.GAUGES', new={}), \
mock.patch('rucio.core.monitor.TIMINGS', new={}), \
mock.patch('prometheus_client.values.ValueClass', new=values.MutexValue):
yield registry |
return RSEClient() | random_line_split |
conftest.py | # -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import re
import functools
from os import environ
from random import choice
from string import ascii_uppercase
import pytest
_del_test_prefix = functools.partial(re.compile(r'^[Tt][Ee][Ss][Tt]_?').sub, '')
# local imports in the fixtures to make this file loadable in e.g. client tests
pytest_plugins = ('tests.ruciopytest.artifacts_plugin', )
def pytest_configure(config):
config.addinivalue_line('markers', 'dirty: marks test as dirty, i.e. tests are leaving structures behind')
config.addinivalue_line('markers', 'noparallel(reason, groups): marks test being unable to run in parallel to other tests' )
if config.pluginmanager.hasplugin("xdist"):
from .ruciopytest import xdist_noparallel_scheduler
config.pluginmanager.register(xdist_noparallel_scheduler)
def pytest_make_parametrize_id(config, val, argname):
if argname == 'file_config_mock':
cfg = {}
for section, option, value in val['overrides']:
cfg.setdefault(section, {})[option] = value
return argname + str(cfg)
if argname == 'core_config_mock':
cfg = {}
for section, option, value in val['table_content']:
cfg.setdefault(section, {})[option] = value
return argname + str(cfg)
# return None to let pytest handle the formatting
return None
@pytest.fixture(scope='session')
def session_scope_prefix():
"""
Generate a name prefix to be shared by objects created during this pytest session
"""
return ''.join(choice(ascii_uppercase) for _ in range(6)) + '-'
@pytest.fixture(scope='module')
def module_scope_prefix(request, session_scope_prefix):
"""
Generate a name prefix to be shared by objects created during this pytest module
Relies on pytest's builtin fixture "request"
https://docs.pytest.org/en/6.2.x/reference.html#std-fixture-request
"""
return session_scope_prefix + _del_test_prefix(request.module.__name__.split('.')[-1]) + '-'
@pytest.fixture(scope='class')
def class_scope_prefix(request, module_scope_prefix):
if not request.cls:
return module_scope_prefix
return module_scope_prefix + _del_test_prefix(request.cls.__name__) + '-'
@pytest.fixture(scope='function')
def function_scope_prefix(request, class_scope_prefix):
return class_scope_prefix + _del_test_prefix(request.node.originalname) + '-'
@pytest.fixture(scope='session')
def vo():
if environ.get('SUITE', 'remote_dbs') != 'client':
# Server test, we can use short VO via DB for internal tests
from rucio.tests.common_server import get_vo
return get_vo()
else:
# Client-only test, only use config with no DB config
from rucio.tests.common import get_long_vo
return get_long_vo()
@pytest.fixture(scope='session')
def second_vo():
from rucio.common.config import config_get_bool
from rucio.core.vo import vo_exists, add_vo
multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
if not multi_vo:
pytest.skip('multi_vo mode is not enabled. Running multi_vo tests in single_vo mode would result in failures.')
new_vo = 'new'
if not vo_exists(vo=new_vo):
add_vo(vo=new_vo, description='Test', email='rucio@email.com')
return new_vo
@pytest.fixture(scope='session')
def long_vo():
from rucio.tests.common import get_long_vo
return get_long_vo()
@pytest.fixture(scope='module')
def account_client():
from rucio.client.accountclient import AccountClient
return AccountClient()
@pytest.fixture(scope='module')
def replica_client():
from rucio.client.replicaclient import ReplicaClient
return ReplicaClient()
@pytest.fixture(scope='module')
def rucio_client():
from rucio.client import Client
return Client()
@pytest.fixture(scope='module')
def did_client():
from rucio.client.didclient import DIDClient
return DIDClient()
@pytest.fixture(scope='module')
def rse_client():
from rucio.client.rseclient import RSEClient
return RSEClient()
@pytest.fixture(scope='module')
def scope_client():
from rucio.client.scopeclient import ScopeClient
return ScopeClient()
@pytest.fixture(scope='module')
def dirac_client():
from rucio.client.diracclient import DiracClient
return DiracClient()
@pytest.fixture
def rest_client():
from rucio.tests.common import print_response
from flask.testing import FlaskClient
from rucio.web.rest.flaskapi.v1.main import application
class WrappedFlaskClient(FlaskClient):
def __init__(self, *args, **kwargs):
super(WrappedFlaskClient, self).__init__(*args, **kwargs)
def open(self, path='/', *args, **kwargs):
print(kwargs.get('method', 'GET'), path)
response = super(WrappedFlaskClient, self).open(path, *args, **kwargs)
try:
print_response(response)
except Exception:
traceback.print_exc()
return response
_testing = application.testing
application.testing = True
application.test_client_class = WrappedFlaskClient
with application.test_client() as client:
yield client
application.test_client_class = None
application.testing = _testing
@pytest.fixture
def auth_token(rest_client, long_vo):
from rucio.tests.common import vohdr, headers, loginhdr
auth_response = rest_client.get('/auth/userpass', headers=headers(loginhdr('root', 'ddmlab', 'secret'), vohdr(long_vo)))
assert auth_response.status_code == 200
token = auth_response.headers.get('X-Rucio-Auth-Token')
assert token
return str(token)
@pytest.fixture(scope='module')
def mock_scope(vo):
from rucio.common.types import InternalScope
return InternalScope('mock', vo=vo)
@pytest.fixture(scope='module')
def | (vo):
from rucio.common.types import InternalScope
return InternalScope('test', vo=vo)
@pytest.fixture(scope='module')
def root_account(vo):
from rucio.common.types import InternalAccount
return InternalAccount('root', vo=vo)
@pytest.fixture(scope='module')
def jdoe_account(vo):
from rucio.common.types import InternalAccount
return InternalAccount('jdoe', vo=vo)
@pytest.fixture
def random_account(vo):
import random
import string
from rucio.common.types import InternalAccount
from rucio.core.account import add_account, del_account
from rucio.db.sqla import models
from rucio.db.sqla.constants import AccountType
from rucio.tests.common_server import cleanup_db_deps
account = InternalAccount(''.join(random.choice(string.ascii_uppercase) for _ in range(10)), vo=vo)
add_account(account=account, type_=AccountType.USER, email=f'{account.external}@email.com')
yield account
cleanup_db_deps(model=models.Account, select_rows_stmt=models.Account.account == account)
del_account(account)
@pytest.fixture(scope="module")
def containerized_rses(rucio_client):
"""
Detects if containerized rses for xrootd & ssh are available in the testing environment.
:return: A list of (rse_name, rse_id) tuples.
"""
from rucio.common.exception import InvalidRSEExpression
rses = []
try:
xrd_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_xrd=True')]
xrd_rses = [rucio_client.get_rse(rse) for rse in xrd_rses]
xrd_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in xrd_rses if "xrd" in rse_obj['rse'].lower()]
xrd_containerized_rses.sort()
rses.extend(xrd_containerized_rses)
ssh_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_ssh=True')]
ssh_rses = [rucio_client.get_rse(rse) for rse in ssh_rses]
ssh_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in ssh_rses if "ssh" in rse_obj['rse'].lower()]
ssh_containerized_rses.sort()
rses.extend(ssh_containerized_rses)
except InvalidRSEExpression as invalid_rse_expression:
print("{ex}. Note that containerized RSEs will not be available in non-containerized test environments"
.format(ex=invalid_rse_expression))
traceback.print_exc()
return rses
@pytest.fixture
def rse_factory(request, vo, function_scope_prefix):
from .temp_factories import TemporaryRSEFactory
session = None
if 'db_session' in request.fixturenames:
session = request.getfixturevalue('db_session')
with TemporaryRSEFactory(vo=vo, name_prefix=function_scope_prefix, db_session=session) as factory:
yield factory
@pytest.fixture(scope="class")
def rse_factory_unittest(request, vo, class_scope_prefix):
"""
unittest classes can get access to rse_factory fixture via this fixture
"""
from .temp_factories import TemporaryRSEFactory
with TemporaryRSEFactory(vo=vo, name_prefix=class_scope_prefix) as factory:
request.cls.rse_factory = factory
yield factory
@pytest.fixture
def did_factory(request, vo, mock_scope, function_scope_prefix, file_factory, root_account):
from .temp_factories import TemporaryDidFactory
session = None
if 'db_session' in request.fixturenames:
session = request.getfixturevalue('db_session')
with TemporaryDidFactory(vo=vo, default_scope=mock_scope, name_prefix=function_scope_prefix, file_factory=file_factory,
default_account=root_account, db_session=session) as factory:
yield factory
@pytest.fixture
def file_factory(tmp_path_factory):
from .temp_factories import TemporaryFileFactory
with TemporaryFileFactory(pytest_path_factory=tmp_path_factory) as factory:
yield factory
@pytest.fixture
def scope_factory():
from rucio.common.utils import generate_uuid
from rucio.core.scope import add_scope
from rucio.common.types import InternalAccount, InternalScope
def create_scopes(vos, account_name=None):
scope_uuid = str(generate_uuid()).lower()[:16]
scope_name = 'shr_%s' % scope_uuid
created_scopes = []
for vo in vos:
scope = InternalScope(scope_name, vo=vo)
add_scope(scope, InternalAccount(account_name if account_name else 'root', vo=vo))
created_scopes.append(scope)
return scope_name, created_scopes
return create_scopes
class _TagFactory:
def __init__(self, prefix):
self.prefix = prefix
self.index = 0
def new_tag(self):
self.index += 1
return f'{self.prefix}-{self.index}'
@pytest.fixture
def tag_factory(function_scope_prefix):
return _TagFactory(prefix=f'{function_scope_prefix}{"".join(choice(ascii_uppercase) for _ in range(6))}'.replace('_', '-'))
@pytest.fixture(scope='class')
def tag_factory_class(class_scope_prefix):
return _TagFactory(prefix=f'{class_scope_prefix}{"".join(choice(ascii_uppercase) for _ in range(6))}'.replace('_', '-'))
@pytest.fixture
def db_session():
from rucio.db.sqla import session
db_session = session.get_session()
yield db_session
db_session.commit()
db_session.close()
def __get_fixture_param(request):
fixture_param = getattr(request, "param", None)
if not fixture_param:
# Parametrize support is incomplete for legacy unittest test cases
# Manually retrieve the parameters from the list of marks:
mark = next(iter(filter(lambda m: m.name == 'parametrize', request.instance.pytestmark)), None)
if mark:
fixture_param = mark.args[1][0]
return fixture_param
def __create_in_memory_db_table(name, *columns, **kwargs):
"""
Create an in-memory temporary table using the sqlite memory driver.
Make sqlalchemy aware of that table by registering it via a
declarative base.
"""
import datetime
from sqlalchemy import Column, DateTime, CheckConstraint
from sqlalchemy.pool import StaticPool
from sqlalchemy.schema import Table
from sqlalchemy.orm import registry
from rucio.db.sqla.models import ModelBase
from rucio.db.sqla.session import get_maker, create_engine
engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, poolclass=StaticPool)
# Create a class which inherits from ModelBase. This will allow us to use the rucio-specific methods like .save()
DeclarativeObj = type('DeclarativeObj{}'.format(name), (ModelBase,), {})
# Create a new declarative base and map the previously created object into the base
mapper_registry = registry()
InMemoryBase = mapper_registry.generate_base(name='InMemoryBase{}'.format(name))
table_args = tuple(columns) + tuple(kwargs.get('table_args', ())) + (
Column("created_at", DateTime, default=datetime.datetime.utcnow),
Column("updated_at", DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow),
CheckConstraint('CREATED_AT IS NOT NULL', name=name.upper() + '_CREATED_NN'),
CheckConstraint('UPDATED_AT IS NOT NULL', name=name.upper() + '_UPDATED_NN'),
)
table = Table(
name,
InMemoryBase.metadata,
*table_args
)
mapper_registry.map_imperatively(DeclarativeObj, table)
# Performa actual creation of the in-memory table
InMemoryBase.metadata.create_all(engine)
# Register the new table with the associated engine into the sqlalchemy sessionmaker
# In theory, this code must be protected by rucio.db.scla.session._LOCK, but this code will be executed
# during test case initialization, so there is no risk here to have concurrent calls from within the
# same process
senssionmaker = get_maker()
senssionmaker.kw.setdefault('binds', {}).update({DeclarativeObj: engine})
return DeclarativeObj
@pytest.fixture
def message_mock():
"""
Fixture which overrides the Message table with a private instance
"""
from unittest import mock
from sqlalchemy import Column
from rucio.common.utils import generate_uuid
from rucio.db.sqla.models import String, PrimaryKeyConstraint, CheckConstraint, Text, Index, GUID
InMemoryMessage = __create_in_memory_db_table(
'message_' + generate_uuid(),
Column('id', GUID(), default=generate_uuid),
Column('event_type', String(256)),
Column('payload', String(4000)),
Column('payload_nolimit', Text),
Column('services', String(256)),
table_args=(PrimaryKeyConstraint('id', name='MESSAGES_ID_PK'),
CheckConstraint('EVENT_TYPE IS NOT NULL', name='MESSAGES_EVENT_TYPE_NN'),
CheckConstraint('PAYLOAD IS NOT NULL', name='MESSAGES_PAYLOAD_NN'),
Index('MESSAGES_SERVICES_IDX', 'services', 'event_type'))
)
with mock.patch('rucio.core.message.Message', new=InMemoryMessage):
yield
@pytest.fixture
def core_config_mock(request):
"""
Fixture to allow having per-test core.config tables without affecting the other parallel tests.
This override works only in tests which use core function calls directly, not in the ones working
via the API, because the normal config table is not touched and the rucio instance answering API
calls is not aware of this mock.
This fixture acts by creating a new copy of the "config" sql table using the :memory: sqlite engine.
Accesses to the "models.Config" table are then redirected to this temporary table via mock.patch().
"""
from unittest import mock
from sqlalchemy import Column
from rucio.common.utils import generate_uuid
from rucio.db.sqla.models import String, PrimaryKeyConstraint
from rucio.db.sqla.session import get_session
# Get the fixture parameters
table_content = []
params = __get_fixture_param(request)
if params:
table_content = params.get("table_content", table_content)
InMemoryConfig = __create_in_memory_db_table(
'configs_' + generate_uuid(),
Column('section', String(128)),
Column('opt', String(128)),
Column('value', String(4000)),
table_args=(PrimaryKeyConstraint('section', 'opt', name='CONFIGS_PK'),),
)
# Fill the table with the requested mock data
session = get_session()()
for section, option, value in (table_content or []):
InMemoryConfig(section=section, opt=option, value=value).save(flush=True, session=session)
session.commit()
with mock.patch('rucio.core.config.models.Config', new=InMemoryConfig):
yield
@pytest.fixture
def file_config_mock(request):
"""
Fixture which allows to have an isolated in-memory configuration file instance which
is not persisted after exiting the fixture.
This override works only in tests which use config calls directly, not in the ones working
via the API, as the server config is not changed.
"""
from unittest import mock
from rucio.common.config import Config, config_set, config_has_section, config_add_section
# Get the fixture parameters
overrides = []
params = __get_fixture_param(request)
if params:
overrides = params.get("overrides", overrides)
parser = Config().parser
with mock.patch('rucio.common.config.get_config', side_effect=lambda: parser):
for section, option, value in (overrides or []):
if not config_has_section(section):
config_add_section(section)
config_set(section, option, value)
yield
@pytest.fixture
def caches_mock(request):
"""
Fixture which overrides the different internal caches with in-memory ones for the duration
of a particular test.
This override works only in tests which use core function calls directly, not in the ones
working via API.
The fixture acts by by mock.patch the REGION object in the provided list of modules to mock.
"""
from unittest import mock
from contextlib import ExitStack
from dogpile.cache import make_region
caches_to_mock = []
expiration_time = 600
params = __get_fixture_param(request)
if params:
caches_to_mock = params.get("caches_to_mock", caches_to_mock)
expiration_time = params.get("expiration_time", expiration_time)
with ExitStack() as stack:
mocked_caches = []
for module in caches_to_mock:
region = make_region().configure('dogpile.cache.memory', expiration_time=expiration_time)
stack.enter_context(mock.patch(module, new=region))
mocked_caches.append(region)
yield mocked_caches
@pytest.fixture
def metrics_mock():
"""
Overrides the prometheus metric registry and allows to verify if the desired
prometheus metrics were correctly recorded.
"""
from unittest import mock
from prometheus_client import CollectorRegistry, values
with mock.patch('rucio.core.monitor.REGISTRY', new=CollectorRegistry()) as registry, \
mock.patch('rucio.core.monitor.COUNTERS', new={}), \
mock.patch('rucio.core.monitor.GAUGES', new={}), \
mock.patch('rucio.core.monitor.TIMINGS', new={}), \
mock.patch('prometheus_client.values.ValueClass', new=values.MutexValue):
yield registry
| test_scope | identifier_name |
conftest.py | # -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import re
import functools
from os import environ
from random import choice
from string import ascii_uppercase
import pytest
_del_test_prefix = functools.partial(re.compile(r'^[Tt][Ee][Ss][Tt]_?').sub, '')
# local imports in the fixtures to make this file loadable in e.g. client tests
pytest_plugins = ('tests.ruciopytest.artifacts_plugin', )
def pytest_configure(config):
config.addinivalue_line('markers', 'dirty: marks test as dirty, i.e. tests are leaving structures behind')
config.addinivalue_line('markers', 'noparallel(reason, groups): marks test being unable to run in parallel to other tests' )
if config.pluginmanager.hasplugin("xdist"):
from .ruciopytest import xdist_noparallel_scheduler
config.pluginmanager.register(xdist_noparallel_scheduler)
def pytest_make_parametrize_id(config, val, argname):
if argname == 'file_config_mock':
cfg = {}
for section, option, value in val['overrides']:
cfg.setdefault(section, {})[option] = value
return argname + str(cfg)
if argname == 'core_config_mock':
cfg = {}
for section, option, value in val['table_content']:
cfg.setdefault(section, {})[option] = value
return argname + str(cfg)
# return None to let pytest handle the formatting
return None
@pytest.fixture(scope='session')
def session_scope_prefix():
"""
Generate a name prefix to be shared by objects created during this pytest session
"""
return ''.join(choice(ascii_uppercase) for _ in range(6)) + '-'
@pytest.fixture(scope='module')
def module_scope_prefix(request, session_scope_prefix):
"""
Generate a name prefix to be shared by objects created during this pytest module
Relies on pytest's builtin fixture "request"
https://docs.pytest.org/en/6.2.x/reference.html#std-fixture-request
"""
return session_scope_prefix + _del_test_prefix(request.module.__name__.split('.')[-1]) + '-'
@pytest.fixture(scope='class')
def class_scope_prefix(request, module_scope_prefix):
|
@pytest.fixture(scope='function')
def function_scope_prefix(request, class_scope_prefix):
return class_scope_prefix + _del_test_prefix(request.node.originalname) + '-'
@pytest.fixture(scope='session')
def vo():
if environ.get('SUITE', 'remote_dbs') != 'client':
# Server test, we can use short VO via DB for internal tests
from rucio.tests.common_server import get_vo
return get_vo()
else:
# Client-only test, only use config with no DB config
from rucio.tests.common import get_long_vo
return get_long_vo()
@pytest.fixture(scope='session')
def second_vo():
from rucio.common.config import config_get_bool
from rucio.core.vo import vo_exists, add_vo
multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
if not multi_vo:
pytest.skip('multi_vo mode is not enabled. Running multi_vo tests in single_vo mode would result in failures.')
new_vo = 'new'
if not vo_exists(vo=new_vo):
add_vo(vo=new_vo, description='Test', email='rucio@email.com')
return new_vo
@pytest.fixture(scope='session')
def long_vo():
from rucio.tests.common import get_long_vo
return get_long_vo()
@pytest.fixture(scope='module')
def account_client():
from rucio.client.accountclient import AccountClient
return AccountClient()
@pytest.fixture(scope='module')
def replica_client():
from rucio.client.replicaclient import ReplicaClient
return ReplicaClient()
@pytest.fixture(scope='module')
def rucio_client():
from rucio.client import Client
return Client()
@pytest.fixture(scope='module')
def did_client():
from rucio.client.didclient import DIDClient
return DIDClient()
@pytest.fixture(scope='module')
def rse_client():
from rucio.client.rseclient import RSEClient
return RSEClient()
@pytest.fixture(scope='module')
def scope_client():
from rucio.client.scopeclient import ScopeClient
return ScopeClient()
@pytest.fixture(scope='module')
def dirac_client():
from rucio.client.diracclient import DiracClient
return DiracClient()
@pytest.fixture
def rest_client():
from rucio.tests.common import print_response
from flask.testing import FlaskClient
from rucio.web.rest.flaskapi.v1.main import application
class WrappedFlaskClient(FlaskClient):
def __init__(self, *args, **kwargs):
super(WrappedFlaskClient, self).__init__(*args, **kwargs)
def open(self, path='/', *args, **kwargs):
print(kwargs.get('method', 'GET'), path)
response = super(WrappedFlaskClient, self).open(path, *args, **kwargs)
try:
print_response(response)
except Exception:
traceback.print_exc()
return response
_testing = application.testing
application.testing = True
application.test_client_class = WrappedFlaskClient
with application.test_client() as client:
yield client
application.test_client_class = None
application.testing = _testing
@pytest.fixture
def auth_token(rest_client, long_vo):
from rucio.tests.common import vohdr, headers, loginhdr
auth_response = rest_client.get('/auth/userpass', headers=headers(loginhdr('root', 'ddmlab', 'secret'), vohdr(long_vo)))
assert auth_response.status_code == 200
token = auth_response.headers.get('X-Rucio-Auth-Token')
assert token
return str(token)
@pytest.fixture(scope='module')
def mock_scope(vo):
from rucio.common.types import InternalScope
return InternalScope('mock', vo=vo)
@pytest.fixture(scope='module')
def test_scope(vo):
from rucio.common.types import InternalScope
return InternalScope('test', vo=vo)
@pytest.fixture(scope='module')
def root_account(vo):
from rucio.common.types import InternalAccount
return InternalAccount('root', vo=vo)
@pytest.fixture(scope='module')
def jdoe_account(vo):
from rucio.common.types import InternalAccount
return InternalAccount('jdoe', vo=vo)
@pytest.fixture
def random_account(vo):
import random
import string
from rucio.common.types import InternalAccount
from rucio.core.account import add_account, del_account
from rucio.db.sqla import models
from rucio.db.sqla.constants import AccountType
from rucio.tests.common_server import cleanup_db_deps
account = InternalAccount(''.join(random.choice(string.ascii_uppercase) for _ in range(10)), vo=vo)
add_account(account=account, type_=AccountType.USER, email=f'{account.external}@email.com')
yield account
cleanup_db_deps(model=models.Account, select_rows_stmt=models.Account.account == account)
del_account(account)
@pytest.fixture(scope="module")
def containerized_rses(rucio_client):
"""
Detects if containerized rses for xrootd & ssh are available in the testing environment.
:return: A list of (rse_name, rse_id) tuples.
"""
from rucio.common.exception import InvalidRSEExpression
rses = []
try:
xrd_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_xrd=True')]
xrd_rses = [rucio_client.get_rse(rse) for rse in xrd_rses]
xrd_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in xrd_rses if "xrd" in rse_obj['rse'].lower()]
xrd_containerized_rses.sort()
rses.extend(xrd_containerized_rses)
ssh_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_ssh=True')]
ssh_rses = [rucio_client.get_rse(rse) for rse in ssh_rses]
ssh_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in ssh_rses if "ssh" in rse_obj['rse'].lower()]
ssh_containerized_rses.sort()
rses.extend(ssh_containerized_rses)
except InvalidRSEExpression as invalid_rse_expression:
print("{ex}. Note that containerized RSEs will not be available in non-containerized test environments"
.format(ex=invalid_rse_expression))
traceback.print_exc()
return rses
@pytest.fixture
def rse_factory(request, vo, function_scope_prefix):
from .temp_factories import TemporaryRSEFactory
session = None
if 'db_session' in request.fixturenames:
session = request.getfixturevalue('db_session')
with TemporaryRSEFactory(vo=vo, name_prefix=function_scope_prefix, db_session=session) as factory:
yield factory
@pytest.fixture(scope="class")
def rse_factory_unittest(request, vo, class_scope_prefix):
"""
unittest classes can get access to rse_factory fixture via this fixture
"""
from .temp_factories import TemporaryRSEFactory
with TemporaryRSEFactory(vo=vo, name_prefix=class_scope_prefix) as factory:
request.cls.rse_factory = factory
yield factory
@pytest.fixture
def did_factory(request, vo, mock_scope, function_scope_prefix, file_factory, root_account):
from .temp_factories import TemporaryDidFactory
session = None
if 'db_session' in request.fixturenames:
session = request.getfixturevalue('db_session')
with TemporaryDidFactory(vo=vo, default_scope=mock_scope, name_prefix=function_scope_prefix, file_factory=file_factory,
default_account=root_account, db_session=session) as factory:
yield factory
@pytest.fixture
def file_factory(tmp_path_factory):
from .temp_factories import TemporaryFileFactory
with TemporaryFileFactory(pytest_path_factory=tmp_path_factory) as factory:
yield factory
@pytest.fixture
def scope_factory():
from rucio.common.utils import generate_uuid
from rucio.core.scope import add_scope
from rucio.common.types import InternalAccount, InternalScope
def create_scopes(vos, account_name=None):
scope_uuid = str(generate_uuid()).lower()[:16]
scope_name = 'shr_%s' % scope_uuid
created_scopes = []
for vo in vos:
scope = InternalScope(scope_name, vo=vo)
add_scope(scope, InternalAccount(account_name if account_name else 'root', vo=vo))
created_scopes.append(scope)
return scope_name, created_scopes
return create_scopes
class _TagFactory:
def __init__(self, prefix):
self.prefix = prefix
self.index = 0
def new_tag(self):
self.index += 1
return f'{self.prefix}-{self.index}'
@pytest.fixture
def tag_factory(function_scope_prefix):
return _TagFactory(prefix=f'{function_scope_prefix}{"".join(choice(ascii_uppercase) for _ in range(6))}'.replace('_', '-'))
@pytest.fixture(scope='class')
def tag_factory_class(class_scope_prefix):
return _TagFactory(prefix=f'{class_scope_prefix}{"".join(choice(ascii_uppercase) for _ in range(6))}'.replace('_', '-'))
@pytest.fixture
def db_session():
from rucio.db.sqla import session
db_session = session.get_session()
yield db_session
db_session.commit()
db_session.close()
def __get_fixture_param(request):
fixture_param = getattr(request, "param", None)
if not fixture_param:
# Parametrize support is incomplete for legacy unittest test cases
# Manually retrieve the parameters from the list of marks:
mark = next(iter(filter(lambda m: m.name == 'parametrize', request.instance.pytestmark)), None)
if mark:
fixture_param = mark.args[1][0]
return fixture_param
def __create_in_memory_db_table(name, *columns, **kwargs):
"""
Create an in-memory temporary table using the sqlite memory driver.
Make sqlalchemy aware of that table by registering it via a
declarative base.
"""
import datetime
from sqlalchemy import Column, DateTime, CheckConstraint
from sqlalchemy.pool import StaticPool
from sqlalchemy.schema import Table
from sqlalchemy.orm import registry
from rucio.db.sqla.models import ModelBase
from rucio.db.sqla.session import get_maker, create_engine
engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, poolclass=StaticPool)
# Create a class which inherits from ModelBase. This will allow us to use the rucio-specific methods like .save()
DeclarativeObj = type('DeclarativeObj{}'.format(name), (ModelBase,), {})
# Create a new declarative base and map the previously created object into the base
mapper_registry = registry()
InMemoryBase = mapper_registry.generate_base(name='InMemoryBase{}'.format(name))
table_args = tuple(columns) + tuple(kwargs.get('table_args', ())) + (
Column("created_at", DateTime, default=datetime.datetime.utcnow),
Column("updated_at", DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow),
CheckConstraint('CREATED_AT IS NOT NULL', name=name.upper() + '_CREATED_NN'),
CheckConstraint('UPDATED_AT IS NOT NULL', name=name.upper() + '_UPDATED_NN'),
)
table = Table(
name,
InMemoryBase.metadata,
*table_args
)
mapper_registry.map_imperatively(DeclarativeObj, table)
# Performa actual creation of the in-memory table
InMemoryBase.metadata.create_all(engine)
# Register the new table with the associated engine into the sqlalchemy sessionmaker
# In theory, this code must be protected by rucio.db.scla.session._LOCK, but this code will be executed
# during test case initialization, so there is no risk here to have concurrent calls from within the
# same process
senssionmaker = get_maker()
senssionmaker.kw.setdefault('binds', {}).update({DeclarativeObj: engine})
return DeclarativeObj
@pytest.fixture
def message_mock():
"""
Fixture which overrides the Message table with a private instance
"""
from unittest import mock
from sqlalchemy import Column
from rucio.common.utils import generate_uuid
from rucio.db.sqla.models import String, PrimaryKeyConstraint, CheckConstraint, Text, Index, GUID
InMemoryMessage = __create_in_memory_db_table(
'message_' + generate_uuid(),
Column('id', GUID(), default=generate_uuid),
Column('event_type', String(256)),
Column('payload', String(4000)),
Column('payload_nolimit', Text),
Column('services', String(256)),
table_args=(PrimaryKeyConstraint('id', name='MESSAGES_ID_PK'),
CheckConstraint('EVENT_TYPE IS NOT NULL', name='MESSAGES_EVENT_TYPE_NN'),
CheckConstraint('PAYLOAD IS NOT NULL', name='MESSAGES_PAYLOAD_NN'),
Index('MESSAGES_SERVICES_IDX', 'services', 'event_type'))
)
with mock.patch('rucio.core.message.Message', new=InMemoryMessage):
yield
@pytest.fixture
def core_config_mock(request):
"""
Fixture to allow having per-test core.config tables without affecting the other parallel tests.
This override works only in tests which use core function calls directly, not in the ones working
via the API, because the normal config table is not touched and the rucio instance answering API
calls is not aware of this mock.
This fixture acts by creating a new copy of the "config" sql table using the :memory: sqlite engine.
Accesses to the "models.Config" table are then redirected to this temporary table via mock.patch().
"""
from unittest import mock
from sqlalchemy import Column
from rucio.common.utils import generate_uuid
from rucio.db.sqla.models import String, PrimaryKeyConstraint
from rucio.db.sqla.session import get_session
# Get the fixture parameters
table_content = []
params = __get_fixture_param(request)
if params:
table_content = params.get("table_content", table_content)
InMemoryConfig = __create_in_memory_db_table(
'configs_' + generate_uuid(),
Column('section', String(128)),
Column('opt', String(128)),
Column('value', String(4000)),
table_args=(PrimaryKeyConstraint('section', 'opt', name='CONFIGS_PK'),),
)
# Fill the table with the requested mock data
session = get_session()()
for section, option, value in (table_content or []):
InMemoryConfig(section=section, opt=option, value=value).save(flush=True, session=session)
session.commit()
with mock.patch('rucio.core.config.models.Config', new=InMemoryConfig):
yield
@pytest.fixture
def file_config_mock(request):
"""
Fixture which allows to have an isolated in-memory configuration file instance which
is not persisted after exiting the fixture.
This override works only in tests which use config calls directly, not in the ones working
via the API, as the server config is not changed.
"""
from unittest import mock
from rucio.common.config import Config, config_set, config_has_section, config_add_section
# Get the fixture parameters
overrides = []
params = __get_fixture_param(request)
if params:
overrides = params.get("overrides", overrides)
parser = Config().parser
with mock.patch('rucio.common.config.get_config', side_effect=lambda: parser):
for section, option, value in (overrides or []):
if not config_has_section(section):
config_add_section(section)
config_set(section, option, value)
yield
@pytest.fixture
def caches_mock(request):
"""
Fixture which overrides the different internal caches with in-memory ones for the duration
of a particular test.
This override works only in tests which use core function calls directly, not in the ones
working via API.
The fixture acts by by mock.patch the REGION object in the provided list of modules to mock.
"""
from unittest import mock
from contextlib import ExitStack
from dogpile.cache import make_region
caches_to_mock = []
expiration_time = 600
params = __get_fixture_param(request)
if params:
caches_to_mock = params.get("caches_to_mock", caches_to_mock)
expiration_time = params.get("expiration_time", expiration_time)
with ExitStack() as stack:
mocked_caches = []
for module in caches_to_mock:
region = make_region().configure('dogpile.cache.memory', expiration_time=expiration_time)
stack.enter_context(mock.patch(module, new=region))
mocked_caches.append(region)
yield mocked_caches
@pytest.fixture
def metrics_mock():
"""
Overrides the prometheus metric registry and allows to verify if the desired
prometheus metrics were correctly recorded.
"""
from unittest import mock
from prometheus_client import CollectorRegistry, values
with mock.patch('rucio.core.monitor.REGISTRY', new=CollectorRegistry()) as registry, \
mock.patch('rucio.core.monitor.COUNTERS', new={}), \
mock.patch('rucio.core.monitor.GAUGES', new={}), \
mock.patch('rucio.core.monitor.TIMINGS', new={}), \
mock.patch('prometheus_client.values.ValueClass', new=values.MutexValue):
yield registry
| if not request.cls:
return module_scope_prefix
return module_scope_prefix + _del_test_prefix(request.cls.__name__) + '-' | identifier_body |
conftest.py | # -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import re
import functools
from os import environ
from random import choice
from string import ascii_uppercase
import pytest
_del_test_prefix = functools.partial(re.compile(r'^[Tt][Ee][Ss][Tt]_?').sub, '')
# local imports in the fixtures to make this file loadable in e.g. client tests
pytest_plugins = ('tests.ruciopytest.artifacts_plugin', )
def pytest_configure(config):
config.addinivalue_line('markers', 'dirty: marks test as dirty, i.e. tests are leaving structures behind')
config.addinivalue_line('markers', 'noparallel(reason, groups): marks test being unable to run in parallel to other tests' )
if config.pluginmanager.hasplugin("xdist"):
from .ruciopytest import xdist_noparallel_scheduler
config.pluginmanager.register(xdist_noparallel_scheduler)
def pytest_make_parametrize_id(config, val, argname):
if argname == 'file_config_mock':
cfg = {}
for section, option, value in val['overrides']:
cfg.setdefault(section, {})[option] = value
return argname + str(cfg)
if argname == 'core_config_mock':
cfg = {}
for section, option, value in val['table_content']:
cfg.setdefault(section, {})[option] = value
return argname + str(cfg)
# return None to let pytest handle the formatting
return None
@pytest.fixture(scope='session')
def session_scope_prefix():
"""
Generate a name prefix to be shared by objects created during this pytest session
"""
return ''.join(choice(ascii_uppercase) for _ in range(6)) + '-'
@pytest.fixture(scope='module')
def module_scope_prefix(request, session_scope_prefix):
"""
Generate a name prefix to be shared by objects created during this pytest module
Relies on pytest's builtin fixture "request"
https://docs.pytest.org/en/6.2.x/reference.html#std-fixture-request
"""
return session_scope_prefix + _del_test_prefix(request.module.__name__.split('.')[-1]) + '-'
@pytest.fixture(scope='class')
def class_scope_prefix(request, module_scope_prefix):
if not request.cls:
return module_scope_prefix
return module_scope_prefix + _del_test_prefix(request.cls.__name__) + '-'
@pytest.fixture(scope='function')
def function_scope_prefix(request, class_scope_prefix):
return class_scope_prefix + _del_test_prefix(request.node.originalname) + '-'
@pytest.fixture(scope='session')
def vo():
if environ.get('SUITE', 'remote_dbs') != 'client':
# Server test, we can use short VO via DB for internal tests
|
else:
# Client-only test, only use config with no DB config
from rucio.tests.common import get_long_vo
return get_long_vo()
@pytest.fixture(scope='session')
def second_vo():
from rucio.common.config import config_get_bool
from rucio.core.vo import vo_exists, add_vo
multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
if not multi_vo:
pytest.skip('multi_vo mode is not enabled. Running multi_vo tests in single_vo mode would result in failures.')
new_vo = 'new'
if not vo_exists(vo=new_vo):
add_vo(vo=new_vo, description='Test', email='rucio@email.com')
return new_vo
@pytest.fixture(scope='session')
def long_vo():
from rucio.tests.common import get_long_vo
return get_long_vo()
@pytest.fixture(scope='module')
def account_client():
from rucio.client.accountclient import AccountClient
return AccountClient()
@pytest.fixture(scope='module')
def replica_client():
from rucio.client.replicaclient import ReplicaClient
return ReplicaClient()
@pytest.fixture(scope='module')
def rucio_client():
from rucio.client import Client
return Client()
@pytest.fixture(scope='module')
def did_client():
from rucio.client.didclient import DIDClient
return DIDClient()
@pytest.fixture(scope='module')
def rse_client():
from rucio.client.rseclient import RSEClient
return RSEClient()
@pytest.fixture(scope='module')
def scope_client():
from rucio.client.scopeclient import ScopeClient
return ScopeClient()
@pytest.fixture(scope='module')
def dirac_client():
from rucio.client.diracclient import DiracClient
return DiracClient()
@pytest.fixture
def rest_client():
from rucio.tests.common import print_response
from flask.testing import FlaskClient
from rucio.web.rest.flaskapi.v1.main import application
class WrappedFlaskClient(FlaskClient):
def __init__(self, *args, **kwargs):
super(WrappedFlaskClient, self).__init__(*args, **kwargs)
def open(self, path='/', *args, **kwargs):
print(kwargs.get('method', 'GET'), path)
response = super(WrappedFlaskClient, self).open(path, *args, **kwargs)
try:
print_response(response)
except Exception:
traceback.print_exc()
return response
_testing = application.testing
application.testing = True
application.test_client_class = WrappedFlaskClient
with application.test_client() as client:
yield client
application.test_client_class = None
application.testing = _testing
@pytest.fixture
def auth_token(rest_client, long_vo):
from rucio.tests.common import vohdr, headers, loginhdr
auth_response = rest_client.get('/auth/userpass', headers=headers(loginhdr('root', 'ddmlab', 'secret'), vohdr(long_vo)))
assert auth_response.status_code == 200
token = auth_response.headers.get('X-Rucio-Auth-Token')
assert token
return str(token)
@pytest.fixture(scope='module')
def mock_scope(vo):
from rucio.common.types import InternalScope
return InternalScope('mock', vo=vo)
@pytest.fixture(scope='module')
def test_scope(vo):
from rucio.common.types import InternalScope
return InternalScope('test', vo=vo)
@pytest.fixture(scope='module')
def root_account(vo):
from rucio.common.types import InternalAccount
return InternalAccount('root', vo=vo)
@pytest.fixture(scope='module')
def jdoe_account(vo):
from rucio.common.types import InternalAccount
return InternalAccount('jdoe', vo=vo)
@pytest.fixture
def random_account(vo):
import random
import string
from rucio.common.types import InternalAccount
from rucio.core.account import add_account, del_account
from rucio.db.sqla import models
from rucio.db.sqla.constants import AccountType
from rucio.tests.common_server import cleanup_db_deps
account = InternalAccount(''.join(random.choice(string.ascii_uppercase) for _ in range(10)), vo=vo)
add_account(account=account, type_=AccountType.USER, email=f'{account.external}@email.com')
yield account
cleanup_db_deps(model=models.Account, select_rows_stmt=models.Account.account == account)
del_account(account)
@pytest.fixture(scope="module")
def containerized_rses(rucio_client):
"""
Detects if containerized rses for xrootd & ssh are available in the testing environment.
:return: A list of (rse_name, rse_id) tuples.
"""
from rucio.common.exception import InvalidRSEExpression
rses = []
try:
xrd_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_xrd=True')]
xrd_rses = [rucio_client.get_rse(rse) for rse in xrd_rses]
xrd_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in xrd_rses if "xrd" in rse_obj['rse'].lower()]
xrd_containerized_rses.sort()
rses.extend(xrd_containerized_rses)
ssh_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_ssh=True')]
ssh_rses = [rucio_client.get_rse(rse) for rse in ssh_rses]
ssh_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in ssh_rses if "ssh" in rse_obj['rse'].lower()]
ssh_containerized_rses.sort()
rses.extend(ssh_containerized_rses)
except InvalidRSEExpression as invalid_rse_expression:
print("{ex}. Note that containerized RSEs will not be available in non-containerized test environments"
.format(ex=invalid_rse_expression))
traceback.print_exc()
return rses
@pytest.fixture
def rse_factory(request, vo, function_scope_prefix):
from .temp_factories import TemporaryRSEFactory
session = None
if 'db_session' in request.fixturenames:
session = request.getfixturevalue('db_session')
with TemporaryRSEFactory(vo=vo, name_prefix=function_scope_prefix, db_session=session) as factory:
yield factory
@pytest.fixture(scope="class")
def rse_factory_unittest(request, vo, class_scope_prefix):
"""
unittest classes can get access to rse_factory fixture via this fixture
"""
from .temp_factories import TemporaryRSEFactory
with TemporaryRSEFactory(vo=vo, name_prefix=class_scope_prefix) as factory:
request.cls.rse_factory = factory
yield factory
@pytest.fixture
def did_factory(request, vo, mock_scope, function_scope_prefix, file_factory, root_account):
from .temp_factories import TemporaryDidFactory
session = None
if 'db_session' in request.fixturenames:
session = request.getfixturevalue('db_session')
with TemporaryDidFactory(vo=vo, default_scope=mock_scope, name_prefix=function_scope_prefix, file_factory=file_factory,
default_account=root_account, db_session=session) as factory:
yield factory
@pytest.fixture
def file_factory(tmp_path_factory):
from .temp_factories import TemporaryFileFactory
with TemporaryFileFactory(pytest_path_factory=tmp_path_factory) as factory:
yield factory
@pytest.fixture
def scope_factory():
from rucio.common.utils import generate_uuid
from rucio.core.scope import add_scope
from rucio.common.types import InternalAccount, InternalScope
def create_scopes(vos, account_name=None):
scope_uuid = str(generate_uuid()).lower()[:16]
scope_name = 'shr_%s' % scope_uuid
created_scopes = []
for vo in vos:
scope = InternalScope(scope_name, vo=vo)
add_scope(scope, InternalAccount(account_name if account_name else 'root', vo=vo))
created_scopes.append(scope)
return scope_name, created_scopes
return create_scopes
class _TagFactory:
def __init__(self, prefix):
self.prefix = prefix
self.index = 0
def new_tag(self):
self.index += 1
return f'{self.prefix}-{self.index}'
@pytest.fixture
def tag_factory(function_scope_prefix):
return _TagFactory(prefix=f'{function_scope_prefix}{"".join(choice(ascii_uppercase) for _ in range(6))}'.replace('_', '-'))
@pytest.fixture(scope='class')
def tag_factory_class(class_scope_prefix):
return _TagFactory(prefix=f'{class_scope_prefix}{"".join(choice(ascii_uppercase) for _ in range(6))}'.replace('_', '-'))
@pytest.fixture
def db_session():
from rucio.db.sqla import session
db_session = session.get_session()
yield db_session
db_session.commit()
db_session.close()
def __get_fixture_param(request):
fixture_param = getattr(request, "param", None)
if not fixture_param:
# Parametrize support is incomplete for legacy unittest test cases
# Manually retrieve the parameters from the list of marks:
mark = next(iter(filter(lambda m: m.name == 'parametrize', request.instance.pytestmark)), None)
if mark:
fixture_param = mark.args[1][0]
return fixture_param
def __create_in_memory_db_table(name, *columns, **kwargs):
"""
Create an in-memory temporary table using the sqlite memory driver.
Make sqlalchemy aware of that table by registering it via a
declarative base.
"""
import datetime
from sqlalchemy import Column, DateTime, CheckConstraint
from sqlalchemy.pool import StaticPool
from sqlalchemy.schema import Table
from sqlalchemy.orm import registry
from rucio.db.sqla.models import ModelBase
from rucio.db.sqla.session import get_maker, create_engine
engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, poolclass=StaticPool)
# Create a class which inherits from ModelBase. This will allow us to use the rucio-specific methods like .save()
DeclarativeObj = type('DeclarativeObj{}'.format(name), (ModelBase,), {})
# Create a new declarative base and map the previously created object into the base
mapper_registry = registry()
InMemoryBase = mapper_registry.generate_base(name='InMemoryBase{}'.format(name))
table_args = tuple(columns) + tuple(kwargs.get('table_args', ())) + (
Column("created_at", DateTime, default=datetime.datetime.utcnow),
Column("updated_at", DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow),
CheckConstraint('CREATED_AT IS NOT NULL', name=name.upper() + '_CREATED_NN'),
CheckConstraint('UPDATED_AT IS NOT NULL', name=name.upper() + '_UPDATED_NN'),
)
table = Table(
name,
InMemoryBase.metadata,
*table_args
)
mapper_registry.map_imperatively(DeclarativeObj, table)
# Performa actual creation of the in-memory table
InMemoryBase.metadata.create_all(engine)
# Register the new table with the associated engine into the sqlalchemy sessionmaker
# In theory, this code must be protected by rucio.db.scla.session._LOCK, but this code will be executed
# during test case initialization, so there is no risk here to have concurrent calls from within the
# same process
senssionmaker = get_maker()
senssionmaker.kw.setdefault('binds', {}).update({DeclarativeObj: engine})
return DeclarativeObj
@pytest.fixture
def message_mock():
"""
Fixture which overrides the Message table with a private instance
"""
from unittest import mock
from sqlalchemy import Column
from rucio.common.utils import generate_uuid
from rucio.db.sqla.models import String, PrimaryKeyConstraint, CheckConstraint, Text, Index, GUID
InMemoryMessage = __create_in_memory_db_table(
'message_' + generate_uuid(),
Column('id', GUID(), default=generate_uuid),
Column('event_type', String(256)),
Column('payload', String(4000)),
Column('payload_nolimit', Text),
Column('services', String(256)),
table_args=(PrimaryKeyConstraint('id', name='MESSAGES_ID_PK'),
CheckConstraint('EVENT_TYPE IS NOT NULL', name='MESSAGES_EVENT_TYPE_NN'),
CheckConstraint('PAYLOAD IS NOT NULL', name='MESSAGES_PAYLOAD_NN'),
Index('MESSAGES_SERVICES_IDX', 'services', 'event_type'))
)
with mock.patch('rucio.core.message.Message', new=InMemoryMessage):
yield
@pytest.fixture
def core_config_mock(request):
"""
Fixture to allow having per-test core.config tables without affecting the other parallel tests.
This override works only in tests which use core function calls directly, not in the ones working
via the API, because the normal config table is not touched and the rucio instance answering API
calls is not aware of this mock.
This fixture acts by creating a new copy of the "config" sql table using the :memory: sqlite engine.
Accesses to the "models.Config" table are then redirected to this temporary table via mock.patch().
"""
from unittest import mock
from sqlalchemy import Column
from rucio.common.utils import generate_uuid
from rucio.db.sqla.models import String, PrimaryKeyConstraint
from rucio.db.sqla.session import get_session
# Get the fixture parameters
table_content = []
params = __get_fixture_param(request)
if params:
table_content = params.get("table_content", table_content)
InMemoryConfig = __create_in_memory_db_table(
'configs_' + generate_uuid(),
Column('section', String(128)),
Column('opt', String(128)),
Column('value', String(4000)),
table_args=(PrimaryKeyConstraint('section', 'opt', name='CONFIGS_PK'),),
)
# Fill the table with the requested mock data
session = get_session()()
for section, option, value in (table_content or []):
InMemoryConfig(section=section, opt=option, value=value).save(flush=True, session=session)
session.commit()
with mock.patch('rucio.core.config.models.Config', new=InMemoryConfig):
yield
@pytest.fixture
def file_config_mock(request):
"""
Fixture which allows to have an isolated in-memory configuration file instance which
is not persisted after exiting the fixture.
This override works only in tests which use config calls directly, not in the ones working
via the API, as the server config is not changed.
"""
from unittest import mock
from rucio.common.config import Config, config_set, config_has_section, config_add_section
# Get the fixture parameters
overrides = []
params = __get_fixture_param(request)
if params:
overrides = params.get("overrides", overrides)
parser = Config().parser
with mock.patch('rucio.common.config.get_config', side_effect=lambda: parser):
for section, option, value in (overrides or []):
if not config_has_section(section):
config_add_section(section)
config_set(section, option, value)
yield
@pytest.fixture
def caches_mock(request):
"""
Fixture which overrides the different internal caches with in-memory ones for the duration
of a particular test.
This override works only in tests which use core function calls directly, not in the ones
working via API.
The fixture acts by by mock.patch the REGION object in the provided list of modules to mock.
"""
from unittest import mock
from contextlib import ExitStack
from dogpile.cache import make_region
caches_to_mock = []
expiration_time = 600
params = __get_fixture_param(request)
if params:
caches_to_mock = params.get("caches_to_mock", caches_to_mock)
expiration_time = params.get("expiration_time", expiration_time)
with ExitStack() as stack:
mocked_caches = []
for module in caches_to_mock:
region = make_region().configure('dogpile.cache.memory', expiration_time=expiration_time)
stack.enter_context(mock.patch(module, new=region))
mocked_caches.append(region)
yield mocked_caches
@pytest.fixture
def metrics_mock():
"""
Overrides the prometheus metric registry and allows to verify if the desired
prometheus metrics were correctly recorded.
"""
from unittest import mock
from prometheus_client import CollectorRegistry, values
with mock.patch('rucio.core.monitor.REGISTRY', new=CollectorRegistry()) as registry, \
mock.patch('rucio.core.monitor.COUNTERS', new={}), \
mock.patch('rucio.core.monitor.GAUGES', new={}), \
mock.patch('rucio.core.monitor.TIMINGS', new={}), \
mock.patch('prometheus_client.values.ValueClass', new=values.MutexValue):
yield registry
| from rucio.tests.common_server import get_vo
return get_vo() | conditional_block |
cmonkeyobj.py | import os
import cStringIO
import sqlite3 as sql3
import gzip,bz2
import cPickle as pickle
import ConfigParser
import pandas as pd
import numpy as np
from numpy import nan as NA
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
from Bio import motifs
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from Bio import SeqIO
import weblogolib as wl
import utils as ut
"""
from cmonkeyobj import cMonkey2 as cm2
b = cm2('eco-out-001/cmonkey_run.db')
pd.Series([b.get_cluster_info(k)['residual'] for k in range(1,b.k_clust)]).plot(kind='hist',bins=20)
pd.DataFrame([b.get_cluster_info(k)['pclusts'] for k in range(1,b.k_clust)]).plot(kind='hist',bins=20,stacked=True)
"""
## TBD: plotting motif locations relative to gene start. See
## http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc211
## https://www.biostars.org/p/96470/#97713
class cMonkey2:
dbfile = '' #None
tables = {} #None
iteration = 2001
k_clust = 999 #None
organism = '' #'eco'
species = '' #'Escherichia_coli_K12' #None
taxon_id = None
ratios = pd.DataFrame()
config = ConfigParser.ConfigParser() #None
stats = None
def __init__( self, dbfile ):
self.dbfile = dbfile
conn = sql3.connect( dbfile )
tmp = pd.read_sql('select max(iteration) from iteration_stats', conn) ##last_iteration from run_infos', conn)
conn.close()
self.iteration = tmp.max()[0] ## get iteration
print 'iteration =', self.iteration
self.tables = self.__read_all_tables( dbfile, iteration=self.iteration )
#self.iteration = max(self.tables['motif_infos'].iteration)
self.k_clust = self.tables['run_infos'].num_clusters[0] ##max(self.tables['row_members'].cluster)
self.organism = self.tables['run_infos'].organism[0]
self.species = self.tables['run_infos'].species[0]
self.config = self.load_config()
def __read_all_tables( self, dbfile, iteration=2000 ): #limit=None ):
"""read out all tables in the sql3 db file into a dict of pandas dataframes"""
conn = sql3.connect( dbfile )
tnames = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name", conn)
tables = {}
for tname in tnames.name.values:
#print tname
tmp = pd.read_sql( 'select * from %s limit 3' % tname, conn )
if tname != 'motif_infos' and 'iteration' in tmp.columns.values.tolist():
query = 'select * from %s where iteration=' + str(iteration)
else:
query = 'select * from %s'
table = pd.read_sql(query % tname, conn)
if tname == 'motif_infos':
table = table[ table.iteration == iteration ]
tables[ tname ] = table
conn.close()
table = tables[ 'meme_motif_sites' ]
table = table.ix[ np.in1d( table.motif_info_id, tables[ 'motif_infos' ].index.values ) ]
tables[ 'meme_motif_sites' ] = table
return tables
def reload( self ):
conn = sql3.connect( self.dbfile )
tmp = pd.read_sql('select max(iteration) from iteration_stats',conn)
conn.close()
self.iteration = tmp.max()[0] ## get iteration
print 'iteration =', self.iteration
self.tables = self.__read_all_tables( self.dbfile, iteration=self.iteration )
self.stats = None
def get_feature_names( self ):
feature_names_file = './cache/' + self.species + '_feature_names'
feature_names = pd.read_table( feature_names_file, sep='\t', header=None, skiprows=4 )
feature_names.columns = ['id','names','type']
#feature_names = feature_names.set_index( 'names' )
return feature_names
def get_features( self ):
features_file = './cache/' + self.species + '_features'
features = pd.read_table( features_file, sep='\t', header=0, skiprows=16 )
cols = features.columns.values; cols[0] = 'id'; features.columns = cols
#features = features.set_index( 'od' )
return features
| contigs = np.unique( features.contig )
seqs = {}
for contig in contigs:
genome_file = './cache/' + self.species + '_' + contig
seq = ut.readLines( genome_file )[0].strip().upper()
seqs[contig] = seq
return seqs
def get_networks( self, include_operons=True, include_string=True ):
networks = {}
taxid = self.load_taxon_id()
if include_operons and os.path.exists('./cache/gnc' + str(taxid) + '.named'):
op_file = './cache/gnc' + str(taxid) + '.named'
operons = pd.read_table( op_file )
networks['operons'] = operons
if include_string and os.path.exists('./cache/' + str(taxid) + '.gz'):
string_file = './cache/' + str(taxid) + '.gz'
string = pd.read_table( gzip.GzipFile( string_file ), header=None )
networks['string'] = string
return networks
## see http://www.kegg.jp/kegg/rest/keggapi.html
## and http://biopython.org/DIST/docs/api/Bio.KEGG.REST-module.html
## another option: http://www.genome.jp/kegg-bin/show_organism?org=eco
def load_taxon_id( self, in_code=None ):
''' lets try getting it directly from KEGG based on inputted organism 3-letter code
a bit hairy but it works! TODO: cache the org_table and gen_table in cache/'''
if self.taxon_id is not None:
return self.taxon_id
import Bio.KEGG.REST as kegg ## requires BioPython 1.65 or later!
if in_code is None:
in_code = self.tables['run_infos'].organism[0]
org_table = kegg.kegg_list('organism').readlines()
org_table = ''.join( org_table )
buf = cStringIO.StringIO( org_table )
org_table = pd.read_table( buf, sep='\t', header=None )
#full_org_name = org_table.ix[org_table[1]==in_code][2].values[0]
buf.close()
kegg_code = org_table.ix[org_table[1]==in_code][0].values[0]
gen_table = kegg.kegg_list('genome').readlines()
gen_table = ''.join( gen_table )
buf = cStringIO.StringIO( gen_table )
gen_table = pd.read_table( buf, sep='\t', header=None )
buf.close()
taxon_id = int(gen_table.ix[ gen_table[0] == 'genome:'+kegg_code ][1].values[0].split(', ')[2].split('; ')[0])
self.taxon_id = taxon_id
return taxon_id
def load_ratios( self, ratios_file=None ):
if ratios_file is None:
ratios_file = os.path.dirname(self.dbfile) + '/ratios.tsv.gz'
if self.ratios is None:
self.ratios = pd.read_table( gzip.GzipFile( ratios_file ), sep='\t' )
return self.ratios
def load_config( self, config_file=None ):
"""then can do e.g., b.config.getfloat('Rows', 'scaling_constant')
or simply, dict(b.config.items('Rows'))"""
if config_file is None:
config_file = os.path.dirname(self.dbfile) + '/final.ini'
config_parser = ConfigParser.ConfigParser()
config_parser.read( config_file )
self.config = config_parser
return self.config
def pickle_all( self, outfile=None, include_genome=False, include_networks=False ):
'''Try to pickle up ALL relevant info from the cmonkey run
can load it via b = pickle.load(gzip.GzipFile(outfile)) '''
## another thing to try is to load the
feature_names = self.get_feature_names()
features = self.get_features()
genome = None
if include_genome:
genome = self.get_genome_seqs()
networks = None
if include_networks:
networks = self.get_networks()
self.load_ratios()
self.load_config()
self.get_stats()
## do pickling here
if outfile is None:
outfile = gzip.GzipFile( os.path.dirname(self.dbfile) + '/dump.pkl.gz', 'wb' )
obj = { 'b': self,
'feature_names': feature_names,
'features': features,
'genome': genome,
'networks': networks }
print outfile
pickle.dump( obj, outfile )
outfile.close()
def get_rows( self, k ):
t1 = self.tables['row_members']
t1 = t1[ t1.iteration == self.iteration ]
t1 = t1[ t1.cluster == k ]
t2 = self.tables['row_names']
t2 = pd.merge( t1, t2, on='order_num' )
return t2.name.values
def get_cols( self, k ):
t1 = self.tables['column_members']
t1 = t1[ t1.iteration == self.iteration ]
t1 = t1[ t1.cluster == k ]
t2 = self.tables['column_names']
t2 = pd.merge( t1, t2, on='order_num' )
return t2.name.values
def get_ratios( self, k=None, rows=None, cols=None, included=True ):
"""Extract submatrix of ratios for cluster or rows/cols.
If ~included, extract submatrix of ratios for conditions NOT in cluster."""
if self.ratios is None:
ratios = self.load_ratios()
if k is not None:
if rows is None:
rows = self.get_rows( k )
if cols is None:
cols = self.get_cols( k )
if not included:
cols = ratios.columns.values[ np.in1d( ratios.columns.values, cols, invert=True ) ]
rats = self.ratios.ix[ rows, cols ]
return rats
def plot_ratios( self, k=None, rows=None, cols=None, included=True, kind='line' ):
## see http://pandas.pydata.org/pandas-docs/version/0.15.0/visualization.html -- cool!
## can use kind = 'box' too!
rats = self.get_ratios( k, rows, cols, included )
rats = rats.transpose()
if kind == 'box': ## sort by mean of columns
means = rats.mean(1)
tmp = pd.concat( [rats, means], 1 )
cols = tmp.columns.values; cols[-1] = 'MEANS'; tmp.columns = cols
tmp = tmp.sort( ['MEANS'] )
tmp = tmp.drop( 'MEANS', 1 )
rats = tmp.transpose()
rats.plot(kind=kind, use_index=False, title='Cluster %d'%(k), legend=False, sym='.')
else:
rats.plot(kind=kind, use_index=False, title='Cluster %d'%(k), legend=False)
## use plt.close() to close the window
def get_cluster_info( self, k ):
t1 = self.tables['cluster_stats']
t1 = t1[ t1.cluster == k ]
#t1 = t1.drop( ['iteration', 'cluster'], 1 )
t2 = self.tables['motif_infos']
t2 = t2[ t2.cluster == k ]
#t2 = t2.drop( ['iteration', 'cluster'], 1 )
## Extract it.
out = {'residual':t1.residual.values[0],
'nrows':t1.num_rows.values[0],
'ncols':t1.num_cols.values[0],
'e_values':t2.evalue.values}
## Also get p-clust
pclusts = np.array([self.get_motif_pclust(k,i) for i in range(1,t2.shape[0]+1)])
out['pclusts'] = pclusts
return out
def get_cluster_networks( self, k ):
networks = self.get_networks()
genes = self.get_rows( k )
out_nets = {}
if 'string' in networks.keys():
string = networks['string']
string = string.ix[ np.in1d(string[0], genes) ] ## slow!
string = string.ix[ np.in1d(string[1], genes) ]
out_nets['string'] = string
if 'operons' in networks.keys():
ops = networks['operons']
ops = ops.ix[ np.in1d(ops.SysName1, genes) | np.in1d(ops.SysName2, genes) ]
ops = ops.ix[ ops.bOp == True ]
out_nets['operons'] = ops
return out_nets
## see https://www.udacity.com/wiki/creating-network-graphs-with-python
def plot_cluster_networks( self, k ):
import networkx as nx
out_nets = self.get_cluster_networks( k )
gr = nx.Graph()
if 'string' in out_nets.keys():
strng = out_nets[ 'string' ]
buf = cStringIO.StringIO() ## round-about way to do it but wtf?
strng.to_csv( buf, sep='\t', header=False, index=False )
buf.flush(); buf.seek(0)
gr = nx.read_weighted_edgelist( buf )
buf.close()
if 'operons' in out_nets.keys():
ops = out_nets[ 'operons' ]
ops = ops.ix[ ops.bOp == True ]
ops = ops[ ['SysName1','SysName2','pOp'] ]
ops.pOp = ops.pOp * 1000.
buf = cStringIO.StringIO() ## round-about way to do it but wtf?
ops.to_csv( buf, sep='\t', header=False, index=False )
buf.flush(); buf.seek(0)
gr2 = nx.read_weighted_edgelist( buf )
buf.close()
#gr2 = nx.Graph( [ tuple(x) for x in ops[['SysName1','SysName2']].to_records(index=False) ],
# weight=ops.pOp.values*1000, typ='operons' )
## from https://stackoverflow.com/questions/11758774/merging-two-network-maps-in-networkx-by-unique-labels :
gr.add_nodes_from(gr2.nodes(data=True))
gr.add_edges_from(gr2.edges(data=True)) #, weight=gr2.graph['weight'], type=gr2.graph['type'])
pos = nx.spring_layout(gr, k=0.9, iterations=2000)
## requires installation of graphviz-dev and pygraphviz:
##from networkx import graphviz_layout
##pos = nx.graphviz_layout( gr, prog='neato'
pos2 = { i:k for i,k in pos.items() if i in gr2.nodes() }
nx.draw_networkx_edges(gr2, pos2, edge_color='r', width=4, alpha=0.5)
nx.draw_networkx(gr, pos, node_size=50, node_color='b', edge_color='b', font_size=7, width=2, alpha=0.3)
def clusters_w_genes( self, genes ):
t1 = self.tables['row_members']
t1 = t1[ (t1.iteration == self.iteration) ]
t2 = self.tables['row_names']
t2 = t2[ np.in1d(t2.name, genes) ]
t2 = pd.merge( t1, t2, on='order_num' )
t2 = t2.drop( ['iteration', 'order_num'], 1 )
return t2
def clusters_w_conds( self, conds ):
t1 = self.tables['column_members']
t1 = t1[ (t1.iteration == self.iteration) ]
t2 = self.tables['column_names']
t2 = t2[ np.in1d(t2.name, conds) ]
t2 = pd.merge( t1, t2, on='order_num' )
t2 = t2.drop( ['iteration', 'order_num'], 1 )
return t2
def cluster_summary( self ):
tab = self.tables['cluster_stats']
infos = { k: self.get_cluster_info(k+1) for k in range(self.k_clust) }
tab[ 'e_value1' ] = pd.Series( [ infos[k]['e_values'][0] if
len(infos[k]['e_values']) > 0 else NA for k in range(self.k_clust) ] )
tab[ 'e_value2' ] = pd.Series( [ infos[k]['e_values'][1] if
len(infos[k]['e_values']) > 1 else NA for k in range(self.k_clust) ] )
tab[ 'p_clust1' ] = pd.Series( [ infos[k]['pclusts'][0] if
len(infos[k]['pclusts']) > 0 else NA for k in range(self.k_clust) ] )
tab[ 'p_clust2' ] = pd.Series( [ infos[k]['pclusts'][1] if
len(infos[k]['pclusts']) > 1 else NA for k in range(self.k_clust) ] )
tab = tab.set_index( tab.cluster )
tab = tab.drop( ['iteration', 'cluster'], axis=1 )
return tab
def get_stats( self ):
if self.stats is not None:
return self.stats
conn = sql3.connect( self.dbfile )
table = pd.read_sql('select * from iteration_stats', conn)
conn.close()
tmp = self.tables['statstypes'].copy()
tmp.index = tmp.index + 1
table = pd.merge(table,tmp,left_on='statstype',right_index=True)
tmp = table.groupby( 'name' )
tmp = { name:df for name,df in tmp }
for name in tmp.keys():
tmp2 = tmp[name]
tmp2.index = tmp2.iteration
tmp2 = tmp2.drop( ['statstype', 'category', 'name', 'iteration'], axis=1 )
tmp2.columns=[name]
tmp[name] = tmp2
#if 'SetEnrichment' in tmp.keys():
# pvs = pd.read_csv( os.path.dirname(self.dbfile) + '/setEnrichment_pvalue.csv', index_col=0 )
# pvs = pvs.fillna( 1.0 )
# tmp['SetEnrichment'] = np.log10(pvs+1e-30).median(1) ##.plot()
table = pd.concat( tmp, axis=1 )
table.columns = [i[0] for i in table.columns.values]
self.stats = table
return table
def plot_stats( self ):
table = self.get_stats()
ut.setup_text_plots( usetex=False )
if 'SetEnrichment' in table.columns.values:
table.SetEnrichment.replace( 0, NA, inplace=True )
table.plot( subplots=True, layout=[3,-1], sharex=True, legend=True, fontsize=8 )
#fig, axes = plt.subplots(nrows=3, ncols=3, sharex=True)
#for i, c in enumerate(table.columns):
# table[c].plot( ax=axes[i/3][i%3], title=c )
def __get_motif_id(self, cluster_num, motif_num):
motif_infos = self.tables['motif_infos']
rowid = motif_infos[(motif_infos.iteration==self.iteration) &
(motif_infos.cluster==cluster_num) &
(motif_infos.motif_num==motif_num)].index.values[0]+1
return rowid
#motif_id = self.tables['meme_motif_sites'].ix[rowid].motif_info_id
#return motif_id
def get_motif_pssm(self, cluster_num, motif_num):
"""export the specified motif to a pandas dataframe
Parameters:
- cluster_num: bicluster number
- motif_num: motif number
"""
#conn = sql3.connect(self.dbfile)
#cursor = conn.cursor()
#cursor.execute('select max(iteration) from motif_infos')
#iteration = cursor.fetchone()[0]
#query = 'select rowid from motif_infos where iteration=? and cluster=? and motif_num=?'
#params = [self.iteration, cluster_num, motif_num]
#cursor.execute(query, params)
#rowid = cursor.fetchone()[0]
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
#query = 'select a,c,g,t from motif_pssm_rows where iteration=? and motif_info_id=?'
#params = [self.iteration, rowid]
#pssm = pd.read_sql( query, conn, params=params )
motif_pssm_rows = self.tables['motif_pssm_rows']
pssm = motif_pssm_rows[(motif_pssm_rows.iteration==self.iteration) & (motif_pssm_rows.motif_info_id==rowid)]
pssm.drop( ['motif_info_id', 'iteration', 'row'], 1, inplace=True )
return pssm
def get_motif_sites(self, cluster_num, motif_num=None):
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
print rowid
sites = self.tables['meme_motif_sites']
sites = sites[ sites.motif_info_id == rowid ]
sites = sites.drop( ['motif_info_id'], 1 )
feature_names = self.get_feature_names()
tmp = pd.merge( sites, feature_names, left_on='seq_name', right_on='id' )
tmp = tmp[ np.in1d( tmp.names.values, self.tables['row_names'].name.values ) ]
tmp = tmp.drop( ['seq_name', 'type'], 1 )
tmp = tmp.drop_duplicates()
return tmp ## need to update genes based on synonyms
def plot_motif_sites(self, cluster_num, motif_num):
"""THIS NEEDS MORE WORK but has the beginnings of something...
TODO: multiple motifs on same tracks, include ALL genes (i.e. in operons that were not included),
do reverse-complement positioning correctly (based on gene strand),
use MAST scan output (from b.tables['motif_annotations'])
"""
from Bio.SeqFeature import SeqFeature, FeatureLocation
from Bio.Graphics import GenomeDiagram
from reportlab.lib.units import cm
from reportlab.lib import colors
"""To get this to work: download http://www.reportlab.com/ftp/fonts/pfbfer.zip
and unzip it into /usr/lib/python2.7/dist-packages/reportlab/fonts/
"""
motif_sites = self.get_motif_sites(cluster_num, motif_num)
pv_range = np.max(-np.log10(motif_sites.pvalue.values)) - 4 ## divide -log10(pval) by this to get alpha to use
len_range = np.max(motif_sites.start.values) + 10
gdd = GenomeDiagram.Diagram('Motif sites: %d, %d' % (cluster_num, motif_num))
for i in range(motif_sites.shape[0]):
gdt_features = gdd.new_track(1, start=0, end=len_range, greytrack=True, greytrack_labels=1,
name=motif_sites.names.values[i], scale=True, greytrack_fontsize=4)
gds_features = gdt_features.new_set()
col = colors.red.clone()
col.alpha = ( -np.log10(motif_sites.pvalue.values[i]) - 4 ) / pv_range
m_start = motif_sites.start.values[i]
m_len = len(motif_sites.seq.values[i])
m_strand = motif_sites.reverse.values[i]
if m_strand == 0:
m_strand = -1
feature = SeqFeature(FeatureLocation(m_start, m_start+m_len-1), strand=m_strand)
gds_features.add_feature(feature, name=str(i+1), label=False, color=col)
gdd.draw(format='linear', pagesize=(15*cm,motif_sites.shape[0]*cm/2), fragments=1, start=0, end=len_range+10)
##gdd.write("GD_labels_default.pdf", "pdf") ## looks like only output is to file, so do this:
#output = cStringIO.StringIO()
#gdd.write(output, 'png', dpi=300)
#output.seek(0)
output = gdd.write_to_string(output='png', dpi=300)
output = cStringIO.StringIO(output)
img = mpimg.imread(output)
plt.axis('off')
imgplot = plt.imshow( img, interpolation='bicubic' )
output.close()
return gdd
def get_motif_pclust(self, cluster_num, motif_num):
rowid = self.__get_motif_id(cluster_num, motif_num)
sites = self.tables['meme_motif_sites']
sites = sites[ sites.motif_info_id == rowid ]
#sites = sites.drop( ['motif_info_id'], 1 )
return np.mean( np.log10(sites.pvalue.values) )
def get_biop_motif(self, cluster_num, motif_num, option='sites'):
##import egrin2.export_motifs as em
"""export the specified motif to a biopython motif object
Parameters:
- cluster_num: bicluster number
- motif_num: motif number
- option of how to translate - sites: jaspar 'sites' file; pfm: jaspar 'pfm' file
"""
#conn = sql3.connect(self.dbfile)
#cursor = conn.cursor()
#cursor.execute('select max(iteration) from motif_infos')
#iteration = cursor.fetchone()[0]
#query = 'select rowid from motif_infos where iteration=? and cluster=? and motif_num=?'
#params = [self.iteration, cluster_num, motif_num]
#cursor.execute(query, params)
#rowid = cursor.fetchone()[0]
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
#mot_info = pd.read_sql('select * from motif_infos where rowid=?', conn, params=[rowid])
#mot_sites = pd.read_sql('select * from meme_motif_sites where motif_info_id=?', conn, params=[rowid])
mot_sites = self.tables['meme_motif_sites'][self.tables['meme_motif_sites'].motif_info_id == rowid]
output = cStringIO.StringIO()
## ONE WAY TO TRY -- but Bio.motifs cant parse the incomplete MEME file
##output.write(em.MEME_FILE_HEADER % (0.25, 0.25, 0.25, 0.25))
##em.write_pssm(output, cursor, os.path.dirname(self.dbfile), cluster_num, rowid,
## motif_num, mot_info['evalue'][0], 10)
##output.seek(0)
##mot = motifs.read( output, 'meme' )
## Second way - create a jaspar 'pfm' file from the pssm
if option == 'pfm':
#query = 'select a,c,g,t from motif_pssm_rows where iteration=? and motif_info_id=?'
#params = [self.iteration, rowid]
#pssm = pd.read_sql( query, conn, params=params )
motif_pssm_rows = self.tables['motif_pssm_rows']
pssm = motif_pssm_rows[(motif_pssm_rows.iteration==self.iteration) & (motif_pssm_rows.motif_info_id==rowid)]
pssm = pssm.drop( ['motif_info_id', 'iteration', 'row'], 1 )
counts = np.round( pssm * mot_sites.shape[0] ).transpose()
counts.to_string(output, header=False, index=False )
output.seek(0)
mot = motifs.read( output, 'pfm' )
## Third way - create a jaspar 'sites' file
elif option == 'sites':
seqs = {}
for i in mot_sites.index.values:
name = mot_sites.ix[i].seq_name
flank_left = mot_sites.ix[i].flank_left
flank_left = Seq(flank_left if flank_left is not None else "", IUPAC.IUPACAmbiguousDNA()).lower()
seq = Seq(mot_sites.ix[i].seq, IUPAC.IUPACAmbiguousDNA())
flank_right = mot_sites.ix[i].flank_right
flank_right = Seq(flank_right if flank_right is not None else "", IUPAC.IUPACAmbiguousDNA()).lower()
full_seq = flank_left + seq + flank_right
bs = SeqRecord( full_seq, id=name )
seqs[i] = bs
SeqIO.write(seqs.values(), output, 'fasta')
output.seek(0)
mot = motifs.read( output, 'sites' )
output.close()
## Note Bio.motifs.weblogo() uses the weblogo server (slow? requires connection.)
#kwargs = dict(color_scheme='classic')
#mot.weblogo('file.png', color_scheme='color_classic') ## note, can use format='PDF'
#img = mpimg.imread('file.png')
#imgplot = plt.imshow( img )
#plt.show()
return mot
## This uses weblogolib package to create files directly (installed as weblogo via pip)
## https://code.google.com/p/weblogo/
def plot_motif( self, cluster_num, motif_num, img_format='png' ):
#conn = sql3.connect(self.dbfile)
#cursor = conn.cursor()
#cursor.execute('select max(iteration) from motif_infos')
#iteration = cursor.fetchone()[0]
#query = 'select rowid from motif_infos where iteration=? and cluster=? and motif_num=?'
#params = [self.iteration, cluster_num, motif_num]
#cursor.execute(query, params)
#rowid = cursor.fetchone()[0]
#mot_info = pd.read_sql('select * from motif_infos where rowid=?', conn, params=[rowid])
#mot_sites = pd.read_sql('select * from meme_motif_sites where motif_info_id=?', conn, params=[rowid])
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
mot_sites = self.tables['meme_motif_sites'][self.tables['meme_motif_sites'].motif_info_id == rowid]
ldata = wl.LogoData.from_seqs(wl.SeqList(mot_sites.seq.values.tolist(), wl.unambiguous_dna_alphabet))
options = wl.LogoOptions()
options.fineprint = os.path.dirname(self.dbfile) + ' %03d %03d' % ( cluster_num, motif_num )
format = wl.LogoFormat(ldata, options)
format.color_scheme = wl.classic
format.resolution = 150
if img_format == 'png':
tmp = wl.png_formatter( ldata, format )
output = cStringIO.StringIO(tmp)
img = mpimg.imread(output)
plt.axis('off')
imgplot = plt.imshow( img )
#plt.show()
return plt
elif img_format == 'svg':
tmp = wl.svg_formatter( ldata, format )
return tmp
## note then can do e.g. ut.writeLines(svg.split('\n'),'test.svg') | def get_genome_seqs( self ):
features = self.get_features() | random_line_split |
cmonkeyobj.py | import os
import cStringIO
import sqlite3 as sql3
import gzip,bz2
import cPickle as pickle
import ConfigParser
import pandas as pd
import numpy as np
from numpy import nan as NA
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
from Bio import motifs
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from Bio import SeqIO
import weblogolib as wl
import utils as ut
"""
from cmonkeyobj import cMonkey2 as cm2
b = cm2('eco-out-001/cmonkey_run.db')
pd.Series([b.get_cluster_info(k)['residual'] for k in range(1,b.k_clust)]).plot(kind='hist',bins=20)
pd.DataFrame([b.get_cluster_info(k)['pclusts'] for k in range(1,b.k_clust)]).plot(kind='hist',bins=20,stacked=True)
"""
## TBD: plotting motif locations relative to gene start. See
## http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc211
## https://www.biostars.org/p/96470/#97713
class cMonkey2:
dbfile = '' #None
tables = {} #None
iteration = 2001
k_clust = 999 #None
organism = '' #'eco'
species = '' #'Escherichia_coli_K12' #None
taxon_id = None
ratios = pd.DataFrame()
config = ConfigParser.ConfigParser() #None
stats = None
def __init__( self, dbfile ):
self.dbfile = dbfile
conn = sql3.connect( dbfile )
tmp = pd.read_sql('select max(iteration) from iteration_stats', conn) ##last_iteration from run_infos', conn)
conn.close()
self.iteration = tmp.max()[0] ## get iteration
print 'iteration =', self.iteration
self.tables = self.__read_all_tables( dbfile, iteration=self.iteration )
#self.iteration = max(self.tables['motif_infos'].iteration)
self.k_clust = self.tables['run_infos'].num_clusters[0] ##max(self.tables['row_members'].cluster)
self.organism = self.tables['run_infos'].organism[0]
self.species = self.tables['run_infos'].species[0]
self.config = self.load_config()
def __read_all_tables( self, dbfile, iteration=2000 ): #limit=None ):
"""read out all tables in the sql3 db file into a dict of pandas dataframes"""
conn = sql3.connect( dbfile )
tnames = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name", conn)
tables = {}
for tname in tnames.name.values:
#print tname
tmp = pd.read_sql( 'select * from %s limit 3' % tname, conn )
if tname != 'motif_infos' and 'iteration' in tmp.columns.values.tolist():
query = 'select * from %s where iteration=' + str(iteration)
else:
query = 'select * from %s'
table = pd.read_sql(query % tname, conn)
if tname == 'motif_infos':
table = table[ table.iteration == iteration ]
tables[ tname ] = table
conn.close()
table = tables[ 'meme_motif_sites' ]
table = table.ix[ np.in1d( table.motif_info_id, tables[ 'motif_infos' ].index.values ) ]
tables[ 'meme_motif_sites' ] = table
return tables
def reload( self ):
conn = sql3.connect( self.dbfile )
tmp = pd.read_sql('select max(iteration) from iteration_stats',conn)
conn.close()
self.iteration = tmp.max()[0] ## get iteration
print 'iteration =', self.iteration
self.tables = self.__read_all_tables( self.dbfile, iteration=self.iteration )
self.stats = None
def get_feature_names( self ):
feature_names_file = './cache/' + self.species + '_feature_names'
feature_names = pd.read_table( feature_names_file, sep='\t', header=None, skiprows=4 )
feature_names.columns = ['id','names','type']
#feature_names = feature_names.set_index( 'names' )
return feature_names
def get_features( self ):
features_file = './cache/' + self.species + '_features'
features = pd.read_table( features_file, sep='\t', header=0, skiprows=16 )
cols = features.columns.values; cols[0] = 'id'; features.columns = cols
#features = features.set_index( 'od' )
return features
def get_genome_seqs( self ):
features = self.get_features()
contigs = np.unique( features.contig )
seqs = {}
for contig in contigs:
genome_file = './cache/' + self.species + '_' + contig
seq = ut.readLines( genome_file )[0].strip().upper()
seqs[contig] = seq
return seqs
def get_networks( self, include_operons=True, include_string=True ):
networks = {}
taxid = self.load_taxon_id()
if include_operons and os.path.exists('./cache/gnc' + str(taxid) + '.named'):
op_file = './cache/gnc' + str(taxid) + '.named'
operons = pd.read_table( op_file )
networks['operons'] = operons
if include_string and os.path.exists('./cache/' + str(taxid) + '.gz'):
string_file = './cache/' + str(taxid) + '.gz'
string = pd.read_table( gzip.GzipFile( string_file ), header=None )
networks['string'] = string
return networks
## see http://www.kegg.jp/kegg/rest/keggapi.html
## and http://biopython.org/DIST/docs/api/Bio.KEGG.REST-module.html
## another option: http://www.genome.jp/kegg-bin/show_organism?org=eco
def load_taxon_id( self, in_code=None ):
''' lets try getting it directly from KEGG based on inputted organism 3-letter code
a bit hairy but it works! TODO: cache the org_table and gen_table in cache/'''
if self.taxon_id is not None:
return self.taxon_id
import Bio.KEGG.REST as kegg ## requires BioPython 1.65 or later!
if in_code is None:
in_code = self.tables['run_infos'].organism[0]
org_table = kegg.kegg_list('organism').readlines()
org_table = ''.join( org_table )
buf = cStringIO.StringIO( org_table )
org_table = pd.read_table( buf, sep='\t', header=None )
#full_org_name = org_table.ix[org_table[1]==in_code][2].values[0]
buf.close()
kegg_code = org_table.ix[org_table[1]==in_code][0].values[0]
gen_table = kegg.kegg_list('genome').readlines()
gen_table = ''.join( gen_table )
buf = cStringIO.StringIO( gen_table )
gen_table = pd.read_table( buf, sep='\t', header=None )
buf.close()
taxon_id = int(gen_table.ix[ gen_table[0] == 'genome:'+kegg_code ][1].values[0].split(', ')[2].split('; ')[0])
self.taxon_id = taxon_id
return taxon_id
def load_ratios( self, ratios_file=None ):
if ratios_file is None:
ratios_file = os.path.dirname(self.dbfile) + '/ratios.tsv.gz'
if self.ratios is None:
self.ratios = pd.read_table( gzip.GzipFile( ratios_file ), sep='\t' )
return self.ratios
def load_config( self, config_file=None ):
"""then can do e.g., b.config.getfloat('Rows', 'scaling_constant')
or simply, dict(b.config.items('Rows'))"""
if config_file is None:
config_file = os.path.dirname(self.dbfile) + '/final.ini'
config_parser = ConfigParser.ConfigParser()
config_parser.read( config_file )
self.config = config_parser
return self.config
def pickle_all( self, outfile=None, include_genome=False, include_networks=False ):
'''Try to pickle up ALL relevant info from the cmonkey run
can load it via b = pickle.load(gzip.GzipFile(outfile)) '''
## another thing to try is to load the
feature_names = self.get_feature_names()
features = self.get_features()
genome = None
if include_genome:
genome = self.get_genome_seqs()
networks = None
if include_networks:
networks = self.get_networks()
self.load_ratios()
self.load_config()
self.get_stats()
## do pickling here
if outfile is None:
outfile = gzip.GzipFile( os.path.dirname(self.dbfile) + '/dump.pkl.gz', 'wb' )
obj = { 'b': self,
'feature_names': feature_names,
'features': features,
'genome': genome,
'networks': networks }
print outfile
pickle.dump( obj, outfile )
outfile.close()
def get_rows( self, k ):
t1 = self.tables['row_members']
t1 = t1[ t1.iteration == self.iteration ]
t1 = t1[ t1.cluster == k ]
t2 = self.tables['row_names']
t2 = pd.merge( t1, t2, on='order_num' )
return t2.name.values
def get_cols( self, k ):
t1 = self.tables['column_members']
t1 = t1[ t1.iteration == self.iteration ]
t1 = t1[ t1.cluster == k ]
t2 = self.tables['column_names']
t2 = pd.merge( t1, t2, on='order_num' )
return t2.name.values
def get_ratios( self, k=None, rows=None, cols=None, included=True ):
"""Extract submatrix of ratios for cluster or rows/cols.
If ~included, extract submatrix of ratios for conditions NOT in cluster."""
if self.ratios is None:
ratios = self.load_ratios()
if k is not None:
if rows is None:
rows = self.get_rows( k )
if cols is None:
cols = self.get_cols( k )
if not included:
cols = ratios.columns.values[ np.in1d( ratios.columns.values, cols, invert=True ) ]
rats = self.ratios.ix[ rows, cols ]
return rats
def plot_ratios( self, k=None, rows=None, cols=None, included=True, kind='line' ):
## see http://pandas.pydata.org/pandas-docs/version/0.15.0/visualization.html -- cool!
## can use kind = 'box' too!
rats = self.get_ratios( k, rows, cols, included )
rats = rats.transpose()
if kind == 'box': ## sort by mean of columns
means = rats.mean(1)
tmp = pd.concat( [rats, means], 1 )
cols = tmp.columns.values; cols[-1] = 'MEANS'; tmp.columns = cols
tmp = tmp.sort( ['MEANS'] )
tmp = tmp.drop( 'MEANS', 1 )
rats = tmp.transpose()
rats.plot(kind=kind, use_index=False, title='Cluster %d'%(k), legend=False, sym='.')
else:
rats.plot(kind=kind, use_index=False, title='Cluster %d'%(k), legend=False)
## use plt.close() to close the window
def get_cluster_info( self, k ):
t1 = self.tables['cluster_stats']
t1 = t1[ t1.cluster == k ]
#t1 = t1.drop( ['iteration', 'cluster'], 1 )
t2 = self.tables['motif_infos']
t2 = t2[ t2.cluster == k ]
#t2 = t2.drop( ['iteration', 'cluster'], 1 )
## Extract it.
out = {'residual':t1.residual.values[0],
'nrows':t1.num_rows.values[0],
'ncols':t1.num_cols.values[0],
'e_values':t2.evalue.values}
## Also get p-clust
pclusts = np.array([self.get_motif_pclust(k,i) for i in range(1,t2.shape[0]+1)])
out['pclusts'] = pclusts
return out
def get_cluster_networks( self, k ):
networks = self.get_networks()
genes = self.get_rows( k )
out_nets = {}
if 'string' in networks.keys():
string = networks['string']
string = string.ix[ np.in1d(string[0], genes) ] ## slow!
string = string.ix[ np.in1d(string[1], genes) ]
out_nets['string'] = string
if 'operons' in networks.keys():
ops = networks['operons']
ops = ops.ix[ np.in1d(ops.SysName1, genes) | np.in1d(ops.SysName2, genes) ]
ops = ops.ix[ ops.bOp == True ]
out_nets['operons'] = ops
return out_nets
## see https://www.udacity.com/wiki/creating-network-graphs-with-python
def plot_cluster_networks( self, k ):
import networkx as nx
out_nets = self.get_cluster_networks( k )
gr = nx.Graph()
if 'string' in out_nets.keys():
strng = out_nets[ 'string' ]
buf = cStringIO.StringIO() ## round-about way to do it but wtf?
strng.to_csv( buf, sep='\t', header=False, index=False )
buf.flush(); buf.seek(0)
gr = nx.read_weighted_edgelist( buf )
buf.close()
if 'operons' in out_nets.keys():
|
pos = nx.spring_layout(gr, k=0.9, iterations=2000)
## requires installation of graphviz-dev and pygraphviz:
##from networkx import graphviz_layout
##pos = nx.graphviz_layout( gr, prog='neato'
pos2 = { i:k for i,k in pos.items() if i in gr2.nodes() }
nx.draw_networkx_edges(gr2, pos2, edge_color='r', width=4, alpha=0.5)
nx.draw_networkx(gr, pos, node_size=50, node_color='b', edge_color='b', font_size=7, width=2, alpha=0.3)
def clusters_w_genes( self, genes ):
t1 = self.tables['row_members']
t1 = t1[ (t1.iteration == self.iteration) ]
t2 = self.tables['row_names']
t2 = t2[ np.in1d(t2.name, genes) ]
t2 = pd.merge( t1, t2, on='order_num' )
t2 = t2.drop( ['iteration', 'order_num'], 1 )
return t2
def clusters_w_conds( self, conds ):
t1 = self.tables['column_members']
t1 = t1[ (t1.iteration == self.iteration) ]
t2 = self.tables['column_names']
t2 = t2[ np.in1d(t2.name, conds) ]
t2 = pd.merge( t1, t2, on='order_num' )
t2 = t2.drop( ['iteration', 'order_num'], 1 )
return t2
def cluster_summary( self ):
tab = self.tables['cluster_stats']
infos = { k: self.get_cluster_info(k+1) for k in range(self.k_clust) }
tab[ 'e_value1' ] = pd.Series( [ infos[k]['e_values'][0] if
len(infos[k]['e_values']) > 0 else NA for k in range(self.k_clust) ] )
tab[ 'e_value2' ] = pd.Series( [ infos[k]['e_values'][1] if
len(infos[k]['e_values']) > 1 else NA for k in range(self.k_clust) ] )
tab[ 'p_clust1' ] = pd.Series( [ infos[k]['pclusts'][0] if
len(infos[k]['pclusts']) > 0 else NA for k in range(self.k_clust) ] )
tab[ 'p_clust2' ] = pd.Series( [ infos[k]['pclusts'][1] if
len(infos[k]['pclusts']) > 1 else NA for k in range(self.k_clust) ] )
tab = tab.set_index( tab.cluster )
tab = tab.drop( ['iteration', 'cluster'], axis=1 )
return tab
def get_stats( self ):
if self.stats is not None:
return self.stats
conn = sql3.connect( self.dbfile )
table = pd.read_sql('select * from iteration_stats', conn)
conn.close()
tmp = self.tables['statstypes'].copy()
tmp.index = tmp.index + 1
table = pd.merge(table,tmp,left_on='statstype',right_index=True)
tmp = table.groupby( 'name' )
tmp = { name:df for name,df in tmp }
for name in tmp.keys():
tmp2 = tmp[name]
tmp2.index = tmp2.iteration
tmp2 = tmp2.drop( ['statstype', 'category', 'name', 'iteration'], axis=1 )
tmp2.columns=[name]
tmp[name] = tmp2
#if 'SetEnrichment' in tmp.keys():
# pvs = pd.read_csv( os.path.dirname(self.dbfile) + '/setEnrichment_pvalue.csv', index_col=0 )
# pvs = pvs.fillna( 1.0 )
# tmp['SetEnrichment'] = np.log10(pvs+1e-30).median(1) ##.plot()
table = pd.concat( tmp, axis=1 )
table.columns = [i[0] for i in table.columns.values]
self.stats = table
return table
def plot_stats( self ):
table = self.get_stats()
ut.setup_text_plots( usetex=False )
if 'SetEnrichment' in table.columns.values:
table.SetEnrichment.replace( 0, NA, inplace=True )
table.plot( subplots=True, layout=[3,-1], sharex=True, legend=True, fontsize=8 )
#fig, axes = plt.subplots(nrows=3, ncols=3, sharex=True)
#for i, c in enumerate(table.columns):
# table[c].plot( ax=axes[i/3][i%3], title=c )
def __get_motif_id(self, cluster_num, motif_num):
motif_infos = self.tables['motif_infos']
rowid = motif_infos[(motif_infos.iteration==self.iteration) &
(motif_infos.cluster==cluster_num) &
(motif_infos.motif_num==motif_num)].index.values[0]+1
return rowid
#motif_id = self.tables['meme_motif_sites'].ix[rowid].motif_info_id
#return motif_id
def get_motif_pssm(self, cluster_num, motif_num):
"""export the specified motif to a pandas dataframe
Parameters:
- cluster_num: bicluster number
- motif_num: motif number
"""
#conn = sql3.connect(self.dbfile)
#cursor = conn.cursor()
#cursor.execute('select max(iteration) from motif_infos')
#iteration = cursor.fetchone()[0]
#query = 'select rowid from motif_infos where iteration=? and cluster=? and motif_num=?'
#params = [self.iteration, cluster_num, motif_num]
#cursor.execute(query, params)
#rowid = cursor.fetchone()[0]
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
#query = 'select a,c,g,t from motif_pssm_rows where iteration=? and motif_info_id=?'
#params = [self.iteration, rowid]
#pssm = pd.read_sql( query, conn, params=params )
motif_pssm_rows = self.tables['motif_pssm_rows']
pssm = motif_pssm_rows[(motif_pssm_rows.iteration==self.iteration) & (motif_pssm_rows.motif_info_id==rowid)]
pssm.drop( ['motif_info_id', 'iteration', 'row'], 1, inplace=True )
return pssm
def get_motif_sites(self, cluster_num, motif_num=None):
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
print rowid
sites = self.tables['meme_motif_sites']
sites = sites[ sites.motif_info_id == rowid ]
sites = sites.drop( ['motif_info_id'], 1 )
feature_names = self.get_feature_names()
tmp = pd.merge( sites, feature_names, left_on='seq_name', right_on='id' )
tmp = tmp[ np.in1d( tmp.names.values, self.tables['row_names'].name.values ) ]
tmp = tmp.drop( ['seq_name', 'type'], 1 )
tmp = tmp.drop_duplicates()
return tmp ## need to update genes based on synonyms
def plot_motif_sites(self, cluster_num, motif_num):
"""THIS NEEDS MORE WORK but has the beginnings of something...
TODO: multiple motifs on same tracks, include ALL genes (i.e. in operons that were not included),
do reverse-complement positioning correctly (based on gene strand),
use MAST scan output (from b.tables['motif_annotations'])
"""
from Bio.SeqFeature import SeqFeature, FeatureLocation
from Bio.Graphics import GenomeDiagram
from reportlab.lib.units import cm
from reportlab.lib import colors
"""To get this to work: download http://www.reportlab.com/ftp/fonts/pfbfer.zip
and unzip it into /usr/lib/python2.7/dist-packages/reportlab/fonts/
"""
motif_sites = self.get_motif_sites(cluster_num, motif_num)
pv_range = np.max(-np.log10(motif_sites.pvalue.values)) - 4 ## divide -log10(pval) by this to get alpha to use
len_range = np.max(motif_sites.start.values) + 10
gdd = GenomeDiagram.Diagram('Motif sites: %d, %d' % (cluster_num, motif_num))
for i in range(motif_sites.shape[0]):
gdt_features = gdd.new_track(1, start=0, end=len_range, greytrack=True, greytrack_labels=1,
name=motif_sites.names.values[i], scale=True, greytrack_fontsize=4)
gds_features = gdt_features.new_set()
col = colors.red.clone()
col.alpha = ( -np.log10(motif_sites.pvalue.values[i]) - 4 ) / pv_range
m_start = motif_sites.start.values[i]
m_len = len(motif_sites.seq.values[i])
m_strand = motif_sites.reverse.values[i]
if m_strand == 0:
m_strand = -1
feature = SeqFeature(FeatureLocation(m_start, m_start+m_len-1), strand=m_strand)
gds_features.add_feature(feature, name=str(i+1), label=False, color=col)
gdd.draw(format='linear', pagesize=(15*cm,motif_sites.shape[0]*cm/2), fragments=1, start=0, end=len_range+10)
##gdd.write("GD_labels_default.pdf", "pdf") ## looks like only output is to file, so do this:
#output = cStringIO.StringIO()
#gdd.write(output, 'png', dpi=300)
#output.seek(0)
output = gdd.write_to_string(output='png', dpi=300)
output = cStringIO.StringIO(output)
img = mpimg.imread(output)
plt.axis('off')
imgplot = plt.imshow( img, interpolation='bicubic' )
output.close()
return gdd
def get_motif_pclust(self, cluster_num, motif_num):
rowid = self.__get_motif_id(cluster_num, motif_num)
sites = self.tables['meme_motif_sites']
sites = sites[ sites.motif_info_id == rowid ]
#sites = sites.drop( ['motif_info_id'], 1 )
return np.mean( np.log10(sites.pvalue.values) )
def get_biop_motif(self, cluster_num, motif_num, option='sites'):
##import egrin2.export_motifs as em
"""export the specified motif to a biopython motif object
Parameters:
- cluster_num: bicluster number
- motif_num: motif number
- option of how to translate - sites: jaspar 'sites' file; pfm: jaspar 'pfm' file
"""
#conn = sql3.connect(self.dbfile)
#cursor = conn.cursor()
#cursor.execute('select max(iteration) from motif_infos')
#iteration = cursor.fetchone()[0]
#query = 'select rowid from motif_infos where iteration=? and cluster=? and motif_num=?'
#params = [self.iteration, cluster_num, motif_num]
#cursor.execute(query, params)
#rowid = cursor.fetchone()[0]
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
#mot_info = pd.read_sql('select * from motif_infos where rowid=?', conn, params=[rowid])
#mot_sites = pd.read_sql('select * from meme_motif_sites where motif_info_id=?', conn, params=[rowid])
mot_sites = self.tables['meme_motif_sites'][self.tables['meme_motif_sites'].motif_info_id == rowid]
output = cStringIO.StringIO()
## ONE WAY TO TRY -- but Bio.motifs cant parse the incomplete MEME file
##output.write(em.MEME_FILE_HEADER % (0.25, 0.25, 0.25, 0.25))
##em.write_pssm(output, cursor, os.path.dirname(self.dbfile), cluster_num, rowid,
## motif_num, mot_info['evalue'][0], 10)
##output.seek(0)
##mot = motifs.read( output, 'meme' )
## Second way - create a jaspar 'pfm' file from the pssm
if option == 'pfm':
#query = 'select a,c,g,t from motif_pssm_rows where iteration=? and motif_info_id=?'
#params = [self.iteration, rowid]
#pssm = pd.read_sql( query, conn, params=params )
motif_pssm_rows = self.tables['motif_pssm_rows']
pssm = motif_pssm_rows[(motif_pssm_rows.iteration==self.iteration) & (motif_pssm_rows.motif_info_id==rowid)]
pssm = pssm.drop( ['motif_info_id', 'iteration', 'row'], 1 )
counts = np.round( pssm * mot_sites.shape[0] ).transpose()
counts.to_string(output, header=False, index=False )
output.seek(0)
mot = motifs.read( output, 'pfm' )
## Third way - create a jaspar 'sites' file
elif option == 'sites':
seqs = {}
for i in mot_sites.index.values:
name = mot_sites.ix[i].seq_name
flank_left = mot_sites.ix[i].flank_left
flank_left = Seq(flank_left if flank_left is not None else "", IUPAC.IUPACAmbiguousDNA()).lower()
seq = Seq(mot_sites.ix[i].seq, IUPAC.IUPACAmbiguousDNA())
flank_right = mot_sites.ix[i].flank_right
flank_right = Seq(flank_right if flank_right is not None else "", IUPAC.IUPACAmbiguousDNA()).lower()
full_seq = flank_left + seq + flank_right
bs = SeqRecord( full_seq, id=name )
seqs[i] = bs
SeqIO.write(seqs.values(), output, 'fasta')
output.seek(0)
mot = motifs.read( output, 'sites' )
output.close()
## Note Bio.motifs.weblogo() uses the weblogo server (slow? requires connection.)
#kwargs = dict(color_scheme='classic')
#mot.weblogo('file.png', color_scheme='color_classic') ## note, can use format='PDF'
#img = mpimg.imread('file.png')
#imgplot = plt.imshow( img )
#plt.show()
return mot
## This uses weblogolib package to create files directly (installed as weblogo via pip)
## https://code.google.com/p/weblogo/
def plot_motif( self, cluster_num, motif_num, img_format='png' ):
#conn = sql3.connect(self.dbfile)
#cursor = conn.cursor()
#cursor.execute('select max(iteration) from motif_infos')
#iteration = cursor.fetchone()[0]
#query = 'select rowid from motif_infos where iteration=? and cluster=? and motif_num=?'
#params = [self.iteration, cluster_num, motif_num]
#cursor.execute(query, params)
#rowid = cursor.fetchone()[0]
#mot_info = pd.read_sql('select * from motif_infos where rowid=?', conn, params=[rowid])
#mot_sites = pd.read_sql('select * from meme_motif_sites where motif_info_id=?', conn, params=[rowid])
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
mot_sites = self.tables['meme_motif_sites'][self.tables['meme_motif_sites'].motif_info_id == rowid]
ldata = wl.LogoData.from_seqs(wl.SeqList(mot_sites.seq.values.tolist(), wl.unambiguous_dna_alphabet))
options = wl.LogoOptions()
options.fineprint = os.path.dirname(self.dbfile) + ' %03d %03d' % ( cluster_num, motif_num )
format = wl.LogoFormat(ldata, options)
format.color_scheme = wl.classic
format.resolution = 150
if img_format == 'png':
tmp = wl.png_formatter( ldata, format )
output = cStringIO.StringIO(tmp)
img = mpimg.imread(output)
plt.axis('off')
imgplot = plt.imshow( img )
#plt.show()
return plt
elif img_format == 'svg':
tmp = wl.svg_formatter( ldata, format )
return tmp
## note then can do e.g. ut.writeLines(svg.split('\n'),'test.svg')
| ops = out_nets[ 'operons' ]
ops = ops.ix[ ops.bOp == True ]
ops = ops[ ['SysName1','SysName2','pOp'] ]
ops.pOp = ops.pOp * 1000.
buf = cStringIO.StringIO() ## round-about way to do it but wtf?
ops.to_csv( buf, sep='\t', header=False, index=False )
buf.flush(); buf.seek(0)
gr2 = nx.read_weighted_edgelist( buf )
buf.close()
#gr2 = nx.Graph( [ tuple(x) for x in ops[['SysName1','SysName2']].to_records(index=False) ],
# weight=ops.pOp.values*1000, typ='operons' )
## from https://stackoverflow.com/questions/11758774/merging-two-network-maps-in-networkx-by-unique-labels :
gr.add_nodes_from(gr2.nodes(data=True))
gr.add_edges_from(gr2.edges(data=True)) #, weight=gr2.graph['weight'], type=gr2.graph['type']) | conditional_block |
cmonkeyobj.py | import os
import cStringIO
import sqlite3 as sql3
import gzip,bz2
import cPickle as pickle
import ConfigParser
import pandas as pd
import numpy as np
from numpy import nan as NA
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
from Bio import motifs
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from Bio import SeqIO
import weblogolib as wl
import utils as ut
"""
from cmonkeyobj import cMonkey2 as cm2
b = cm2('eco-out-001/cmonkey_run.db')
pd.Series([b.get_cluster_info(k)['residual'] for k in range(1,b.k_clust)]).plot(kind='hist',bins=20)
pd.DataFrame([b.get_cluster_info(k)['pclusts'] for k in range(1,b.k_clust)]).plot(kind='hist',bins=20,stacked=True)
"""
## TBD: plotting motif locations relative to gene start. See
## http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc211
## https://www.biostars.org/p/96470/#97713
class cMonkey2:
dbfile = '' #None
tables = {} #None
iteration = 2001
k_clust = 999 #None
organism = '' #'eco'
species = '' #'Escherichia_coli_K12' #None
taxon_id = None
ratios = pd.DataFrame()
config = ConfigParser.ConfigParser() #None
stats = None
def __init__( self, dbfile ):
self.dbfile = dbfile
conn = sql3.connect( dbfile )
tmp = pd.read_sql('select max(iteration) from iteration_stats', conn) ##last_iteration from run_infos', conn)
conn.close()
self.iteration = tmp.max()[0] ## get iteration
print 'iteration =', self.iteration
self.tables = self.__read_all_tables( dbfile, iteration=self.iteration )
#self.iteration = max(self.tables['motif_infos'].iteration)
self.k_clust = self.tables['run_infos'].num_clusters[0] ##max(self.tables['row_members'].cluster)
self.organism = self.tables['run_infos'].organism[0]
self.species = self.tables['run_infos'].species[0]
self.config = self.load_config()
def __read_all_tables( self, dbfile, iteration=2000 ): #limit=None ):
"""read out all tables in the sql3 db file into a dict of pandas dataframes"""
conn = sql3.connect( dbfile )
tnames = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name", conn)
tables = {}
for tname in tnames.name.values:
#print tname
tmp = pd.read_sql( 'select * from %s limit 3' % tname, conn )
if tname != 'motif_infos' and 'iteration' in tmp.columns.values.tolist():
query = 'select * from %s where iteration=' + str(iteration)
else:
query = 'select * from %s'
table = pd.read_sql(query % tname, conn)
if tname == 'motif_infos':
table = table[ table.iteration == iteration ]
tables[ tname ] = table
conn.close()
table = tables[ 'meme_motif_sites' ]
table = table.ix[ np.in1d( table.motif_info_id, tables[ 'motif_infos' ].index.values ) ]
tables[ 'meme_motif_sites' ] = table
return tables
def reload( self ):
conn = sql3.connect( self.dbfile )
tmp = pd.read_sql('select max(iteration) from iteration_stats',conn)
conn.close()
self.iteration = tmp.max()[0] ## get iteration
print 'iteration =', self.iteration
self.tables = self.__read_all_tables( self.dbfile, iteration=self.iteration )
self.stats = None
def get_feature_names( self ):
feature_names_file = './cache/' + self.species + '_feature_names'
feature_names = pd.read_table( feature_names_file, sep='\t', header=None, skiprows=4 )
feature_names.columns = ['id','names','type']
#feature_names = feature_names.set_index( 'names' )
return feature_names
def get_features( self ):
features_file = './cache/' + self.species + '_features'
features = pd.read_table( features_file, sep='\t', header=0, skiprows=16 )
cols = features.columns.values; cols[0] = 'id'; features.columns = cols
#features = features.set_index( 'od' )
return features
def get_genome_seqs( self ):
features = self.get_features()
contigs = np.unique( features.contig )
seqs = {}
for contig in contigs:
genome_file = './cache/' + self.species + '_' + contig
seq = ut.readLines( genome_file )[0].strip().upper()
seqs[contig] = seq
return seqs
def get_networks( self, include_operons=True, include_string=True ):
networks = {}
taxid = self.load_taxon_id()
if include_operons and os.path.exists('./cache/gnc' + str(taxid) + '.named'):
op_file = './cache/gnc' + str(taxid) + '.named'
operons = pd.read_table( op_file )
networks['operons'] = operons
if include_string and os.path.exists('./cache/' + str(taxid) + '.gz'):
string_file = './cache/' + str(taxid) + '.gz'
string = pd.read_table( gzip.GzipFile( string_file ), header=None )
networks['string'] = string
return networks
## see http://www.kegg.jp/kegg/rest/keggapi.html
## and http://biopython.org/DIST/docs/api/Bio.KEGG.REST-module.html
## another option: http://www.genome.jp/kegg-bin/show_organism?org=eco
def load_taxon_id( self, in_code=None ):
''' lets try getting it directly from KEGG based on inputted organism 3-letter code
a bit hairy but it works! TODO: cache the org_table and gen_table in cache/'''
if self.taxon_id is not None:
return self.taxon_id
import Bio.KEGG.REST as kegg ## requires BioPython 1.65 or later!
if in_code is None:
in_code = self.tables['run_infos'].organism[0]
org_table = kegg.kegg_list('organism').readlines()
org_table = ''.join( org_table )
buf = cStringIO.StringIO( org_table )
org_table = pd.read_table( buf, sep='\t', header=None )
#full_org_name = org_table.ix[org_table[1]==in_code][2].values[0]
buf.close()
kegg_code = org_table.ix[org_table[1]==in_code][0].values[0]
gen_table = kegg.kegg_list('genome').readlines()
gen_table = ''.join( gen_table )
buf = cStringIO.StringIO( gen_table )
gen_table = pd.read_table( buf, sep='\t', header=None )
buf.close()
taxon_id = int(gen_table.ix[ gen_table[0] == 'genome:'+kegg_code ][1].values[0].split(', ')[2].split('; ')[0])
self.taxon_id = taxon_id
return taxon_id
def load_ratios( self, ratios_file=None ):
if ratios_file is None:
ratios_file = os.path.dirname(self.dbfile) + '/ratios.tsv.gz'
if self.ratios is None:
self.ratios = pd.read_table( gzip.GzipFile( ratios_file ), sep='\t' )
return self.ratios
def load_config( self, config_file=None ):
"""then can do e.g., b.config.getfloat('Rows', 'scaling_constant')
or simply, dict(b.config.items('Rows'))"""
if config_file is None:
config_file = os.path.dirname(self.dbfile) + '/final.ini'
config_parser = ConfigParser.ConfigParser()
config_parser.read( config_file )
self.config = config_parser
return self.config
def pickle_all( self, outfile=None, include_genome=False, include_networks=False ):
'''Try to pickle up ALL relevant info from the cmonkey run
can load it via b = pickle.load(gzip.GzipFile(outfile)) '''
## another thing to try is to load the
feature_names = self.get_feature_names()
features = self.get_features()
genome = None
if include_genome:
genome = self.get_genome_seqs()
networks = None
if include_networks:
networks = self.get_networks()
self.load_ratios()
self.load_config()
self.get_stats()
## do pickling here
if outfile is None:
outfile = gzip.GzipFile( os.path.dirname(self.dbfile) + '/dump.pkl.gz', 'wb' )
obj = { 'b': self,
'feature_names': feature_names,
'features': features,
'genome': genome,
'networks': networks }
print outfile
pickle.dump( obj, outfile )
outfile.close()
def get_rows( self, k ):
t1 = self.tables['row_members']
t1 = t1[ t1.iteration == self.iteration ]
t1 = t1[ t1.cluster == k ]
t2 = self.tables['row_names']
t2 = pd.merge( t1, t2, on='order_num' )
return t2.name.values
def get_cols( self, k ):
t1 = self.tables['column_members']
t1 = t1[ t1.iteration == self.iteration ]
t1 = t1[ t1.cluster == k ]
t2 = self.tables['column_names']
t2 = pd.merge( t1, t2, on='order_num' )
return t2.name.values
def get_ratios( self, k=None, rows=None, cols=None, included=True ):
"""Extract submatrix of ratios for cluster or rows/cols.
If ~included, extract submatrix of ratios for conditions NOT in cluster."""
if self.ratios is None:
ratios = self.load_ratios()
if k is not None:
if rows is None:
rows = self.get_rows( k )
if cols is None:
cols = self.get_cols( k )
if not included:
cols = ratios.columns.values[ np.in1d( ratios.columns.values, cols, invert=True ) ]
rats = self.ratios.ix[ rows, cols ]
return rats
def plot_ratios( self, k=None, rows=None, cols=None, included=True, kind='line' ):
## see http://pandas.pydata.org/pandas-docs/version/0.15.0/visualization.html -- cool!
## can use kind = 'box' too!
|
def get_cluster_info( self, k ):
t1 = self.tables['cluster_stats']
t1 = t1[ t1.cluster == k ]
#t1 = t1.drop( ['iteration', 'cluster'], 1 )
t2 = self.tables['motif_infos']
t2 = t2[ t2.cluster == k ]
#t2 = t2.drop( ['iteration', 'cluster'], 1 )
## Extract it.
out = {'residual':t1.residual.values[0],
'nrows':t1.num_rows.values[0],
'ncols':t1.num_cols.values[0],
'e_values':t2.evalue.values}
## Also get p-clust
pclusts = np.array([self.get_motif_pclust(k,i) for i in range(1,t2.shape[0]+1)])
out['pclusts'] = pclusts
return out
def get_cluster_networks( self, k ):
networks = self.get_networks()
genes = self.get_rows( k )
out_nets = {}
if 'string' in networks.keys():
string = networks['string']
string = string.ix[ np.in1d(string[0], genes) ] ## slow!
string = string.ix[ np.in1d(string[1], genes) ]
out_nets['string'] = string
if 'operons' in networks.keys():
ops = networks['operons']
ops = ops.ix[ np.in1d(ops.SysName1, genes) | np.in1d(ops.SysName2, genes) ]
ops = ops.ix[ ops.bOp == True ]
out_nets['operons'] = ops
return out_nets
## see https://www.udacity.com/wiki/creating-network-graphs-with-python
def plot_cluster_networks( self, k ):
import networkx as nx
out_nets = self.get_cluster_networks( k )
gr = nx.Graph()
if 'string' in out_nets.keys():
strng = out_nets[ 'string' ]
buf = cStringIO.StringIO() ## round-about way to do it but wtf?
strng.to_csv( buf, sep='\t', header=False, index=False )
buf.flush(); buf.seek(0)
gr = nx.read_weighted_edgelist( buf )
buf.close()
if 'operons' in out_nets.keys():
ops = out_nets[ 'operons' ]
ops = ops.ix[ ops.bOp == True ]
ops = ops[ ['SysName1','SysName2','pOp'] ]
ops.pOp = ops.pOp * 1000.
buf = cStringIO.StringIO() ## round-about way to do it but wtf?
ops.to_csv( buf, sep='\t', header=False, index=False )
buf.flush(); buf.seek(0)
gr2 = nx.read_weighted_edgelist( buf )
buf.close()
#gr2 = nx.Graph( [ tuple(x) for x in ops[['SysName1','SysName2']].to_records(index=False) ],
# weight=ops.pOp.values*1000, typ='operons' )
## from https://stackoverflow.com/questions/11758774/merging-two-network-maps-in-networkx-by-unique-labels :
gr.add_nodes_from(gr2.nodes(data=True))
gr.add_edges_from(gr2.edges(data=True)) #, weight=gr2.graph['weight'], type=gr2.graph['type'])
pos = nx.spring_layout(gr, k=0.9, iterations=2000)
## requires installation of graphviz-dev and pygraphviz:
##from networkx import graphviz_layout
##pos = nx.graphviz_layout( gr, prog='neato'
pos2 = { i:k for i,k in pos.items() if i in gr2.nodes() }
nx.draw_networkx_edges(gr2, pos2, edge_color='r', width=4, alpha=0.5)
nx.draw_networkx(gr, pos, node_size=50, node_color='b', edge_color='b', font_size=7, width=2, alpha=0.3)
def clusters_w_genes( self, genes ):
t1 = self.tables['row_members']
t1 = t1[ (t1.iteration == self.iteration) ]
t2 = self.tables['row_names']
t2 = t2[ np.in1d(t2.name, genes) ]
t2 = pd.merge( t1, t2, on='order_num' )
t2 = t2.drop( ['iteration', 'order_num'], 1 )
return t2
def clusters_w_conds( self, conds ):
t1 = self.tables['column_members']
t1 = t1[ (t1.iteration == self.iteration) ]
t2 = self.tables['column_names']
t2 = t2[ np.in1d(t2.name, conds) ]
t2 = pd.merge( t1, t2, on='order_num' )
t2 = t2.drop( ['iteration', 'order_num'], 1 )
return t2
def cluster_summary( self ):
tab = self.tables['cluster_stats']
infos = { k: self.get_cluster_info(k+1) for k in range(self.k_clust) }
tab[ 'e_value1' ] = pd.Series( [ infos[k]['e_values'][0] if
len(infos[k]['e_values']) > 0 else NA for k in range(self.k_clust) ] )
tab[ 'e_value2' ] = pd.Series( [ infos[k]['e_values'][1] if
len(infos[k]['e_values']) > 1 else NA for k in range(self.k_clust) ] )
tab[ 'p_clust1' ] = pd.Series( [ infos[k]['pclusts'][0] if
len(infos[k]['pclusts']) > 0 else NA for k in range(self.k_clust) ] )
tab[ 'p_clust2' ] = pd.Series( [ infos[k]['pclusts'][1] if
len(infos[k]['pclusts']) > 1 else NA for k in range(self.k_clust) ] )
tab = tab.set_index( tab.cluster )
tab = tab.drop( ['iteration', 'cluster'], axis=1 )
return tab
def get_stats( self ):
if self.stats is not None:
return self.stats
conn = sql3.connect( self.dbfile )
table = pd.read_sql('select * from iteration_stats', conn)
conn.close()
tmp = self.tables['statstypes'].copy()
tmp.index = tmp.index + 1
table = pd.merge(table,tmp,left_on='statstype',right_index=True)
tmp = table.groupby( 'name' )
tmp = { name:df for name,df in tmp }
for name in tmp.keys():
tmp2 = tmp[name]
tmp2.index = tmp2.iteration
tmp2 = tmp2.drop( ['statstype', 'category', 'name', 'iteration'], axis=1 )
tmp2.columns=[name]
tmp[name] = tmp2
#if 'SetEnrichment' in tmp.keys():
# pvs = pd.read_csv( os.path.dirname(self.dbfile) + '/setEnrichment_pvalue.csv', index_col=0 )
# pvs = pvs.fillna( 1.0 )
# tmp['SetEnrichment'] = np.log10(pvs+1e-30).median(1) ##.plot()
table = pd.concat( tmp, axis=1 )
table.columns = [i[0] for i in table.columns.values]
self.stats = table
return table
def plot_stats( self ):
table = self.get_stats()
ut.setup_text_plots( usetex=False )
if 'SetEnrichment' in table.columns.values:
table.SetEnrichment.replace( 0, NA, inplace=True )
table.plot( subplots=True, layout=[3,-1], sharex=True, legend=True, fontsize=8 )
#fig, axes = plt.subplots(nrows=3, ncols=3, sharex=True)
#for i, c in enumerate(table.columns):
# table[c].plot( ax=axes[i/3][i%3], title=c )
def __get_motif_id(self, cluster_num, motif_num):
motif_infos = self.tables['motif_infos']
rowid = motif_infos[(motif_infos.iteration==self.iteration) &
(motif_infos.cluster==cluster_num) &
(motif_infos.motif_num==motif_num)].index.values[0]+1
return rowid
#motif_id = self.tables['meme_motif_sites'].ix[rowid].motif_info_id
#return motif_id
def get_motif_pssm(self, cluster_num, motif_num):
"""export the specified motif to a pandas dataframe
Parameters:
- cluster_num: bicluster number
- motif_num: motif number
"""
#conn = sql3.connect(self.dbfile)
#cursor = conn.cursor()
#cursor.execute('select max(iteration) from motif_infos')
#iteration = cursor.fetchone()[0]
#query = 'select rowid from motif_infos where iteration=? and cluster=? and motif_num=?'
#params = [self.iteration, cluster_num, motif_num]
#cursor.execute(query, params)
#rowid = cursor.fetchone()[0]
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
#query = 'select a,c,g,t from motif_pssm_rows where iteration=? and motif_info_id=?'
#params = [self.iteration, rowid]
#pssm = pd.read_sql( query, conn, params=params )
motif_pssm_rows = self.tables['motif_pssm_rows']
pssm = motif_pssm_rows[(motif_pssm_rows.iteration==self.iteration) & (motif_pssm_rows.motif_info_id==rowid)]
pssm.drop( ['motif_info_id', 'iteration', 'row'], 1, inplace=True )
return pssm
def get_motif_sites(self, cluster_num, motif_num=None):
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
print rowid
sites = self.tables['meme_motif_sites']
sites = sites[ sites.motif_info_id == rowid ]
sites = sites.drop( ['motif_info_id'], 1 )
feature_names = self.get_feature_names()
tmp = pd.merge( sites, feature_names, left_on='seq_name', right_on='id' )
tmp = tmp[ np.in1d( tmp.names.values, self.tables['row_names'].name.values ) ]
tmp = tmp.drop( ['seq_name', 'type'], 1 )
tmp = tmp.drop_duplicates()
return tmp ## need to update genes based on synonyms
def plot_motif_sites(self, cluster_num, motif_num):
"""THIS NEEDS MORE WORK but has the beginnings of something...
TODO: multiple motifs on same tracks, include ALL genes (i.e. in operons that were not included),
do reverse-complement positioning correctly (based on gene strand),
use MAST scan output (from b.tables['motif_annotations'])
"""
from Bio.SeqFeature import SeqFeature, FeatureLocation
from Bio.Graphics import GenomeDiagram
from reportlab.lib.units import cm
from reportlab.lib import colors
"""To get this to work: download http://www.reportlab.com/ftp/fonts/pfbfer.zip
and unzip it into /usr/lib/python2.7/dist-packages/reportlab/fonts/
"""
motif_sites = self.get_motif_sites(cluster_num, motif_num)
pv_range = np.max(-np.log10(motif_sites.pvalue.values)) - 4 ## divide -log10(pval) by this to get alpha to use
len_range = np.max(motif_sites.start.values) + 10
gdd = GenomeDiagram.Diagram('Motif sites: %d, %d' % (cluster_num, motif_num))
for i in range(motif_sites.shape[0]):
gdt_features = gdd.new_track(1, start=0, end=len_range, greytrack=True, greytrack_labels=1,
name=motif_sites.names.values[i], scale=True, greytrack_fontsize=4)
gds_features = gdt_features.new_set()
col = colors.red.clone()
col.alpha = ( -np.log10(motif_sites.pvalue.values[i]) - 4 ) / pv_range
m_start = motif_sites.start.values[i]
m_len = len(motif_sites.seq.values[i])
m_strand = motif_sites.reverse.values[i]
if m_strand == 0:
m_strand = -1
feature = SeqFeature(FeatureLocation(m_start, m_start+m_len-1), strand=m_strand)
gds_features.add_feature(feature, name=str(i+1), label=False, color=col)
gdd.draw(format='linear', pagesize=(15*cm,motif_sites.shape[0]*cm/2), fragments=1, start=0, end=len_range+10)
##gdd.write("GD_labels_default.pdf", "pdf") ## looks like only output is to file, so do this:
#output = cStringIO.StringIO()
#gdd.write(output, 'png', dpi=300)
#output.seek(0)
output = gdd.write_to_string(output='png', dpi=300)
output = cStringIO.StringIO(output)
img = mpimg.imread(output)
plt.axis('off')
imgplot = plt.imshow( img, interpolation='bicubic' )
output.close()
return gdd
def get_motif_pclust(self, cluster_num, motif_num):
rowid = self.__get_motif_id(cluster_num, motif_num)
sites = self.tables['meme_motif_sites']
sites = sites[ sites.motif_info_id == rowid ]
#sites = sites.drop( ['motif_info_id'], 1 )
return np.mean( np.log10(sites.pvalue.values) )
def get_biop_motif(self, cluster_num, motif_num, option='sites'):
##import egrin2.export_motifs as em
"""export the specified motif to a biopython motif object
Parameters:
- cluster_num: bicluster number
- motif_num: motif number
- option of how to translate - sites: jaspar 'sites' file; pfm: jaspar 'pfm' file
"""
#conn = sql3.connect(self.dbfile)
#cursor = conn.cursor()
#cursor.execute('select max(iteration) from motif_infos')
#iteration = cursor.fetchone()[0]
#query = 'select rowid from motif_infos where iteration=? and cluster=? and motif_num=?'
#params = [self.iteration, cluster_num, motif_num]
#cursor.execute(query, params)
#rowid = cursor.fetchone()[0]
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
#mot_info = pd.read_sql('select * from motif_infos where rowid=?', conn, params=[rowid])
#mot_sites = pd.read_sql('select * from meme_motif_sites where motif_info_id=?', conn, params=[rowid])
mot_sites = self.tables['meme_motif_sites'][self.tables['meme_motif_sites'].motif_info_id == rowid]
output = cStringIO.StringIO()
## ONE WAY TO TRY -- but Bio.motifs cant parse the incomplete MEME file
##output.write(em.MEME_FILE_HEADER % (0.25, 0.25, 0.25, 0.25))
##em.write_pssm(output, cursor, os.path.dirname(self.dbfile), cluster_num, rowid,
## motif_num, mot_info['evalue'][0], 10)
##output.seek(0)
##mot = motifs.read( output, 'meme' )
## Second way - create a jaspar 'pfm' file from the pssm
if option == 'pfm':
#query = 'select a,c,g,t from motif_pssm_rows where iteration=? and motif_info_id=?'
#params = [self.iteration, rowid]
#pssm = pd.read_sql( query, conn, params=params )
motif_pssm_rows = self.tables['motif_pssm_rows']
pssm = motif_pssm_rows[(motif_pssm_rows.iteration==self.iteration) & (motif_pssm_rows.motif_info_id==rowid)]
pssm = pssm.drop( ['motif_info_id', 'iteration', 'row'], 1 )
counts = np.round( pssm * mot_sites.shape[0] ).transpose()
counts.to_string(output, header=False, index=False )
output.seek(0)
mot = motifs.read( output, 'pfm' )
## Third way - create a jaspar 'sites' file
elif option == 'sites':
seqs = {}
for i in mot_sites.index.values:
name = mot_sites.ix[i].seq_name
flank_left = mot_sites.ix[i].flank_left
flank_left = Seq(flank_left if flank_left is not None else "", IUPAC.IUPACAmbiguousDNA()).lower()
seq = Seq(mot_sites.ix[i].seq, IUPAC.IUPACAmbiguousDNA())
flank_right = mot_sites.ix[i].flank_right
flank_right = Seq(flank_right if flank_right is not None else "", IUPAC.IUPACAmbiguousDNA()).lower()
full_seq = flank_left + seq + flank_right
bs = SeqRecord( full_seq, id=name )
seqs[i] = bs
SeqIO.write(seqs.values(), output, 'fasta')
output.seek(0)
mot = motifs.read( output, 'sites' )
output.close()
## Note Bio.motifs.weblogo() uses the weblogo server (slow? requires connection.)
#kwargs = dict(color_scheme='classic')
#mot.weblogo('file.png', color_scheme='color_classic') ## note, can use format='PDF'
#img = mpimg.imread('file.png')
#imgplot = plt.imshow( img )
#plt.show()
return mot
## This uses weblogolib package to create files directly (installed as weblogo via pip)
## https://code.google.com/p/weblogo/
def plot_motif( self, cluster_num, motif_num, img_format='png' ):
#conn = sql3.connect(self.dbfile)
#cursor = conn.cursor()
#cursor.execute('select max(iteration) from motif_infos')
#iteration = cursor.fetchone()[0]
#query = 'select rowid from motif_infos where iteration=? and cluster=? and motif_num=?'
#params = [self.iteration, cluster_num, motif_num]
#cursor.execute(query, params)
#rowid = cursor.fetchone()[0]
#mot_info = pd.read_sql('select * from motif_infos where rowid=?', conn, params=[rowid])
#mot_sites = pd.read_sql('select * from meme_motif_sites where motif_info_id=?', conn, params=[rowid])
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
mot_sites = self.tables['meme_motif_sites'][self.tables['meme_motif_sites'].motif_info_id == rowid]
ldata = wl.LogoData.from_seqs(wl.SeqList(mot_sites.seq.values.tolist(), wl.unambiguous_dna_alphabet))
options = wl.LogoOptions()
options.fineprint = os.path.dirname(self.dbfile) + ' %03d %03d' % ( cluster_num, motif_num )
format = wl.LogoFormat(ldata, options)
format.color_scheme = wl.classic
format.resolution = 150
if img_format == 'png':
tmp = wl.png_formatter( ldata, format )
output = cStringIO.StringIO(tmp)
img = mpimg.imread(output)
plt.axis('off')
imgplot = plt.imshow( img )
#plt.show()
return plt
elif img_format == 'svg':
tmp = wl.svg_formatter( ldata, format )
return tmp
## note then can do e.g. ut.writeLines(svg.split('\n'),'test.svg')
| rats = self.get_ratios( k, rows, cols, included )
rats = rats.transpose()
if kind == 'box': ## sort by mean of columns
means = rats.mean(1)
tmp = pd.concat( [rats, means], 1 )
cols = tmp.columns.values; cols[-1] = 'MEANS'; tmp.columns = cols
tmp = tmp.sort( ['MEANS'] )
tmp = tmp.drop( 'MEANS', 1 )
rats = tmp.transpose()
rats.plot(kind=kind, use_index=False, title='Cluster %d'%(k), legend=False, sym='.')
else:
rats.plot(kind=kind, use_index=False, title='Cluster %d'%(k), legend=False)
## use plt.close() to close the window | identifier_body |
cmonkeyobj.py | import os
import cStringIO
import sqlite3 as sql3
import gzip,bz2
import cPickle as pickle
import ConfigParser
import pandas as pd
import numpy as np
from numpy import nan as NA
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
from Bio import motifs
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from Bio import SeqIO
import weblogolib as wl
import utils as ut
"""
from cmonkeyobj import cMonkey2 as cm2
b = cm2('eco-out-001/cmonkey_run.db')
pd.Series([b.get_cluster_info(k)['residual'] for k in range(1,b.k_clust)]).plot(kind='hist',bins=20)
pd.DataFrame([b.get_cluster_info(k)['pclusts'] for k in range(1,b.k_clust)]).plot(kind='hist',bins=20,stacked=True)
"""
## TBD: plotting motif locations relative to gene start. See
## http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc211
## https://www.biostars.org/p/96470/#97713
class cMonkey2:
dbfile = '' #None
tables = {} #None
iteration = 2001
k_clust = 999 #None
organism = '' #'eco'
species = '' #'Escherichia_coli_K12' #None
taxon_id = None
ratios = pd.DataFrame()
config = ConfigParser.ConfigParser() #None
stats = None
def __init__( self, dbfile ):
self.dbfile = dbfile
conn = sql3.connect( dbfile )
tmp = pd.read_sql('select max(iteration) from iteration_stats', conn) ##last_iteration from run_infos', conn)
conn.close()
self.iteration = tmp.max()[0] ## get iteration
print 'iteration =', self.iteration
self.tables = self.__read_all_tables( dbfile, iteration=self.iteration )
#self.iteration = max(self.tables['motif_infos'].iteration)
self.k_clust = self.tables['run_infos'].num_clusters[0] ##max(self.tables['row_members'].cluster)
self.organism = self.tables['run_infos'].organism[0]
self.species = self.tables['run_infos'].species[0]
self.config = self.load_config()
def __read_all_tables( self, dbfile, iteration=2000 ): #limit=None ):
"""read out all tables in the sql3 db file into a dict of pandas dataframes"""
conn = sql3.connect( dbfile )
tnames = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name", conn)
tables = {}
for tname in tnames.name.values:
#print tname
tmp = pd.read_sql( 'select * from %s limit 3' % tname, conn )
if tname != 'motif_infos' and 'iteration' in tmp.columns.values.tolist():
query = 'select * from %s where iteration=' + str(iteration)
else:
query = 'select * from %s'
table = pd.read_sql(query % tname, conn)
if tname == 'motif_infos':
table = table[ table.iteration == iteration ]
tables[ tname ] = table
conn.close()
table = tables[ 'meme_motif_sites' ]
table = table.ix[ np.in1d( table.motif_info_id, tables[ 'motif_infos' ].index.values ) ]
tables[ 'meme_motif_sites' ] = table
return tables
def reload( self ):
conn = sql3.connect( self.dbfile )
tmp = pd.read_sql('select max(iteration) from iteration_stats',conn)
conn.close()
self.iteration = tmp.max()[0] ## get iteration
print 'iteration =', self.iteration
self.tables = self.__read_all_tables( self.dbfile, iteration=self.iteration )
self.stats = None
def get_feature_names( self ):
feature_names_file = './cache/' + self.species + '_feature_names'
feature_names = pd.read_table( feature_names_file, sep='\t', header=None, skiprows=4 )
feature_names.columns = ['id','names','type']
#feature_names = feature_names.set_index( 'names' )
return feature_names
def get_features( self ):
features_file = './cache/' + self.species + '_features'
features = pd.read_table( features_file, sep='\t', header=0, skiprows=16 )
cols = features.columns.values; cols[0] = 'id'; features.columns = cols
#features = features.set_index( 'od' )
return features
def get_genome_seqs( self ):
features = self.get_features()
contigs = np.unique( features.contig )
seqs = {}
for contig in contigs:
genome_file = './cache/' + self.species + '_' + contig
seq = ut.readLines( genome_file )[0].strip().upper()
seqs[contig] = seq
return seqs
def get_networks( self, include_operons=True, include_string=True ):
networks = {}
taxid = self.load_taxon_id()
if include_operons and os.path.exists('./cache/gnc' + str(taxid) + '.named'):
op_file = './cache/gnc' + str(taxid) + '.named'
operons = pd.read_table( op_file )
networks['operons'] = operons
if include_string and os.path.exists('./cache/' + str(taxid) + '.gz'):
string_file = './cache/' + str(taxid) + '.gz'
string = pd.read_table( gzip.GzipFile( string_file ), header=None )
networks['string'] = string
return networks
## see http://www.kegg.jp/kegg/rest/keggapi.html
## and http://biopython.org/DIST/docs/api/Bio.KEGG.REST-module.html
## another option: http://www.genome.jp/kegg-bin/show_organism?org=eco
def load_taxon_id( self, in_code=None ):
''' lets try getting it directly from KEGG based on inputted organism 3-letter code
a bit hairy but it works! TODO: cache the org_table and gen_table in cache/'''
if self.taxon_id is not None:
return self.taxon_id
import Bio.KEGG.REST as kegg ## requires BioPython 1.65 or later!
if in_code is None:
in_code = self.tables['run_infos'].organism[0]
org_table = kegg.kegg_list('organism').readlines()
org_table = ''.join( org_table )
buf = cStringIO.StringIO( org_table )
org_table = pd.read_table( buf, sep='\t', header=None )
#full_org_name = org_table.ix[org_table[1]==in_code][2].values[0]
buf.close()
kegg_code = org_table.ix[org_table[1]==in_code][0].values[0]
gen_table = kegg.kegg_list('genome').readlines()
gen_table = ''.join( gen_table )
buf = cStringIO.StringIO( gen_table )
gen_table = pd.read_table( buf, sep='\t', header=None )
buf.close()
taxon_id = int(gen_table.ix[ gen_table[0] == 'genome:'+kegg_code ][1].values[0].split(', ')[2].split('; ')[0])
self.taxon_id = taxon_id
return taxon_id
def load_ratios( self, ratios_file=None ):
if ratios_file is None:
ratios_file = os.path.dirname(self.dbfile) + '/ratios.tsv.gz'
if self.ratios is None:
self.ratios = pd.read_table( gzip.GzipFile( ratios_file ), sep='\t' )
return self.ratios
def load_config( self, config_file=None ):
"""then can do e.g., b.config.getfloat('Rows', 'scaling_constant')
or simply, dict(b.config.items('Rows'))"""
if config_file is None:
config_file = os.path.dirname(self.dbfile) + '/final.ini'
config_parser = ConfigParser.ConfigParser()
config_parser.read( config_file )
self.config = config_parser
return self.config
def pickle_all( self, outfile=None, include_genome=False, include_networks=False ):
'''Try to pickle up ALL relevant info from the cmonkey run
can load it via b = pickle.load(gzip.GzipFile(outfile)) '''
## another thing to try is to load the
feature_names = self.get_feature_names()
features = self.get_features()
genome = None
if include_genome:
genome = self.get_genome_seqs()
networks = None
if include_networks:
networks = self.get_networks()
self.load_ratios()
self.load_config()
self.get_stats()
## do pickling here
if outfile is None:
outfile = gzip.GzipFile( os.path.dirname(self.dbfile) + '/dump.pkl.gz', 'wb' )
obj = { 'b': self,
'feature_names': feature_names,
'features': features,
'genome': genome,
'networks': networks }
print outfile
pickle.dump( obj, outfile )
outfile.close()
def get_rows( self, k ):
t1 = self.tables['row_members']
t1 = t1[ t1.iteration == self.iteration ]
t1 = t1[ t1.cluster == k ]
t2 = self.tables['row_names']
t2 = pd.merge( t1, t2, on='order_num' )
return t2.name.values
def | ( self, k ):
t1 = self.tables['column_members']
t1 = t1[ t1.iteration == self.iteration ]
t1 = t1[ t1.cluster == k ]
t2 = self.tables['column_names']
t2 = pd.merge( t1, t2, on='order_num' )
return t2.name.values
def get_ratios( self, k=None, rows=None, cols=None, included=True ):
"""Extract submatrix of ratios for cluster or rows/cols.
If ~included, extract submatrix of ratios for conditions NOT in cluster."""
if self.ratios is None:
ratios = self.load_ratios()
if k is not None:
if rows is None:
rows = self.get_rows( k )
if cols is None:
cols = self.get_cols( k )
if not included:
cols = ratios.columns.values[ np.in1d( ratios.columns.values, cols, invert=True ) ]
rats = self.ratios.ix[ rows, cols ]
return rats
def plot_ratios( self, k=None, rows=None, cols=None, included=True, kind='line' ):
## see http://pandas.pydata.org/pandas-docs/version/0.15.0/visualization.html -- cool!
## can use kind = 'box' too!
rats = self.get_ratios( k, rows, cols, included )
rats = rats.transpose()
if kind == 'box': ## sort by mean of columns
means = rats.mean(1)
tmp = pd.concat( [rats, means], 1 )
cols = tmp.columns.values; cols[-1] = 'MEANS'; tmp.columns = cols
tmp = tmp.sort( ['MEANS'] )
tmp = tmp.drop( 'MEANS', 1 )
rats = tmp.transpose()
rats.plot(kind=kind, use_index=False, title='Cluster %d'%(k), legend=False, sym='.')
else:
rats.plot(kind=kind, use_index=False, title='Cluster %d'%(k), legend=False)
## use plt.close() to close the window
def get_cluster_info( self, k ):
t1 = self.tables['cluster_stats']
t1 = t1[ t1.cluster == k ]
#t1 = t1.drop( ['iteration', 'cluster'], 1 )
t2 = self.tables['motif_infos']
t2 = t2[ t2.cluster == k ]
#t2 = t2.drop( ['iteration', 'cluster'], 1 )
## Extract it.
out = {'residual':t1.residual.values[0],
'nrows':t1.num_rows.values[0],
'ncols':t1.num_cols.values[0],
'e_values':t2.evalue.values}
## Also get p-clust
pclusts = np.array([self.get_motif_pclust(k,i) for i in range(1,t2.shape[0]+1)])
out['pclusts'] = pclusts
return out
def get_cluster_networks( self, k ):
networks = self.get_networks()
genes = self.get_rows( k )
out_nets = {}
if 'string' in networks.keys():
string = networks['string']
string = string.ix[ np.in1d(string[0], genes) ] ## slow!
string = string.ix[ np.in1d(string[1], genes) ]
out_nets['string'] = string
if 'operons' in networks.keys():
ops = networks['operons']
ops = ops.ix[ np.in1d(ops.SysName1, genes) | np.in1d(ops.SysName2, genes) ]
ops = ops.ix[ ops.bOp == True ]
out_nets['operons'] = ops
return out_nets
## see https://www.udacity.com/wiki/creating-network-graphs-with-python
def plot_cluster_networks( self, k ):
import networkx as nx
out_nets = self.get_cluster_networks( k )
gr = nx.Graph()
if 'string' in out_nets.keys():
strng = out_nets[ 'string' ]
buf = cStringIO.StringIO() ## round-about way to do it but wtf?
strng.to_csv( buf, sep='\t', header=False, index=False )
buf.flush(); buf.seek(0)
gr = nx.read_weighted_edgelist( buf )
buf.close()
if 'operons' in out_nets.keys():
ops = out_nets[ 'operons' ]
ops = ops.ix[ ops.bOp == True ]
ops = ops[ ['SysName1','SysName2','pOp'] ]
ops.pOp = ops.pOp * 1000.
buf = cStringIO.StringIO() ## round-about way to do it but wtf?
ops.to_csv( buf, sep='\t', header=False, index=False )
buf.flush(); buf.seek(0)
gr2 = nx.read_weighted_edgelist( buf )
buf.close()
#gr2 = nx.Graph( [ tuple(x) for x in ops[['SysName1','SysName2']].to_records(index=False) ],
# weight=ops.pOp.values*1000, typ='operons' )
## from https://stackoverflow.com/questions/11758774/merging-two-network-maps-in-networkx-by-unique-labels :
gr.add_nodes_from(gr2.nodes(data=True))
gr.add_edges_from(gr2.edges(data=True)) #, weight=gr2.graph['weight'], type=gr2.graph['type'])
pos = nx.spring_layout(gr, k=0.9, iterations=2000)
## requires installation of graphviz-dev and pygraphviz:
##from networkx import graphviz_layout
##pos = nx.graphviz_layout( gr, prog='neato'
pos2 = { i:k for i,k in pos.items() if i in gr2.nodes() }
nx.draw_networkx_edges(gr2, pos2, edge_color='r', width=4, alpha=0.5)
nx.draw_networkx(gr, pos, node_size=50, node_color='b', edge_color='b', font_size=7, width=2, alpha=0.3)
def clusters_w_genes( self, genes ):
t1 = self.tables['row_members']
t1 = t1[ (t1.iteration == self.iteration) ]
t2 = self.tables['row_names']
t2 = t2[ np.in1d(t2.name, genes) ]
t2 = pd.merge( t1, t2, on='order_num' )
t2 = t2.drop( ['iteration', 'order_num'], 1 )
return t2
def clusters_w_conds( self, conds ):
t1 = self.tables['column_members']
t1 = t1[ (t1.iteration == self.iteration) ]
t2 = self.tables['column_names']
t2 = t2[ np.in1d(t2.name, conds) ]
t2 = pd.merge( t1, t2, on='order_num' )
t2 = t2.drop( ['iteration', 'order_num'], 1 )
return t2
def cluster_summary( self ):
tab = self.tables['cluster_stats']
infos = { k: self.get_cluster_info(k+1) for k in range(self.k_clust) }
tab[ 'e_value1' ] = pd.Series( [ infos[k]['e_values'][0] if
len(infos[k]['e_values']) > 0 else NA for k in range(self.k_clust) ] )
tab[ 'e_value2' ] = pd.Series( [ infos[k]['e_values'][1] if
len(infos[k]['e_values']) > 1 else NA for k in range(self.k_clust) ] )
tab[ 'p_clust1' ] = pd.Series( [ infos[k]['pclusts'][0] if
len(infos[k]['pclusts']) > 0 else NA for k in range(self.k_clust) ] )
tab[ 'p_clust2' ] = pd.Series( [ infos[k]['pclusts'][1] if
len(infos[k]['pclusts']) > 1 else NA for k in range(self.k_clust) ] )
tab = tab.set_index( tab.cluster )
tab = tab.drop( ['iteration', 'cluster'], axis=1 )
return tab
def get_stats( self ):
if self.stats is not None:
return self.stats
conn = sql3.connect( self.dbfile )
table = pd.read_sql('select * from iteration_stats', conn)
conn.close()
tmp = self.tables['statstypes'].copy()
tmp.index = tmp.index + 1
table = pd.merge(table,tmp,left_on='statstype',right_index=True)
tmp = table.groupby( 'name' )
tmp = { name:df for name,df in tmp }
for name in tmp.keys():
tmp2 = tmp[name]
tmp2.index = tmp2.iteration
tmp2 = tmp2.drop( ['statstype', 'category', 'name', 'iteration'], axis=1 )
tmp2.columns=[name]
tmp[name] = tmp2
#if 'SetEnrichment' in tmp.keys():
# pvs = pd.read_csv( os.path.dirname(self.dbfile) + '/setEnrichment_pvalue.csv', index_col=0 )
# pvs = pvs.fillna( 1.0 )
# tmp['SetEnrichment'] = np.log10(pvs+1e-30).median(1) ##.plot()
table = pd.concat( tmp, axis=1 )
table.columns = [i[0] for i in table.columns.values]
self.stats = table
return table
def plot_stats( self ):
table = self.get_stats()
ut.setup_text_plots( usetex=False )
if 'SetEnrichment' in table.columns.values:
table.SetEnrichment.replace( 0, NA, inplace=True )
table.plot( subplots=True, layout=[3,-1], sharex=True, legend=True, fontsize=8 )
#fig, axes = plt.subplots(nrows=3, ncols=3, sharex=True)
#for i, c in enumerate(table.columns):
# table[c].plot( ax=axes[i/3][i%3], title=c )
def __get_motif_id(self, cluster_num, motif_num):
motif_infos = self.tables['motif_infos']
rowid = motif_infos[(motif_infos.iteration==self.iteration) &
(motif_infos.cluster==cluster_num) &
(motif_infos.motif_num==motif_num)].index.values[0]+1
return rowid
#motif_id = self.tables['meme_motif_sites'].ix[rowid].motif_info_id
#return motif_id
def get_motif_pssm(self, cluster_num, motif_num):
"""export the specified motif to a pandas dataframe
Parameters:
- cluster_num: bicluster number
- motif_num: motif number
"""
#conn = sql3.connect(self.dbfile)
#cursor = conn.cursor()
#cursor.execute('select max(iteration) from motif_infos')
#iteration = cursor.fetchone()[0]
#query = 'select rowid from motif_infos where iteration=? and cluster=? and motif_num=?'
#params = [self.iteration, cluster_num, motif_num]
#cursor.execute(query, params)
#rowid = cursor.fetchone()[0]
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
#query = 'select a,c,g,t from motif_pssm_rows where iteration=? and motif_info_id=?'
#params = [self.iteration, rowid]
#pssm = pd.read_sql( query, conn, params=params )
motif_pssm_rows = self.tables['motif_pssm_rows']
pssm = motif_pssm_rows[(motif_pssm_rows.iteration==self.iteration) & (motif_pssm_rows.motif_info_id==rowid)]
pssm.drop( ['motif_info_id', 'iteration', 'row'], 1, inplace=True )
return pssm
def get_motif_sites(self, cluster_num, motif_num=None):
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
print rowid
sites = self.tables['meme_motif_sites']
sites = sites[ sites.motif_info_id == rowid ]
sites = sites.drop( ['motif_info_id'], 1 )
feature_names = self.get_feature_names()
tmp = pd.merge( sites, feature_names, left_on='seq_name', right_on='id' )
tmp = tmp[ np.in1d( tmp.names.values, self.tables['row_names'].name.values ) ]
tmp = tmp.drop( ['seq_name', 'type'], 1 )
tmp = tmp.drop_duplicates()
return tmp ## need to update genes based on synonyms
def plot_motif_sites(self, cluster_num, motif_num):
"""THIS NEEDS MORE WORK but has the beginnings of something...
TODO: multiple motifs on same tracks, include ALL genes (i.e. in operons that were not included),
do reverse-complement positioning correctly (based on gene strand),
use MAST scan output (from b.tables['motif_annotations'])
"""
from Bio.SeqFeature import SeqFeature, FeatureLocation
from Bio.Graphics import GenomeDiagram
from reportlab.lib.units import cm
from reportlab.lib import colors
"""To get this to work: download http://www.reportlab.com/ftp/fonts/pfbfer.zip
and unzip it into /usr/lib/python2.7/dist-packages/reportlab/fonts/
"""
motif_sites = self.get_motif_sites(cluster_num, motif_num)
pv_range = np.max(-np.log10(motif_sites.pvalue.values)) - 4 ## divide -log10(pval) by this to get alpha to use
len_range = np.max(motif_sites.start.values) + 10
gdd = GenomeDiagram.Diagram('Motif sites: %d, %d' % (cluster_num, motif_num))
for i in range(motif_sites.shape[0]):
gdt_features = gdd.new_track(1, start=0, end=len_range, greytrack=True, greytrack_labels=1,
name=motif_sites.names.values[i], scale=True, greytrack_fontsize=4)
gds_features = gdt_features.new_set()
col = colors.red.clone()
col.alpha = ( -np.log10(motif_sites.pvalue.values[i]) - 4 ) / pv_range
m_start = motif_sites.start.values[i]
m_len = len(motif_sites.seq.values[i])
m_strand = motif_sites.reverse.values[i]
if m_strand == 0:
m_strand = -1
feature = SeqFeature(FeatureLocation(m_start, m_start+m_len-1), strand=m_strand)
gds_features.add_feature(feature, name=str(i+1), label=False, color=col)
gdd.draw(format='linear', pagesize=(15*cm,motif_sites.shape[0]*cm/2), fragments=1, start=0, end=len_range+10)
##gdd.write("GD_labels_default.pdf", "pdf") ## looks like only output is to file, so do this:
#output = cStringIO.StringIO()
#gdd.write(output, 'png', dpi=300)
#output.seek(0)
output = gdd.write_to_string(output='png', dpi=300)
output = cStringIO.StringIO(output)
img = mpimg.imread(output)
plt.axis('off')
imgplot = plt.imshow( img, interpolation='bicubic' )
output.close()
return gdd
def get_motif_pclust(self, cluster_num, motif_num):
rowid = self.__get_motif_id(cluster_num, motif_num)
sites = self.tables['meme_motif_sites']
sites = sites[ sites.motif_info_id == rowid ]
#sites = sites.drop( ['motif_info_id'], 1 )
return np.mean( np.log10(sites.pvalue.values) )
def get_biop_motif(self, cluster_num, motif_num, option='sites'):
##import egrin2.export_motifs as em
"""export the specified motif to a biopython motif object
Parameters:
- cluster_num: bicluster number
- motif_num: motif number
- option of how to translate - sites: jaspar 'sites' file; pfm: jaspar 'pfm' file
"""
#conn = sql3.connect(self.dbfile)
#cursor = conn.cursor()
#cursor.execute('select max(iteration) from motif_infos')
#iteration = cursor.fetchone()[0]
#query = 'select rowid from motif_infos where iteration=? and cluster=? and motif_num=?'
#params = [self.iteration, cluster_num, motif_num]
#cursor.execute(query, params)
#rowid = cursor.fetchone()[0]
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
#mot_info = pd.read_sql('select * from motif_infos where rowid=?', conn, params=[rowid])
#mot_sites = pd.read_sql('select * from meme_motif_sites where motif_info_id=?', conn, params=[rowid])
mot_sites = self.tables['meme_motif_sites'][self.tables['meme_motif_sites'].motif_info_id == rowid]
output = cStringIO.StringIO()
## ONE WAY TO TRY -- but Bio.motifs cant parse the incomplete MEME file
##output.write(em.MEME_FILE_HEADER % (0.25, 0.25, 0.25, 0.25))
##em.write_pssm(output, cursor, os.path.dirname(self.dbfile), cluster_num, rowid,
## motif_num, mot_info['evalue'][0], 10)
##output.seek(0)
##mot = motifs.read( output, 'meme' )
## Second way - create a jaspar 'pfm' file from the pssm
if option == 'pfm':
#query = 'select a,c,g,t from motif_pssm_rows where iteration=? and motif_info_id=?'
#params = [self.iteration, rowid]
#pssm = pd.read_sql( query, conn, params=params )
motif_pssm_rows = self.tables['motif_pssm_rows']
pssm = motif_pssm_rows[(motif_pssm_rows.iteration==self.iteration) & (motif_pssm_rows.motif_info_id==rowid)]
pssm = pssm.drop( ['motif_info_id', 'iteration', 'row'], 1 )
counts = np.round( pssm * mot_sites.shape[0] ).transpose()
counts.to_string(output, header=False, index=False )
output.seek(0)
mot = motifs.read( output, 'pfm' )
## Third way - create a jaspar 'sites' file
elif option == 'sites':
seqs = {}
for i in mot_sites.index.values:
name = mot_sites.ix[i].seq_name
flank_left = mot_sites.ix[i].flank_left
flank_left = Seq(flank_left if flank_left is not None else "", IUPAC.IUPACAmbiguousDNA()).lower()
seq = Seq(mot_sites.ix[i].seq, IUPAC.IUPACAmbiguousDNA())
flank_right = mot_sites.ix[i].flank_right
flank_right = Seq(flank_right if flank_right is not None else "", IUPAC.IUPACAmbiguousDNA()).lower()
full_seq = flank_left + seq + flank_right
bs = SeqRecord( full_seq, id=name )
seqs[i] = bs
SeqIO.write(seqs.values(), output, 'fasta')
output.seek(0)
mot = motifs.read( output, 'sites' )
output.close()
## Note Bio.motifs.weblogo() uses the weblogo server (slow? requires connection.)
#kwargs = dict(color_scheme='classic')
#mot.weblogo('file.png', color_scheme='color_classic') ## note, can use format='PDF'
#img = mpimg.imread('file.png')
#imgplot = plt.imshow( img )
#plt.show()
return mot
## This uses weblogolib package to create files directly (installed as weblogo via pip)
## https://code.google.com/p/weblogo/
def plot_motif( self, cluster_num, motif_num, img_format='png' ):
#conn = sql3.connect(self.dbfile)
#cursor = conn.cursor()
#cursor.execute('select max(iteration) from motif_infos')
#iteration = cursor.fetchone()[0]
#query = 'select rowid from motif_infos where iteration=? and cluster=? and motif_num=?'
#params = [self.iteration, cluster_num, motif_num]
#cursor.execute(query, params)
#rowid = cursor.fetchone()[0]
#mot_info = pd.read_sql('select * from motif_infos where rowid=?', conn, params=[rowid])
#mot_sites = pd.read_sql('select * from meme_motif_sites where motif_info_id=?', conn, params=[rowid])
#motif_infos = self.tables['motif_infos']
#rowid = motif_infos[(motif_infos.iteration==self.iteration) &
# (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1
rowid = self.__get_motif_id(cluster_num, motif_num)
mot_sites = self.tables['meme_motif_sites'][self.tables['meme_motif_sites'].motif_info_id == rowid]
ldata = wl.LogoData.from_seqs(wl.SeqList(mot_sites.seq.values.tolist(), wl.unambiguous_dna_alphabet))
options = wl.LogoOptions()
options.fineprint = os.path.dirname(self.dbfile) + ' %03d %03d' % ( cluster_num, motif_num )
format = wl.LogoFormat(ldata, options)
format.color_scheme = wl.classic
format.resolution = 150
if img_format == 'png':
tmp = wl.png_formatter( ldata, format )
output = cStringIO.StringIO(tmp)
img = mpimg.imread(output)
plt.axis('off')
imgplot = plt.imshow( img )
#plt.show()
return plt
elif img_format == 'svg':
tmp = wl.svg_formatter( ldata, format )
return tmp
## note then can do e.g. ut.writeLines(svg.split('\n'),'test.svg')
| get_cols | identifier_name |
parse.py | #!/usr/bin/python
from utils.Helper import *
from utils.Classes import *
from utils.Route_Map import *
from utils.Write_Config import *
from utils.Write_Code import *
from utils.ktest_parser import *
def read_file(filename, directory=None, equiv=None):
with open(filename, 'r') as data_file:
data = json.load(data_file)
session_list_dict = {} # maps router (item) to list of sessions with neighbors
route_map_dict = {}
policy_dict = {}
community_list_dict = {}
as_path_list_dict = {}
route_filter_list_dict = {}
prefix_to_intf_dict = {}
router_dict = {}
if directory:
create_directory(directory)
config_dir = directory+'/configs/'
create_directory(config_dir)
config_file = None # init config_file; if directory not set, write_config_wrapper no-op
topology_file = "topology.json" if directory else None
for item in data['nodes']:
if data['nodes'][item]['configurationFormat'].find('CISCO') == -1:
print >> sys.stderr, "Warning:", data['nodes'][item]['name'], "does not use CISCO format"
continue
route_map_dict[item] = {} # dict of Route_Map_Clause name to list of clauses
if directory:
create_directory(config_dir + str(item))
config_filename = config_dir + str(item) + '/Quagga.conf'
config_file = open(config_filename, 'w')
os.chmod(config_filename, 0o775)
# Populate data structures
community_list_dict[item] = get_community_list(data, item)
as_path_list_dict[item] = get_as_path_list(data, item)
route_filter_list_dict[item] = get_route_filter(data, item)
prefix_to_intf_dict[item] = get_interfaces(data, item)
policy_dict[item], bgp_tuple = get_bgp_info(data, item, session_list_dict, \
prefix_to_intf_dict[item])
# NOTE: Modifies route_map_dict and policy_dict
get_route_map_info(data, item, router_dict, route_map_dict[item], \
policy_dict[item], community_list_dict[item], \
as_path_list_dict[item], route_filter_list_dict[item])
# Update policy attrs in session_list_dict; point to correct policy in route_map_dict
update_session_list_dict(item, session_list_dict, route_map_dict, \
policy_dict, prefix_to_intf_dict)
# Update router_dict entries with static routes
get_static_routes(data, item, router_dict[item])
# Functions strictly to write to config in utils/Write_Config.py
if config_file:
write_config_file(config_file, data, item, bgp_tuple, session_list_dict, \
community_list_dict[item], as_path_list_dict[item], \
route_filter_list_dict[item], route_map_dict[item], policy_dict[item])
if equiv:
# Create neighbor mapping with router mapping tuples
maps = router_neighbor_mappings(session_list_dict, equiv)
write_equivalence_check(session_list_dict, directory, maps)
write_code(session_list_dict, directory)
else:
if directory:
write_code(session_list_dict, directory)
topology = create_topology(prefix_to_intf_dict, directory, filename=topology_file)
return session_list_dict, prefix_to_intf_dict, topology
# -----------------------------------------------------------------------------
# FUNCTIONS TO GET DATAMODEL INFORMATION
# -----------------------------------------------------------------------------
def get_route_filter(data, item):
ipAccessList = set()
routeFilterList = set()
route_filter_list_dict = {}
# get ip access-list first to compare with ip prefix-list
if data['nodes'][item].get('ipAccessLists'):
for entry in data['nodes'][item]['ipAccessLists']:
ipAccessList.add(entry)
if data['nodes'][item].get('routeFilterLists'):
for entry in data['nodes'][item]['routeFilterLists']:
routeFilterList.add(entry)
# ip prefix-list (no seq info)
# Takes care of extended ACLs used to configure BGP
if data['nodes'][item].get('routeFilterLists'):
for entry in data['nodes'][item]['routeFilterLists']:
# if entry not in ipAccessList and entry.find('~') == -1:
if entry.find('~') == -1:
name = str(data['nodes'][item]['routeFilterLists'][entry]['name'])
# NOTE: Currently keep track of seq according to standards
# Haven't found in batfish datamodel for routeFilterLists
seq = 5
if not route_filter_list_dict.get(name):
route_filter_list_dict[name] = []
for line in data['nodes'][item]['routeFilterLists'][entry]['lines']:
prefix = str(line['prefix'])
# NOTE: Hack for routeFilterLists with 0 mask (lookup ipAccessLists instead)
if prefix.split('/')[1] == '0' and name in data['nodes'][item]['ipAccessLists']:
for ln in data['nodes'][item]['ipAccessLists'][name]['lines']:
# TODO: get information to populate Route_Filter_List
# prefix, lengthRange, seq
permit = True if ln['action'] == 'ACCEPT' else False
# prefix = str(ln['srcIps'][0]) # Compatible with old Batfish
prefix = str(ln['matchCondition']['headerSpace']['srcIps']["ipWildcard"]) # Compatible with new Batfish
# If no mask length, assume full host match
if len(prefix.split('/')) < 2:
prefix += '/32'
rng = prefix.split('/')[1]
rfl = Route_Filter_List(name, permit, prefix, rng, rng, seq)
route_filter_list_dict[name].append(rfl)
seq += 5
break
permit = True if line['action'] == 'ACCEPT' else False
rng = line['lengthRange'].split('-')
rfl = Route_Filter_List(name, permit, prefix, str(rng[0]), str(rng[1]), seq)
route_filter_list_dict[name].append(rfl)
seq += 5
return route_filter_list_dict
def get_community_list(data, item):
# ip community-list expanded
community_list_dict = {}
if data['nodes'][item].get('communityLists'):
for community in data['nodes'][item]['communityLists']:
communityName = str(data['nodes'][item]['communityLists'][community]['name'])
for line in data['nodes'][item]['communityLists'][community]['lines']:
if not community_list_dict.get(communityName):
community_list_dict[communityName] = []
regex = str(line['regex'])
permit = True if line['action'] == 'ACCEPT' else False
cl = Community_List(communityName, permit, regex)
community_list_dict[communityName].append(cl)
return community_list_dict
def get_as_path_list(data, item):
# ip as-path access-list
as_path_list_dict = {}
if data['nodes'][item].get('asPathAccessLists'):
for as_path_list in data['nodes'][item]['asPathAccessLists']:
as_path_list_name = str(data['nodes'][item]['asPathAccessLists'][as_path_list]['name'])
for line in data['nodes'][item]['asPathAccessLists'][as_path_list]['lines']:
if not as_path_list_dict.get(as_path_list_name):
as_path_list_dict[as_path_list_name] = []
regex = str(line['regex'])
permit = True if line['action'] == 'ACCEPT' else False
aspl = AS_Path_List(as_path_list_name, permit, regex)
as_path_list_dict[as_path_list_name].append(aspl)
return as_path_list_dict
# -----------------------------------------------------------------------------
# FUNCTIONS GATHER INFORMATION FOR DATA STRUCTURES
# -----------------------------------------------------------------------------
def get_bgp_info(data, item, session_list_dict, prefix_to_intf_dict):
policy_dict = {} # maps policies seen in vrfs to list of conjuncts of routingPolicies
ReturnBGP = collections.namedtuple('ReturnBGP', ['localAs', 'router_id', 'bgpProcess', \
'as_to_peer_group_dict', 'ngh_to_as_dict', 'ngh_list', 'prefix_to_intf_dict'])
if data['nodes'][item]['vrfs']['default'].get('bgpProcess'):
bgpProcess = data['nodes'][item]['vrfs']['default']['bgpProcess']
localAs = -1 # Initialize default value in case bgpProcess has no neighbors
router_id = bgpProcess['routerId']
ngh_list = list()
if bgpProcess.get('neighbors'):
ngh_list = list(bgpProcess['neighbors'].keys())
localAs = bgpProcess['neighbors'][ngh_list[0]]['localAs']
as_to_peer_group_dict = {} # as number mapped to peer-group name
ngh_to_as_dict = {} # neighbor ip matched to remote-as number (no group)
ngh_policy = {} # contains importPolicy and exportPolicy for each neighbor
for ngh in ngh_list:
group = bgpProcess['neighbors'][ngh].get('group')
remoteAs = bgpProcess['neighbors'][ngh]['remoteAs']
# Either map remote as to peer-group or neighbor IP
if group:
as_to_peer_group_dict[str(remoteAs)] = str(group)
'''
else:
ngh_to_as_dict[ngh.split('/')[0]] = str(remoteAs)
'''
ngh_to_as_dict[ngh.split('/')[0]] = str(remoteAs)
ngh_policy[ngh] = {}
ngh_policy[ngh]['exportPolicy'] = bgpProcess['neighbors'][ngh].get('exportPolicy')
ngh_policy[ngh]['importPolicy'] = bgpProcess['neighbors'][ngh].get('importPolicy')
policy_dict[ngh_policy[ngh]['exportPolicy']] = []
if ngh_policy[ngh]['importPolicy']:
|
else:
ses = Session(item, ngh, remoteAs, "", ngh_policy[ngh]['exportPolicy'])
if not session_list_dict.get(item):
session_list_dict[item] = []
# session_list_dict[item] = {}
session_list_dict[item].append(ses)
# session_list_dict[item][ngh] = ses
'''
if config_file:
write_bgp(config_file, localAs, router_id, bgpProcess, as_to_peer_group_dict, \
ngh_to_as_dict, ngh_list, prefix_to_intf_dict)
'''
else:
return policy_dict, None
bgp_tuple = ReturnBGP(localAs, router_id, bgpProcess, as_to_peer_group_dict, \
ngh_to_as_dict, ngh_list, prefix_to_intf_dict)
return policy_dict, bgp_tuple
def get_interfaces(data, item):
prefix_to_intf = {}
ip = None
for intf in data['nodes'][item]['interfaces']:
intf_obj = data['nodes'][item]['interfaces'][intf]
# Replace original interface name with new encoding
# Take care to have intf_name to be <= 15 characters
# new_intf_name = item + '-' + get_ethernet_port_name(intf)
new_intf_name = get_interface_name(item, intf)
if data['nodes'][item]['interfaces'][intf].get('allPrefixes'):
ip = str(data['nodes'][item]['interfaces'][intf].get('allPrefixes')[0])
# Map prefix to interface name for item dictionary
prefix_to_intf[ip] = new_intf_name
return prefix_to_intf
def get_static_routes(data, item, router):
if data['nodes'][item]['vrfs']['default'].get('staticRoutes'):
for route in data['nodes'][item]['vrfs']['default']['staticRoutes']:
network = str(route['network'])
next_hop = str(route['nextHopIp'])
if next_hop.find('NONE') != -1:
next_hop = None
cost = int(route['administrativeCost'])
router.add_static_route(Route(network, next_hop, cost))
def read_router_file(router_file):
routers = [] # list of equivalent router pairs
if router_file and os.path.exists(router_file):
with open(router_file, 'r') as f:
for line in f:
toks = line.rstrip().split(':')
routers.append({toks[0]:toks[1]})
return routers
if __name__ == '__main__':
parser = argparse.ArgumentParser("Read Batfish Nodes file and directory")
parser.add_argument("file", type=str, help="Batfish output topology JSON file.")
parser.add_argument("dir", type=str, help="Target directory to store configs.")
parser.add_argument("--nodes", type=str, help="Router(s)", nargs='*')
parser.add_argument("--rf", type=str, help="File containing equivalent router pairs")
args = parser.parse_args()
filename = args.file
directory = args.dir
nodes = args.nodes
# Router argument is a list of pairs, where fst is pair of routers and snd is map
# routers = [(("rtr53f3e.cogen", "rtr54f3e.cogen"), {"169.232.13.210/32": "169.232.13.208/32"})]
router_file = args.rf
routers = read_router_file(router_file) # List of dictionary pairs for routers
# Read the topology
session_list_dict, prefix_to_intf_dict, topology = read_file(filename, directory, routers)
'''
# Create neighbor mapping with router mapping tuples
with open('../exabgp_Trial/mapping.txt', 'r') as f:
mapping = json.loads(f.read())
test_announcements, test_map = generate_announcements(session_list_dict, directory, routers=nodes)
'''
| policy_dict[ngh_policy[ngh]['importPolicy']] = []
ses = Session(item, ngh, remoteAs, ngh_policy[ngh]['importPolicy'], \
ngh_policy[ngh]['exportPolicy']) | conditional_block |
parse.py | #!/usr/bin/python
from utils.Helper import *
from utils.Classes import *
from utils.Route_Map import *
from utils.Write_Config import *
from utils.Write_Code import *
from utils.ktest_parser import *
def read_file(filename, directory=None, equiv=None):
with open(filename, 'r') as data_file:
data = json.load(data_file)
session_list_dict = {} # maps router (item) to list of sessions with neighbors
route_map_dict = {}
policy_dict = {}
community_list_dict = {}
as_path_list_dict = {}
route_filter_list_dict = {}
prefix_to_intf_dict = {} |
if directory:
create_directory(directory)
config_dir = directory+'/configs/'
create_directory(config_dir)
config_file = None # init config_file; if directory not set, write_config_wrapper no-op
topology_file = "topology.json" if directory else None
for item in data['nodes']:
if data['nodes'][item]['configurationFormat'].find('CISCO') == -1:
print >> sys.stderr, "Warning:", data['nodes'][item]['name'], "does not use CISCO format"
continue
route_map_dict[item] = {} # dict of Route_Map_Clause name to list of clauses
if directory:
create_directory(config_dir + str(item))
config_filename = config_dir + str(item) + '/Quagga.conf'
config_file = open(config_filename, 'w')
os.chmod(config_filename, 0o775)
# Populate data structures
community_list_dict[item] = get_community_list(data, item)
as_path_list_dict[item] = get_as_path_list(data, item)
route_filter_list_dict[item] = get_route_filter(data, item)
prefix_to_intf_dict[item] = get_interfaces(data, item)
policy_dict[item], bgp_tuple = get_bgp_info(data, item, session_list_dict, \
prefix_to_intf_dict[item])
# NOTE: Modifies route_map_dict and policy_dict
get_route_map_info(data, item, router_dict, route_map_dict[item], \
policy_dict[item], community_list_dict[item], \
as_path_list_dict[item], route_filter_list_dict[item])
# Update policy attrs in session_list_dict; point to correct policy in route_map_dict
update_session_list_dict(item, session_list_dict, route_map_dict, \
policy_dict, prefix_to_intf_dict)
# Update router_dict entries with static routes
get_static_routes(data, item, router_dict[item])
# Functions strictly to write to config in utils/Write_Config.py
if config_file:
write_config_file(config_file, data, item, bgp_tuple, session_list_dict, \
community_list_dict[item], as_path_list_dict[item], \
route_filter_list_dict[item], route_map_dict[item], policy_dict[item])
if equiv:
# Create neighbor mapping with router mapping tuples
maps = router_neighbor_mappings(session_list_dict, equiv)
write_equivalence_check(session_list_dict, directory, maps)
write_code(session_list_dict, directory)
else:
if directory:
write_code(session_list_dict, directory)
topology = create_topology(prefix_to_intf_dict, directory, filename=topology_file)
return session_list_dict, prefix_to_intf_dict, topology
# -----------------------------------------------------------------------------
# FUNCTIONS TO GET DATAMODEL INFORMATION
# -----------------------------------------------------------------------------
def get_route_filter(data, item):
ipAccessList = set()
routeFilterList = set()
route_filter_list_dict = {}
# get ip access-list first to compare with ip prefix-list
if data['nodes'][item].get('ipAccessLists'):
for entry in data['nodes'][item]['ipAccessLists']:
ipAccessList.add(entry)
if data['nodes'][item].get('routeFilterLists'):
for entry in data['nodes'][item]['routeFilterLists']:
routeFilterList.add(entry)
# ip prefix-list (no seq info)
# Takes care of extended ACLs used to configure BGP
if data['nodes'][item].get('routeFilterLists'):
for entry in data['nodes'][item]['routeFilterLists']:
# if entry not in ipAccessList and entry.find('~') == -1:
if entry.find('~') == -1:
name = str(data['nodes'][item]['routeFilterLists'][entry]['name'])
# NOTE: Currently keep track of seq according to standards
# Haven't found in batfish datamodel for routeFilterLists
seq = 5
if not route_filter_list_dict.get(name):
route_filter_list_dict[name] = []
for line in data['nodes'][item]['routeFilterLists'][entry]['lines']:
prefix = str(line['prefix'])
# NOTE: Hack for routeFilterLists with 0 mask (lookup ipAccessLists instead)
if prefix.split('/')[1] == '0' and name in data['nodes'][item]['ipAccessLists']:
for ln in data['nodes'][item]['ipAccessLists'][name]['lines']:
# TODO: get information to populate Route_Filter_List
# prefix, lengthRange, seq
permit = True if ln['action'] == 'ACCEPT' else False
# prefix = str(ln['srcIps'][0]) # Compatible with old Batfish
prefix = str(ln['matchCondition']['headerSpace']['srcIps']["ipWildcard"]) # Compatible with new Batfish
# If no mask length, assume full host match
if len(prefix.split('/')) < 2:
prefix += '/32'
rng = prefix.split('/')[1]
rfl = Route_Filter_List(name, permit, prefix, rng, rng, seq)
route_filter_list_dict[name].append(rfl)
seq += 5
break
permit = True if line['action'] == 'ACCEPT' else False
rng = line['lengthRange'].split('-')
rfl = Route_Filter_List(name, permit, prefix, str(rng[0]), str(rng[1]), seq)
route_filter_list_dict[name].append(rfl)
seq += 5
return route_filter_list_dict
def get_community_list(data, item):
# ip community-list expanded
community_list_dict = {}
if data['nodes'][item].get('communityLists'):
for community in data['nodes'][item]['communityLists']:
communityName = str(data['nodes'][item]['communityLists'][community]['name'])
for line in data['nodes'][item]['communityLists'][community]['lines']:
if not community_list_dict.get(communityName):
community_list_dict[communityName] = []
regex = str(line['regex'])
permit = True if line['action'] == 'ACCEPT' else False
cl = Community_List(communityName, permit, regex)
community_list_dict[communityName].append(cl)
return community_list_dict
def get_as_path_list(data, item):
# ip as-path access-list
as_path_list_dict = {}
if data['nodes'][item].get('asPathAccessLists'):
for as_path_list in data['nodes'][item]['asPathAccessLists']:
as_path_list_name = str(data['nodes'][item]['asPathAccessLists'][as_path_list]['name'])
for line in data['nodes'][item]['asPathAccessLists'][as_path_list]['lines']:
if not as_path_list_dict.get(as_path_list_name):
as_path_list_dict[as_path_list_name] = []
regex = str(line['regex'])
permit = True if line['action'] == 'ACCEPT' else False
aspl = AS_Path_List(as_path_list_name, permit, regex)
as_path_list_dict[as_path_list_name].append(aspl)
return as_path_list_dict
# -----------------------------------------------------------------------------
# FUNCTIONS GATHER INFORMATION FOR DATA STRUCTURES
# -----------------------------------------------------------------------------
def get_bgp_info(data, item, session_list_dict, prefix_to_intf_dict):
policy_dict = {} # maps policies seen in vrfs to list of conjuncts of routingPolicies
ReturnBGP = collections.namedtuple('ReturnBGP', ['localAs', 'router_id', 'bgpProcess', \
'as_to_peer_group_dict', 'ngh_to_as_dict', 'ngh_list', 'prefix_to_intf_dict'])
if data['nodes'][item]['vrfs']['default'].get('bgpProcess'):
bgpProcess = data['nodes'][item]['vrfs']['default']['bgpProcess']
localAs = -1 # Initialize default value in case bgpProcess has no neighbors
router_id = bgpProcess['routerId']
ngh_list = list()
if bgpProcess.get('neighbors'):
ngh_list = list(bgpProcess['neighbors'].keys())
localAs = bgpProcess['neighbors'][ngh_list[0]]['localAs']
as_to_peer_group_dict = {} # as number mapped to peer-group name
ngh_to_as_dict = {} # neighbor ip matched to remote-as number (no group)
ngh_policy = {} # contains importPolicy and exportPolicy for each neighbor
for ngh in ngh_list:
group = bgpProcess['neighbors'][ngh].get('group')
remoteAs = bgpProcess['neighbors'][ngh]['remoteAs']
# Either map remote as to peer-group or neighbor IP
if group:
as_to_peer_group_dict[str(remoteAs)] = str(group)
'''
else:
ngh_to_as_dict[ngh.split('/')[0]] = str(remoteAs)
'''
ngh_to_as_dict[ngh.split('/')[0]] = str(remoteAs)
ngh_policy[ngh] = {}
ngh_policy[ngh]['exportPolicy'] = bgpProcess['neighbors'][ngh].get('exportPolicy')
ngh_policy[ngh]['importPolicy'] = bgpProcess['neighbors'][ngh].get('importPolicy')
policy_dict[ngh_policy[ngh]['exportPolicy']] = []
if ngh_policy[ngh]['importPolicy']:
policy_dict[ngh_policy[ngh]['importPolicy']] = []
ses = Session(item, ngh, remoteAs, ngh_policy[ngh]['importPolicy'], \
ngh_policy[ngh]['exportPolicy'])
else:
ses = Session(item, ngh, remoteAs, "", ngh_policy[ngh]['exportPolicy'])
if not session_list_dict.get(item):
session_list_dict[item] = []
# session_list_dict[item] = {}
session_list_dict[item].append(ses)
# session_list_dict[item][ngh] = ses
'''
if config_file:
write_bgp(config_file, localAs, router_id, bgpProcess, as_to_peer_group_dict, \
ngh_to_as_dict, ngh_list, prefix_to_intf_dict)
'''
else:
return policy_dict, None
bgp_tuple = ReturnBGP(localAs, router_id, bgpProcess, as_to_peer_group_dict, \
ngh_to_as_dict, ngh_list, prefix_to_intf_dict)
return policy_dict, bgp_tuple
def get_interfaces(data, item):
prefix_to_intf = {}
ip = None
for intf in data['nodes'][item]['interfaces']:
intf_obj = data['nodes'][item]['interfaces'][intf]
# Replace original interface name with new encoding
# Take care to have intf_name to be <= 15 characters
# new_intf_name = item + '-' + get_ethernet_port_name(intf)
new_intf_name = get_interface_name(item, intf)
if data['nodes'][item]['interfaces'][intf].get('allPrefixes'):
ip = str(data['nodes'][item]['interfaces'][intf].get('allPrefixes')[0])
# Map prefix to interface name for item dictionary
prefix_to_intf[ip] = new_intf_name
return prefix_to_intf
def get_static_routes(data, item, router):
if data['nodes'][item]['vrfs']['default'].get('staticRoutes'):
for route in data['nodes'][item]['vrfs']['default']['staticRoutes']:
network = str(route['network'])
next_hop = str(route['nextHopIp'])
if next_hop.find('NONE') != -1:
next_hop = None
cost = int(route['administrativeCost'])
router.add_static_route(Route(network, next_hop, cost))
def read_router_file(router_file):
routers = [] # list of equivalent router pairs
if router_file and os.path.exists(router_file):
with open(router_file, 'r') as f:
for line in f:
toks = line.rstrip().split(':')
routers.append({toks[0]:toks[1]})
return routers
if __name__ == '__main__':
parser = argparse.ArgumentParser("Read Batfish Nodes file and directory")
parser.add_argument("file", type=str, help="Batfish output topology JSON file.")
parser.add_argument("dir", type=str, help="Target directory to store configs.")
parser.add_argument("--nodes", type=str, help="Router(s)", nargs='*')
parser.add_argument("--rf", type=str, help="File containing equivalent router pairs")
args = parser.parse_args()
filename = args.file
directory = args.dir
nodes = args.nodes
# Router argument is a list of pairs, where fst is pair of routers and snd is map
# routers = [(("rtr53f3e.cogen", "rtr54f3e.cogen"), {"169.232.13.210/32": "169.232.13.208/32"})]
router_file = args.rf
routers = read_router_file(router_file) # List of dictionary pairs for routers
# Read the topology
session_list_dict, prefix_to_intf_dict, topology = read_file(filename, directory, routers)
'''
# Create neighbor mapping with router mapping tuples
with open('../exabgp_Trial/mapping.txt', 'r') as f:
mapping = json.loads(f.read())
test_announcements, test_map = generate_announcements(session_list_dict, directory, routers=nodes)
''' | router_dict = {} | random_line_split |
parse.py | #!/usr/bin/python
from utils.Helper import *
from utils.Classes import *
from utils.Route_Map import *
from utils.Write_Config import *
from utils.Write_Code import *
from utils.ktest_parser import *
def read_file(filename, directory=None, equiv=None):
with open(filename, 'r') as data_file:
data = json.load(data_file)
session_list_dict = {} # maps router (item) to list of sessions with neighbors
route_map_dict = {}
policy_dict = {}
community_list_dict = {}
as_path_list_dict = {}
route_filter_list_dict = {}
prefix_to_intf_dict = {}
router_dict = {}
if directory:
create_directory(directory)
config_dir = directory+'/configs/'
create_directory(config_dir)
config_file = None # init config_file; if directory not set, write_config_wrapper no-op
topology_file = "topology.json" if directory else None
for item in data['nodes']:
if data['nodes'][item]['configurationFormat'].find('CISCO') == -1:
print >> sys.stderr, "Warning:", data['nodes'][item]['name'], "does not use CISCO format"
continue
route_map_dict[item] = {} # dict of Route_Map_Clause name to list of clauses
if directory:
create_directory(config_dir + str(item))
config_filename = config_dir + str(item) + '/Quagga.conf'
config_file = open(config_filename, 'w')
os.chmod(config_filename, 0o775)
# Populate data structures
community_list_dict[item] = get_community_list(data, item)
as_path_list_dict[item] = get_as_path_list(data, item)
route_filter_list_dict[item] = get_route_filter(data, item)
prefix_to_intf_dict[item] = get_interfaces(data, item)
policy_dict[item], bgp_tuple = get_bgp_info(data, item, session_list_dict, \
prefix_to_intf_dict[item])
# NOTE: Modifies route_map_dict and policy_dict
get_route_map_info(data, item, router_dict, route_map_dict[item], \
policy_dict[item], community_list_dict[item], \
as_path_list_dict[item], route_filter_list_dict[item])
# Update policy attrs in session_list_dict; point to correct policy in route_map_dict
update_session_list_dict(item, session_list_dict, route_map_dict, \
policy_dict, prefix_to_intf_dict)
# Update router_dict entries with static routes
get_static_routes(data, item, router_dict[item])
# Functions strictly to write to config in utils/Write_Config.py
if config_file:
write_config_file(config_file, data, item, bgp_tuple, session_list_dict, \
community_list_dict[item], as_path_list_dict[item], \
route_filter_list_dict[item], route_map_dict[item], policy_dict[item])
if equiv:
# Create neighbor mapping with router mapping tuples
maps = router_neighbor_mappings(session_list_dict, equiv)
write_equivalence_check(session_list_dict, directory, maps)
write_code(session_list_dict, directory)
else:
if directory:
write_code(session_list_dict, directory)
topology = create_topology(prefix_to_intf_dict, directory, filename=topology_file)
return session_list_dict, prefix_to_intf_dict, topology
# -----------------------------------------------------------------------------
# FUNCTIONS TO GET DATAMODEL INFORMATION
# -----------------------------------------------------------------------------
def get_route_filter(data, item):
ipAccessList = set()
routeFilterList = set()
route_filter_list_dict = {}
# get ip access-list first to compare with ip prefix-list
if data['nodes'][item].get('ipAccessLists'):
for entry in data['nodes'][item]['ipAccessLists']:
ipAccessList.add(entry)
if data['nodes'][item].get('routeFilterLists'):
for entry in data['nodes'][item]['routeFilterLists']:
routeFilterList.add(entry)
# ip prefix-list (no seq info)
# Takes care of extended ACLs used to configure BGP
if data['nodes'][item].get('routeFilterLists'):
for entry in data['nodes'][item]['routeFilterLists']:
# if entry not in ipAccessList and entry.find('~') == -1:
if entry.find('~') == -1:
name = str(data['nodes'][item]['routeFilterLists'][entry]['name'])
# NOTE: Currently keep track of seq according to standards
# Haven't found in batfish datamodel for routeFilterLists
seq = 5
if not route_filter_list_dict.get(name):
route_filter_list_dict[name] = []
for line in data['nodes'][item]['routeFilterLists'][entry]['lines']:
prefix = str(line['prefix'])
# NOTE: Hack for routeFilterLists with 0 mask (lookup ipAccessLists instead)
if prefix.split('/')[1] == '0' and name in data['nodes'][item]['ipAccessLists']:
for ln in data['nodes'][item]['ipAccessLists'][name]['lines']:
# TODO: get information to populate Route_Filter_List
# prefix, lengthRange, seq
permit = True if ln['action'] == 'ACCEPT' else False
# prefix = str(ln['srcIps'][0]) # Compatible with old Batfish
prefix = str(ln['matchCondition']['headerSpace']['srcIps']["ipWildcard"]) # Compatible with new Batfish
# If no mask length, assume full host match
if len(prefix.split('/')) < 2:
prefix += '/32'
rng = prefix.split('/')[1]
rfl = Route_Filter_List(name, permit, prefix, rng, rng, seq)
route_filter_list_dict[name].append(rfl)
seq += 5
break
permit = True if line['action'] == 'ACCEPT' else False
rng = line['lengthRange'].split('-')
rfl = Route_Filter_List(name, permit, prefix, str(rng[0]), str(rng[1]), seq)
route_filter_list_dict[name].append(rfl)
seq += 5
return route_filter_list_dict
def get_community_list(data, item):
# ip community-list expanded
|
def get_as_path_list(data, item):
# ip as-path access-list
as_path_list_dict = {}
if data['nodes'][item].get('asPathAccessLists'):
for as_path_list in data['nodes'][item]['asPathAccessLists']:
as_path_list_name = str(data['nodes'][item]['asPathAccessLists'][as_path_list]['name'])
for line in data['nodes'][item]['asPathAccessLists'][as_path_list]['lines']:
if not as_path_list_dict.get(as_path_list_name):
as_path_list_dict[as_path_list_name] = []
regex = str(line['regex'])
permit = True if line['action'] == 'ACCEPT' else False
aspl = AS_Path_List(as_path_list_name, permit, regex)
as_path_list_dict[as_path_list_name].append(aspl)
return as_path_list_dict
# -----------------------------------------------------------------------------
# FUNCTIONS GATHER INFORMATION FOR DATA STRUCTURES
# -----------------------------------------------------------------------------
def get_bgp_info(data, item, session_list_dict, prefix_to_intf_dict):
policy_dict = {} # maps policies seen in vrfs to list of conjuncts of routingPolicies
ReturnBGP = collections.namedtuple('ReturnBGP', ['localAs', 'router_id', 'bgpProcess', \
'as_to_peer_group_dict', 'ngh_to_as_dict', 'ngh_list', 'prefix_to_intf_dict'])
if data['nodes'][item]['vrfs']['default'].get('bgpProcess'):
bgpProcess = data['nodes'][item]['vrfs']['default']['bgpProcess']
localAs = -1 # Initialize default value in case bgpProcess has no neighbors
router_id = bgpProcess['routerId']
ngh_list = list()
if bgpProcess.get('neighbors'):
ngh_list = list(bgpProcess['neighbors'].keys())
localAs = bgpProcess['neighbors'][ngh_list[0]]['localAs']
as_to_peer_group_dict = {} # as number mapped to peer-group name
ngh_to_as_dict = {} # neighbor ip matched to remote-as number (no group)
ngh_policy = {} # contains importPolicy and exportPolicy for each neighbor
for ngh in ngh_list:
group = bgpProcess['neighbors'][ngh].get('group')
remoteAs = bgpProcess['neighbors'][ngh]['remoteAs']
# Either map remote as to peer-group or neighbor IP
if group:
as_to_peer_group_dict[str(remoteAs)] = str(group)
'''
else:
ngh_to_as_dict[ngh.split('/')[0]] = str(remoteAs)
'''
ngh_to_as_dict[ngh.split('/')[0]] = str(remoteAs)
ngh_policy[ngh] = {}
ngh_policy[ngh]['exportPolicy'] = bgpProcess['neighbors'][ngh].get('exportPolicy')
ngh_policy[ngh]['importPolicy'] = bgpProcess['neighbors'][ngh].get('importPolicy')
policy_dict[ngh_policy[ngh]['exportPolicy']] = []
if ngh_policy[ngh]['importPolicy']:
policy_dict[ngh_policy[ngh]['importPolicy']] = []
ses = Session(item, ngh, remoteAs, ngh_policy[ngh]['importPolicy'], \
ngh_policy[ngh]['exportPolicy'])
else:
ses = Session(item, ngh, remoteAs, "", ngh_policy[ngh]['exportPolicy'])
if not session_list_dict.get(item):
session_list_dict[item] = []
# session_list_dict[item] = {}
session_list_dict[item].append(ses)
# session_list_dict[item][ngh] = ses
'''
if config_file:
write_bgp(config_file, localAs, router_id, bgpProcess, as_to_peer_group_dict, \
ngh_to_as_dict, ngh_list, prefix_to_intf_dict)
'''
else:
return policy_dict, None
bgp_tuple = ReturnBGP(localAs, router_id, bgpProcess, as_to_peer_group_dict, \
ngh_to_as_dict, ngh_list, prefix_to_intf_dict)
return policy_dict, bgp_tuple
def get_interfaces(data, item):
prefix_to_intf = {}
ip = None
for intf in data['nodes'][item]['interfaces']:
intf_obj = data['nodes'][item]['interfaces'][intf]
# Replace original interface name with new encoding
# Take care to have intf_name to be <= 15 characters
# new_intf_name = item + '-' + get_ethernet_port_name(intf)
new_intf_name = get_interface_name(item, intf)
if data['nodes'][item]['interfaces'][intf].get('allPrefixes'):
ip = str(data['nodes'][item]['interfaces'][intf].get('allPrefixes')[0])
# Map prefix to interface name for item dictionary
prefix_to_intf[ip] = new_intf_name
return prefix_to_intf
def get_static_routes(data, item, router):
if data['nodes'][item]['vrfs']['default'].get('staticRoutes'):
for route in data['nodes'][item]['vrfs']['default']['staticRoutes']:
network = str(route['network'])
next_hop = str(route['nextHopIp'])
if next_hop.find('NONE') != -1:
next_hop = None
cost = int(route['administrativeCost'])
router.add_static_route(Route(network, next_hop, cost))
def read_router_file(router_file):
routers = [] # list of equivalent router pairs
if router_file and os.path.exists(router_file):
with open(router_file, 'r') as f:
for line in f:
toks = line.rstrip().split(':')
routers.append({toks[0]:toks[1]})
return routers
if __name__ == '__main__':
parser = argparse.ArgumentParser("Read Batfish Nodes file and directory")
parser.add_argument("file", type=str, help="Batfish output topology JSON file.")
parser.add_argument("dir", type=str, help="Target directory to store configs.")
parser.add_argument("--nodes", type=str, help="Router(s)", nargs='*')
parser.add_argument("--rf", type=str, help="File containing equivalent router pairs")
args = parser.parse_args()
filename = args.file
directory = args.dir
nodes = args.nodes
# Router argument is a list of pairs, where fst is pair of routers and snd is map
# routers = [(("rtr53f3e.cogen", "rtr54f3e.cogen"), {"169.232.13.210/32": "169.232.13.208/32"})]
router_file = args.rf
routers = read_router_file(router_file) # List of dictionary pairs for routers
# Read the topology
session_list_dict, prefix_to_intf_dict, topology = read_file(filename, directory, routers)
'''
# Create neighbor mapping with router mapping tuples
with open('../exabgp_Trial/mapping.txt', 'r') as f:
mapping = json.loads(f.read())
test_announcements, test_map = generate_announcements(session_list_dict, directory, routers=nodes)
'''
| community_list_dict = {}
if data['nodes'][item].get('communityLists'):
for community in data['nodes'][item]['communityLists']:
communityName = str(data['nodes'][item]['communityLists'][community]['name'])
for line in data['nodes'][item]['communityLists'][community]['lines']:
if not community_list_dict.get(communityName):
community_list_dict[communityName] = []
regex = str(line['regex'])
permit = True if line['action'] == 'ACCEPT' else False
cl = Community_List(communityName, permit, regex)
community_list_dict[communityName].append(cl)
return community_list_dict | identifier_body |
parse.py | #!/usr/bin/python
from utils.Helper import *
from utils.Classes import *
from utils.Route_Map import *
from utils.Write_Config import *
from utils.Write_Code import *
from utils.ktest_parser import *
def read_file(filename, directory=None, equiv=None):
with open(filename, 'r') as data_file:
data = json.load(data_file)
session_list_dict = {} # maps router (item) to list of sessions with neighbors
route_map_dict = {}
policy_dict = {}
community_list_dict = {}
as_path_list_dict = {}
route_filter_list_dict = {}
prefix_to_intf_dict = {}
router_dict = {}
if directory:
create_directory(directory)
config_dir = directory+'/configs/'
create_directory(config_dir)
config_file = None # init config_file; if directory not set, write_config_wrapper no-op
topology_file = "topology.json" if directory else None
for item in data['nodes']:
if data['nodes'][item]['configurationFormat'].find('CISCO') == -1:
print >> sys.stderr, "Warning:", data['nodes'][item]['name'], "does not use CISCO format"
continue
route_map_dict[item] = {} # dict of Route_Map_Clause name to list of clauses
if directory:
create_directory(config_dir + str(item))
config_filename = config_dir + str(item) + '/Quagga.conf'
config_file = open(config_filename, 'w')
os.chmod(config_filename, 0o775)
# Populate data structures
community_list_dict[item] = get_community_list(data, item)
as_path_list_dict[item] = get_as_path_list(data, item)
route_filter_list_dict[item] = get_route_filter(data, item)
prefix_to_intf_dict[item] = get_interfaces(data, item)
policy_dict[item], bgp_tuple = get_bgp_info(data, item, session_list_dict, \
prefix_to_intf_dict[item])
# NOTE: Modifies route_map_dict and policy_dict
get_route_map_info(data, item, router_dict, route_map_dict[item], \
policy_dict[item], community_list_dict[item], \
as_path_list_dict[item], route_filter_list_dict[item])
# Update policy attrs in session_list_dict; point to correct policy in route_map_dict
update_session_list_dict(item, session_list_dict, route_map_dict, \
policy_dict, prefix_to_intf_dict)
# Update router_dict entries with static routes
get_static_routes(data, item, router_dict[item])
# Functions strictly to write to config in utils/Write_Config.py
if config_file:
write_config_file(config_file, data, item, bgp_tuple, session_list_dict, \
community_list_dict[item], as_path_list_dict[item], \
route_filter_list_dict[item], route_map_dict[item], policy_dict[item])
if equiv:
# Create neighbor mapping with router mapping tuples
maps = router_neighbor_mappings(session_list_dict, equiv)
write_equivalence_check(session_list_dict, directory, maps)
write_code(session_list_dict, directory)
else:
if directory:
write_code(session_list_dict, directory)
topology = create_topology(prefix_to_intf_dict, directory, filename=topology_file)
return session_list_dict, prefix_to_intf_dict, topology
# -----------------------------------------------------------------------------
# FUNCTIONS TO GET DATAMODEL INFORMATION
# -----------------------------------------------------------------------------
def get_route_filter(data, item):
ipAccessList = set()
routeFilterList = set()
route_filter_list_dict = {}
# get ip access-list first to compare with ip prefix-list
if data['nodes'][item].get('ipAccessLists'):
for entry in data['nodes'][item]['ipAccessLists']:
ipAccessList.add(entry)
if data['nodes'][item].get('routeFilterLists'):
for entry in data['nodes'][item]['routeFilterLists']:
routeFilterList.add(entry)
# ip prefix-list (no seq info)
# Takes care of extended ACLs used to configure BGP
if data['nodes'][item].get('routeFilterLists'):
for entry in data['nodes'][item]['routeFilterLists']:
# if entry not in ipAccessList and entry.find('~') == -1:
if entry.find('~') == -1:
name = str(data['nodes'][item]['routeFilterLists'][entry]['name'])
# NOTE: Currently keep track of seq according to standards
# Haven't found in batfish datamodel for routeFilterLists
seq = 5
if not route_filter_list_dict.get(name):
route_filter_list_dict[name] = []
for line in data['nodes'][item]['routeFilterLists'][entry]['lines']:
prefix = str(line['prefix'])
# NOTE: Hack for routeFilterLists with 0 mask (lookup ipAccessLists instead)
if prefix.split('/')[1] == '0' and name in data['nodes'][item]['ipAccessLists']:
for ln in data['nodes'][item]['ipAccessLists'][name]['lines']:
# TODO: get information to populate Route_Filter_List
# prefix, lengthRange, seq
permit = True if ln['action'] == 'ACCEPT' else False
# prefix = str(ln['srcIps'][0]) # Compatible with old Batfish
prefix = str(ln['matchCondition']['headerSpace']['srcIps']["ipWildcard"]) # Compatible with new Batfish
# If no mask length, assume full host match
if len(prefix.split('/')) < 2:
prefix += '/32'
rng = prefix.split('/')[1]
rfl = Route_Filter_List(name, permit, prefix, rng, rng, seq)
route_filter_list_dict[name].append(rfl)
seq += 5
break
permit = True if line['action'] == 'ACCEPT' else False
rng = line['lengthRange'].split('-')
rfl = Route_Filter_List(name, permit, prefix, str(rng[0]), str(rng[1]), seq)
route_filter_list_dict[name].append(rfl)
seq += 5
return route_filter_list_dict
def get_community_list(data, item):
# ip community-list expanded
community_list_dict = {}
if data['nodes'][item].get('communityLists'):
for community in data['nodes'][item]['communityLists']:
communityName = str(data['nodes'][item]['communityLists'][community]['name'])
for line in data['nodes'][item]['communityLists'][community]['lines']:
if not community_list_dict.get(communityName):
community_list_dict[communityName] = []
regex = str(line['regex'])
permit = True if line['action'] == 'ACCEPT' else False
cl = Community_List(communityName, permit, regex)
community_list_dict[communityName].append(cl)
return community_list_dict
def get_as_path_list(data, item):
# ip as-path access-list
as_path_list_dict = {}
if data['nodes'][item].get('asPathAccessLists'):
for as_path_list in data['nodes'][item]['asPathAccessLists']:
as_path_list_name = str(data['nodes'][item]['asPathAccessLists'][as_path_list]['name'])
for line in data['nodes'][item]['asPathAccessLists'][as_path_list]['lines']:
if not as_path_list_dict.get(as_path_list_name):
as_path_list_dict[as_path_list_name] = []
regex = str(line['regex'])
permit = True if line['action'] == 'ACCEPT' else False
aspl = AS_Path_List(as_path_list_name, permit, regex)
as_path_list_dict[as_path_list_name].append(aspl)
return as_path_list_dict
# -----------------------------------------------------------------------------
# FUNCTIONS GATHER INFORMATION FOR DATA STRUCTURES
# -----------------------------------------------------------------------------
def | (data, item, session_list_dict, prefix_to_intf_dict):
policy_dict = {} # maps policies seen in vrfs to list of conjuncts of routingPolicies
ReturnBGP = collections.namedtuple('ReturnBGP', ['localAs', 'router_id', 'bgpProcess', \
'as_to_peer_group_dict', 'ngh_to_as_dict', 'ngh_list', 'prefix_to_intf_dict'])
if data['nodes'][item]['vrfs']['default'].get('bgpProcess'):
bgpProcess = data['nodes'][item]['vrfs']['default']['bgpProcess']
localAs = -1 # Initialize default value in case bgpProcess has no neighbors
router_id = bgpProcess['routerId']
ngh_list = list()
if bgpProcess.get('neighbors'):
ngh_list = list(bgpProcess['neighbors'].keys())
localAs = bgpProcess['neighbors'][ngh_list[0]]['localAs']
as_to_peer_group_dict = {} # as number mapped to peer-group name
ngh_to_as_dict = {} # neighbor ip matched to remote-as number (no group)
ngh_policy = {} # contains importPolicy and exportPolicy for each neighbor
for ngh in ngh_list:
group = bgpProcess['neighbors'][ngh].get('group')
remoteAs = bgpProcess['neighbors'][ngh]['remoteAs']
# Either map remote as to peer-group or neighbor IP
if group:
as_to_peer_group_dict[str(remoteAs)] = str(group)
'''
else:
ngh_to_as_dict[ngh.split('/')[0]] = str(remoteAs)
'''
ngh_to_as_dict[ngh.split('/')[0]] = str(remoteAs)
ngh_policy[ngh] = {}
ngh_policy[ngh]['exportPolicy'] = bgpProcess['neighbors'][ngh].get('exportPolicy')
ngh_policy[ngh]['importPolicy'] = bgpProcess['neighbors'][ngh].get('importPolicy')
policy_dict[ngh_policy[ngh]['exportPolicy']] = []
if ngh_policy[ngh]['importPolicy']:
policy_dict[ngh_policy[ngh]['importPolicy']] = []
ses = Session(item, ngh, remoteAs, ngh_policy[ngh]['importPolicy'], \
ngh_policy[ngh]['exportPolicy'])
else:
ses = Session(item, ngh, remoteAs, "", ngh_policy[ngh]['exportPolicy'])
if not session_list_dict.get(item):
session_list_dict[item] = []
# session_list_dict[item] = {}
session_list_dict[item].append(ses)
# session_list_dict[item][ngh] = ses
'''
if config_file:
write_bgp(config_file, localAs, router_id, bgpProcess, as_to_peer_group_dict, \
ngh_to_as_dict, ngh_list, prefix_to_intf_dict)
'''
else:
return policy_dict, None
bgp_tuple = ReturnBGP(localAs, router_id, bgpProcess, as_to_peer_group_dict, \
ngh_to_as_dict, ngh_list, prefix_to_intf_dict)
return policy_dict, bgp_tuple
def get_interfaces(data, item):
prefix_to_intf = {}
ip = None
for intf in data['nodes'][item]['interfaces']:
intf_obj = data['nodes'][item]['interfaces'][intf]
# Replace original interface name with new encoding
# Take care to have intf_name to be <= 15 characters
# new_intf_name = item + '-' + get_ethernet_port_name(intf)
new_intf_name = get_interface_name(item, intf)
if data['nodes'][item]['interfaces'][intf].get('allPrefixes'):
ip = str(data['nodes'][item]['interfaces'][intf].get('allPrefixes')[0])
# Map prefix to interface name for item dictionary
prefix_to_intf[ip] = new_intf_name
return prefix_to_intf
def get_static_routes(data, item, router):
if data['nodes'][item]['vrfs']['default'].get('staticRoutes'):
for route in data['nodes'][item]['vrfs']['default']['staticRoutes']:
network = str(route['network'])
next_hop = str(route['nextHopIp'])
if next_hop.find('NONE') != -1:
next_hop = None
cost = int(route['administrativeCost'])
router.add_static_route(Route(network, next_hop, cost))
def read_router_file(router_file):
routers = [] # list of equivalent router pairs
if router_file and os.path.exists(router_file):
with open(router_file, 'r') as f:
for line in f:
toks = line.rstrip().split(':')
routers.append({toks[0]:toks[1]})
return routers
if __name__ == '__main__':
parser = argparse.ArgumentParser("Read Batfish Nodes file and directory")
parser.add_argument("file", type=str, help="Batfish output topology JSON file.")
parser.add_argument("dir", type=str, help="Target directory to store configs.")
parser.add_argument("--nodes", type=str, help="Router(s)", nargs='*')
parser.add_argument("--rf", type=str, help="File containing equivalent router pairs")
args = parser.parse_args()
filename = args.file
directory = args.dir
nodes = args.nodes
# Router argument is a list of pairs, where fst is pair of routers and snd is map
# routers = [(("rtr53f3e.cogen", "rtr54f3e.cogen"), {"169.232.13.210/32": "169.232.13.208/32"})]
router_file = args.rf
routers = read_router_file(router_file) # List of dictionary pairs for routers
# Read the topology
session_list_dict, prefix_to_intf_dict, topology = read_file(filename, directory, routers)
'''
# Create neighbor mapping with router mapping tuples
with open('../exabgp_Trial/mapping.txt', 'r') as f:
mapping = json.loads(f.read())
test_announcements, test_map = generate_announcements(session_list_dict, directory, routers=nodes)
'''
| get_bgp_info | identifier_name |
cli.rs | use std::process;
use std::str::FromStr;
use std::string::ToString;
use console::Term;
use structopt::StructOpt;
use crate::bat::assets::HighlightingAssets;
use crate::bat::output::PagingMode;
use crate::config;
use crate::style;
#[derive(StructOpt, Clone, Debug)]
#[structopt(
name = "delta",
about = "A syntax-highlighter for git and diff output",
after_help = "\
Colors
------
All delta color options work the same way. There are two ways to specify a color:
1. RGB hex code
An example of passing an RGB hex code is:
--file-color=\"#0e7c0e\"
2. ANSI color name
There are 8 ANSI color names:
black, red, green, yellow, blue, magenta, cyan, white.
In addition, all of them have a bright form:
bright-black, bright-red, bright-green, bright-yellow, bright-blue, bright-magenta, bright-cyan, bright-white
An example is:
--file-color=\"green\"
Unlike RGB hex codes, ANSI color names are just names: you can choose the exact color that each
name corresponds to in the settings of your terminal application (the application you use to run
command line programs). This means that if you use ANSI color names, and you change the color
theme used by your terminal, then delta's colors will respond automatically, without needing to
change the delta command line.
\"purple\" is accepted as a synonym for \"magenta\". Color names and codes are case-insensitive.
"
)]
pub struct Opt {
/// Use default colors appropriate for a light terminal background. For more control, see the other
/// color options.
#[structopt(long = "light")]
pub light: bool,
/// Use default colors appropriate for a dark terminal background. For more control, see the
/// other color options.
#[structopt(long = "dark")]
pub dark: bool,
#[structopt(long = "minus-color")]
/// The background color to use for removed lines.
pub minus_color: Option<String>,
#[structopt(long = "minus-emph-color")]
/// The background color to use for emphasized sections of removed lines.
pub minus_emph_color: Option<String>,
#[structopt(long = "plus-color")]
/// The background color to use for added lines.
pub plus_color: Option<String>,
#[structopt(long = "plus-emph-color")]
/// The background color to use for emphasized sections of added lines.
pub plus_emph_color: Option<String>,
#[structopt(long = "theme", env = "BAT_THEME")]
/// The code syntax highlighting theme to use. Use --theme=none to disable syntax highlighting.
/// If the theme is not set using this option, it will be taken from the BAT_THEME environment
/// variable, if that contains a valid theme name. Use --list-themes and --compare-themes to
/// view available themes. Note that the choice of theme only affects code syntax highlighting.
/// See --commit-color, --file-color, --hunk-color to configure the colors of other parts of
/// the diff output.
pub theme: Option<String>,
#[structopt(long = "highlight-removed")]
/// Apply syntax highlighting to removed lines. The default is to
/// apply syntax highlighting to unchanged and new lines only.
pub highlight_removed: bool,
#[structopt(long = "commit-style", default_value = "plain")]
/// Formatting style for the commit section of git output. Options
/// are: plain, box.
pub commit_style: SectionStyle,
#[structopt(long = "commit-color", default_value = "yellow")]
/// Color for the commit section of git output.
pub commit_color: String,
#[structopt(long = "file-style", default_value = "underline")]
/// Formatting style for the file section of git output. Options
/// are: plain, box, underline.
pub file_style: SectionStyle,
#[structopt(long = "file-color", default_value = "blue")]
/// Color for the file section of git output.
pub file_color: String,
#[structopt(long = "hunk-style", default_value = "box")]
/// Formatting style for the hunk-marker section of git output. Options
/// are: plain, box.
pub hunk_style: SectionStyle,
#[structopt(long = "hunk-color", default_value = "blue")]
/// Color for the hunk-marker section of git output.
pub hunk_color: String,
/// The width (in characters) of the background color
/// highlighting. By default, the width is the current terminal
/// width. Use --width=variable to apply background colors to the
/// end of each line, without right padding to equal width.
#[structopt(short = "w", long = "width")]
pub width: Option<String>,
/// The number of spaces to replace tab characters with. Use --tabs=0 to pass tab characters
/// through directly, but note that in that case delta will calculate line widths assuming tabs
/// occupy one character's width on the screen: if your terminal renders tabs as more than than
/// one character wide then delta's output will look incorrect.
#[structopt(long = "tabs", default_value = "4")]
pub tab_width: usize,
/// Show the command-line arguments (RGB hex codes) for the background colors that are in
/// effect. The hex codes are displayed with their associated background color. This option can
/// be combined with --light and --dark to view the background colors for those modes. It can
/// also be used to experiment with different RGB hex codes by combining this option with
/// --minus-color, --minus-emph-color, --plus-color, --plus-emph-color.
#[structopt(long = "show-background-colors")]
pub show_background_colors: bool,
/// List supported languages and associated file extensions.
#[structopt(long = "list-languages")]
pub list_languages: bool,
/// List available syntax-highlighting color themes.
#[structopt(long = "list-theme-names")]
pub list_theme_names: bool,
/// List available syntax highlighting themes, each with an example of highlighted diff output.
/// If diff output is supplied on standard input then this will be used for the demo. For
/// example: `git show --color=always | delta --list-themes`.
#[structopt(long = "list-themes")]
pub list_themes: bool,
/// The maximum distance between two lines for them to be inferred to be homologous. Homologous
/// line pairs are highlighted according to the deletion and insertion operations transforming
/// one into the other.
#[structopt(long = "max-line-distance", default_value = "0.3")]
pub max_line_distance: f64,
/// Whether to use a pager when displaying output. Options are: auto, always, and never. The
/// default pager is `less`: this can be altered by setting the environment variables BAT_PAGER
/// or PAGER (BAT_PAGER has priority).
#[structopt(long = "paging", default_value = "auto")]
pub paging_mode: String,
}
#[derive(Clone, Debug, PartialEq)]
pub enum | {
Box,
Plain,
Underline,
}
// TODO: clean up enum parsing and error handling
#[derive(Debug)]
pub enum Error {
SectionStyleParseError,
}
impl FromStr for SectionStyle {
type Err = Error;
fn from_str(s: &str) -> Result<SectionStyle, Error> {
match s.to_lowercase().as_str() {
"box" => Ok(SectionStyle::Box),
"plain" => Ok(SectionStyle::Plain),
"underline" => Ok(SectionStyle::Underline),
_ => Err(Error::SectionStyleParseError),
}
}
}
impl ToString for Error {
fn to_string(&self) -> String {
"".to_string()
}
}
pub fn process_command_line_arguments<'a>(
assets: &'a HighlightingAssets,
opt: &'a Opt,
) -> config::Config<'a> {
if opt.light && opt.dark {
eprintln!("--light and --dark cannot be used together.");
process::exit(1);
}
match &opt.theme {
Some(theme) if !style::is_no_syntax_highlighting_theme_name(&theme) => {
if !assets.theme_set.themes.contains_key(theme.as_str()) {
eprintln!("Invalid theme: '{}'", theme);
process::exit(1);
}
let is_light_theme = style::is_light_theme(&theme);
if is_light_theme && opt.dark {
eprintln!(
"{} is a light theme, but you supplied --dark. \
If you use --theme, you do not need to supply --light or --dark.",
theme
);
process::exit(1);
} else if !is_light_theme && opt.light {
eprintln!(
"{} is a dark theme, but you supplied --light. \
If you use --theme, you do not need to supply --light or --dark.",
theme
);
process::exit(1);
}
}
_ => (),
};
// We do not use the full width, in case `less --status-column` is in effect. See #41 and #10.
// TODO: There seems to be some confusion in the accounting: we are actually leaving 2
// characters unused for less at the right edge of the terminal, despite the subtraction of 1
// here.
let available_terminal_width = (Term::stdout().size().1 - 1) as usize;
let background_color_width = match opt.width.as_ref().map(String::as_str) {
Some("variable") => None,
Some(width) => Some(
width
.parse::<usize>()
.unwrap_or_else(|_| panic!("Invalid width: {}", width)),
),
None => Some(available_terminal_width),
};
let paging_mode = match opt.paging_mode.as_ref() {
"always" => PagingMode::Always,
"never" => PagingMode::Never,
"auto" => PagingMode::QuitIfOneScreen,
_ => {
eprintln!(
"Invalid paging value: {} (valid values are \"always\", \"never\", and \"auto\")",
opt.paging_mode
);
process::exit(1);
}
};
config::get_config(
opt,
&assets.syntax_set,
&assets.theme_set,
available_terminal_width,
background_color_width,
paging_mode,
)
}
| SectionStyle | identifier_name |
cli.rs | use std::process;
use std::str::FromStr;
use std::string::ToString;
use console::Term;
use structopt::StructOpt;
use crate::bat::assets::HighlightingAssets;
use crate::bat::output::PagingMode;
use crate::config;
use crate::style;
#[derive(StructOpt, Clone, Debug)]
#[structopt(
name = "delta",
about = "A syntax-highlighter for git and diff output",
after_help = "\
Colors
------
All delta color options work the same way. There are two ways to specify a color:
1. RGB hex code
An example of passing an RGB hex code is:
--file-color=\"#0e7c0e\"
2. ANSI color name
There are 8 ANSI color names:
black, red, green, yellow, blue, magenta, cyan, white.
In addition, all of them have a bright form:
bright-black, bright-red, bright-green, bright-yellow, bright-blue, bright-magenta, bright-cyan, bright-white
An example is:
--file-color=\"green\"
Unlike RGB hex codes, ANSI color names are just names: you can choose the exact color that each
name corresponds to in the settings of your terminal application (the application you use to run
command line programs). This means that if you use ANSI color names, and you change the color
theme used by your terminal, then delta's colors will respond automatically, without needing to
change the delta command line.
\"purple\" is accepted as a synonym for \"magenta\". Color names and codes are case-insensitive.
"
)]
pub struct Opt {
/// Use default colors appropriate for a light terminal background. For more control, see the other
/// color options.
#[structopt(long = "light")]
pub light: bool,
/// Use default colors appropriate for a dark terminal background. For more control, see the
/// other color options.
#[structopt(long = "dark")]
pub dark: bool,
#[structopt(long = "minus-color")]
/// The background color to use for removed lines.
pub minus_color: Option<String>,
#[structopt(long = "minus-emph-color")]
/// The background color to use for emphasized sections of removed lines.
pub minus_emph_color: Option<String>,
#[structopt(long = "plus-color")]
/// The background color to use for added lines.
pub plus_color: Option<String>,
#[structopt(long = "plus-emph-color")]
/// The background color to use for emphasized sections of added lines.
pub plus_emph_color: Option<String>,
#[structopt(long = "theme", env = "BAT_THEME")]
/// The code syntax highlighting theme to use. Use --theme=none to disable syntax highlighting.
/// If the theme is not set using this option, it will be taken from the BAT_THEME environment
/// variable, if that contains a valid theme name. Use --list-themes and --compare-themes to
/// view available themes. Note that the choice of theme only affects code syntax highlighting.
/// See --commit-color, --file-color, --hunk-color to configure the colors of other parts of
/// the diff output.
pub theme: Option<String>,
#[structopt(long = "highlight-removed")]
/// Apply syntax highlighting to removed lines. The default is to
/// apply syntax highlighting to unchanged and new lines only.
pub highlight_removed: bool,
#[structopt(long = "commit-style", default_value = "plain")]
/// Formatting style for the commit section of git output. Options
/// are: plain, box.
pub commit_style: SectionStyle,
#[structopt(long = "commit-color", default_value = "yellow")]
/// Color for the commit section of git output.
pub commit_color: String,
#[structopt(long = "file-style", default_value = "underline")]
/// Formatting style for the file section of git output. Options
/// are: plain, box, underline.
pub file_style: SectionStyle,
#[structopt(long = "file-color", default_value = "blue")]
/// Color for the file section of git output.
pub file_color: String,
#[structopt(long = "hunk-style", default_value = "box")]
/// Formatting style for the hunk-marker section of git output. Options
/// are: plain, box.
pub hunk_style: SectionStyle,
#[structopt(long = "hunk-color", default_value = "blue")]
/// Color for the hunk-marker section of git output.
pub hunk_color: String,
/// The width (in characters) of the background color
/// highlighting. By default, the width is the current terminal
/// width. Use --width=variable to apply background colors to the
/// end of each line, without right padding to equal width.
#[structopt(short = "w", long = "width")]
pub width: Option<String>,
/// The number of spaces to replace tab characters with. Use --tabs=0 to pass tab characters
/// through directly, but note that in that case delta will calculate line widths assuming tabs
/// occupy one character's width on the screen: if your terminal renders tabs as more than than
/// one character wide then delta's output will look incorrect.
#[structopt(long = "tabs", default_value = "4")]
pub tab_width: usize,
| /// be combined with --light and --dark to view the background colors for those modes. It can
/// also be used to experiment with different RGB hex codes by combining this option with
/// --minus-color, --minus-emph-color, --plus-color, --plus-emph-color.
#[structopt(long = "show-background-colors")]
pub show_background_colors: bool,
/// List supported languages and associated file extensions.
#[structopt(long = "list-languages")]
pub list_languages: bool,
/// List available syntax-highlighting color themes.
#[structopt(long = "list-theme-names")]
pub list_theme_names: bool,
/// List available syntax highlighting themes, each with an example of highlighted diff output.
/// If diff output is supplied on standard input then this will be used for the demo. For
/// example: `git show --color=always | delta --list-themes`.
#[structopt(long = "list-themes")]
pub list_themes: bool,
/// The maximum distance between two lines for them to be inferred to be homologous. Homologous
/// line pairs are highlighted according to the deletion and insertion operations transforming
/// one into the other.
#[structopt(long = "max-line-distance", default_value = "0.3")]
pub max_line_distance: f64,
/// Whether to use a pager when displaying output. Options are: auto, always, and never. The
/// default pager is `less`: this can be altered by setting the environment variables BAT_PAGER
/// or PAGER (BAT_PAGER has priority).
#[structopt(long = "paging", default_value = "auto")]
pub paging_mode: String,
}
#[derive(Clone, Debug, PartialEq)]
pub enum SectionStyle {
Box,
Plain,
Underline,
}
// TODO: clean up enum parsing and error handling
#[derive(Debug)]
pub enum Error {
SectionStyleParseError,
}
impl FromStr for SectionStyle {
type Err = Error;
fn from_str(s: &str) -> Result<SectionStyle, Error> {
match s.to_lowercase().as_str() {
"box" => Ok(SectionStyle::Box),
"plain" => Ok(SectionStyle::Plain),
"underline" => Ok(SectionStyle::Underline),
_ => Err(Error::SectionStyleParseError),
}
}
}
impl ToString for Error {
fn to_string(&self) -> String {
"".to_string()
}
}
pub fn process_command_line_arguments<'a>(
assets: &'a HighlightingAssets,
opt: &'a Opt,
) -> config::Config<'a> {
if opt.light && opt.dark {
eprintln!("--light and --dark cannot be used together.");
process::exit(1);
}
match &opt.theme {
Some(theme) if !style::is_no_syntax_highlighting_theme_name(&theme) => {
if !assets.theme_set.themes.contains_key(theme.as_str()) {
eprintln!("Invalid theme: '{}'", theme);
process::exit(1);
}
let is_light_theme = style::is_light_theme(&theme);
if is_light_theme && opt.dark {
eprintln!(
"{} is a light theme, but you supplied --dark. \
If you use --theme, you do not need to supply --light or --dark.",
theme
);
process::exit(1);
} else if !is_light_theme && opt.light {
eprintln!(
"{} is a dark theme, but you supplied --light. \
If you use --theme, you do not need to supply --light or --dark.",
theme
);
process::exit(1);
}
}
_ => (),
};
// We do not use the full width, in case `less --status-column` is in effect. See #41 and #10.
// TODO: There seems to be some confusion in the accounting: we are actually leaving 2
// characters unused for less at the right edge of the terminal, despite the subtraction of 1
// here.
let available_terminal_width = (Term::stdout().size().1 - 1) as usize;
let background_color_width = match opt.width.as_ref().map(String::as_str) {
Some("variable") => None,
Some(width) => Some(
width
.parse::<usize>()
.unwrap_or_else(|_| panic!("Invalid width: {}", width)),
),
None => Some(available_terminal_width),
};
let paging_mode = match opt.paging_mode.as_ref() {
"always" => PagingMode::Always,
"never" => PagingMode::Never,
"auto" => PagingMode::QuitIfOneScreen,
_ => {
eprintln!(
"Invalid paging value: {} (valid values are \"always\", \"never\", and \"auto\")",
opt.paging_mode
);
process::exit(1);
}
};
config::get_config(
opt,
&assets.syntax_set,
&assets.theme_set,
available_terminal_width,
background_color_width,
paging_mode,
)
} | /// Show the command-line arguments (RGB hex codes) for the background colors that are in
/// effect. The hex codes are displayed with their associated background color. This option can | random_line_split |
HypeClipPath.js | /*!
Hype ClipPath 1.7.2
copyright (c) 2021 Max Ziebell, (https://maxziebell.de). MIT-license
*/
/*
* Version-History
* 0.9 (Beta) Initial release under MIT-license
* 1.0 First official release, inverted logic and still a function
* 1.1 With a little refactoring HTML clipping is supported, limitations apply
* 1.2 Fixed a bug determining if it's a SVG or Group
* 1.3 Converted to full extension. Added new methods on hypeDocument
* 1.4 Added live preview in IDE
* 1.5 Fixed some preview issues (nudging, delay)
* 1.6 Fixed some preview issues (zoom, at the cost of antialias)
* 1.7 Using Mutation Observer (not only IDE), debouncing and performance update
* 1.7.1 fixed Safari update bug
* 1.7.2 fixed querySelector bug (thanks to michelangelo)
*/
if("HypeClipPath" in window === false) window['HypeClipPath'] = (function () {
var kSvgNS = 'http://www.w3.org/2000/svg';
/* debounce updates to frames (see sceneLoad) */
var _tickId;
var _tickRunning = false;
var _updatesToRunOnTick = {};
/* @const */
const _isHypeIDE = window.location.href.indexOf("/Hype/Scratch/HypeScratch.") != -1;
var _lookup = {};
/* FPS */
var _FPS;
/* Compability */
var _supportsClipPath = false;
if(window.CSS && window.CSS.supports){
_supportsClipPath = CSS.supports("clip-path", "url(#test)");
}
/* Public query: report whether this browser can handle CSS clip-path with
   url() references (feature-detected once at load time into _supportsClipPath). */
function supportsClipPath() {
  return _supportsClipPath;
}
/* clip path generator function */
function generateClipPath() {
  // Build an empty <clipPath><path/></clipPath> subtree in the SVG namespace;
  // the caller fills in the attributes later via updateClipPath().
  var clip = document.createElementNS(kSvgNS, 'clipPath');
  clip.appendChild(document.createElementNS(kSvgNS, 'path'));
  return clip;
}
/* clip path update function */
function updateClipPath(clipPathElm, obj) {
  // Copy every truthy attribute from the descriptor onto a node.
  var copyAttributes = function (node, attributes) {
    for (var key in attributes) {
      if (attributes[key]) {
        node.setAttribute(key, attributes[key]);
      }
    }
  };
  // Path data (d, clip-rule, transform, …) goes on the inner <path> …
  copyAttributes(clipPathElm.querySelector('path'), obj.pathAttributes);
  // … while the unique id and rendering hints live on the <clipPath> itself.
  copyAttributes(clipPathElm, obj.clipPathAttributes);
}
/* defs generator function */
function generateDefs() {
  // A bare <defs> container; generated clip paths are appended into it later.
  return document.createElementNS(kSvgNS, 'defs');
}
/* extend Hype */
/* Installs the public clip-path API onto hypeDocument: getCurrentSceneElement,
   applyClipPaths, applyClipPathToElement, calculateAndStoreTransformForElement
   and generateClipPathForElement. Registered for the HypeDocumentLoad event,
   and invoked directly by the IDE preview shim with a faked hypeDocument. */
function extendHype(hypeDocument, element, event) {
/* init document specific lookup for mutation observer */
var hypeDocId = hypeDocument.documentId();
_lookup[hypeDocId] = {};
/* hypeDocument function to get cached current scene (caching depends on reset of cache in sceneUnload) */
hypeDocument.getCurrentSceneElement = function(){
if (_lookup[hypeDocId]['currentSceneElm']==undefined){
_lookup[hypeDocId]['currentSceneElm'] = document.querySelector('#'+hypeDocument.documentId()+' > .HYPE_scene[style*="block"]');
}
return _lookup[hypeDocId]['currentSceneElm'];
}
/* hypeDocument function to apply ALL clip path in scene (debounced to framerate) */
hypeDocument.applyClipPaths = function(){
/* fetch scene */
var sceneElm = hypeDocument.getCurrentSceneElement();
/* fetch candidates and loop over them */
var targetElms = sceneElm.querySelectorAll('[data-clip-path]');
/* loop over candidates */
for (var i=0; i < targetElms.length; i++) {
hypeDocument.applyClipPathToElement(targetElms[i]);
}
}
/* hypeDocument function to apply a clip path (debounced to framerate) */
/* Updates are queued per element id in _updatesToRunOnTick and flushed by the
   requestAnimationFrame tick set up in sceneLoad. */
hypeDocument.applyClipPathToElement = function(targetElm){
if (targetElm.dataset.clipPath) {
if (!_updatesToRunOnTick[targetElm.id]) {
_updatesToRunOnTick[targetElm.id] = function(){
/* fetch scene sourceElm */
var sceneElm = hypeDocument.getCurrentSceneElement();
var sourceElm = sceneElm.querySelector(targetElm.dataset.clipPath);
/* if found apply it */
if (sourceElm) {
hypeDocument.generateClipPathForElement(sourceElm, targetElm);
} else {
//remove
removeClipPath(targetElm);
}
}
}
} else {
//remove
removeClipPath(targetElm);
}
}
/* hypeDocument function calculate transforms on a vector element and return an SVG compatible transform string */
/* we can't just clone the transforms from one to another node as SVG transforms have a different logic on the transform origin
If anybody knows a quicker way of doing this please contact me! Source: https://css-tricks.com/transforms-on-svg-elements */
hypeDocument.calculateAndStoreTransformForElement = function(sourceElm){
var hypeDocId = hypeDocument.documentId();
var sceneElm = hypeDocument.getCurrentSceneElement();
var transformLookup = _lookup[hypeDocId][sceneElm.id]['Transform'];
/* get offsets */
var sourceLeft = hypeDocument.getElementProperty(sourceElm, 'left');
var sourceTop = hypeDocument.getElementProperty(sourceElm, 'top');
/* offsets */
var offsetX = sourceLeft;
var offsetY = sourceTop;
var originOffsetX = 0;
var originOffsetY = 0;
/* scale */
var sourceScaleX = hypeDocument.getElementProperty(sourceElm, 'scaleX');
var sourceScaleY = hypeDocument.getElementProperty(sourceElm, 'scaleY');
/* rotation */
var sourceRotate = hypeDocument.getElementProperty(sourceElm, 'rotateZ') || 0;
/* store for later use source — this snapshot is what processingStyleDemandsUpdate compares against */
transformLookup[sourceElm.id] = {
'left': sourceLeft,
'top': sourceTop,
'scaleX': sourceScaleX,
'scaleY': sourceScaleY,
'rotateZ': sourceRotate
};
/* transformOrigin */
/* NOTE(review): assumes style.transformOrigin holds percentage values
   (defaults to 50% 50%) — confirm behavior for px-based origins */
if (sourceRotate!=0 || sourceScaleX!=1 || sourceScaleY!=1) {
var sourceWidth = hypeDocument.getElementProperty(sourceElm, 'width');
var sourceHeight = hypeDocument.getElementProperty(sourceElm, 'height');
var transformOrigin = (sourceElm.style.transformOrigin) ? String(sourceElm.style.transformOrigin).split(' ') : [50,50];
originOffsetX = sourceWidth * parseFloat(transformOrigin[0]) / 100;
originOffsetY = sourceHeight * parseFloat(transformOrigin[1]) / 100;
}
/* queue transforms using unshift as they are counter intuitive applied in reverse in SVG */
var transform = [];
if (sourceScaleX!=1 || sourceScaleY!=1) {
transform.unshift('scale('+sourceScaleX+' '+sourceScaleY+')');
transform.unshift('translate('+(-originOffsetX*(sourceScaleX-1))+' '+(-originOffsetY*(sourceScaleY-1))+')');
}
if (sourceRotate) {
transform.unshift('rotate('+sourceRotate+' '+originOffsetX+' '+originOffsetY+')');
}
transform.unshift('translate('+offsetX+' '+offsetY+')');
/* return string */
return transform.join(' ');
}
/* hypeDocument function to apply a clip path (attention: not debounced) */
/* Side effects: sets overflow:hidden on the styled element and, outside the
   IDE, hides the source element unless data-clip-path-visible is set. */
hypeDocument.generateClipPathForElement = function(sourceElm, targetElm){
/* if source and target are defined process them */
if (sourceElm && targetElm) {
/* do stuff if source and target contain SVG */
if (sourceElm.querySelector('svg')) {
var applyElm = targetElm.classList.contains('HYPE_element_container') ? targetElm : targetElm.parentNode;
/* make sure we have a SVG as direct child */
switch (sourceElm.dataset.clipPathStyle) {
/* clip path using url (default) */
default:
var uniqueIdBase = "hype_clip_path_"+targetElm.getAttribute('id')+'_'+sourceElm.getAttribute('id');
var uniqueId = uniqueIdBase+'_'+(Math.ceil(Math.random()*100000+100000));
/* make sure we have a defs section (like on imported SVG from AI) */
if (!sourceElm.querySelector('svg > defs')) {
/* append defs */
sourceElm.querySelector('svg').appendChild(generateDefs());
} else if(_isHypeIDE) {
/* move defs to last position as the Hype IDE has bug and updates first path even if in defs */
sourceElm.querySelector('svg').appendChild(sourceElm.querySelector('svg > defs'));
}
/* append clip path node if needed */
var clipPathElm = sourceElm.querySelector('svg > defs > [id^='+uniqueIdBase+']');
if (!clipPathElm) {
clipPathElm = sourceElm.querySelector('svg > defs').appendChild(generateClipPath());
}
/* update clip path node */
updateClipPath (clipPathElm, {
clipPathAttributes: {
'id': uniqueId,
'shape-rendering': 'optimizeSpeed',
},
pathAttributes: {
'd' : sourceElm.querySelector('svg > path').getAttribute('d'),
'clip-rule': targetElm.dataset.clipPathClipRule,
'transform': hypeDocument.calculateAndStoreTransformForElement(sourceElm),
'shape-rendering': 'optimizeSpeed',
}
});
/* set clip path as CSS style to applyElm being targetElm or targetElm.parentNode */
applyElm.style.webkitClipPath = 'url("#'+uniqueId+'")';
applyElm.style.clipPath = 'url("#'+uniqueId+'")';
/* reverse lookup */
sourceElm.dataset.clipPathSelector = targetElm.dataset.clipPath;
forceRedraw(applyElm);
break;
}
/* as safari doesn't clip outside the bounds on groups let's remind people in chrome */
applyElm.style.overflow = 'hidden';
/* hide source element */
if (!_isHypeIDE && !sourceElm.dataset.clipPathVisible){
sourceElm.style.opacity = 0;
sourceElm.style.pointerEvents = 'none';
}
}
}
}
}
/* Create (or reuse) a scene-scoped MutationObserver keyed by options.mOiD and
   start observing `element` with the given observer options. The callback is
   invoked as callback(hypeDocument, mutation) for every recorded mutation. */
function setupObserver(hypeDocument, element, options) {
  var observers = _lookup[hypeDocument.documentId()][hypeDocument.getCurrentSceneElement().id]['Observer'];
  var key = options.mOiD;
  // Lazily construct one observer per key so repeated scene loads reuse it.
  if (!observers[key]) {
    observers[key] = new MutationObserver(function (mutations) {
      for (var i = 0; i < mutations.length; i++) {
        options.callback.call(null, hypeDocument, mutations[i]);
      }
    });
  }
  /* start monitoring for related changes */
  observers[key].observe(element, options);
}
/* Observer callback for masked target elements: reacts to on-the-fly changes
   of the data-clip-path and data-clip-path-clip-rule attributes. */
function callbackTargetProps(hypeDocument, mutation) {
  var attr = mutation.attributeName;
  if (attr === 'data-clip-path') {
    // Only reapply when the selector really changed (removal is handled inside).
    if (processingClipPathDemandsUpdate(mutation)) {
      hypeDocument.applyClipPathToElement(mutation.target);
    }
  } else if (attr === 'data-clip-path-clip-rule') {
    hypeDocument.applyClipPathToElement(mutation.target);
  }
}
/* Observer callback for mask source elements: when their inline style mutates
   in a way that changes the cached transform, reapply every referencing target. */
function callbackSourceProps(hypeDocument, mutation) {
  var scopeElm = _isHypeIDE ? document : hypeDocument.getCurrentSceneElement();
  if (mutation.attributeName !== 'style') return;
  if (!processingStyleDemandsUpdate(hypeDocument, mutation)) return;
  /* apply update to targets referenced by this source */
  var selector = mutation.target.dataset.clipPathSelector;
  var targets = scopeElm.querySelectorAll('[data-clip-path="' + selector + '"]');
  for (var i = 0; i < targets.length; i++) {
    hypeDocument.applyClipPathToElement(targets[i]); /* TODO only update transform not path data */
  }
}
/* Observer callback for the mask source's <path> node: a change to its "d"
   attribute triggers a reapply on every target referencing the owning source. */
function callbackPathProps(hypeDocument, mutation) {
  var scopeElm = _isHypeIDE ? document : hypeDocument.getCurrentSceneElement();
  if (mutation.attributeName !== 'd') return;
  // path -> svg -> source element carries the reverse-lookup selector
  var selector = mutation.target.parentNode.parentNode.dataset.clipPathSelector;
  if (!selector) return;
  var targets = scopeElm.querySelectorAll('[data-clip-path="' + selector + '"]');
  for (var i = 0; i < targets.length; i++) {
    hypeDocument.applyClipPathToElement(targets[i]);
  }
}
/* determine if there are changes in style (return boolean) */
/* Compares the cached transform snapshot of the mutated source element (taken
   in calculateAndStoreTransformForElement) against its live element properties;
   returns true as soon as any tracked property differs.
   Fix: dropped the dead local `update` that was declared but never used. */
function processingStyleDemandsUpdate(hypeDocument, mutation) {
  var hypeDocId = hypeDocument.documentId();
  var sceneElm = hypeDocument.getCurrentSceneElement();
  var transformLookup = _lookup[hypeDocId][sceneElm.id]['Transform'];
  var transform = transformLookup[mutation.target.id];
  if (transform) {
    for (var prop in transform) {
      if (transform[prop] != hypeDocument.getElementProperty(mutation.target, prop)) {
        return true;
      }
    }
  }
  // No snapshot yet, or nothing changed.
  return false;
}
/* determine if dataset.clipPath has changed (return boolean) */
/* Side effect: when the attribute was cleared (new value falsy), the clip path
   is removed here directly and false is returned so no re-apply is scheduled. */
function processingClipPathDemandsUpdate(mutation){
/* fetch values */
var newValue = mutation.target.dataset.clipPath;
var oldValue = mutation.oldValue;
/* if they differ act */
if (newValue != oldValue) {
/* is new value set */
if (newValue) {
/* apply new clip path */
return true;
} else {
/* else remove clip path */
removeClipPath(mutation.target)
}
}
return false;
}
/* Clear any clip-path applied by this extension. The style lives either on the
   target itself (HYPE groups) or on its parent wrapper (plain elements). */
function removeClipPath(targetElm) {
  var styledElm = targetElm.classList.contains('HYPE_element_container')
    ? targetElm
    : targetElm.parentNode;
  styledElm.style.webkitClipPath = null;
  styledElm.style.clipPath = null;
}
/* Force a synchronous reflow of the element so a freshly changed clip-path is
   picked up immediately (works around WebKit style caching).
   Fix: `void 0!=element.offsetHeight;` parsed as the no-op comparison
   `(void 0) != element.offsetHeight` — the intended idiom is a plain
   offsetHeight read, written unambiguously below. */
var forceRedraw = function (element) {
  var previousDisplay = element.style.display;
  element.style.display = 'none';
  // Reading offsetHeight while hidden forces layout; the value itself is unused.
  void element.offsetHeight;
  element.style.display = previousDisplay;
};
/* sceneLoad */
/* Per-scene setup: applies all clip paths once, wires mutation observers on
   every [data-clip-path] target (and its mask source + source <path>), and
   starts the requestAnimationFrame tick that flushes debounced updates.
   Fixes: the no-FPS tick assigned the undeclared global `tickRunning` instead
   of `_tickRunning`, so the re-entrancy guard never engaged; `now`/`elapsed`
   were implicit globals; removed the unused `startTime` local. */
function sceneLoad(hypeDocument, element, event) {
  /* make sure we have a scene specific storage */
  var hypeDocId = hypeDocument.documentId();
  /* fetch fresh scene element */
  var sceneElm = hypeDocument.getCurrentSceneElement();
  if (!_lookup[hypeDocId][sceneElm.id]) {
    _lookup[hypeDocId][sceneElm.id] = {};
    _lookup[hypeDocId][sceneElm.id]['Observer'] = {};
    _lookup[hypeDocId][sceneElm.id]['Transform'] = {};
  }
  /* initial apply */
  hypeDocument.applyClipPaths();
  /* fetch candidates */
  var targetElms = sceneElm.querySelectorAll('[data-clip-path]');
  /* cancel any running ticks */
  if (_tickId) window.cancelAnimationFrame(_tickId);
  /* loop over candidates if we have any */
  if (Object.keys(targetElms).length) {
    for (var i = 0; i < targetElms.length; i++) {
      /* skip observer setup if marked static (always observe inside the IDE) */
      if (!targetElms[i].hasAttribute('data-clip-path-static') || _isHypeIDE) {
        /* observe target (masked element/group) for dataset changes */
        setupObserver(hypeDocument, targetElms[i], {
          attributes: true,
          attributeOldValue: true,
          attributeFilter: ['data-clip-path', 'data-clip-path-clip-rule'],
          mOiD: targetElms[i].id,
          callback: callbackTargetProps
        });
        /* observe source (mask path) if clipPath is set and found */
        if (targetElms[i].dataset.clipPath) {
          var sourceElm = sceneElm.querySelector(targetElms[i].dataset.clipPath);
          if (sourceElm) {
            setupObserver(hypeDocument, sourceElm, {
              attributes: true,
              attributeOldValue: true,
              attributeFilter: ['style'],
              mOiD: sourceElm.id,
              callback: callbackSourceProps
            });
            /* in the IDE elements are addressed by hypeobjectid instead of id */
            var query = _isHypeIDE ? '[hypeobjectid="' + sourceElm.getAttribute('hypeobjectid') + '"] > svg > path' : '#' + sourceElm.id + ' > svg > path';
            setupObserver(hypeDocument, document.querySelector(query), {
              attributes: true,
              attributeOldValue: true,
              attributeFilter: ["d"],
              mOiD: sourceElm.id + '_path',
              callback: callbackPathProps
            });
          }
        }
      }
    }
    /* setup new tick debouncer: throttled to _FPS when set, else every frame */
    var tick;
    if (_FPS) {
      var fpsInterval = 1000 / _FPS;
      var then = -1000;
      tick = function () {
        if (!_tickRunning) {
          /* BUGFIX: now/elapsed were implicit globals */
          var now = performance.now();
          var elapsed = now - then;
          if (elapsed > fpsInterval) {
            _tickRunning = true;
            then = now - (elapsed % fpsInterval);
            for (var id in _updatesToRunOnTick) {
              _updatesToRunOnTick[id]();
            }
            _updatesToRunOnTick = {};
            _tickRunning = false;
          }
        }
        _tickId = window.requestAnimationFrame(tick);
      };
    } else {
      tick = function () {
        if (!_tickRunning) {
          /* BUGFIX: was assigning the undeclared global `tickRunning`,
             so the re-entrancy guard never engaged */
          _tickRunning = true;
          for (var id in _updatesToRunOnTick) {
            _updatesToRunOnTick[id]();
          }
          _updatesToRunOnTick = {};
          _tickRunning = false;
        }
        _tickId = window.requestAnimationFrame(tick);
      };
    }
    /* start tick (inside the IDE defer one frame) */
    if (_isHypeIDE) {
      window.requestAnimationFrame(tick);
    } else {
      tick();
    }
  }
}
/* Public API: throttle the debounced clip-path updates to a fixed frame rate.
   Accepts 1-59; any other value (including NaN) restores per-frame updates. */
function setFramesPerSecond(FPS) {
  FPS = parseInt(FPS, 10); // explicit radix so string input is always decimal
  _FPS = (FPS > 0 && FPS < 60) ? FPS : null;
}
/* sceneUnload */
/* Tears down the scene: disconnects all mutation observers registered in
   sceneLoad and drops the cached scene element so the next scene re-resolves it.
   Fix: the function identifier was missing (`function | (…)`) — restored as
   `sceneUnload`, matching the direct call in the IDE shim and the
   HypeSceneUnload event registration below. */
function sceneUnload(hypeDocument, element, event) {
  /* disconnect mutation observers */
  var hypeDocId = hypeDocument.documentId();
  var sceneElm = hypeDocument.getCurrentSceneElement();
  var observerLookup = _lookup[hypeDocId][sceneElm.id]['Observer'];
  for (var mOiD in observerLookup) {
    observerLookup[mOiD].disconnect();
    /* inside the IDE observers are rebuilt from scratch on every change */
    if (_isHypeIDE) delete (observerLookup[mOiD]);
  }
  /* delete cached scene element so a fresh one is resolved on next access */
  delete _lookup[hypeDocId]['currentSceneElm'];
}
/* parse transforms helper for IDE */
/* Turns a CSS transform string like "scale(2, 3) rotate(45deg)" into a map of
   function name -> array of argument strings, e.g. { scale: ['2','3'] }. */
function parse_transform(a) {
  var result = {};
  var calls = a.match(/(\w+)\(([^,)]+),?([^)]+)?\)/gi);
  if (calls) {
    for (var idx = 0; idx < calls.length; idx++) {
      var tokens = calls[idx].match(/[\w\.\-]+/g);
      result[tokens.shift()] = tokens;
    }
  }
  return result;
}
/* IDE preview -- START */
/* Live-preview shim for the Hype editor: fakes a minimal hypeDocument (reading
   element properties from the IDE's hypeattribute* markup), replays the load
   events, rewrites the editor's CSS zoom into transforms, and rebuilds all
   observers whenever data-clip-path attributes change in the build view. */
window.addEventListener("DOMContentLoaded", function(event) {
if (_isHypeIDE && supportsClipPath()) {
/* make a fake hypeDocument (IDE) version */
var hypeDocument = {
getElementProperty: function(elm, prop){
switch (prop){ /* TODO WebkitMatrix lookups (although they are influenced by rotation) rather use upcoming hypeattributescalex/y */
case 'left': return parseFloat(elm.getAttribute('hypeattributeleft')); break;
case 'top': return parseFloat(elm.getAttribute('hypeattributetop')); break;
case 'rotateZ': return parseFloat(elm.getAttribute('hypeattributerotationanglez')); break;
case 'width': return parseFloat(elm.style.width); break;
case 'height': return parseFloat(elm.style.height); break;
case 'scaleX': var transform = parse_transform(elm.style.transform); return transform.scaleX ? parseFloat(transform.scaleX): 1; break;
case 'scaleY': var transform = parse_transform(elm.style.transform); return transform.scaleY ? parseFloat(transform.scaleY): 1; break;
}
},
documentId: function(){
return 'hypeDocument'
}
};
/* fake a HypeDocumentLoad event */
extendHype(hypeDocument);
/* overwrite extensions that need tweaking in the IDE environment */
hypeDocument.getCurrentSceneElement = function(){
return document.getElementById('HypeMainContentDiv');
}
/* fake a HypeSceneLoad event */
sceneLoad(hypeDocument);
/* temporary workaround as long as the IDE uses zoom on 100% and plus */
/* (clip-path does not follow CSS zoom, so zoom is converted to scale()) */
var zoomCorrector = function(mutations) {
mutations.forEach(function(mutation) {
if (mutation.type == 'attributes') {
if (mutation.attributeName == 'style') {
var zoom = mutation.target.style.zoom;
if (zoom){
mutation.target.style.zoom = null;
mutation.target.style.transform = 'scale('+zoom+', '+zoom+')';
mutation.target.style.transformOrigin = 'left top';
}
}
}
});
}
/* fix zoom in IDE to only use transforms */
var zoomObserver = new MutationObserver(zoomCorrector);
var HypeSceneEditorElm = document.getElementById('HypeSceneEditor');
zoomObserver.observe(HypeSceneEditorElm, {
attributes: true,
attributeOldValue: true,
attributeFilter: [ "style"]
});
/* trigger an initial zoom event */
zoomCorrector([{
target: HypeSceneEditorElm,
type : 'attributes',
attributeName : 'style'
}]);
/* track changes */
var changeObserver = new MutationObserver(function(mutations) {
mutations.forEach(function(mutation) {
/* detection of removal of attribute data-clip-path in IDE */
if (!mutation.target.hasAttribute('data-clip-path')) {
removeClipPath(mutation.target);
}
});
/* delay because existing observers need to run before being reset */
setTimeout(function(){
sceneUnload(hypeDocument);
sceneLoad(hypeDocument);
},1);
});
/* wait for Hype IDE to add build view */
changeObserver.observe(hypeDocument.getCurrentSceneElement(), {
attributes: true,
attributeOldValue: true,
subtree: true,
attributeFilter: ["data-clip-path"],
});
} else{
/* not Hype IDE or doesn't support clip path so let's set up some rules to help with these legacy browsers */
if (!supportsClipPath()) {
document.styleSheets[0].insertRule('.hideIfClipPathNotSupported {display:none!important;}',0);
document.styleSheets[0].insertRule('.showIfClipPathNotSupported {display:block!important;}',1);
}
}
});
/* IDE preview -- END */
/* setup callbacks */
if (supportsClipPath()){
if("HYPE_eventListeners" in window === false) { window.HYPE_eventListeners = Array();}
window.HYPE_eventListeners.push({"type":"HypeDocumentLoad", "callback": extendHype});
window.HYPE_eventListeners.push({"type":"HypeSceneLoad", "callback": sceneLoad});
window.HYPE_eventListeners.push({"type":"HypeSceneUnload", "callback": sceneUnload});
}
/* Reveal Public interface to window['HypeClipPath'] */
return {
version: '1.7.2',
'supportsClipPath': supportsClipPath,
'setFramesPerSecond': setFramesPerSecond
};
})(); | sceneUnload | identifier_name |
HypeClipPath.js | /*!
Hype ClipPath 1.7.2
copyright (c) 2021 Max Ziebell, (https://maxziebell.de). MIT-license
*/
/*
* Version-History
* 0.9 (Beta) Initial release under MIT-license
* 1.0 First official release, inverted logic and still a function
* 1.1 With a little refactoring HTML clipping is supported, limitations apply
* 1.2 Fixed a bug determining if it's a SVG or Group
* 1.3 Converted to full extension. Added new methods on hypeDocument
* 1.4 Added live preview in IDE
* 1.5 Fixed some preview issues (nudging, delay)
* 1.6 Fixed some preview issues (zoom, at the cost of antialias)
* 1.7 Using Mutation Observer (not only IDE), debouncing and performance update
* 1.7.1 fixed Safari update bug
* 1.7.2 fixed querySelector bug (thanks to michelangelo)
*/
if("HypeClipPath" in window === false) window['HypeClipPath'] = (function () {
var kSvgNS = 'http://www.w3.org/2000/svg';
/* debounce updates to frames (see sceneLoad) */
var _tickId;
var _tickRunning = false;
var _updatesToRunOnTick = {};
/* @const */
const _isHypeIDE = window.location.href.indexOf("/Hype/Scratch/HypeScratch.") != -1;
var _lookup = {};
/* FPS */
var _FPS;
/* Compability */
var _supportsClipPath = false;
if(window.CSS && window.CSS.supports){
_supportsClipPath = CSS.supports("clip-path", "url(#test)");
}
function supportsClipPath() {
return _supportsClipPath;
}
/* clip path generator function */
function generateClipPath(){
/* create clip and path node */
var clipPathElm = document.createElementNS(kSvgNS, 'clipPath');
var pathElm = document.createElementNS(kSvgNS, 'path');
/* append path data to clip path */
clipPathElm.appendChild(pathElm);
/* return our clip path for further processing */
return clipPathElm;
}
/* clip path update function */
/* Fix: this copy was truncated after `if (obj.clipPathAttributes[name])` —
   the setAttribute block was missing, leaving a syntax error. Restored to
   mirror the attribute copy performed for pathAttributes above. */
function updateClipPath(clipPathElm, obj) {
  /* fetch path node */
  var pathElm = clipPathElm.querySelector('path');
  /* set attributes, transfer path data (skip falsy values) */
  for (var name in obj.pathAttributes) {
    if (obj.pathAttributes[name]) {
      pathElm.setAttribute(name, obj.pathAttributes[name]);
    }
  }
  /* assign unique id and rendering hints to the clip path node itself */
  for (var name in obj.clipPathAttributes) {
    if (obj.clipPathAttributes[name]) {
      clipPathElm.setAttribute(name, obj.clipPathAttributes[name]);
    }
  }
}
/* defs generator function */
function generateDefs(){
/* create and return defs node */
var defsElm = document.createElementNS(kSvgNS, 'defs');
return defsElm;
}
/* extend Hype */
function extendHype(hypeDocument, element, event) {
/* init document specific lookup for mutation observer */
var hypeDocId = hypeDocument.documentId();
_lookup[hypeDocId] = {};
/* hypeDocument function to get cached current scene (caching depends on reset of cache in sceneUnload) */
hypeDocument.getCurrentSceneElement = function(){
if (_lookup[hypeDocId]['currentSceneElm']==undefined){
_lookup[hypeDocId]['currentSceneElm'] = document.querySelector('#'+hypeDocument.documentId()+' > .HYPE_scene[style*="block"]');
}
return _lookup[hypeDocId]['currentSceneElm'];
}
/* hypeDocument function to apply ALL clip path in scene (debounced to framerate) */
hypeDocument.applyClipPaths = function(){
/* fetch scene */
var sceneElm = hypeDocument.getCurrentSceneElement();
/* fetch candidates and loop over them */
var targetElms = sceneElm.querySelectorAll('[data-clip-path]');
/* loop over candidates */
for (var i=0; i < targetElms.length; i++) {
hypeDocument.applyClipPathToElement(targetElms[i]);
}
}
/* hypeDocument function to apply a clip path (debounced to framerate) */
hypeDocument.applyClipPathToElement = function(targetElm){
if (targetElm.dataset.clipPath) {
if (!_updatesToRunOnTick[targetElm.id]) {
_updatesToRunOnTick[targetElm.id] = function(){
/* fetch scene sourceElm */
var sceneElm = hypeDocument.getCurrentSceneElement();
var sourceElm = sceneElm.querySelector(targetElm.dataset.clipPath);
/* if found apply it */
if (sourceElm) {
hypeDocument.generateClipPathForElement(sourceElm, targetElm);
} else {
//remove
removeClipPath(targetElm);
}
}
}
} else {
//remove
removeClipPath(targetElm);
}
}
/* hypeDocument function calculate transforms on a vector element and return an SVG compatible transform string */
/* we can't just clone the transforms from one to another node as SVG transforms have a diffrent logic on the transform origin
If anybody knows a quicker way of doing this please contact me! Source: https://css-tricks.com/transforms-on-svg-elements */
hypeDocument.calculateAndStoreTransformForElement = function(sourceElm){
var hypeDocId = hypeDocument.documentId();
var sceneElm = hypeDocument.getCurrentSceneElement();
var transformLookup = _lookup[hypeDocId][sceneElm.id]['Transform'];
/* get offsets */
var sourceLeft = hypeDocument.getElementProperty(sourceElm, 'left');
var sourceTop = hypeDocument.getElementProperty(sourceElm, 'top');
/* offsets */
var offsetX = sourceLeft;
var offsetY = sourceTop;
var originOffsetX = 0;
var originOffsetY = 0;
/* scale */
var sourceScaleX = hypeDocument.getElementProperty(sourceElm, 'scaleX');
var sourceScaleY = hypeDocument.getElementProperty(sourceElm, 'scaleY');
/* rotation */
var sourceRotate = hypeDocument.getElementProperty(sourceElm, 'rotateZ') || 0;
/* store for later use source */
transformLookup[sourceElm.id] = {
'left': sourceLeft,
'top': sourceTop,
'scaleX': sourceScaleX,
'scaleY': sourceScaleY,
'rotateZ': sourceRotate
};
/* transformOrigin */
if (sourceRotate!=0 || sourceScaleX!=1 || sourceScaleY!=1) {
var sourceWidth = hypeDocument.getElementProperty(sourceElm, 'width');
var sourceHeight = hypeDocument.getElementProperty(sourceElm, 'height');
var transformOrigin = (sourceElm.style.transformOrigin) ? String(sourceElm.style.transformOrigin).split(' ') : [50,50];
originOffsetX = sourceWidth * parseFloat(transformOrigin[0]) / 100;
originOffsetY = sourceHeight * parseFloat(transformOrigin[1]) / 100;
}
/* queue transforms using unshift as they are counter intuitive applied in reverse in SVG */
var transform = [];
if (sourceScaleX!=1 || sourceScaleY!=1) {
transform.unshift('scale('+sourceScaleX+' '+sourceScaleY+')');
transform.unshift('translate('+(-originOffsetX*(sourceScaleX-1))+' '+(-originOffsetY*(sourceScaleY-1))+')');
}
if (sourceRotate) {
transform.unshift('rotate('+sourceRotate+' '+originOffsetX+' '+originOffsetY+')');
}
transform.unshift('translate('+offsetX+' '+offsetY+')');
/* return string */
return transform.join(' ');
}
/* hypeDocument function to apply a clip path (attention: not debounced) */
hypeDocument.generateClipPathForElement = function(sourceElm, targetElm){
/* if source and target are defined process them */
if (sourceElm && targetElm) {
/* do stuff if source and target contain SVG */
if (sourceElm.querySelector('svg')) {
var applyElm = targetElm.classList.contains('HYPE_element_container') ? targetElm : targetElm.parentNode;
/* make sure we have a SVG as direct child */
switch (sourceElm.dataset.clipPathStyle) {
/* clip path using url (default) */
default:
var uniqueIdBase = "hype_clip_path_"+targetElm.getAttribute('id')+'_'+sourceElm.getAttribute('id');
var uniqueId = uniqueIdBase+'_'+(Math.ceil(Math.random()*100000+100000));
/* make sure we have a defs section (like on imported SVG from AI) */
if (!sourceElm.querySelector('svg > defs')) {
/* append defs */
sourceElm.querySelector('svg').appendChild(generateDefs());
} else if(_isHypeIDE) {
/* move defs to last position as the Hype IDE has bug and updates first path even if in defs */
sourceElm.querySelector('svg').appendChild(sourceElm.querySelector('svg > defs'));
}
/* append clip path node if needed */
var clipPathElm = sourceElm.querySelector('svg > defs > [id^='+uniqueIdBase+']');
if (!clipPathElm) {
clipPathElm = sourceElm.querySelector('svg > defs').appendChild(generateClipPath());
}
/* update clip path node */
updateClipPath (clipPathElm, {
clipPathAttributes: {
'id': uniqueId,
'shape-rendering': 'optimizeSpeed',
},
pathAttributes: {
'd' : sourceElm.querySelector('svg > path').getAttribute('d'),
'clip-rule': targetElm.dataset.clipPathClipRule,
'transform': hypeDocument.calculateAndStoreTransformForElement(sourceElm),
'shape-rendering': 'optimizeSpeed',
}
});
/* set clip path as CSS style to applyElm being targetElm or targetElm.parentNode */
applyElm.style.webkitClipPath = 'url("#'+uniqueId+'")';
applyElm.style.clipPath = 'url("#'+uniqueId+'")';
/* reverse lookup */
sourceElm.dataset.clipPathSelector = targetElm.dataset.clipPath;
forceRedraw(applyElm);
break;
}
/* as safari doesn't clip outside the bounds on groups let's remind people in chrome */
applyElm.style.overflow = 'hidden';
/* hide source element */
if (!_isHypeIDE && !sourceElm.dataset.clipPathVisible){
sourceElm.style.opacity = 0;
sourceElm.style.pointerEvents = 'none';
}
}
}
}
}
/* function to setup a mutation observer */
function setupObserver (hypeDocument, element, options){
var hypeDocId = hypeDocument.documentId();
var sceneElm = hypeDocument.getCurrentSceneElement();
var observerLookup = _lookup[hypeDocId][sceneElm.id]['Observer'];
if (!observerLookup[options.mOiD]) {
observerLookup[options.mOiD] = new MutationObserver(function(mutations) {
mutations.forEach(function(mutation) {
options.callback.call(null, hypeDocument, mutation);
});
});
}
/* start monitoring for related changes */
observerLookup[options.mOiD].observe(element, options);
}
/* callback for a mutation observer on target nodes (on the fly changes of dataset attributes) */
function callbackTargetProps(hypeDocument, mutation){
/* clip path attribute was mingled with */
switch (mutation.attributeName) {
case 'data-clip-path':
if(processingClipPathDemandsUpdate(mutation)){
hypeDocument.applyClipPathToElement(mutation.target);
}
break;
case 'data-clip-path-clip-rule':
hypeDocument.applyClipPathToElement(mutation.target);
break;
}
}
/* callback for a mutation observer on target nodes (montoring transform changes) */
function callbackSourceProps(hypeDocument, mutation){
var sceneElm = _isHypeIDE? document : hypeDocument.getCurrentSceneElement();
/* clip path attribute was mingled with */
switch (mutation.attributeName) {
case 'style':
if (processingStyleDemandsUpdate(hypeDocument, mutation)){
/* apply update to targets referenced by this source */
var selector = mutation.target.dataset.clipPathSelector;
var targetElms = sceneElm.querySelectorAll ('[data-clip-path="'+selector+'"]');
for (var i=0; i < targetElms.length; i++) {
hypeDocument.applyClipPathToElement(targetElms[i]); /* TODO only update transform not path data */
}
}
break;
}
}
/* callback for a mutation observer on target nodes (path updates) */
function callbackPathProps(hypeDocument, mutation){
var sceneElm = _isHypeIDE? document : hypeDocument.getCurrentSceneElement();
switch (mutation.attributeName) {
case 'd':
var selector = mutation.target.parentNode.parentNode.dataset.clipPathSelector;
if (selector) {
var targetElms = sceneElm.querySelectorAll ('[data-clip-path="'+selector+'"]');
for (var i=0; i < targetElms.length; i++) {
hypeDocument.applyClipPathToElement(targetElms[i]);
}
}
break;
}
}
/* determine if there are change in style (return boolean) */
function processingStyleDemandsUpdate(hypeDocument, mutation){
var hypeDocId = hypeDocument.documentId();
var sceneElm = hypeDocument.getCurrentSceneElement();
var transformLookup = _lookup[hypeDocId][sceneElm.id]['Transform'];
var transform = transformLookup[mutation.target.id];
var update = false;
if (transform) {
for (var prop in transform){
if (transform[prop] != hypeDocument.getElementProperty(mutation.target, prop)){
return true;
}
}
}
return false;
}
/* determine if dataset.clipPath has changes (return boolean) */
function processingClipPathDemandsUpdate(mutation){
/* fetche values */
var newValue = mutation.target.dataset.clipPath;
var oldValue = mutation.oldValue;
/* if they differ act */
if (newValue != oldValue) {
/* is new value set */
if (newValue) {
/* apply new clip path */
return true;
} else {
/* else remove clip path */
removeClipPath(mutation.target)
}
}
return false;
}
function removeClipPath (targetElm){
var applyElm = targetElm.classList.contains('HYPE_element_container') ? targetElm : targetElm.parentNode;
applyElm.style.webkitClipPath = null;
applyElm.style.clipPath = null;
}
var forceRedraw = function(element){
var disp = element.style.display;
element.style.display = 'none';
void 0!=element.offsetHeight;
element.style.display = disp;
};
/* sceneLoad */
/* Per-scene setup: applies all clip paths once, wires mutation observers on
   every [data-clip-path] target (and its mask source + source <path>), and
   starts the requestAnimationFrame tick that flushes debounced updates.
   Fixes: the no-FPS tick assigned the undeclared global `tickRunning` instead
   of `_tickRunning`, so the re-entrancy guard never engaged; `now`/`elapsed`
   were implicit globals; removed the unused `startTime` local. */
function sceneLoad(hypeDocument, element, event) {
  /* make sure we have a scene specific storage */
  var hypeDocId = hypeDocument.documentId();
  /* fetch fresh scene element */
  var sceneElm = hypeDocument.getCurrentSceneElement();
  if (!_lookup[hypeDocId][sceneElm.id]) {
    _lookup[hypeDocId][sceneElm.id] = {};
    _lookup[hypeDocId][sceneElm.id]['Observer'] = {};
    _lookup[hypeDocId][sceneElm.id]['Transform'] = {};
  }
  /* initial apply */
  hypeDocument.applyClipPaths();
  /* fetch candidates */
  var targetElms = sceneElm.querySelectorAll('[data-clip-path]');
  /* cancel any running ticks */
  if (_tickId) window.cancelAnimationFrame(_tickId);
  /* loop over candidates if we have any */
  if (Object.keys(targetElms).length) {
    for (var i = 0; i < targetElms.length; i++) {
      /* skip observer setup if marked static (always observe inside the IDE) */
      if (!targetElms[i].hasAttribute('data-clip-path-static') || _isHypeIDE) {
        /* observe target (masked element/group) for dataset changes */
        setupObserver(hypeDocument, targetElms[i], {
          attributes: true,
          attributeOldValue: true,
          attributeFilter: ['data-clip-path', 'data-clip-path-clip-rule'],
          mOiD: targetElms[i].id,
          callback: callbackTargetProps
        });
        /* observe source (mask path) if clipPath is set and found */
        if (targetElms[i].dataset.clipPath) {
          var sourceElm = sceneElm.querySelector(targetElms[i].dataset.clipPath);
          if (sourceElm) {
            setupObserver(hypeDocument, sourceElm, {
              attributes: true,
              attributeOldValue: true,
              attributeFilter: ['style'],
              mOiD: sourceElm.id,
              callback: callbackSourceProps
            });
            /* in the IDE elements are addressed by hypeobjectid instead of id */
            var query = _isHypeIDE ? '[hypeobjectid="' + sourceElm.getAttribute('hypeobjectid') + '"] > svg > path' : '#' + sourceElm.id + ' > svg > path';
            setupObserver(hypeDocument, document.querySelector(query), {
              attributes: true,
              attributeOldValue: true,
              attributeFilter: ["d"],
              mOiD: sourceElm.id + '_path',
              callback: callbackPathProps
            });
          }
        }
      }
    }
    /* setup new tick debouncer: throttled to _FPS when set, else every frame */
    var tick;
    if (_FPS) {
      var fpsInterval = 1000 / _FPS;
      var then = -1000;
      tick = function () {
        if (!_tickRunning) {
          /* BUGFIX: now/elapsed were implicit globals */
          var now = performance.now();
          var elapsed = now - then;
          if (elapsed > fpsInterval) {
            _tickRunning = true;
            then = now - (elapsed % fpsInterval);
            for (var id in _updatesToRunOnTick) {
              _updatesToRunOnTick[id]();
            }
            _updatesToRunOnTick = {};
            _tickRunning = false;
          }
        }
        _tickId = window.requestAnimationFrame(tick);
      };
    } else {
      tick = function () {
        if (!_tickRunning) {
          /* BUGFIX: was assigning the undeclared global `tickRunning`,
             so the re-entrancy guard never engaged */
          _tickRunning = true;
          for (var id in _updatesToRunOnTick) {
            _updatesToRunOnTick[id]();
          }
          _updatesToRunOnTick = {};
          _tickRunning = false;
        }
        _tickId = window.requestAnimationFrame(tick);
      };
    }
    /* start tick (inside the IDE defer one frame) */
    if (_isHypeIDE) {
      window.requestAnimationFrame(tick);
    } else {
      tick();
    }
  }
}
function setFramesPerSecond (FPS){
FPS = parseInt(FPS);
_FPS = (FPS>0 && FPS<60) ? FPS : null;
}
/* sceneUnload */
function sceneUnload(hypeDocument, element, event) {
/* disconnect mutation observer */
var hypeDocId = hypeDocument.documentId();
var sceneElm = hypeDocument.getCurrentSceneElement();
var observerLookup = _lookup[hypeDocId][sceneElm.id]['Observer'];
for (var mOiD in observerLookup) {
observerLookup[mOiD].disconnect();
if(_isHypeIDE) delete(observerLookup[mOiD]);
}
/* delete cache version so a new one is generated */
delete _lookup[hypeDocId]['currentSceneElm'];
}
/* parse transforms helper for IDE */
function parse_transform(a) {
var b = {};
for (var i in a = a.match(/(\w+)\(([^,)]+),?([^)]+)?\)/gi)) {
var c = a[i].match(/[\w\.\-]+/g);
b[c.shift()] = c;
}
return b;
}
/* IDE preview -- START */
window.addEventListener("DOMContentLoaded", function(event) {
if (_isHypeIDE && supportsClipPath()) {
/* make a fake hypeDocument (IDE) version */
var hypeDocument = {
getElementProperty: function(elm, prop){
switch (prop){ /* TODO WebkitMatrix lookups (although they are influenced by rotation) rather use upcoming hypeattributescalex/y */
case 'left': return parseFloat(elm.getAttribute('hypeattributeleft')); break;
case 'top': return parseFloat(elm.getAttribute('hypeattributetop')); break;
case 'rotateZ': return parseFloat(elm.getAttribute('hypeattributerotationanglez')); break;
case 'width': return parseFloat(elm.style.width); break;
case 'height': return parseFloat(elm.style.height); break;
case 'scaleX': var transform = parse_transform(elm.style.transform); return transform.scaleX ? parseFloat(transform.scaleX): 1; break;
case 'scaleY': var transform = parse_transform(elm.style.transform); return transform.scaleY ? parseFloat(transform.scaleY): 1; break;
}
},
documentId: function(){
return 'hypeDocument'
}
};
/* fake a HypeDocumentLoad event */
extendHype(hypeDocument);
/* overwrite extentions that need tweaking in IDE enviroment */
hypeDocument.getCurrentSceneElement = function(){
return document.getElementById('HypeMainContentDiv');
}
/* fake a HypeSceneLoad event */
sceneLoad(hypeDocument);
/* temporary workaround as long as the IDE uses zoom on 100% and plus */
var zoomCorrector = function(mutations) {
mutations.forEach(function(mutation) {
if (mutation.type == 'attributes') {
if (mutation.attributeName == 'style') {
var zoom = mutation.target.style.zoom;
if (zoom){
mutation.target.style.zoom = null;
mutation.target.style.transform = 'scale('+zoom+', '+zoom+')';
mutation.target.style.transformOrigin = 'left top';
}
}
}
});
}
/* fix zoom in IDE to only use transforms */
var zoomObserver = new MutationObserver(zoomCorrector);
var HypeSceneEditorElm = document.getElementById('HypeSceneEditor');
zoomObserver.observe(HypeSceneEditorElm, {
attributes: true,
attributeOldValue: true,
attributeFilter: [ "style"]
});
/* trigger an initial zoom event */
zoomCorrector([{
target: HypeSceneEditorElm,
type : 'attributes',
attributeName : 'style'
}]);
/* track changes */
var changeObserver = new MutationObserver(function(mutations) {
mutations.forEach(function(mutation) {
/* detection of removal of attribute data-clip-path in IDE */
if (!mutation.target.hasAttribute('data-clip-path')) {
removeClipPath(mutation.target);
}
});
/* delay because existing observers need to run before being reset */
setTimeout(function(){
sceneUnload(hypeDocument);
sceneLoad(hypeDocument);
},1);
});
/* wait for Hype IDE to add build view */
changeObserver.observe(hypeDocument.getCurrentSceneElement(), {
attributes: true,
attributeOldValue: true,
subtree: true,
attributeFilter: ["data-clip-path"],
});
} else{
/* not Hype IDE or doesn't support clip path so let's set up some rules to help with these legacy browsers */
if (!supportsClipPath()) {
document.styleSheets[0].insertRule('.hideIfClipPathNotSupported {display:none!important;}',0);
document.styleSheets[0].insertRule('.showIfClipPathNotSupported {display:block!important;}',1);
}
}
});
/* IDE preview -- END */
/* setup callbacks */
if (supportsClipPath()){
if("HYPE_eventListeners" in window === false) { window.HYPE_eventListeners = Array();}
window.HYPE_eventListeners.push({"type":"HypeDocumentLoad", "callback": extendHype});
window.HYPE_eventListeners.push({"type":"HypeSceneLoad", "callback": sceneLoad});
window.HYPE_eventListeners.push({"type":"HypeSceneUnload", "callback": sceneUnload});
}
/* Reveal Public interface to window['HypeClipPath'] */
return {
version: '1.7.2',
'supportsClipPath': supportsClipPath,
'setFramesPerSecond': setFramesPerSecond
};
})(); | {
clipPathElm.setAttribute(name, obj.clipPathAttributes[name]);
} | conditional_block |
HypeClipPath.js | /*!
Hype ClipPath 1.7.2
copyright (c) 2021 Max Ziebell, (https://maxziebell.de). MIT-license
*/
/*
* Version-History
* 0.9 (Beta) Initial release under MIT-license
* 1.0 First official release, inverted logic and still a function
* 1.1 With a little refactoring HTML clipping is supported, limitations apply
* 1.2 Fixed a bug determining if it's a SVG or Group
* 1.3 Converted to full extension. Added new methods on hypeDocument
* 1.4 Added live preview in IDE
* 1.5 Fixed some preview issues (nudging, delay)
* 1.6 Fixed some preview issues (zoom, at the cost of antialias)
* 1.7 Using Mutation Observer (not only IDE), debouncing and performance update
* 1.7.1 fixed Safari update bug
* 1.7.2 fixed querySelector bug (thanks to michelangelo)
*/
if("HypeClipPath" in window === false) window['HypeClipPath'] = (function () {
var kSvgNS = 'http://www.w3.org/2000/svg';
/* debounce updates to frames (see sceneLoad) */
var _tickId;
var _tickRunning = false;
var _updatesToRunOnTick = {};
/* @const */
const _isHypeIDE = window.location.href.indexOf("/Hype/Scratch/HypeScratch.") != -1;
var _lookup = {};
/* FPS */
var _FPS;
/* Compability */
var _supportsClipPath = false;
if(window.CSS && window.CSS.supports){
_supportsClipPath = CSS.supports("clip-path", "url(#test)");
}
function supportsClipPath() {
return _supportsClipPath;
}
/* clip path generator function */
function generateClipPath(){
/* create clip and path node */
var clipPathElm = document.createElementNS(kSvgNS, 'clipPath');
var pathElm = document.createElementNS(kSvgNS, 'path');
/* append path data to clip path */
clipPathElm.appendChild(pathElm);
/* return our clip path for further processing */
return clipPathElm;
}
/* clip path update function */
function updateClipPath(clipPathElm, obj){
/* fetch path node */
var pathElm = clipPathElm.querySelector('path');
/* set attributes, transfer path data */
for(var name in obj.pathAttributes){
if (obj.pathAttributes[name]) {
pathElm.setAttribute(name, obj.pathAttributes[name]);
}
}
/* assign unique id to clip path and offset */
for(var name in obj.clipPathAttributes){
if (obj.clipPathAttributes[name]) {
clipPathElm.setAttribute(name, obj.clipPathAttributes[name]);
}
}
}
/* defs generator function */
function generateDefs(){
/* create and return defs node */
var defsElm = document.createElementNS(kSvgNS, 'defs');
return defsElm;
}
/* extend Hype */
function extendHype(hypeDocument, element, event) {
/* init document specific lookup for mutation observer */
var hypeDocId = hypeDocument.documentId();
_lookup[hypeDocId] = {};
/* hypeDocument function to get cached current scene (caching depends on reset of cache in sceneUnload) */
hypeDocument.getCurrentSceneElement = function(){
if (_lookup[hypeDocId]['currentSceneElm']==undefined){
_lookup[hypeDocId]['currentSceneElm'] = document.querySelector('#'+hypeDocument.documentId()+' > .HYPE_scene[style*="block"]');
}
return _lookup[hypeDocId]['currentSceneElm'];
}
/* hypeDocument function to apply ALL clip path in scene (debounced to framerate) */
hypeDocument.applyClipPaths = function(){
/* fetch scene */
var sceneElm = hypeDocument.getCurrentSceneElement();
/* fetch candidates and loop over them */
var targetElms = sceneElm.querySelectorAll('[data-clip-path]');
/* loop over candidates */
for (var i=0; i < targetElms.length; i++) {
hypeDocument.applyClipPathToElement(targetElms[i]);
}
}
/* hypeDocument function to apply a clip path (debounced to framerate) */
hypeDocument.applyClipPathToElement = function(targetElm){
if (targetElm.dataset.clipPath) {
if (!_updatesToRunOnTick[targetElm.id]) {
_updatesToRunOnTick[targetElm.id] = function(){
/* fetch scene sourceElm */
var sceneElm = hypeDocument.getCurrentSceneElement();
var sourceElm = sceneElm.querySelector(targetElm.dataset.clipPath);
/* if found apply it */
if (sourceElm) {
hypeDocument.generateClipPathForElement(sourceElm, targetElm);
} else {
//remove
removeClipPath(targetElm);
}
}
}
} else {
//remove
removeClipPath(targetElm);
}
}
/* hypeDocument function calculate transforms on a vector element and return an SVG compatible transform string */
/* we can't just clone the transforms from one to another node as SVG transforms have a diffrent logic on the transform origin
If anybody knows a quicker way of doing this please contact me! Source: https://css-tricks.com/transforms-on-svg-elements */
hypeDocument.calculateAndStoreTransformForElement = function(sourceElm){
var hypeDocId = hypeDocument.documentId();
var sceneElm = hypeDocument.getCurrentSceneElement();
var transformLookup = _lookup[hypeDocId][sceneElm.id]['Transform'];
/* get offsets */
var sourceLeft = hypeDocument.getElementProperty(sourceElm, 'left');
var sourceTop = hypeDocument.getElementProperty(sourceElm, 'top');
/* offsets */
var offsetX = sourceLeft;
var offsetY = sourceTop;
var originOffsetX = 0;
var originOffsetY = 0;
/* scale */
var sourceScaleX = hypeDocument.getElementProperty(sourceElm, 'scaleX');
var sourceScaleY = hypeDocument.getElementProperty(sourceElm, 'scaleY');
/* rotation */
var sourceRotate = hypeDocument.getElementProperty(sourceElm, 'rotateZ') || 0;
/* store for later use source */
transformLookup[sourceElm.id] = {
'left': sourceLeft,
'top': sourceTop,
'scaleX': sourceScaleX,
'scaleY': sourceScaleY,
'rotateZ': sourceRotate
};
/* transformOrigin */
if (sourceRotate!=0 || sourceScaleX!=1 || sourceScaleY!=1) {
var sourceWidth = hypeDocument.getElementProperty(sourceElm, 'width');
var sourceHeight = hypeDocument.getElementProperty(sourceElm, 'height');
var transformOrigin = (sourceElm.style.transformOrigin) ? String(sourceElm.style.transformOrigin).split(' ') : [50,50];
originOffsetX = sourceWidth * parseFloat(transformOrigin[0]) / 100;
originOffsetY = sourceHeight * parseFloat(transformOrigin[1]) / 100;
}
/* queue transforms using unshift as they are counter intuitive applied in reverse in SVG */
var transform = [];
if (sourceScaleX!=1 || sourceScaleY!=1) {
transform.unshift('scale('+sourceScaleX+' '+sourceScaleY+')');
transform.unshift('translate('+(-originOffsetX*(sourceScaleX-1))+' '+(-originOffsetY*(sourceScaleY-1))+')');
}
if (sourceRotate) {
transform.unshift('rotate('+sourceRotate+' '+originOffsetX+' '+originOffsetY+')');
}
transform.unshift('translate('+offsetX+' '+offsetY+')');
/* return string */
return transform.join(' ');
}
/* hypeDocument function to apply a clip path (attention: not debounced) */
hypeDocument.generateClipPathForElement = function(sourceElm, targetElm){
/* if source and target are defined process them */
if (sourceElm && targetElm) {
/* do stuff if source and target contain SVG */
if (sourceElm.querySelector('svg')) {
var applyElm = targetElm.classList.contains('HYPE_element_container') ? targetElm : targetElm.parentNode;
/* make sure we have a SVG as direct child */
switch (sourceElm.dataset.clipPathStyle) {
/* clip path using url (default) */
default:
var uniqueIdBase = "hype_clip_path_"+targetElm.getAttribute('id')+'_'+sourceElm.getAttribute('id');
var uniqueId = uniqueIdBase+'_'+(Math.ceil(Math.random()*100000+100000));
/* make sure we have a defs section (like on imported SVG from AI) */
if (!sourceElm.querySelector('svg > defs')) {
/* append defs */
sourceElm.querySelector('svg').appendChild(generateDefs());
} else if(_isHypeIDE) {
/* move defs to last position as the Hype IDE has bug and updates first path even if in defs */
sourceElm.querySelector('svg').appendChild(sourceElm.querySelector('svg > defs'));
}
/* append clip path node if needed */
var clipPathElm = sourceElm.querySelector('svg > defs > [id^='+uniqueIdBase+']');
if (!clipPathElm) {
clipPathElm = sourceElm.querySelector('svg > defs').appendChild(generateClipPath());
}
/* update clip path node */
updateClipPath (clipPathElm, {
clipPathAttributes: {
'id': uniqueId,
'shape-rendering': 'optimizeSpeed',
},
pathAttributes: {
'd' : sourceElm.querySelector('svg > path').getAttribute('d'),
'clip-rule': targetElm.dataset.clipPathClipRule,
'transform': hypeDocument.calculateAndStoreTransformForElement(sourceElm),
'shape-rendering': 'optimizeSpeed',
}
});
/* set clip path as CSS style to applyElm being targetElm or targetElm.parentNode */
applyElm.style.webkitClipPath = 'url("#'+uniqueId+'")';
applyElm.style.clipPath = 'url("#'+uniqueId+'")';
/* reverse lookup */
sourceElm.dataset.clipPathSelector = targetElm.dataset.clipPath;
forceRedraw(applyElm);
break;
}
/* as safari doesn't clip outside the bounds on groups let's remind people in chrome */
applyElm.style.overflow = 'hidden';
/* hide source element */
if (!_isHypeIDE && !sourceElm.dataset.clipPathVisible){
sourceElm.style.opacity = 0;
sourceElm.style.pointerEvents = 'none';
}
}
}
}
}
/* function to setup a mutation observer */
function setupObserver (hypeDocument, element, options){
var hypeDocId = hypeDocument.documentId();
var sceneElm = hypeDocument.getCurrentSceneElement();
var observerLookup = _lookup[hypeDocId][sceneElm.id]['Observer'];
if (!observerLookup[options.mOiD]) {
observerLookup[options.mOiD] = new MutationObserver(function(mutations) {
mutations.forEach(function(mutation) {
options.callback.call(null, hypeDocument, mutation);
});
});
}
/* start monitoring for related changes */
observerLookup[options.mOiD].observe(element, options);
}
/* callback for a mutation observer on target nodes (on the fly changes of dataset attributes) */
function callbackTargetProps(hypeDocument, mutation){
/* clip path attribute was mingled with */
switch (mutation.attributeName) {
case 'data-clip-path':
if(processingClipPathDemandsUpdate(mutation)){
hypeDocument.applyClipPathToElement(mutation.target);
}
break;
case 'data-clip-path-clip-rule':
hypeDocument.applyClipPathToElement(mutation.target);
break;
}
}
/* callback for a mutation observer on target nodes (montoring transform changes) */
function callbackSourceProps(hypeDocument, mutation){ | /* apply update to targets referenced by this source */
var selector = mutation.target.dataset.clipPathSelector;
var targetElms = sceneElm.querySelectorAll ('[data-clip-path="'+selector+'"]');
for (var i=0; i < targetElms.length; i++) {
hypeDocument.applyClipPathToElement(targetElms[i]); /* TODO only update transform not path data */
}
}
break;
}
}
/* callback for a mutation observer on target nodes (path updates) */
function callbackPathProps(hypeDocument, mutation){
var sceneElm = _isHypeIDE? document : hypeDocument.getCurrentSceneElement();
switch (mutation.attributeName) {
case 'd':
var selector = mutation.target.parentNode.parentNode.dataset.clipPathSelector;
if (selector) {
var targetElms = sceneElm.querySelectorAll ('[data-clip-path="'+selector+'"]');
for (var i=0; i < targetElms.length; i++) {
hypeDocument.applyClipPathToElement(targetElms[i]);
}
}
break;
}
}
/* determine if there are change in style (return boolean) */
function processingStyleDemandsUpdate(hypeDocument, mutation){
var hypeDocId = hypeDocument.documentId();
var sceneElm = hypeDocument.getCurrentSceneElement();
var transformLookup = _lookup[hypeDocId][sceneElm.id]['Transform'];
var transform = transformLookup[mutation.target.id];
var update = false;
if (transform) {
for (var prop in transform){
if (transform[prop] != hypeDocument.getElementProperty(mutation.target, prop)){
return true;
}
}
}
return false;
}
/* determine if dataset.clipPath has changes (return boolean) */
function processingClipPathDemandsUpdate(mutation){
/* fetche values */
var newValue = mutation.target.dataset.clipPath;
var oldValue = mutation.oldValue;
/* if they differ act */
if (newValue != oldValue) {
/* is new value set */
if (newValue) {
/* apply new clip path */
return true;
} else {
/* else remove clip path */
removeClipPath(mutation.target)
}
}
return false;
}
function removeClipPath (targetElm){
var applyElm = targetElm.classList.contains('HYPE_element_container') ? targetElm : targetElm.parentNode;
applyElm.style.webkitClipPath = null;
applyElm.style.clipPath = null;
}
var forceRedraw = function(element){
var disp = element.style.display;
element.style.display = 'none';
void 0!=element.offsetHeight;
element.style.display = disp;
};
/* sceneLoad */
function sceneLoad(hypeDocument, element, event) {
/* make sure we have a scene specific storage */
var hypeDocId = hypeDocument.documentId();
/* fetch fresh scene element */
var sceneElm = hypeDocument.getCurrentSceneElement();
if (!_lookup[hypeDocId][sceneElm.id]){
_lookup[hypeDocId][sceneElm.id] = {};
_lookup[hypeDocId][sceneElm.id]['Observer'] = {};
_lookup[hypeDocId][sceneElm.id]['Transform'] = {};
}
/* initial apply */
hypeDocument.applyClipPaths();
/* fetch candidates and loop over them */
var targetElms = sceneElm.querySelectorAll('[data-clip-path]');
/* cancel any running ticks */
if (_tickId) window.cancelAnimationFrame(_tickId);
/* loop over candidates if we have any */
if (Object.keys(targetElms).length){
for (var i=0; i < targetElms.length; i++) {
/* initial apply */
//hypeDocument.applyClipPathToElement(targetElms[i]);
/* ignore observer setup is set to static */
if (!targetElms[i].hasAttribute('data-clip-path-static') || _isHypeIDE){
/* observer target (masked element/group) */
setupObserver(hypeDocument, targetElms[i], {
attributes: true,
attributeOldValue: true,
attributeFilter: ['data-clip-path', 'data-clip-path-clip-rule'],
mOiD: targetElms[i].id,
callback: callbackTargetProps
});
/* observer source (mask path) if clipPath is set and found */
if(targetElms[i].dataset.clipPath){
var sourceElm = sceneElm.querySelector(targetElms[i].dataset.clipPath);
if (sourceElm) {
setupObserver(hypeDocument, sourceElm, {
attributes: true,
attributeOldValue: true,
attributeFilter: ['style'],
mOiD: sourceElm.id,
callback: callbackSourceProps
});
var query = _isHypeIDE ? '[hypeobjectid="'+sourceElm.getAttribute('hypeobjectid')+'"] > svg > path' : '#'+sourceElm.id+' > svg > path';
setupObserver(hypeDocument, document.querySelector(query), {
attributes: true,
attributeOldValue: true,
attributeFilter: [ "d"],
mOiD: sourceElm.id+'_path',
callback: callbackPathProps
});
}
}
}
}
/* setup new tick debouncer if needed */
if (_FPS){
var fpsInterval = 1000 / _FPS;
var then = -1000;
var startTime = then;
var tick = function(){
if (!_tickRunning) {
now = performance.now();
elapsed = now - then;
if (elapsed > fpsInterval) {
_tickRunning = true;
then = now - (elapsed % fpsInterval);
for (var id in _updatesToRunOnTick) {
_updatesToRunOnTick[id]();
}
_updatesToRunOnTick = {};
_tickRunning = false;
}
}
_tickId = window.requestAnimationFrame(tick);
}
} else {
var tick = function(){
if (!_tickRunning) {
tickRunning = true;
for (var id in _updatesToRunOnTick) {
_updatesToRunOnTick[id]();
}
_updatesToRunOnTick = {};
tickRunning = false;
}
_tickId = window.requestAnimationFrame(tick);
}
}
/* start tick */
if (_isHypeIDE) {
window.requestAnimationFrame(tick);
} else {
tick();
}
}
}
function setFramesPerSecond (FPS){
FPS = parseInt(FPS);
_FPS = (FPS>0 && FPS<60) ? FPS : null;
}
/* sceneUnload */
function sceneUnload(hypeDocument, element, event) {
/* disconnect mutation observer */
var hypeDocId = hypeDocument.documentId();
var sceneElm = hypeDocument.getCurrentSceneElement();
var observerLookup = _lookup[hypeDocId][sceneElm.id]['Observer'];
for (var mOiD in observerLookup) {
observerLookup[mOiD].disconnect();
if(_isHypeIDE) delete(observerLookup[mOiD]);
}
/* delete cache version so a new one is generated */
delete _lookup[hypeDocId]['currentSceneElm'];
}
/* parse transforms helper for IDE */
function parse_transform(a) {
var b = {};
for (var i in a = a.match(/(\w+)\(([^,)]+),?([^)]+)?\)/gi)) {
var c = a[i].match(/[\w\.\-]+/g);
b[c.shift()] = c;
}
return b;
}
/* IDE preview -- START */
window.addEventListener("DOMContentLoaded", function(event) {
if (_isHypeIDE && supportsClipPath()) {
/* make a fake hypeDocument (IDE) version */
var hypeDocument = {
getElementProperty: function(elm, prop){
switch (prop){ /* TODO WebkitMatrix lookups (although they are influenced by rotation) rather use upcoming hypeattributescalex/y */
case 'left': return parseFloat(elm.getAttribute('hypeattributeleft')); break;
case 'top': return parseFloat(elm.getAttribute('hypeattributetop')); break;
case 'rotateZ': return parseFloat(elm.getAttribute('hypeattributerotationanglez')); break;
case 'width': return parseFloat(elm.style.width); break;
case 'height': return parseFloat(elm.style.height); break;
case 'scaleX': var transform = parse_transform(elm.style.transform); return transform.scaleX ? parseFloat(transform.scaleX): 1; break;
case 'scaleY': var transform = parse_transform(elm.style.transform); return transform.scaleY ? parseFloat(transform.scaleY): 1; break;
}
},
documentId: function(){
return 'hypeDocument'
}
};
/* fake a HypeDocumentLoad event */
extendHype(hypeDocument);
/* overwrite extentions that need tweaking in IDE enviroment */
hypeDocument.getCurrentSceneElement = function(){
return document.getElementById('HypeMainContentDiv');
}
/* fake a HypeSceneLoad event */
sceneLoad(hypeDocument);
/* temporary workaround as long as the IDE uses zoom on 100% and plus */
var zoomCorrector = function(mutations) {
mutations.forEach(function(mutation) {
if (mutation.type == 'attributes') {
if (mutation.attributeName == 'style') {
var zoom = mutation.target.style.zoom;
if (zoom){
mutation.target.style.zoom = null;
mutation.target.style.transform = 'scale('+zoom+', '+zoom+')';
mutation.target.style.transformOrigin = 'left top';
}
}
}
});
}
/* fix zoom in IDE to only use transforms */
var zoomObserver = new MutationObserver(zoomCorrector);
var HypeSceneEditorElm = document.getElementById('HypeSceneEditor');
zoomObserver.observe(HypeSceneEditorElm, {
attributes: true,
attributeOldValue: true,
attributeFilter: [ "style"]
});
/* trigger an initial zoom event */
zoomCorrector([{
target: HypeSceneEditorElm,
type : 'attributes',
attributeName : 'style'
}]);
/* track changes */
var changeObserver = new MutationObserver(function(mutations) {
mutations.forEach(function(mutation) {
/* detection of removal of attribute data-clip-path in IDE */
if (!mutation.target.hasAttribute('data-clip-path')) {
removeClipPath(mutation.target);
}
});
/* delay because existing observers need to run before being reset */
setTimeout(function(){
sceneUnload(hypeDocument);
sceneLoad(hypeDocument);
},1);
});
/* wait for Hype IDE to add build view */
changeObserver.observe(hypeDocument.getCurrentSceneElement(), {
attributes: true,
attributeOldValue: true,
subtree: true,
attributeFilter: ["data-clip-path"],
});
} else{
/* not Hype IDE or doesn't support clip path so let's set up some rules to help with these legacy browsers */
if (!supportsClipPath()) {
document.styleSheets[0].insertRule('.hideIfClipPathNotSupported {display:none!important;}',0);
document.styleSheets[0].insertRule('.showIfClipPathNotSupported {display:block!important;}',1);
}
}
});
/* IDE preview -- END */
/* setup callbacks */
if (supportsClipPath()){
if("HYPE_eventListeners" in window === false) { window.HYPE_eventListeners = Array();}
window.HYPE_eventListeners.push({"type":"HypeDocumentLoad", "callback": extendHype});
window.HYPE_eventListeners.push({"type":"HypeSceneLoad", "callback": sceneLoad});
window.HYPE_eventListeners.push({"type":"HypeSceneUnload", "callback": sceneUnload});
}
/* Reveal Public interface to window['HypeClipPath'] */
return {
version: '1.7.2',
'supportsClipPath': supportsClipPath,
'setFramesPerSecond': setFramesPerSecond
};
})(); | var sceneElm = _isHypeIDE? document : hypeDocument.getCurrentSceneElement();
/* clip path attribute was mingled with */
switch (mutation.attributeName) {
case 'style':
if (processingStyleDemandsUpdate(hypeDocument, mutation)){ | random_line_split |
HypeClipPath.js | /*!
Hype ClipPath 1.7.2
copyright (c) 2021 Max Ziebell, (https://maxziebell.de). MIT-license
*/
/*
* Version-History
* 0.9 (Beta) Initial release under MIT-license
* 1.0 First official release, inverted logic and still a function
* 1.1 With a little refactoring HTML clipping is supported, limitations apply
* 1.2 Fixed a bug determining if it's a SVG or Group
* 1.3 Converted to full extension. Added new methods on hypeDocument
* 1.4 Added live preview in IDE
* 1.5 Fixed some preview issues (nudging, delay)
* 1.6 Fixed some preview issues (zoom, at the cost of antialias)
* 1.7 Using Mutation Observer (not only IDE), debouncing and performance update
* 1.7.1 fixed Safari update bug
* 1.7.2 fixed querySelector bug (thanks to michelangelo)
*/
if("HypeClipPath" in window === false) window['HypeClipPath'] = (function () {
var kSvgNS = 'http://www.w3.org/2000/svg';
/* debounce updates to frames (see sceneLoad) */
var _tickId;
var _tickRunning = false;
var _updatesToRunOnTick = {};
/* @const */
const _isHypeIDE = window.location.href.indexOf("/Hype/Scratch/HypeScratch.") != -1;
var _lookup = {};
/* FPS */
var _FPS;
/* Compability */
var _supportsClipPath = false;
if(window.CSS && window.CSS.supports){
_supportsClipPath = CSS.supports("clip-path", "url(#test)");
}
function supportsClipPath() {
return _supportsClipPath;
}
/* clip path generator function */
function generateClipPath(){
/* create clip and path node */
var clipPathElm = document.createElementNS(kSvgNS, 'clipPath');
var pathElm = document.createElementNS(kSvgNS, 'path');
/* append path data to clip path */
clipPathElm.appendChild(pathElm);
/* return our clip path for further processing */
return clipPathElm;
}
/* clip path update function */
function updateClipPath(clipPathElm, obj){
/* fetch path node */
var pathElm = clipPathElm.querySelector('path');
/* set attributes, transfer path data */
for(var name in obj.pathAttributes){
if (obj.pathAttributes[name]) {
pathElm.setAttribute(name, obj.pathAttributes[name]);
}
}
/* assign unique id to clip path and offset */
for(var name in obj.clipPathAttributes){
if (obj.clipPathAttributes[name]) {
clipPathElm.setAttribute(name, obj.clipPathAttributes[name]);
}
}
}
/* defs generator function */
function generateDefs(){
/* create and return defs node */
var defsElm = document.createElementNS(kSvgNS, 'defs');
return defsElm;
}
/* extend Hype */
function extendHype(hypeDocument, element, event) {
/* init document specific lookup for mutation observer */
var hypeDocId = hypeDocument.documentId();
_lookup[hypeDocId] = {};
/* hypeDocument function to get cached current scene (caching depends on reset of cache in sceneUnload) */
hypeDocument.getCurrentSceneElement = function(){
if (_lookup[hypeDocId]['currentSceneElm']==undefined){
_lookup[hypeDocId]['currentSceneElm'] = document.querySelector('#'+hypeDocument.documentId()+' > .HYPE_scene[style*="block"]');
}
return _lookup[hypeDocId]['currentSceneElm'];
}
/* hypeDocument function to apply ALL clip path in scene (debounced to framerate) */
hypeDocument.applyClipPaths = function(){
/* fetch scene */
var sceneElm = hypeDocument.getCurrentSceneElement();
/* fetch candidates and loop over them */
var targetElms = sceneElm.querySelectorAll('[data-clip-path]');
/* loop over candidates */
for (var i=0; i < targetElms.length; i++) {
hypeDocument.applyClipPathToElement(targetElms[i]);
}
}
/* hypeDocument function to apply a clip path (debounced to framerate) */
hypeDocument.applyClipPathToElement = function(targetElm){
if (targetElm.dataset.clipPath) {
if (!_updatesToRunOnTick[targetElm.id]) {
_updatesToRunOnTick[targetElm.id] = function(){
/* fetch scene sourceElm */
var sceneElm = hypeDocument.getCurrentSceneElement();
var sourceElm = sceneElm.querySelector(targetElm.dataset.clipPath);
/* if found apply it */
if (sourceElm) {
hypeDocument.generateClipPathForElement(sourceElm, targetElm);
} else {
//remove
removeClipPath(targetElm);
}
}
}
} else {
//remove
removeClipPath(targetElm);
}
}
/* hypeDocument function calculate transforms on a vector element and return an SVG compatible transform string */
/* we can't just clone the transforms from one to another node as SVG transforms have a diffrent logic on the transform origin
If anybody knows a quicker way of doing this please contact me! Source: https://css-tricks.com/transforms-on-svg-elements */
hypeDocument.calculateAndStoreTransformForElement = function(sourceElm){
var hypeDocId = hypeDocument.documentId();
var sceneElm = hypeDocument.getCurrentSceneElement();
var transformLookup = _lookup[hypeDocId][sceneElm.id]['Transform'];
/* get offsets */
var sourceLeft = hypeDocument.getElementProperty(sourceElm, 'left');
var sourceTop = hypeDocument.getElementProperty(sourceElm, 'top');
/* offsets */
var offsetX = sourceLeft;
var offsetY = sourceTop;
var originOffsetX = 0;
var originOffsetY = 0;
/* scale */
var sourceScaleX = hypeDocument.getElementProperty(sourceElm, 'scaleX');
var sourceScaleY = hypeDocument.getElementProperty(sourceElm, 'scaleY');
/* rotation */
var sourceRotate = hypeDocument.getElementProperty(sourceElm, 'rotateZ') || 0;
/* store for later use source */
transformLookup[sourceElm.id] = {
'left': sourceLeft,
'top': sourceTop,
'scaleX': sourceScaleX,
'scaleY': sourceScaleY,
'rotateZ': sourceRotate
};
/* transformOrigin */
if (sourceRotate!=0 || sourceScaleX!=1 || sourceScaleY!=1) {
var sourceWidth = hypeDocument.getElementProperty(sourceElm, 'width');
var sourceHeight = hypeDocument.getElementProperty(sourceElm, 'height');
var transformOrigin = (sourceElm.style.transformOrigin) ? String(sourceElm.style.transformOrigin).split(' ') : [50,50];
originOffsetX = sourceWidth * parseFloat(transformOrigin[0]) / 100;
originOffsetY = sourceHeight * parseFloat(transformOrigin[1]) / 100;
}
/* queue transforms using unshift as they are counter intuitive applied in reverse in SVG */
var transform = [];
if (sourceScaleX!=1 || sourceScaleY!=1) {
transform.unshift('scale('+sourceScaleX+' '+sourceScaleY+')');
transform.unshift('translate('+(-originOffsetX*(sourceScaleX-1))+' '+(-originOffsetY*(sourceScaleY-1))+')');
}
if (sourceRotate) {
transform.unshift('rotate('+sourceRotate+' '+originOffsetX+' '+originOffsetY+')');
}
transform.unshift('translate('+offsetX+' '+offsetY+')');
/* return string */
return transform.join(' ');
}
/* hypeDocument function to apply a clip path (attention: not debounced) */
hypeDocument.generateClipPathForElement = function(sourceElm, targetElm){
/* if source and target are defined process them */
if (sourceElm && targetElm) {
/* do stuff if source and target contain SVG */
if (sourceElm.querySelector('svg')) {
var applyElm = targetElm.classList.contains('HYPE_element_container') ? targetElm : targetElm.parentNode;
/* make sure we have a SVG as direct child */
switch (sourceElm.dataset.clipPathStyle) {
/* clip path using url (default) */
default:
var uniqueIdBase = "hype_clip_path_"+targetElm.getAttribute('id')+'_'+sourceElm.getAttribute('id');
var uniqueId = uniqueIdBase+'_'+(Math.ceil(Math.random()*100000+100000));
/* make sure we have a defs section (like on imported SVG from AI) */
if (!sourceElm.querySelector('svg > defs')) {
/* append defs */
sourceElm.querySelector('svg').appendChild(generateDefs());
} else if(_isHypeIDE) {
/* move defs to last position as the Hype IDE has bug and updates first path even if in defs */
sourceElm.querySelector('svg').appendChild(sourceElm.querySelector('svg > defs'));
}
/* append clip path node if needed */
var clipPathElm = sourceElm.querySelector('svg > defs > [id^='+uniqueIdBase+']');
if (!clipPathElm) {
clipPathElm = sourceElm.querySelector('svg > defs').appendChild(generateClipPath());
}
/* update clip path node */
updateClipPath (clipPathElm, {
clipPathAttributes: {
'id': uniqueId,
'shape-rendering': 'optimizeSpeed',
},
pathAttributes: {
'd' : sourceElm.querySelector('svg > path').getAttribute('d'),
'clip-rule': targetElm.dataset.clipPathClipRule,
'transform': hypeDocument.calculateAndStoreTransformForElement(sourceElm),
'shape-rendering': 'optimizeSpeed',
}
});
/* set clip path as CSS style to applyElm being targetElm or targetElm.parentNode */
applyElm.style.webkitClipPath = 'url("#'+uniqueId+'")';
applyElm.style.clipPath = 'url("#'+uniqueId+'")';
/* reverse lookup */
sourceElm.dataset.clipPathSelector = targetElm.dataset.clipPath;
forceRedraw(applyElm);
break;
}
/* as safari doesn't clip outside the bounds on groups let's remind people in chrome */
applyElm.style.overflow = 'hidden';
/* hide source element */
if (!_isHypeIDE && !sourceElm.dataset.clipPathVisible){
sourceElm.style.opacity = 0;
sourceElm.style.pointerEvents = 'none';
}
}
}
}
}
/* function to setup a mutation observer */
function setupObserver (hypeDocument, element, options){
var hypeDocId = hypeDocument.documentId();
var sceneElm = hypeDocument.getCurrentSceneElement();
var observerLookup = _lookup[hypeDocId][sceneElm.id]['Observer'];
if (!observerLookup[options.mOiD]) {
observerLookup[options.mOiD] = new MutationObserver(function(mutations) {
mutations.forEach(function(mutation) {
options.callback.call(null, hypeDocument, mutation);
});
});
}
/* start monitoring for related changes */
observerLookup[options.mOiD].observe(element, options);
}
/* callback for a mutation observer on target nodes (on the fly changes of dataset attributes) */
function callbackTargetProps(hypeDocument, mutation){
/* clip path attribute was mingled with */
switch (mutation.attributeName) {
case 'data-clip-path':
if(processingClipPathDemandsUpdate(mutation)){
hypeDocument.applyClipPathToElement(mutation.target);
}
break;
case 'data-clip-path-clip-rule':
hypeDocument.applyClipPathToElement(mutation.target);
break;
}
}
/* callback for a mutation observer on target nodes (montoring transform changes) */
function callbackSourceProps(hypeDocument, mutation){
var sceneElm = _isHypeIDE? document : hypeDocument.getCurrentSceneElement();
/* clip path attribute was mingled with */
switch (mutation.attributeName) {
case 'style':
if (processingStyleDemandsUpdate(hypeDocument, mutation)){
/* apply update to targets referenced by this source */
var selector = mutation.target.dataset.clipPathSelector;
var targetElms = sceneElm.querySelectorAll ('[data-clip-path="'+selector+'"]');
for (var i=0; i < targetElms.length; i++) {
hypeDocument.applyClipPathToElement(targetElms[i]); /* TODO only update transform not path data */
}
}
break;
}
}
/* callback for a mutation observer on target nodes (path updates) */
function callbackPathProps(hypeDocument, mutation){
var sceneElm = _isHypeIDE? document : hypeDocument.getCurrentSceneElement();
switch (mutation.attributeName) {
case 'd':
var selector = mutation.target.parentNode.parentNode.dataset.clipPathSelector;
if (selector) {
var targetElms = sceneElm.querySelectorAll ('[data-clip-path="'+selector+'"]');
for (var i=0; i < targetElms.length; i++) {
hypeDocument.applyClipPathToElement(targetElms[i]);
}
}
break;
}
}
/* determine if there are change in style (return boolean) */
function processingStyleDemandsUpdate(hypeDocument, mutation){
var hypeDocId = hypeDocument.documentId();
var sceneElm = hypeDocument.getCurrentSceneElement();
var transformLookup = _lookup[hypeDocId][sceneElm.id]['Transform'];
var transform = transformLookup[mutation.target.id];
var update = false;
if (transform) {
for (var prop in transform){
if (transform[prop] != hypeDocument.getElementProperty(mutation.target, prop)){
return true;
}
}
}
return false;
}
/* determine if dataset.clipPath has changes (return boolean) */
function processingClipPathDemandsUpdate(mutation){
/* fetche values */
var newValue = mutation.target.dataset.clipPath;
var oldValue = mutation.oldValue;
/* if they differ act */
if (newValue != oldValue) {
/* is new value set */
if (newValue) {
/* apply new clip path */
return true;
} else {
/* else remove clip path */
removeClipPath(mutation.target)
}
}
return false;
}
function removeClipPath (targetElm){
var applyElm = targetElm.classList.contains('HYPE_element_container') ? targetElm : targetElm.parentNode;
applyElm.style.webkitClipPath = null;
applyElm.style.clipPath = null;
}
var forceRedraw = function(element){
var disp = element.style.display;
element.style.display = 'none';
void 0!=element.offsetHeight;
element.style.display = disp;
};
/* sceneLoad */
function sceneLoad(hypeDocument, element, event) |
function setFramesPerSecond (FPS){
FPS = parseInt(FPS);
_FPS = (FPS>0 && FPS<60) ? FPS : null;
}
/* sceneUnload */
function sceneUnload(hypeDocument, element, event) {
/* disconnect mutation observer */
var hypeDocId = hypeDocument.documentId();
var sceneElm = hypeDocument.getCurrentSceneElement();
var observerLookup = _lookup[hypeDocId][sceneElm.id]['Observer'];
for (var mOiD in observerLookup) {
observerLookup[mOiD].disconnect();
if(_isHypeIDE) delete(observerLookup[mOiD]);
}
/* delete cache version so a new one is generated */
delete _lookup[hypeDocId]['currentSceneElm'];
}
/* parse transforms helper for IDE */
function parse_transform(a) {
var b = {};
for (var i in a = a.match(/(\w+)\(([^,)]+),?([^)]+)?\)/gi)) {
var c = a[i].match(/[\w\.\-]+/g);
b[c.shift()] = c;
}
return b;
}
/* IDE preview -- START */
window.addEventListener("DOMContentLoaded", function(event) {
if (_isHypeIDE && supportsClipPath()) {
/* make a fake hypeDocument (IDE) version */
var hypeDocument = {
getElementProperty: function(elm, prop){
switch (prop){ /* TODO WebkitMatrix lookups (although they are influenced by rotation) rather use upcoming hypeattributescalex/y */
case 'left': return parseFloat(elm.getAttribute('hypeattributeleft')); break;
case 'top': return parseFloat(elm.getAttribute('hypeattributetop')); break;
case 'rotateZ': return parseFloat(elm.getAttribute('hypeattributerotationanglez')); break;
case 'width': return parseFloat(elm.style.width); break;
case 'height': return parseFloat(elm.style.height); break;
case 'scaleX': var transform = parse_transform(elm.style.transform); return transform.scaleX ? parseFloat(transform.scaleX): 1; break;
case 'scaleY': var transform = parse_transform(elm.style.transform); return transform.scaleY ? parseFloat(transform.scaleY): 1; break;
}
},
documentId: function(){
return 'hypeDocument'
}
};
/* fake a HypeDocumentLoad event */
extendHype(hypeDocument);
/* overwrite extentions that need tweaking in IDE enviroment */
hypeDocument.getCurrentSceneElement = function(){
return document.getElementById('HypeMainContentDiv');
}
/* fake a HypeSceneLoad event */
sceneLoad(hypeDocument);
/* temporary workaround as long as the IDE uses zoom on 100% and plus */
var zoomCorrector = function(mutations) {
mutations.forEach(function(mutation) {
if (mutation.type == 'attributes') {
if (mutation.attributeName == 'style') {
var zoom = mutation.target.style.zoom;
if (zoom){
mutation.target.style.zoom = null;
mutation.target.style.transform = 'scale('+zoom+', '+zoom+')';
mutation.target.style.transformOrigin = 'left top';
}
}
}
});
}
/* fix zoom in IDE to only use transforms */
var zoomObserver = new MutationObserver(zoomCorrector);
var HypeSceneEditorElm = document.getElementById('HypeSceneEditor');
zoomObserver.observe(HypeSceneEditorElm, {
attributes: true,
attributeOldValue: true,
attributeFilter: [ "style"]
});
/* trigger an initial zoom event */
zoomCorrector([{
target: HypeSceneEditorElm,
type : 'attributes',
attributeName : 'style'
}]);
/* track changes */
var changeObserver = new MutationObserver(function(mutations) {
mutations.forEach(function(mutation) {
/* detection of removal of attribute data-clip-path in IDE */
if (!mutation.target.hasAttribute('data-clip-path')) {
removeClipPath(mutation.target);
}
});
/* delay because existing observers need to run before being reset */
setTimeout(function(){
sceneUnload(hypeDocument);
sceneLoad(hypeDocument);
},1);
});
/* wait for Hype IDE to add build view */
changeObserver.observe(hypeDocument.getCurrentSceneElement(), {
attributes: true,
attributeOldValue: true,
subtree: true,
attributeFilter: ["data-clip-path"],
});
} else{
/* not Hype IDE or doesn't support clip path so let's set up some rules to help with these legacy browsers */
if (!supportsClipPath()) {
document.styleSheets[0].insertRule('.hideIfClipPathNotSupported {display:none!important;}',0);
document.styleSheets[0].insertRule('.showIfClipPathNotSupported {display:block!important;}',1);
}
}
});
/* IDE preview -- END */
/* setup callbacks */
if (supportsClipPath()){
if("HYPE_eventListeners" in window === false) { window.HYPE_eventListeners = Array();}
window.HYPE_eventListeners.push({"type":"HypeDocumentLoad", "callback": extendHype});
window.HYPE_eventListeners.push({"type":"HypeSceneLoad", "callback": sceneLoad});
window.HYPE_eventListeners.push({"type":"HypeSceneUnload", "callback": sceneUnload});
}
/* Reveal Public interface to window['HypeClipPath'] */
return {
version: '1.7.2',
'supportsClipPath': supportsClipPath,
'setFramesPerSecond': setFramesPerSecond
};
})(); | {
/* make sure we have a scene specific storage */
var hypeDocId = hypeDocument.documentId();
/* fetch fresh scene element */
var sceneElm = hypeDocument.getCurrentSceneElement();
if (!_lookup[hypeDocId][sceneElm.id]){
_lookup[hypeDocId][sceneElm.id] = {};
_lookup[hypeDocId][sceneElm.id]['Observer'] = {};
_lookup[hypeDocId][sceneElm.id]['Transform'] = {};
}
/* initial apply */
hypeDocument.applyClipPaths();
/* fetch candidates and loop over them */
var targetElms = sceneElm.querySelectorAll('[data-clip-path]');
/* cancel any running ticks */
if (_tickId) window.cancelAnimationFrame(_tickId);
/* loop over candidates if we have any */
if (Object.keys(targetElms).length){
for (var i=0; i < targetElms.length; i++) {
/* initial apply */
//hypeDocument.applyClipPathToElement(targetElms[i]);
/* ignore observer setup is set to static */
if (!targetElms[i].hasAttribute('data-clip-path-static') || _isHypeIDE){
/* observer target (masked element/group) */
setupObserver(hypeDocument, targetElms[i], {
attributes: true,
attributeOldValue: true,
attributeFilter: ['data-clip-path', 'data-clip-path-clip-rule'],
mOiD: targetElms[i].id,
callback: callbackTargetProps
});
/* observer source (mask path) if clipPath is set and found */
if(targetElms[i].dataset.clipPath){
var sourceElm = sceneElm.querySelector(targetElms[i].dataset.clipPath);
if (sourceElm) {
setupObserver(hypeDocument, sourceElm, {
attributes: true,
attributeOldValue: true,
attributeFilter: ['style'],
mOiD: sourceElm.id,
callback: callbackSourceProps
});
var query = _isHypeIDE ? '[hypeobjectid="'+sourceElm.getAttribute('hypeobjectid')+'"] > svg > path' : '#'+sourceElm.id+' > svg > path';
setupObserver(hypeDocument, document.querySelector(query), {
attributes: true,
attributeOldValue: true,
attributeFilter: [ "d"],
mOiD: sourceElm.id+'_path',
callback: callbackPathProps
});
}
}
}
}
/* setup new tick debouncer if needed */
if (_FPS){
var fpsInterval = 1000 / _FPS;
var then = -1000;
var startTime = then;
var tick = function(){
if (!_tickRunning) {
now = performance.now();
elapsed = now - then;
if (elapsed > fpsInterval) {
_tickRunning = true;
then = now - (elapsed % fpsInterval);
for (var id in _updatesToRunOnTick) {
_updatesToRunOnTick[id]();
}
_updatesToRunOnTick = {};
_tickRunning = false;
}
}
_tickId = window.requestAnimationFrame(tick);
}
} else {
var tick = function(){
if (!_tickRunning) {
tickRunning = true;
for (var id in _updatesToRunOnTick) {
_updatesToRunOnTick[id]();
}
_updatesToRunOnTick = {};
tickRunning = false;
}
_tickId = window.requestAnimationFrame(tick);
}
}
/* start tick */
if (_isHypeIDE) {
window.requestAnimationFrame(tick);
} else {
tick();
}
}
} | identifier_body |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.