file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
optimizer-output.component.ts | import { Router } from '@angular/router';
import { Sort } from '@angular/material/sort';
import { FormControl } from '@angular/forms';
import { Component, EventEmitter, OnInit, Output, ViewChild } from '@angular/core';
import { MatTableDataSource } from '@angular/material/table';
import { SelectionModel } from '@angular/cdk/collections';
import { NgbModal, NgbModalOptions } from '@ng-bootstrap/ng-bootstrap';
import {ModalDismissReasons} from '@ng-bootstrap/ng-bootstrap';
import { MatPaginator } from '@angular/material/paginator';
import { groupByJson } from '../../planner/scenario-planning/scenario-planning.component';
import { Input } from '@angular/core';
import { Angular5Csv } from 'angular5-csv/dist/Angular5-csv';
import { ScenarioPlannerService } from '../../backend-services/scenario-planner.service';
import * as Notiflix from 'notiflix';
import { environment } from 'src/environments/environment';
import { DataControllerService } from 'src/app/base/data-controller/data-controller.service';
import {
trigger,
state,
style,
animate,
transition
} from "@angular/animations";
export interface ScenarioPlanner {
product_tpn: number;
total_incremental_sales: string;
total_activation_cost:number;
pack_type: string;
product_name:string;
activation_type:string;
processed_lift: number;
}
export interface ScenarioPlannerConstraint {
pack_type:string
fsi: boolean;
fai: boolean;
search: boolean;
sot: boolean;
bpp: boolean;
}
Notiflix.Notify.init({
position:'right-bottom',
timeout:3000
})
@Component({
selector: 'app-optimizer-output',
templateUrl: './optimizer-output.component.html',
styleUrls: ['./optimizer-output.component.scss'],
animations: [
trigger("changeDivSize", [
state(
"initial",
style({
backgroundColor: "green",
width: "100px",
height: "100px"
})
),
state(
"final",
style({
backgroundColor: "red",
width: "200px",
height: "200px"
})
),
transition("initial=>final", animate("1500ms")),
transition("final=>initial", animate("1000ms"))
]),
trigger("balloonEffect", [
state(
"initial",
style({
backgroundColor: "green",
transform: "scale(1)"
})
),
state(
"final",
style({
backgroundColor: "red",
transform: "scale(1.5)"
})
),
transition("final=>initial", animate("1000ms")),
transition("initial=>final", animate("1500ms"))
]),
trigger("fadeInOut", [
state(
"void",
style({
opacity: 0
})
),
transition("void <=> *", animate(1000))
]),
trigger("EnterLeave", [
state("flyIn", style({ transform: "translateX(0)" })),
transition(":enter", [
style({ transform: "translateX(-100%)" }),
animate("0.5s 300ms ease-in")
]),
transition(":leave", [
animate("0.3s ease-out", style({ transform: "translateX(100%)" }))
])
])
]
})
export class OptimizerOutputComponent implements OnInit {
response_data:any;
SOURCE: any;
valueSelected:any=0;
modalOptions:NgbModalOptions | undefined;
filterData: any;
defaultData:any
datastream:any;
reload1: boolean=true;
TATSPack_ARRAY: any=[];
currencySymbol: any;
optimizedLift:any=0;
totalLift:any=0;
incremantalCSV: number=0;
totalscvROAS:number=0;
totalActivationCost:number=0;
Ratecardjson: any;
budgetConstraintSubscribe: any;
totalBudget: any=0;
| (private modalService: NgbModal,
private dataservice:DataControllerService,
private routes:Router,private apiServices:ScenarioPlannerService) {
// console.log(this.route.getCurrentNavigation()?.extras.state);
this.datastream=this.routes.getCurrentNavigation()?.extras.state;
this.currencySymbol=environment.currencySymbol;
this.modalOptions = {
backdrop:'static',
backdropClass:'customBackdrop'
}
};
ELEMENT_DATA: ScenarioPlanner[] = [];
activationLIB:any={};
TATS:any={};
packTypeList:any;
TATS_ARRAY:any=[];
DynActivationColumns:any=[];
TATS_BY_PACK:any={};
Chartpoints_pla_rev:any={};
FileName:string='';
activationLIBSelected:any={};
binaryOption=[
{id: 'Yes', name: "Yes"},
{id: 'No', name: "No"},];
reload:boolean=true;
ELEMENT_DATA_CONSTRAINTS:any=[];
//displayedColumnsConstraints: string[] = ['pack_type','fsi', 'fai','search', 'sot', 'bpp'];
//dataSourceConstraints = new MatTableDataSource<ScenarioPlannerConstraint>(this.ELEMENT_DATA_CONSTRAINTS);
PlacementLabel:any=[];
@Input() dataSetLabel:any=[ 'FAI', 'FSI', 'SOT', 'BBP','Search'];
@Input() dataSet:any={ data: [0, 0, 0, 0, 0],
title: {
text: 'Incremental Revenue by Placements',
display: true
} };
dataSetLabel1:any=[];
saveList:any=[{'name':'SELECT','id':0},
{'name':'Load1','id':1}]
selectedplacementTypes='';
dataSet1:any={ data: [], label: 'Expected Lift by Pack type' };
//'total_activation_cost','total_incremental_sales','processed_lift'
displayedColumns: string[] = ['pack_sub_type','pack_type','activation_type','total_activation_cost','total_incremental_sales','csv_roas','processed_lift',];
dataSource = new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA);
selection = new SelectionModel<ScenarioPlanner>(true, []);
sortedData: ScenarioPlanner[]=[];
selectedData:any=[];
skuList: ScenarioPlanner[] = [];
activityType: ScenarioPlanner[] = [];
activityLift:any = '';
activityROI:any = '';
renderedData: any;
closeModal: any;
liftSliderValue:any = [5,60];
roiSliderValue:any = [5,40];
groupedOnPackType=[];
// Configuration for the filters
skuSelected:any = [];
placementTypes = new FormControl();
//segment
Segment = new FormControl();
segmentList: any[] = [];
selectedSegmentList: any = [];
constraint_list=[]
ngOnInit(): void {
Notiflix.Loading.dots('Loading...');
this.budgetConstraintSubscribe = this.dataservice.BudgetConstraintOb.subscribe((constraint:any) => {
if(constraint){
this.totalBudget=constraint['total'];
}
console.log(constraint,"constraintz");
console.log(this.totalBudget,"totalbudget")
});
this.apiServices.getActivationList().subscribe((res:any)=>{
console.log(res,"RES");
Notiflix.Loading.remove();
if(res.code==200){
this.DynActivationColumns=res.data;
for(let [key,value] of Object.entries(res.data)){
let values:any=value;
this.activationLIB[values.value]=values.name;
this.PlacementLabel.push(values.name);
}
if(this.datastream){
this.SOURCE=this.datastream.source
if(this.datastream.source=='from_opt_activation'){
this.ELEMENT_DATA_CONSTRAINTS=this.datastream.data[0] || [];
this.selectedData=this.datastream.data[1] || [];
this.response_data=this.datastream.data[2] || [];
this.filterData=this.datastream.data[3] || [];
this.defaultData=this.datastream.data[3] || [];
this.Ratecardjson=this.datastream.data[4] || [];
this.ELEMENT_DATA_CONSTRAINTS.forEach((element:any) => {
let itemlist=[];
for( const [key,value] of Object.entries(element)){
if((value) && (this.activationLIB[key]!=undefined)){
itemlist.push(this.activationLIB[key]);
}
}
this.activationLIBSelected[element.pack_type]=itemlist;
});
}
this.ELEMENT_DATA=this.filterData;
this.ngAfterViewInit();
this.getSavedData();
this.groupedOnPackType=groupByJson(this.filterData,'pack_type');
this.segmentList=Object.keys(this.groupedOnPackType);
this.selectedSegmentList = this.segmentList;
this.chartInit(this.ELEMENT_DATA);
}else{
this.routes.navigate(['/planner']);
}
}
});
}
@ViewChild(MatPaginator) paginator: any;
ngAfterViewInit() {
console.log(this.ELEMENT_DATA,"this.ELEMENT_DATA__");
this.ELEMENT_DATA=this.ELEMENT_DATA.sort((a:any, b:any) => b.processed_lift - a.processed_lift);
this.ELEMENT_DATA.forEach((element:any) => {
element['csv_roas']=((element.total_incremental_sales/element.total_activation_cost)*100).toFixed()
});
this.dataSource= new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA);
this.dataSource.paginator = this.paginator;
this.dataSource.connect().subscribe(d => {
this.renderedData = d});
}
// File Reader ( EXCEL OR CSV) to JSON Format
// Input Handler for the promocode upload
async testData(event:any){
// let promoList:any=await this.onFileChange(event);
// let FilteredSet=promoList['sheet1'];
// this.ELEMENT_DATA=FilteredSet;
// this.dataSource= new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA);
// this.ngAfterViewInit();
}
saveScenarioTrigger(content:any) {
this.modalService.open(content, this.modalOptions).result.then((result) => {
});
}
deleteSavedList(){
let that=this;
Notiflix.Confirm.show('Confirm Delete','Are you sure you want to delete this item?','Yes','No',
()=>{
//scenario_planner_listdelete
this.apiServices.scenario_planner_listdelete(this.valueSelected).subscribe((res:any)=>{
if(res.code==200 && res.status=='success'){
that.getSavedData();
Notiflix.Notify.success('Deleted Successfully ! ');
}
});
});
}
LoadSaveList(){
this.incremantalCSV=0;
this.totalActivationCost=0;
this.totalscvROAS=0;
if(this.valueSelected!=0){
//load data
Notiflix.Loading.dots('Loading...');
this.apiServices.scenario_planner_listdetails(this.valueSelected).subscribe((res:any)=>{
console.log(res,"listDetails");
Notiflix.Loading.remove();
let response=res;
if(res.code==200 && res.status=='success'){
this.resetFilter();
let filterData:any=response['data'][0].json_data;
this.groupedOnPackType=groupByJson(filterData,'pack_type');
this.segmentList=Object.keys(this.groupedOnPackType);
this.selectedSegmentList = this.segmentList;
filterData = filterData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"]));
if(this.selectedplacementTypes.length!=0){
let to_find:any=[...this.selectedplacementTypes];
console.log(to_find,"to_find");
filterData=recursiveFind(filterData,to_find);
console.log(to_find,"to_find")
}
filterData=filterData.sort((a:any, b:any) => b.processed_lift - a.processed_lift);
console.log(filterData,"filterData");
this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData);
this.dataSource.paginator = this.paginator;
this.chartInit(filterData);
Notiflix.Notify.success('Senario is loaded successfully !!!');
this.filterData=filterData;
this.modalService.dismissAll();
}
});
}else{
//load default data
let filterData:any = this.defaultData.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"]));
if(this.selectedplacementTypes.length!=0){
let to_find:any=[...this.selectedplacementTypes];
filterData=recursiveFind(filterData,to_find);
}
this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData);
this.dataSource.paginator = this.paginator;
this.chartInit(filterData);
this.modalService.dismissAll();
}
}
saveScenario(){
let planner_type='';
if(this.SOURCE=='from_opt_activation'){
planner_type='optimizer';
}else{
planner_type='simulation'
}
let payload={
"name":this.FileName,
"json_data":this.filterData,
"planner_type":planner_type
}
if(this.FileName.trim()!=''){
this.apiServices.scenario_planner_simulate_save(payload).subscribe((res:any)=>{
console.log(res,"res")
if(res.code==200){
this.modalService.dismissAll();
Notiflix.Notify.success('Simulation is Saved Successfully');
this.getSavedData();
this.FileName='';
}else{
if(res.status=='Failed'){
Notiflix.Notify.failure('Failed to save record');
}
}
});
}else{
Notiflix.Notify.failure('Please Enter The Scenario Name')
}
}
getSavedData(){
this.apiServices.scenario_planner_list().subscribe((res:any) =>{
console.log(res,"scenatio_list");
this.saveList=[];
if(res.code==200 && res.status=='success'){
if(this.SOURCE=='from_opt_activation'){
// planner_type='optimizer';
this.saveList=[{'name':'Default','id':0}];
this.saveList.push(...res.data['optimizer']);
}else{
// planner_type='simulation'
//this.saveList=res.data['simulation'];
this.saveList=[{'name':'Default','id':0}];
this.saveList.push(...res.data['simulation']);
}
}
console.log(this.saveList,"saveList");
});
}
getpackTypeList(filterData:any,byPacktype:any){
this.TATS_ARRAY=[];
for(let [key,value] of Object.entries(this.activationLIB)){
this.TATS_ARRAY.push({'name':value,'value':this.TATS[key]})
}
this.TATSPack_ARRAY=[];
if(this.packTypeList){
for(let [key,value] of Object.entries(this.packTypeList)){
let values:any=value;
this.TATSPack_ARRAY.push({'name':values.name,'value':this.TATS[key]})
}
for(let [key,value] of Object.entries(byPacktype)){
let lvalue:any=value;
this.TATS_BY_PACK[key.toLowerCase()]=lvalue.length;
}
}
}
downloadProducts(){
let filename="Scenario-Planner - OPTIMIZER"
var options = {
fieldSeparator: ',',
quoteStrings: '"',
decimalseparator: '.',
showLabels: true,
showTitle: true,
title: filename,
useBom: true,
noDownload: false,
headers: ['Pack Type', 'Product Sub Type', 'Activity','Cost','Incremental Sales','Expected Lift','CSV ROAS'],
nullToEmptyString: true,
};
this.renderedData.map((item:any)=>
{
for(let [key,value] of Object.entries(item)){
let values:any=value;
if(!this.displayedColumns.includes(key)){
delete item[key];
}else{
if(key=='processed_lift'){
item[key]=values.toFixed(2)+"%";
}
else if(key=='csv_roas'){
item[key]=values+"%";
}
else if(key=='total_activation_cost'){
item[key]=values.toFixed(2);
}
else if(key=='total_incremental_sales'){
item[key]=values.toFixed(2);
}
//'total_activation_cost','total_incremental_sales'
}
}
});
new Angular5Csv(this.renderedData, filename, options);
}
test_filter(){
}
decrementRange(value:any){
value.discount=value.discount-5;
}
incrementRange(value:any){
value.discount=value.discount+5;
}
goBack(){
console.log(this.SOURCE,"this.SOURCE")
if(this.SOURCE=='from_opt_activation'){
this.routes.navigate(['/optimizer'],{ state: {'source':'from_output','data':[this.ELEMENT_DATA_CONSTRAINTS,this.selectedData,this.response_data,this.Ratecardjson]}});
}else{
this.routes.navigate(['/simulator'],{ state: {'source':'from_output','data':[this.ELEMENT_DATA_CONSTRAINTS,this.selectedData,this.response_data,this.Ratecardjson]}});
}
}
resetFilter(){
this.dataSource = new MatTableDataSource<ScenarioPlanner>(this.ELEMENT_DATA);
this.dataSource.paginator = this.paginator;
this.chartInit(this.ELEMENT_DATA);
}
doFilter(){
this.incremantalCSV=0;
console.log(this.selectedSegmentList,"Segmentedlist")
let filterData:any = this.ELEMENT_DATA.filter((data:any) => this.selectedSegmentList.includes(data["pack_type"]));
if(this.selectedplacementTypes.length!=0){
let to_find:any=[...this.selectedplacementTypes];
filterData=recursiveFind(filterData,to_find);
}
this.dataSource = new MatTableDataSource<ScenarioPlanner>(filterData);
this.dataSource.paginator = this.paginator;
this.chartInit(filterData);
}
chartInit(filterData:any){
this.TATS={};
this.incremantalCSV=0;
this.totalActivationCost=0;
this.totalscvROAS=0;
this.optimizedLift=0;
this.totalLift=0;
this.DynActivationColumns.forEach((element:any) => {
this.TATS[element.value]=0;
//this.Chartpoints_pla_rev[element.value]=0;
//this.incremantalCSV+=element.total_incremental_sales;
});
let gbActivity=groupByJson(filterData,'activation_type');
console.log(gbActivity,"gbActivity")
let gbActivityList=Object.keys(gbActivity);
gbActivityList.forEach((item)=>{
this.Chartpoints_pla_rev[item]=0;
});
let predictedSales=0;
filterData.forEach((element:any)=>{
this.incremantalCSV+=element.total_incremental_sales;
this.totalActivationCost+=element.total_activation_cost;
this.totalscvROAS+=element.total_incremental_sales/element.total_activation_cost;
this.optimizedLift+=element.total_activation_cost;
//this.totalLift+=element.processed_lift;
// calculation = item["total_incremental_sales"] /(item["predicted_sales"] - item["total_incremental_sales"])
predictedSales+=element.predicted_sales;
});
this.totalLift=this.incremantalCSV/(predictedSales-this.incremantalCSV)*100;
this.optimizedLift=this.optimizedLift.toFixed()
this.optimizedLift= numberWithCommas(this.optimizedLift);
gbActivityList.forEach((item)=>{
filterData.forEach((element:any)=>{
if(element.activation_type.includes(item)){
this.Chartpoints_pla_rev[item]=element.total_incremental_sales
}
});
});
for(let [key,value] of Object.entries(this.activationLIB)){
filterData.forEach((element:any)=>{
if(element.activation_type.includes(value)){
this.TATS[key]+=1;
//this.Chartpoints_pla_rev[key]+=element.total_incremental_sales.toFixed(2);
}
});
}
console.log(this.Chartpoints_pla_rev,"===");
let byPacktype=groupByJson(filterData,'pack_type');
console.log(filterData,byPacktype,"1");
this.chartRender(this.Chartpoints_pla_rev,filterData);
this.chartExpLift(filterData,byPacktype);
this.getpackTypeList(filterData,byPacktype);
}
chartRender(data:any,filterData:any){
this.reload=false;
let data_points:any=[];
this.dataSetLabel=[];
console.log(data,"data")
let gbActivity=groupByJson(filterData,'activation_type');
console.log(gbActivity,"gbActivity")
let gbActivityList=Object.keys(gbActivity);
gbActivityList.forEach((item)=>{
if(data[item]!=0){
this.dataSetLabel.push(item);
data_points.push(data[item].toFixed(2));
}
console.log( this.dataSetLabel," this.dataSetLabel",data_points);
});
this.dataSet={ data: data_points,
label: 'Incremental Revenue by Placement' ,backgroundColor:[
'rgb(156, 39, 176)',
'rgb(103, 58, 183 )',
'rgb(33, 150, 243 )',
'rgb(0, 150, 136 )',
'rgb(139, 195, 74 )',
'rgb(233, 30, 99 )',
'rgb(103, 58, 183 )',
]};
setTimeout(()=>{
this.reload=true;
},200);
}
chartExpLift(data:any,byPacktype:any){
this.reload1=false;
let data_points1:any=[];
this.dataSetLabel1=[];
for(let [key,value] of Object.entries(byPacktype)){
this.dataSetLabel1.push(key);
let items:any=value;
let tssum=0;
items.map((item:any)=>{
tssum+=parseInt(item.processed_lift);
});
console.log(tssum.toFixed(2),"tssum");
data_points1.push(tssum);
}
console.log(data_points1,"data_points1")
this.dataSet1={ data: data_points1, label: 'Expected Lift By Pack Type' ,backgroundColor:[
'rgb(156, 39, 176)',
'rgb(103, 58, 183 )',
'rgb(33, 150, 243 )',
'rgb(0, 150, 136 )',
'rgb(139, 195, 74 )',
'rgb(233, 30, 99 )',
'rgb(103, 58, 183 )',
]};
this.apiServices.getpackTypeList().subscribe((res: any) => {
console.log(res, "getpackTypeList");
if (res.code == 200 && res.status == 'success') {
this.packTypeList = res.data;
this.packTypeList.forEach((element:any) => {
element['counts']=0;
});
this.packTypeList.forEach((element:any) => {
console.log(byPacktype[element.name],"byPacktype[element.name]");
element['counts']=byPacktype[element.name]?.length || 0;
});
console.log(this.packTypeList,"updated");
}
});
setTimeout(()=>{
this.reload1=true;
console.log(this.dataSet1, this.dataSetLabel)
},200);
}
sortData(sort: Sort) {
console.log("sort");
const data = this.filterData.slice();
if (!sort.active || sort.direction === '') {
this.sortedData = data;
return;
}
this.sortedData = data.sort((a:any, b:any) => {
const isAsc = sort.direction === 'desc';
switch (sort.active) {
case 'processed_lift': return compare(a.processed_lift, b.processed_lift, isAsc);
case 'total_activation_cost': return compare(a.total_activation_total_activation_cost, b.total_activation_total_activation_cost, isAsc);
case 'total_incremental_sales': return compare(a.total_incremental_sales, b.total_incremental_sales, isAsc);
default: return 0;
}
});
console.log(this.sortedData,"sortedData")
this.dataSource = new MatTableDataSource<ScenarioPlanner>(this.sortedData);
this.dataSource.paginator = this.paginator;
this.dataSource.connect().subscribe(d => {
this.renderedData = d});
// this.ngAfterViewInit();
}
triggerModal(content :any) {
this.modalService.open(content, {ariaLabelledBy: 'modal-basic-title'}).result.then((res) => {
this.closeModal = `Closed with: ${res}`;
}, (res) => {
this.closeModal = `Dismissed ${this.getDismissReason(res)}`;
});
}
private getDismissReason(reason: any): string {
if (reason === ModalDismissReasons.ESC) {
return 'by pressing ESC';
} else if (reason === ModalDismissReasons.BACKDROP_CLICK) {
return 'by clicking on a backdrop';
} else {
return `with: ${reason}`;
}
}
isAllSelected() {
const numSelected = this.selection.selected.length;
const numRows = this.dataSource.data.length;
return numSelected === numRows;
}
masterToggle() {
if (this.isAllSelected()) {
this.selection.clear();
return;}
this.selection.select(...this.dataSource.data);
this.setActivationCounter();
}
checkbox_row(row:any){
this.selection.toggle(row);
this.setActivationCounter();
}
checkboxLabel(row?: ScenarioPlanner): string {
if (!row) {
return `${this.isAllSelected() ? 'deselect' : 'select'} all`;
}
return `${this.selection.isSelected(row) ? 'deselect' : 'select'} row ${row.product_tpn + 1}`;
}
updateProductCounter(){
//totalProducts
}
recountCheckbox(event:any){
event.stopPropagation();
this.setActivationCounter();
}
setActivationCounter(){
setTimeout(()=>{
// this.totalActivities=this.selection.selected.length;
// //console.log(this.selection.selected,"this.totalActivities");
// console.log(groupByJson(this.selection.selected,'sku'),"SKU group")
// this.totalProducts=Object.keys(groupByJson(this.selection.selected,'sku')).length;
},200);
}
}
// Used For Datatable sorting
function compare(a: number | string, b: number | string, isAsc: boolean) {
return (a < b ? -1 : 1) * (isAsc ? 1 : -1);
}
function recursiveFind(inputArr:any,find:any):any{
//break-condition
if(find.length==0){
return inputArr
}else{
// if(find.length==1){
// inputArr=inputArr.filter((data:any) => find[0] == data["activation_type"]);
// find.shift();
// }else{
// inputArr=inputArr.filter((data:any) => data["activation_type"].includes(find[0]));
// find.shift();
// console.log(inputArr,"inputArr");
// }
inputArr=inputArr.filter((data:any) => data["activation_type"].includes(find[0]));
find.shift();
return recursiveFind(inputArr,find)
}
}
function numberWithCommas(x:any) {
return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
}
| constructor | identifier_name |
common.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import errno
import logging
import operator
import os
import re
from collections import namedtuple
from logging.handlers import RotatingFileHandler
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError, ParamValidationError
from configparser import ConfigParser, NoOptionError, NoSectionError
from pkg_resources import packaging
from tabulate import tabulate
from awsbatch.utils import fail, get_installed_version, get_region_by_stack_id
class Output:
"""Generic Output object."""
def __init__(self, mapping, items=None):
"""
Create a table of generic items.
:param items: list of items
:param mapping: association between keys and item attributes
"""
self.items = items if items else []
self.mapping = mapping
self.keys = []
for key in mapping.keys():
self.keys.append(key)
def add(self, items):
"""Add items to output."""
if isinstance(items, list):
self.items.extend(items)
else:
self.items.append(items)
def show_table(self, keys=None, sort_keys_function=None):
"""
Print the items table.
:param keys: show a specific list of keys (optional)
:param sort_keys_function: function to sort table rows (optional)
"""
rows = []
output_keys = keys or self.keys
for item in self.__get_items(sort_keys_function):
row = []
for output_key in output_keys:
row.append(getattr(item, self.mapping[output_key]))
rows.append(row)
print(tabulate(rows, output_keys))
def show(self, keys=None, sort_keys_function=None):
"""
Print the items in a key value format.
:param keys: show a specific list of keys (optional)
"""
output_keys = keys or self.keys
if not self.items:
print("No items to show")
else:
for item in self.__get_items(sort_keys_function):
for output_key in output_keys:
print("{0:25}: {1!s}".format(output_key, getattr(item, self.mapping[output_key])))
print("-" * 25)
def length(self):
|
def __get_items(self, sort_keys_function=None):
"""Return a sorted copy of self.items if sort_keys_function is given, a reference to self.items otherwise."""
if sort_keys_function:
return sorted(list(self.items), key=sort_keys_function)
return self.items
class Boto3ClientFactory:
"""Boto3 configuration object."""
def __init__(self, region, proxy="NONE"):
"""Initialize the object."""
self.region = region
self.proxy_config = Config()
if proxy != "NONE":
self.proxy_config = Config(proxies={"https": proxy})
def get_client(self, service):
"""
Initialize the boto3 client for a given service.
:param service: boto3 service.
:return: the boto3 client
"""
try:
return boto3.client(service, region_name=self.region, config=self.proxy_config)
except ClientError as e:
fail("AWS %s service failed with exception: %s" % (service, e))
CliRequirement = namedtuple("Requirement", "package operator version")
class CliRequirementsMatcher:
"""Utility class to match requirements specified in CFN stack output."""
COMPARISON_OPERATORS = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def __init__(self, requirements_string):
try:
self.requirements = []
for requirement_string in requirements_string.split(","):
match = re.search(r"([\w+_-]+)([<>=]+)([\d.]+)", requirement_string)
self.requirements.append(
CliRequirement(package=match.group(1), operator=match.group(2), version=match.group(3))
)
except IndexError:
fail(f"Unable to parse ParallelCluster AWS Batch CLI requirements: '{requirements_string}'")
def check(self):
"""Verify if CLI requirements are satisfied."""
for req in self.requirements:
if not self.COMPARISON_OPERATORS[req.operator](
packaging.version.parse(get_installed_version(req.package)),
packaging.version.parse(req.version),
):
fail(f"The cluster requires {req.package}{req.operator}{req.version}")
class AWSBatchCliConfig:
"""AWS ParallelCluster AWS Batch CLI configuration object."""
def __init__(self, log, cluster):
"""
Initialize the object.
Search for the [cluster cluster-name] section in the /etc/awsbatch-cli.cfg configuration file, if there
or ask to the pcluster status.
:param log: log
:param cluster: cluster name
"""
self.region = None
self.env_blacklist = None
# search for awsbatch-cli config
cli_config_file = os.path.expanduser(os.path.join("~", ".parallelcluster", "awsbatch-cli.cfg"))
if os.path.isfile(cli_config_file):
self.__init_from_config(cli_config_file, cluster, log)
elif cluster:
self.__init_from_stack(cluster, log)
else:
fail("Error: cluster parameter is required")
self.__verify_initialization(log)
def __str__(self):
return "{0}({1})".format(self.__class__.__name__, self.__dict__)
def __verify_initialization(self, log):
config_to_cfn_map = [
("s3_bucket", "ResourcesS3Bucket", "parameter"),
("artifact_directory", "ArtifactS3RootDirectory", "parameter"),
("batch_cli_requirements", "BatchCliRequirements", "output"),
("compute_environment", "BatchComputeEnvironmentArn", "output"),
("job_queue", "BatchJobQueueArn", "output"),
("job_definition", "BatchJobDefinitionArn", "output"),
("head_node_ip", "HeadNodePrivateIP", "output"),
]
for config_param, cfn_param, cfn_prop_type in config_to_cfn_map:
try:
log.debug("%s = %s", config_param, getattr(self, config_param))
except AttributeError:
fail(
"Error getting cluster information from AWS CloudFormation. "
f"Missing {cfn_prop_type} '{cfn_param}' from the CloudFormation stack."
)
CliRequirementsMatcher(self.batch_cli_requirements).check()
def __init_from_config(self, cli_config_file, cluster, log): # noqa: C901 FIXME
"""
Init object attributes from awsbatch-cli configuration file.
:param cli_config_file: awsbatch-cli config
:param cluster: cluster name
:param log: log
"""
with open(cli_config_file, encoding="utf-8") as config_file:
log.info("Searching for configuration file %s" % cli_config_file)
config = ConfigParser()
config.read_file(config_file)
# use cluster if there or search for default value in [main] section of the config file
try:
cluster_name = cluster if cluster else config.get("main", "cluster_name")
except NoSectionError as e:
fail("Error getting the section [%s] from the configuration file (%s)" % (e.section, cli_config_file))
except NoOptionError as e:
fail(
"Error getting the option (%s) from the section [%s] of the configuration file (%s)"
% (e.option, e.section, cli_config_file)
)
cluster_section = "cluster {0}".format(cluster_name)
try:
self.region = config.get("main", "region")
except NoOptionError:
pass
try:
self.env_blacklist = config.get("main", "env_blacklist")
except NoOptionError:
pass
try:
self.stack_name = cluster_name
log.info("Stack name is (%s)" % self.stack_name)
# if region is set for the current stack, override the region from the AWS ParallelCluster config file
# or the region from the [main] section
self.region = config.get(cluster_section, "region")
self.s3_bucket = config.get(cluster_section, "s3_bucket")
self.artifact_directory = config.get(cluster_section, "artifact_directory")
self.batch_cli_requirements = config.get(cluster_section, "batch_cli_requirements")
self.compute_environment = config.get(cluster_section, "compute_environment")
self.job_queue = config.get(cluster_section, "job_queue")
self.job_definition = config.get(cluster_section, "job_definition")
try:
self.job_definition_mnp = config.get(cluster_section, "job_definition_mnp")
except NoOptionError:
pass
self.head_node_ip = config.get(cluster_section, "head_node_ip")
# get proxy
self.proxy = config.get(cluster_section, "proxy")
if self.proxy != "NONE":
log.info("Configured proxy is: %s" % self.proxy)
except NoSectionError:
# initialize by getting stack info
self.__init_from_stack(cluster_name, log)
except NoOptionError as e:
fail(
"Error getting the option (%s) from the section [%s] of the configuration file (%s)"
% (e.option, e.section, cli_config_file)
)
def __init_from_stack(self, cluster, log): # noqa: C901 FIXME
"""
Init object attributes by asking to the stack.
:param cluster: cluster name
:param log: log
"""
try:
self.stack_name = cluster
log.info("Describing stack (%s)" % self.stack_name)
# get required values from the output of the describe-stack command
# don't use proxy because we are in the client and use default region
boto3_factory = Boto3ClientFactory(region=self.region)
cfn_client = boto3_factory.get_client("cloudformation")
stack = cfn_client.describe_stacks(StackName=self.stack_name).get("Stacks")[0]
log.debug(stack)
if self.region is None:
self.region = get_region_by_stack_id(stack.get("StackId"))
self.proxy = "NONE"
scheduler = None
stack_status = stack.get("StackStatus")
if stack_status in ["CREATE_COMPLETE", "UPDATE_COMPLETE"]:
for output in stack.get("Outputs", []):
output_key = output.get("OutputKey")
output_value = output.get("OutputValue")
if output_key == "BatchComputeEnvironmentArn":
self.compute_environment = output_value
elif output_key == "BatchJobQueueArn":
self.job_queue = output_value
elif output_key == "BatchJobDefinitionArn":
self.job_definition = output_value
elif output_key == "HeadNodePrivateIP":
self.head_node_ip = output_value
elif output_key == "BatchJobDefinitionMnpArn":
self.job_definition_mnp = output_value
elif output_key == "BatchCliRequirements":
self.batch_cli_requirements = output_value
for parameter in stack.get("Parameters", []):
parameter_key = parameter.get("ParameterKey")
parameter_value = parameter.get("ParameterValue")
if parameter_key == "ProxyServer":
self.proxy = parameter_value
if self.proxy != "NONE":
log.info("Configured proxy is: %s" % self.proxy)
elif parameter_key == "ResourcesS3Bucket":
self.s3_bucket = parameter_value
elif parameter_key == "ArtifactS3RootDirectory":
self.artifact_directory = parameter_value
elif parameter_key == "Scheduler":
scheduler = parameter_value
else:
fail(f"The cluster is in the ({stack_status}) status.")
if scheduler is None:
fail("Unable to retrieve cluster's scheduler. Double check CloudFormation stack parameters.")
elif scheduler != "awsbatch":
fail(f"This command cannot be used with a {scheduler} cluster.")
except (ClientError, ParamValidationError) as e:
fail("Error getting cluster information from AWS CloudFormation. Failed with exception: %s" % e)
def config_logger(log_level):
"""
Define a logger for aws-parallelcluster-awsbatch-cli.
:param log_level logging level
:return: the logger
"""
try:
logfile = os.path.expanduser(os.path.join("~", ".parallelcluster", "awsbatch-cli.log"))
logdir = os.path.dirname(logfile)
os.makedirs(logdir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(logdir):
pass
else:
fail("Cannot create log file (%s). Failed with exception: %s" % (logfile, e))
formatter = logging.Formatter("%(asctime)s %(levelname)s [%(module)s:%(funcName)s] %(message)s")
logfile_handler = RotatingFileHandler(logfile, maxBytes=5 * 1024 * 1024, backupCount=1)
logfile_handler.setFormatter(formatter)
logger = logging.getLogger("awsbatch-cli")
logger.addHandler(logfile_handler)
try:
logger.setLevel(log_level.upper())
except (TypeError, ValueError) as e:
fail("Error setting log level. Failed with exception: %s" % e)
return logger
| """Return number of items in Output."""
return len(self.items) | identifier_body |
common.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import errno
import logging
import operator
import os
import re
from collections import namedtuple
from logging.handlers import RotatingFileHandler
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError, ParamValidationError
from configparser import ConfigParser, NoOptionError, NoSectionError
from pkg_resources import packaging
from tabulate import tabulate
from awsbatch.utils import fail, get_installed_version, get_region_by_stack_id
class Output:
"""Generic Output object."""
def __init__(self, mapping, items=None):
"""
Create a table of generic items.
:param items: list of items
:param mapping: association between keys and item attributes
"""
self.items = items if items else []
self.mapping = mapping
self.keys = []
for key in mapping.keys():
self.keys.append(key)
def add(self, items):
"""Add items to output."""
if isinstance(items, list):
self.items.extend(items)
else:
self.items.append(items)
def show_table(self, keys=None, sort_keys_function=None):
"""
Print the items table.
:param keys: show a specific list of keys (optional)
:param sort_keys_function: function to sort table rows (optional)
"""
rows = []
output_keys = keys or self.keys
for item in self.__get_items(sort_keys_function):
row = []
for output_key in output_keys:
row.append(getattr(item, self.mapping[output_key]))
rows.append(row)
print(tabulate(rows, output_keys))
def show(self, keys=None, sort_keys_function=None):
"""
Print the items in a key value format.
:param keys: show a specific list of keys (optional)
"""
output_keys = keys or self.keys
if not self.items:
print("No items to show")
else:
for item in self.__get_items(sort_keys_function):
for output_key in output_keys:
print("{0:25}: {1!s}".format(output_key, getattr(item, self.mapping[output_key])))
print("-" * 25)
def length(self):
"""Return number of items in Output."""
return len(self.items)
def __get_items(self, sort_keys_function=None):
"""Return a sorted copy of self.items if sort_keys_function is given, a reference to self.items otherwise."""
if sort_keys_function:
return sorted(list(self.items), key=sort_keys_function)
return self.items
class Boto3ClientFactory:
"""Boto3 configuration object."""
def __init__(self, region, proxy="NONE"):
"""Initialize the object."""
self.region = region
self.proxy_config = Config()
if proxy != "NONE":
self.proxy_config = Config(proxies={"https": proxy})
def get_client(self, service):
"""
Initialize the boto3 client for a given service.
:param service: boto3 service.
:return: the boto3 client
"""
try:
return boto3.client(service, region_name=self.region, config=self.proxy_config)
except ClientError as e:
fail("AWS %s service failed with exception: %s" % (service, e))
CliRequirement = namedtuple("Requirement", "package operator version")
class CliRequirementsMatcher:
"""Utility class to match requirements specified in CFN stack output."""
COMPARISON_OPERATORS = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def __init__(self, requirements_string):
try:
self.requirements = []
for requirement_string in requirements_string.split(","):
match = re.search(r"([\w+_-]+)([<>=]+)([\d.]+)", requirement_string)
self.requirements.append(
CliRequirement(package=match.group(1), operator=match.group(2), version=match.group(3))
)
except IndexError:
fail(f"Unable to parse ParallelCluster AWS Batch CLI requirements: '{requirements_string}'")
def check(self):
"""Verify if CLI requirements are satisfied."""
for req in self.requirements:
if not self.COMPARISON_OPERATORS[req.operator](
packaging.version.parse(get_installed_version(req.package)),
packaging.version.parse(req.version),
):
fail(f"The cluster requires {req.package}{req.operator}{req.version}")
class AWSBatchCliConfig:
"""AWS ParallelCluster AWS Batch CLI configuration object."""
def __init__(self, log, cluster):
"""
Initialize the object.
Search for the [cluster cluster-name] section in the /etc/awsbatch-cli.cfg configuration file, if there
or ask to the pcluster status.
:param log: log
:param cluster: cluster name
"""
self.region = None
self.env_blacklist = None
# search for awsbatch-cli config
cli_config_file = os.path.expanduser(os.path.join("~", ".parallelcluster", "awsbatch-cli.cfg"))
if os.path.isfile(cli_config_file):
self.__init_from_config(cli_config_file, cluster, log)
elif cluster:
self.__init_from_stack(cluster, log)
else:
fail("Error: cluster parameter is required")
self.__verify_initialization(log)
def __str__(self):
return "{0}({1})".format(self.__class__.__name__, self.__dict__)
def __verify_initialization(self, log):
config_to_cfn_map = [
("s3_bucket", "ResourcesS3Bucket", "parameter"),
("artifact_directory", "ArtifactS3RootDirectory", "parameter"),
("batch_cli_requirements", "BatchCliRequirements", "output"),
("compute_environment", "BatchComputeEnvironmentArn", "output"),
("job_queue", "BatchJobQueueArn", "output"), | ("job_definition", "BatchJobDefinitionArn", "output"),
("head_node_ip", "HeadNodePrivateIP", "output"),
]
for config_param, cfn_param, cfn_prop_type in config_to_cfn_map:
try:
log.debug("%s = %s", config_param, getattr(self, config_param))
except AttributeError:
fail(
"Error getting cluster information from AWS CloudFormation. "
f"Missing {cfn_prop_type} '{cfn_param}' from the CloudFormation stack."
)
CliRequirementsMatcher(self.batch_cli_requirements).check()
def __init_from_config(self, cli_config_file, cluster, log): # noqa: C901 FIXME
"""
Init object attributes from awsbatch-cli configuration file.
:param cli_config_file: awsbatch-cli config
:param cluster: cluster name
:param log: log
"""
with open(cli_config_file, encoding="utf-8") as config_file:
log.info("Searching for configuration file %s" % cli_config_file)
config = ConfigParser()
config.read_file(config_file)
# use cluster if there or search for default value in [main] section of the config file
try:
cluster_name = cluster if cluster else config.get("main", "cluster_name")
except NoSectionError as e:
fail("Error getting the section [%s] from the configuration file (%s)" % (e.section, cli_config_file))
except NoOptionError as e:
fail(
"Error getting the option (%s) from the section [%s] of the configuration file (%s)"
% (e.option, e.section, cli_config_file)
)
cluster_section = "cluster {0}".format(cluster_name)
try:
self.region = config.get("main", "region")
except NoOptionError:
pass
try:
self.env_blacklist = config.get("main", "env_blacklist")
except NoOptionError:
pass
try:
self.stack_name = cluster_name
log.info("Stack name is (%s)" % self.stack_name)
# if region is set for the current stack, override the region from the AWS ParallelCluster config file
# or the region from the [main] section
self.region = config.get(cluster_section, "region")
self.s3_bucket = config.get(cluster_section, "s3_bucket")
self.artifact_directory = config.get(cluster_section, "artifact_directory")
self.batch_cli_requirements = config.get(cluster_section, "batch_cli_requirements")
self.compute_environment = config.get(cluster_section, "compute_environment")
self.job_queue = config.get(cluster_section, "job_queue")
self.job_definition = config.get(cluster_section, "job_definition")
try:
self.job_definition_mnp = config.get(cluster_section, "job_definition_mnp")
except NoOptionError:
pass
self.head_node_ip = config.get(cluster_section, "head_node_ip")
# get proxy
self.proxy = config.get(cluster_section, "proxy")
if self.proxy != "NONE":
log.info("Configured proxy is: %s" % self.proxy)
except NoSectionError:
# initialize by getting stack info
self.__init_from_stack(cluster_name, log)
except NoOptionError as e:
fail(
"Error getting the option (%s) from the section [%s] of the configuration file (%s)"
% (e.option, e.section, cli_config_file)
)
def __init_from_stack(self, cluster, log): # noqa: C901 FIXME
"""
Init object attributes by asking to the stack.
:param cluster: cluster name
:param log: log
"""
try:
self.stack_name = cluster
log.info("Describing stack (%s)" % self.stack_name)
# get required values from the output of the describe-stack command
# don't use proxy because we are in the client and use default region
boto3_factory = Boto3ClientFactory(region=self.region)
cfn_client = boto3_factory.get_client("cloudformation")
stack = cfn_client.describe_stacks(StackName=self.stack_name).get("Stacks")[0]
log.debug(stack)
if self.region is None:
self.region = get_region_by_stack_id(stack.get("StackId"))
self.proxy = "NONE"
scheduler = None
stack_status = stack.get("StackStatus")
if stack_status in ["CREATE_COMPLETE", "UPDATE_COMPLETE"]:
for output in stack.get("Outputs", []):
output_key = output.get("OutputKey")
output_value = output.get("OutputValue")
if output_key == "BatchComputeEnvironmentArn":
self.compute_environment = output_value
elif output_key == "BatchJobQueueArn":
self.job_queue = output_value
elif output_key == "BatchJobDefinitionArn":
self.job_definition = output_value
elif output_key == "HeadNodePrivateIP":
self.head_node_ip = output_value
elif output_key == "BatchJobDefinitionMnpArn":
self.job_definition_mnp = output_value
elif output_key == "BatchCliRequirements":
self.batch_cli_requirements = output_value
for parameter in stack.get("Parameters", []):
parameter_key = parameter.get("ParameterKey")
parameter_value = parameter.get("ParameterValue")
if parameter_key == "ProxyServer":
self.proxy = parameter_value
if self.proxy != "NONE":
log.info("Configured proxy is: %s" % self.proxy)
elif parameter_key == "ResourcesS3Bucket":
self.s3_bucket = parameter_value
elif parameter_key == "ArtifactS3RootDirectory":
self.artifact_directory = parameter_value
elif parameter_key == "Scheduler":
scheduler = parameter_value
else:
fail(f"The cluster is in the ({stack_status}) status.")
if scheduler is None:
fail("Unable to retrieve cluster's scheduler. Double check CloudFormation stack parameters.")
elif scheduler != "awsbatch":
fail(f"This command cannot be used with a {scheduler} cluster.")
except (ClientError, ParamValidationError) as e:
fail("Error getting cluster information from AWS CloudFormation. Failed with exception: %s" % e)
def config_logger(log_level):
"""
Define a logger for aws-parallelcluster-awsbatch-cli.
:param log_level logging level
:return: the logger
"""
try:
logfile = os.path.expanduser(os.path.join("~", ".parallelcluster", "awsbatch-cli.log"))
logdir = os.path.dirname(logfile)
os.makedirs(logdir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(logdir):
pass
else:
fail("Cannot create log file (%s). Failed with exception: %s" % (logfile, e))
formatter = logging.Formatter("%(asctime)s %(levelname)s [%(module)s:%(funcName)s] %(message)s")
logfile_handler = RotatingFileHandler(logfile, maxBytes=5 * 1024 * 1024, backupCount=1)
logfile_handler.setFormatter(formatter)
logger = logging.getLogger("awsbatch-cli")
logger.addHandler(logfile_handler)
try:
logger.setLevel(log_level.upper())
except (TypeError, ValueError) as e:
fail("Error setting log level. Failed with exception: %s" % e)
return logger | random_line_split | |
common.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import errno
import logging
import operator
import os
import re
from collections import namedtuple
from logging.handlers import RotatingFileHandler
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError, ParamValidationError
from configparser import ConfigParser, NoOptionError, NoSectionError
from pkg_resources import packaging
from tabulate import tabulate
from awsbatch.utils import fail, get_installed_version, get_region_by_stack_id
class Output:
"""Generic Output object."""
def __init__(self, mapping, items=None):
"""
Create a table of generic items.
:param items: list of items
:param mapping: association between keys and item attributes
"""
self.items = items if items else []
self.mapping = mapping
self.keys = []
for key in mapping.keys():
self.keys.append(key)
def add(self, items):
"""Add items to output."""
if isinstance(items, list):
self.items.extend(items)
else:
self.items.append(items)
def | (self, keys=None, sort_keys_function=None):
"""
Print the items table.
:param keys: show a specific list of keys (optional)
:param sort_keys_function: function to sort table rows (optional)
"""
rows = []
output_keys = keys or self.keys
for item in self.__get_items(sort_keys_function):
row = []
for output_key in output_keys:
row.append(getattr(item, self.mapping[output_key]))
rows.append(row)
print(tabulate(rows, output_keys))
def show(self, keys=None, sort_keys_function=None):
"""
Print the items in a key value format.
:param keys: show a specific list of keys (optional)
"""
output_keys = keys or self.keys
if not self.items:
print("No items to show")
else:
for item in self.__get_items(sort_keys_function):
for output_key in output_keys:
print("{0:25}: {1!s}".format(output_key, getattr(item, self.mapping[output_key])))
print("-" * 25)
def length(self):
"""Return number of items in Output."""
return len(self.items)
def __get_items(self, sort_keys_function=None):
"""Return a sorted copy of self.items if sort_keys_function is given, a reference to self.items otherwise."""
if sort_keys_function:
return sorted(list(self.items), key=sort_keys_function)
return self.items
class Boto3ClientFactory:
"""Boto3 configuration object."""
def __init__(self, region, proxy="NONE"):
"""Initialize the object."""
self.region = region
self.proxy_config = Config()
if proxy != "NONE":
self.proxy_config = Config(proxies={"https": proxy})
def get_client(self, service):
"""
Initialize the boto3 client for a given service.
:param service: boto3 service.
:return: the boto3 client
"""
try:
return boto3.client(service, region_name=self.region, config=self.proxy_config)
except ClientError as e:
fail("AWS %s service failed with exception: %s" % (service, e))
CliRequirement = namedtuple("Requirement", "package operator version")
class CliRequirementsMatcher:
"""Utility class to match requirements specified in CFN stack output."""
COMPARISON_OPERATORS = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def __init__(self, requirements_string):
try:
self.requirements = []
for requirement_string in requirements_string.split(","):
match = re.search(r"([\w+_-]+)([<>=]+)([\d.]+)", requirement_string)
self.requirements.append(
CliRequirement(package=match.group(1), operator=match.group(2), version=match.group(3))
)
except IndexError:
fail(f"Unable to parse ParallelCluster AWS Batch CLI requirements: '{requirements_string}'")
def check(self):
"""Verify if CLI requirements are satisfied."""
for req in self.requirements:
if not self.COMPARISON_OPERATORS[req.operator](
packaging.version.parse(get_installed_version(req.package)),
packaging.version.parse(req.version),
):
fail(f"The cluster requires {req.package}{req.operator}{req.version}")
class AWSBatchCliConfig:
"""AWS ParallelCluster AWS Batch CLI configuration object."""
def __init__(self, log, cluster):
"""
Initialize the object.
Search for the [cluster cluster-name] section in the /etc/awsbatch-cli.cfg configuration file, if there
or ask to the pcluster status.
:param log: log
:param cluster: cluster name
"""
self.region = None
self.env_blacklist = None
# search for awsbatch-cli config
cli_config_file = os.path.expanduser(os.path.join("~", ".parallelcluster", "awsbatch-cli.cfg"))
if os.path.isfile(cli_config_file):
self.__init_from_config(cli_config_file, cluster, log)
elif cluster:
self.__init_from_stack(cluster, log)
else:
fail("Error: cluster parameter is required")
self.__verify_initialization(log)
def __str__(self):
return "{0}({1})".format(self.__class__.__name__, self.__dict__)
def __verify_initialization(self, log):
config_to_cfn_map = [
("s3_bucket", "ResourcesS3Bucket", "parameter"),
("artifact_directory", "ArtifactS3RootDirectory", "parameter"),
("batch_cli_requirements", "BatchCliRequirements", "output"),
("compute_environment", "BatchComputeEnvironmentArn", "output"),
("job_queue", "BatchJobQueueArn", "output"),
("job_definition", "BatchJobDefinitionArn", "output"),
("head_node_ip", "HeadNodePrivateIP", "output"),
]
for config_param, cfn_param, cfn_prop_type in config_to_cfn_map:
try:
log.debug("%s = %s", config_param, getattr(self, config_param))
except AttributeError:
fail(
"Error getting cluster information from AWS CloudFormation. "
f"Missing {cfn_prop_type} '{cfn_param}' from the CloudFormation stack."
)
CliRequirementsMatcher(self.batch_cli_requirements).check()
def __init_from_config(self, cli_config_file, cluster, log): # noqa: C901 FIXME
"""
Init object attributes from awsbatch-cli configuration file.
:param cli_config_file: awsbatch-cli config
:param cluster: cluster name
:param log: log
"""
with open(cli_config_file, encoding="utf-8") as config_file:
log.info("Searching for configuration file %s" % cli_config_file)
config = ConfigParser()
config.read_file(config_file)
# use cluster if there or search for default value in [main] section of the config file
try:
cluster_name = cluster if cluster else config.get("main", "cluster_name")
except NoSectionError as e:
fail("Error getting the section [%s] from the configuration file (%s)" % (e.section, cli_config_file))
except NoOptionError as e:
fail(
"Error getting the option (%s) from the section [%s] of the configuration file (%s)"
% (e.option, e.section, cli_config_file)
)
cluster_section = "cluster {0}".format(cluster_name)
try:
self.region = config.get("main", "region")
except NoOptionError:
pass
try:
self.env_blacklist = config.get("main", "env_blacklist")
except NoOptionError:
pass
try:
self.stack_name = cluster_name
log.info("Stack name is (%s)" % self.stack_name)
# if region is set for the current stack, override the region from the AWS ParallelCluster config file
# or the region from the [main] section
self.region = config.get(cluster_section, "region")
self.s3_bucket = config.get(cluster_section, "s3_bucket")
self.artifact_directory = config.get(cluster_section, "artifact_directory")
self.batch_cli_requirements = config.get(cluster_section, "batch_cli_requirements")
self.compute_environment = config.get(cluster_section, "compute_environment")
self.job_queue = config.get(cluster_section, "job_queue")
self.job_definition = config.get(cluster_section, "job_definition")
try:
self.job_definition_mnp = config.get(cluster_section, "job_definition_mnp")
except NoOptionError:
pass
self.head_node_ip = config.get(cluster_section, "head_node_ip")
# get proxy
self.proxy = config.get(cluster_section, "proxy")
if self.proxy != "NONE":
log.info("Configured proxy is: %s" % self.proxy)
except NoSectionError:
# initialize by getting stack info
self.__init_from_stack(cluster_name, log)
except NoOptionError as e:
fail(
"Error getting the option (%s) from the section [%s] of the configuration file (%s)"
% (e.option, e.section, cli_config_file)
)
def __init_from_stack(self, cluster, log): # noqa: C901 FIXME
"""
Init object attributes by asking to the stack.
:param cluster: cluster name
:param log: log
"""
try:
self.stack_name = cluster
log.info("Describing stack (%s)" % self.stack_name)
# get required values from the output of the describe-stack command
# don't use proxy because we are in the client and use default region
boto3_factory = Boto3ClientFactory(region=self.region)
cfn_client = boto3_factory.get_client("cloudformation")
stack = cfn_client.describe_stacks(StackName=self.stack_name).get("Stacks")[0]
log.debug(stack)
if self.region is None:
self.region = get_region_by_stack_id(stack.get("StackId"))
self.proxy = "NONE"
scheduler = None
stack_status = stack.get("StackStatus")
if stack_status in ["CREATE_COMPLETE", "UPDATE_COMPLETE"]:
for output in stack.get("Outputs", []):
output_key = output.get("OutputKey")
output_value = output.get("OutputValue")
if output_key == "BatchComputeEnvironmentArn":
self.compute_environment = output_value
elif output_key == "BatchJobQueueArn":
self.job_queue = output_value
elif output_key == "BatchJobDefinitionArn":
self.job_definition = output_value
elif output_key == "HeadNodePrivateIP":
self.head_node_ip = output_value
elif output_key == "BatchJobDefinitionMnpArn":
self.job_definition_mnp = output_value
elif output_key == "BatchCliRequirements":
self.batch_cli_requirements = output_value
for parameter in stack.get("Parameters", []):
parameter_key = parameter.get("ParameterKey")
parameter_value = parameter.get("ParameterValue")
if parameter_key == "ProxyServer":
self.proxy = parameter_value
if self.proxy != "NONE":
log.info("Configured proxy is: %s" % self.proxy)
elif parameter_key == "ResourcesS3Bucket":
self.s3_bucket = parameter_value
elif parameter_key == "ArtifactS3RootDirectory":
self.artifact_directory = parameter_value
elif parameter_key == "Scheduler":
scheduler = parameter_value
else:
fail(f"The cluster is in the ({stack_status}) status.")
if scheduler is None:
fail("Unable to retrieve cluster's scheduler. Double check CloudFormation stack parameters.")
elif scheduler != "awsbatch":
fail(f"This command cannot be used with a {scheduler} cluster.")
except (ClientError, ParamValidationError) as e:
fail("Error getting cluster information from AWS CloudFormation. Failed with exception: %s" % e)
def config_logger(log_level):
"""
Define a logger for aws-parallelcluster-awsbatch-cli.
:param log_level logging level
:return: the logger
"""
try:
logfile = os.path.expanduser(os.path.join("~", ".parallelcluster", "awsbatch-cli.log"))
logdir = os.path.dirname(logfile)
os.makedirs(logdir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(logdir):
pass
else:
fail("Cannot create log file (%s). Failed with exception: %s" % (logfile, e))
formatter = logging.Formatter("%(asctime)s %(levelname)s [%(module)s:%(funcName)s] %(message)s")
logfile_handler = RotatingFileHandler(logfile, maxBytes=5 * 1024 * 1024, backupCount=1)
logfile_handler.setFormatter(formatter)
logger = logging.getLogger("awsbatch-cli")
logger.addHandler(logfile_handler)
try:
logger.setLevel(log_level.upper())
except (TypeError, ValueError) as e:
fail("Error setting log level. Failed with exception: %s" % e)
return logger
| show_table | identifier_name |
common.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import errno
import logging
import operator
import os
import re
from collections import namedtuple
from logging.handlers import RotatingFileHandler
import boto3
from botocore.config import Config
from botocore.exceptions import ClientError, ParamValidationError
from configparser import ConfigParser, NoOptionError, NoSectionError
from pkg_resources import packaging
from tabulate import tabulate
from awsbatch.utils import fail, get_installed_version, get_region_by_stack_id
class Output:
"""Generic Output object."""
def __init__(self, mapping, items=None):
"""
Create a table of generic items.
:param items: list of items
:param mapping: association between keys and item attributes
"""
self.items = items if items else []
self.mapping = mapping
self.keys = []
for key in mapping.keys():
self.keys.append(key)
def add(self, items):
"""Add items to output."""
if isinstance(items, list):
self.items.extend(items)
else:
self.items.append(items)
def show_table(self, keys=None, sort_keys_function=None):
"""
Print the items table.
:param keys: show a specific list of keys (optional)
:param sort_keys_function: function to sort table rows (optional)
"""
rows = []
output_keys = keys or self.keys
for item in self.__get_items(sort_keys_function):
row = []
for output_key in output_keys:
row.append(getattr(item, self.mapping[output_key]))
rows.append(row)
print(tabulate(rows, output_keys))
def show(self, keys=None, sort_keys_function=None):
"""
Print the items in a key value format.
:param keys: show a specific list of keys (optional)
"""
output_keys = keys or self.keys
if not self.items:
print("No items to show")
else:
for item in self.__get_items(sort_keys_function):
for output_key in output_keys:
print("{0:25}: {1!s}".format(output_key, getattr(item, self.mapping[output_key])))
print("-" * 25)
def length(self):
"""Return number of items in Output."""
return len(self.items)
def __get_items(self, sort_keys_function=None):
"""Return a sorted copy of self.items if sort_keys_function is given, a reference to self.items otherwise."""
if sort_keys_function:
return sorted(list(self.items), key=sort_keys_function)
return self.items
class Boto3ClientFactory:
"""Boto3 configuration object."""
def __init__(self, region, proxy="NONE"):
"""Initialize the object."""
self.region = region
self.proxy_config = Config()
if proxy != "NONE":
self.proxy_config = Config(proxies={"https": proxy})
def get_client(self, service):
"""
Initialize the boto3 client for a given service.
:param service: boto3 service.
:return: the boto3 client
"""
try:
return boto3.client(service, region_name=self.region, config=self.proxy_config)
except ClientError as e:
fail("AWS %s service failed with exception: %s" % (service, e))
CliRequirement = namedtuple("Requirement", "package operator version")
class CliRequirementsMatcher:
"""Utility class to match requirements specified in CFN stack output."""
COMPARISON_OPERATORS = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def __init__(self, requirements_string):
try:
self.requirements = []
for requirement_string in requirements_string.split(","):
match = re.search(r"([\w+_-]+)([<>=]+)([\d.]+)", requirement_string)
self.requirements.append(
CliRequirement(package=match.group(1), operator=match.group(2), version=match.group(3))
)
except IndexError:
fail(f"Unable to parse ParallelCluster AWS Batch CLI requirements: '{requirements_string}'")
def check(self):
"""Verify if CLI requirements are satisfied."""
for req in self.requirements:
|
class AWSBatchCliConfig:
"""AWS ParallelCluster AWS Batch CLI configuration object."""
def __init__(self, log, cluster):
"""
Initialize the object.
Search for the [cluster cluster-name] section in the /etc/awsbatch-cli.cfg configuration file, if there
or ask to the pcluster status.
:param log: log
:param cluster: cluster name
"""
self.region = None
self.env_blacklist = None
# search for awsbatch-cli config
cli_config_file = os.path.expanduser(os.path.join("~", ".parallelcluster", "awsbatch-cli.cfg"))
if os.path.isfile(cli_config_file):
self.__init_from_config(cli_config_file, cluster, log)
elif cluster:
self.__init_from_stack(cluster, log)
else:
fail("Error: cluster parameter is required")
self.__verify_initialization(log)
def __str__(self):
return "{0}({1})".format(self.__class__.__name__, self.__dict__)
def __verify_initialization(self, log):
config_to_cfn_map = [
("s3_bucket", "ResourcesS3Bucket", "parameter"),
("artifact_directory", "ArtifactS3RootDirectory", "parameter"),
("batch_cli_requirements", "BatchCliRequirements", "output"),
("compute_environment", "BatchComputeEnvironmentArn", "output"),
("job_queue", "BatchJobQueueArn", "output"),
("job_definition", "BatchJobDefinitionArn", "output"),
("head_node_ip", "HeadNodePrivateIP", "output"),
]
for config_param, cfn_param, cfn_prop_type in config_to_cfn_map:
try:
log.debug("%s = %s", config_param, getattr(self, config_param))
except AttributeError:
fail(
"Error getting cluster information from AWS CloudFormation. "
f"Missing {cfn_prop_type} '{cfn_param}' from the CloudFormation stack."
)
CliRequirementsMatcher(self.batch_cli_requirements).check()
def __init_from_config(self, cli_config_file, cluster, log): # noqa: C901 FIXME
"""
Init object attributes from awsbatch-cli configuration file.
:param cli_config_file: awsbatch-cli config
:param cluster: cluster name
:param log: log
"""
with open(cli_config_file, encoding="utf-8") as config_file:
log.info("Searching for configuration file %s" % cli_config_file)
config = ConfigParser()
config.read_file(config_file)
# use cluster if there or search for default value in [main] section of the config file
try:
cluster_name = cluster if cluster else config.get("main", "cluster_name")
except NoSectionError as e:
fail("Error getting the section [%s] from the configuration file (%s)" % (e.section, cli_config_file))
except NoOptionError as e:
fail(
"Error getting the option (%s) from the section [%s] of the configuration file (%s)"
% (e.option, e.section, cli_config_file)
)
cluster_section = "cluster {0}".format(cluster_name)
try:
self.region = config.get("main", "region")
except NoOptionError:
pass
try:
self.env_blacklist = config.get("main", "env_blacklist")
except NoOptionError:
pass
try:
self.stack_name = cluster_name
log.info("Stack name is (%s)" % self.stack_name)
# if region is set for the current stack, override the region from the AWS ParallelCluster config file
# or the region from the [main] section
self.region = config.get(cluster_section, "region")
self.s3_bucket = config.get(cluster_section, "s3_bucket")
self.artifact_directory = config.get(cluster_section, "artifact_directory")
self.batch_cli_requirements = config.get(cluster_section, "batch_cli_requirements")
self.compute_environment = config.get(cluster_section, "compute_environment")
self.job_queue = config.get(cluster_section, "job_queue")
self.job_definition = config.get(cluster_section, "job_definition")
try:
self.job_definition_mnp = config.get(cluster_section, "job_definition_mnp")
except NoOptionError:
pass
self.head_node_ip = config.get(cluster_section, "head_node_ip")
# get proxy
self.proxy = config.get(cluster_section, "proxy")
if self.proxy != "NONE":
log.info("Configured proxy is: %s" % self.proxy)
except NoSectionError:
# initialize by getting stack info
self.__init_from_stack(cluster_name, log)
except NoOptionError as e:
fail(
"Error getting the option (%s) from the section [%s] of the configuration file (%s)"
% (e.option, e.section, cli_config_file)
)
def __init_from_stack(self, cluster, log): # noqa: C901 FIXME
"""
Init object attributes by asking to the stack.
:param cluster: cluster name
:param log: log
"""
try:
self.stack_name = cluster
log.info("Describing stack (%s)" % self.stack_name)
# get required values from the output of the describe-stack command
# don't use proxy because we are in the client and use default region
boto3_factory = Boto3ClientFactory(region=self.region)
cfn_client = boto3_factory.get_client("cloudformation")
stack = cfn_client.describe_stacks(StackName=self.stack_name).get("Stacks")[0]
log.debug(stack)
if self.region is None:
self.region = get_region_by_stack_id(stack.get("StackId"))
self.proxy = "NONE"
scheduler = None
stack_status = stack.get("StackStatus")
if stack_status in ["CREATE_COMPLETE", "UPDATE_COMPLETE"]:
for output in stack.get("Outputs", []):
output_key = output.get("OutputKey")
output_value = output.get("OutputValue")
if output_key == "BatchComputeEnvironmentArn":
self.compute_environment = output_value
elif output_key == "BatchJobQueueArn":
self.job_queue = output_value
elif output_key == "BatchJobDefinitionArn":
self.job_definition = output_value
elif output_key == "HeadNodePrivateIP":
self.head_node_ip = output_value
elif output_key == "BatchJobDefinitionMnpArn":
self.job_definition_mnp = output_value
elif output_key == "BatchCliRequirements":
self.batch_cli_requirements = output_value
for parameter in stack.get("Parameters", []):
parameter_key = parameter.get("ParameterKey")
parameter_value = parameter.get("ParameterValue")
if parameter_key == "ProxyServer":
self.proxy = parameter_value
if self.proxy != "NONE":
log.info("Configured proxy is: %s" % self.proxy)
elif parameter_key == "ResourcesS3Bucket":
self.s3_bucket = parameter_value
elif parameter_key == "ArtifactS3RootDirectory":
self.artifact_directory = parameter_value
elif parameter_key == "Scheduler":
scheduler = parameter_value
else:
fail(f"The cluster is in the ({stack_status}) status.")
if scheduler is None:
fail("Unable to retrieve cluster's scheduler. Double check CloudFormation stack parameters.")
elif scheduler != "awsbatch":
fail(f"This command cannot be used with a {scheduler} cluster.")
except (ClientError, ParamValidationError) as e:
fail("Error getting cluster information from AWS CloudFormation. Failed with exception: %s" % e)
def config_logger(log_level):
"""
Define a logger for aws-parallelcluster-awsbatch-cli.
:param log_level logging level
:return: the logger
"""
try:
logfile = os.path.expanduser(os.path.join("~", ".parallelcluster", "awsbatch-cli.log"))
logdir = os.path.dirname(logfile)
os.makedirs(logdir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(logdir):
pass
else:
fail("Cannot create log file (%s). Failed with exception: %s" % (logfile, e))
formatter = logging.Formatter("%(asctime)s %(levelname)s [%(module)s:%(funcName)s] %(message)s")
logfile_handler = RotatingFileHandler(logfile, maxBytes=5 * 1024 * 1024, backupCount=1)
logfile_handler.setFormatter(formatter)
logger = logging.getLogger("awsbatch-cli")
logger.addHandler(logfile_handler)
try:
logger.setLevel(log_level.upper())
except (TypeError, ValueError) as e:
fail("Error setting log level. Failed with exception: %s" % e)
return logger
| if not self.COMPARISON_OPERATORS[req.operator](
packaging.version.parse(get_installed_version(req.package)),
packaging.version.parse(req.version),
):
fail(f"The cluster requires {req.package}{req.operator}{req.version}") | conditional_block |
client.go | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package sse
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"sync"
"sync/atomic"
"time"
"gopkg.in/cenkalti/backoff.v1"
)
var (
headerID = []byte("id:")
headerData = []byte("data:")
headerEvent = []byte("event:")
headerRetry = []byte("retry:")
)
func ClientMaxBufferSize(s int) func(c *Client) {
return func(c *Client) {
c.maxBufferSize = s
}
}
// ConnCallback defines a function to be called on a particular connection event
type ConnCallback func(c *Client)
// ResponseValidator validates a response
type ResponseValidator func(c *Client, resp *http.Response) error
// Client handles an incoming server stream
type Client struct {
Retry time.Time
ReconnectStrategy backoff.BackOff
disconnectcb ConnCallback
connectedcb ConnCallback
subscribed map[chan *Event]chan struct{}
Headers map[string]string
ReconnectNotify backoff.Notify
ResponseValidator ResponseValidator
Connection *http.Client
URL string
LastEventID atomic.Value // []byte
maxBufferSize int
mu sync.Mutex
EncodingBase64 bool
Connected bool
}
// NewClient creates a new client
func NewClient(url string, opts ...func(c *Client)) *Client {
c := &Client{
URL: url,
Connection: &http.Client{},
Headers: make(map[string]string),
subscribed: make(map[chan *Event]chan struct{}),
maxBufferSize: 1 << 16,
}
for _, opt := range opts {
opt(c)
}
return c
}
// Subscribe to a data stream
func (c *Client) Subscribe(stream string, handler func(msg *Event)) error {
return c.SubscribeWithContext(context.Background(), stream, handler)
}
// SubscribeWithContext to a data stream with context
func (c *Client) SubscribeWithContext(ctx context.Context, stream string, handler func(msg *Event)) error {
operation := func() error {
resp, err := c.request(ctx, stream)
if err != nil {
return err
}
if validator := c.ResponseValidator; validator != nil {
err = validator(c, resp)
if err != nil {
return err
}
} else if resp.StatusCode != 200 {
resp.Body.Close()
return fmt.Errorf("could not connect to stream: %s", http.StatusText(resp.StatusCode))
}
defer resp.Body.Close()
reader := NewEventStreamReader(resp.Body, c.maxBufferSize)
eventChan, errorChan := c.startReadLoop(reader)
for {
select {
case err = <-errorChan:
return err
case msg := <-eventChan:
handler(msg)
}
} | var err error
if c.ReconnectStrategy != nil {
err = backoff.RetryNotify(operation, c.ReconnectStrategy, c.ReconnectNotify)
} else {
err = backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), c.ReconnectNotify)
}
return err
}
// SubscribeChan sends all events to the provided channel
func (c *Client) SubscribeChan(stream string, ch chan *Event) error {
return c.SubscribeChanWithContext(context.Background(), stream, ch)
}
// SubscribeChanWithContext sends all events to the provided channel with context
func (c *Client) SubscribeChanWithContext(ctx context.Context, stream string, ch chan *Event) error {
var connected bool
errch := make(chan error)
c.mu.Lock()
c.subscribed[ch] = make(chan struct{})
c.mu.Unlock()
operation := func() error {
resp, err := c.request(ctx, stream)
if err != nil {
return err
}
if validator := c.ResponseValidator; validator != nil {
err = validator(c, resp)
if err != nil {
return err
}
} else if resp.StatusCode != 200 {
resp.Body.Close()
return fmt.Errorf("could not connect to stream: %s", http.StatusText(resp.StatusCode))
}
defer resp.Body.Close()
if !connected {
// Notify connect
errch <- nil
connected = true
}
reader := NewEventStreamReader(resp.Body, c.maxBufferSize)
eventChan, errorChan := c.startReadLoop(reader)
for {
var msg *Event
// Wait for message to arrive or exit
select {
case <-c.subscribed[ch]:
return nil
case err = <-errorChan:
return err
case msg = <-eventChan:
}
// Wait for message to be sent or exit
if msg != nil {
select {
case <-c.subscribed[ch]:
return nil
case ch <- msg:
// message sent
}
}
}
}
go func() {
defer c.cleanup(ch)
// Apply user specified reconnection strategy or default to standard NewExponentialBackOff() reconnection method
var err error
if c.ReconnectStrategy != nil {
err = backoff.RetryNotify(operation, c.ReconnectStrategy, c.ReconnectNotify)
} else {
err = backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), c.ReconnectNotify)
}
// channel closed once connected
if err != nil && !connected {
errch <- err
}
}()
err := <-errch
close(errch)
return err
}
func (c *Client) startReadLoop(reader *EventStreamReader) (chan *Event, chan error) {
outCh := make(chan *Event)
erChan := make(chan error)
go c.readLoop(reader, outCh, erChan)
return outCh, erChan
}
func (c *Client) readLoop(reader *EventStreamReader, outCh chan *Event, erChan chan error) {
for {
// Read each new line and process the type of event
event, err := reader.ReadEvent()
if err != nil {
if err == io.EOF {
erChan <- nil
return
}
// run user specified disconnect function
if c.disconnectcb != nil {
c.Connected = false
c.disconnectcb(c)
}
erChan <- err
return
}
if !c.Connected && c.connectedcb != nil {
c.Connected = true
c.connectedcb(c)
}
// If we get an error, ignore it.
var msg *Event
if msg, err = c.processEvent(event); err == nil {
if len(msg.ID) > 0 {
c.LastEventID.Store(msg.ID)
} else {
msg.ID, _ = c.LastEventID.Load().([]byte)
}
// Send downstream if the event has something useful
if msg.hasContent() {
outCh <- msg
}
}
}
}
// SubscribeRaw to an sse endpoint
func (c *Client) SubscribeRaw(handler func(msg *Event)) error {
return c.Subscribe("", handler)
}
// SubscribeRawWithContext to an sse endpoint with context
func (c *Client) SubscribeRawWithContext(ctx context.Context, handler func(msg *Event)) error {
return c.SubscribeWithContext(ctx, "", handler)
}
// SubscribeChanRaw sends all events to the provided channel
func (c *Client) SubscribeChanRaw(ch chan *Event) error {
return c.SubscribeChan("", ch)
}
// SubscribeChanRawWithContext sends all events to the provided channel with context
func (c *Client) SubscribeChanRawWithContext(ctx context.Context, ch chan *Event) error {
return c.SubscribeChanWithContext(ctx, "", ch)
}
// Unsubscribe unsubscribes a channel
func (c *Client) Unsubscribe(ch chan *Event) {
c.mu.Lock()
defer c.mu.Unlock()
if c.subscribed[ch] != nil {
c.subscribed[ch] <- struct{}{}
}
}
// OnDisconnect specifies the function to run when the connection disconnects
func (c *Client) OnDisconnect(fn ConnCallback) {
c.disconnectcb = fn
}
// OnConnect specifies the function to run when the connection is successful
func (c *Client) OnConnect(fn ConnCallback) {
c.connectedcb = fn
}
func (c *Client) request(ctx context.Context, stream string) (*http.Response, error) {
req, err := http.NewRequest("GET", c.URL, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx)
// Setup request, specify stream to connect to
if stream != "" {
query := req.URL.Query()
query.Add("stream", stream)
req.URL.RawQuery = query.Encode()
}
req.Header.Set("Cache-Control", "no-cache")
req.Header.Set("Accept", "text/event-stream")
req.Header.Set("Connection", "keep-alive")
lastID, exists := c.LastEventID.Load().([]byte)
if exists && lastID != nil {
req.Header.Set("Last-Event-ID", string(lastID))
}
// Add user specified headers
for k, v := range c.Headers {
req.Header.Set(k, v)
}
return c.Connection.Do(req)
}
func (c *Client) processEvent(msg []byte) (event *Event, err error) {
var e Event
if len(msg) < 1 {
return nil, errors.New("event message was empty")
}
// Normalize the crlf to lf to make it easier to split the lines.
// Split the line by "\n" or "\r", per the spec.
for _, line := range bytes.FieldsFunc(msg, func(r rune) bool { return r == '\n' || r == '\r' }) {
switch {
case bytes.HasPrefix(line, headerID):
e.ID = append([]byte(nil), trimHeader(len(headerID), line)...)
case bytes.HasPrefix(line, headerData):
// The spec allows for multiple data fields per event, concatenated them with "\n".
e.Data = append(e.Data[:], append(trimHeader(len(headerData), line), byte('\n'))...)
// The spec says that a line that simply contains the string "data" should be treated as a data field with an empty body.
case bytes.Equal(line, bytes.TrimSuffix(headerData, []byte(":"))):
e.Data = append(e.Data, byte('\n'))
case bytes.HasPrefix(line, headerEvent):
e.Event = append([]byte(nil), trimHeader(len(headerEvent), line)...)
case bytes.HasPrefix(line, headerRetry):
e.Retry = append([]byte(nil), trimHeader(len(headerRetry), line)...)
default:
// Ignore any garbage that doesn't match what we're looking for.
}
}
// Trim the last "\n" per the spec.
e.Data = bytes.TrimSuffix(e.Data, []byte("\n"))
if c.EncodingBase64 {
buf := make([]byte, base64.StdEncoding.DecodedLen(len(e.Data)))
n, err := base64.StdEncoding.Decode(buf, e.Data)
if err != nil {
err = fmt.Errorf("failed to decode event message: %s", err)
}
e.Data = buf[:n]
}
return &e, err
}
func (c *Client) cleanup(ch chan *Event) {
c.mu.Lock()
defer c.mu.Unlock()
if c.subscribed[ch] != nil {
close(c.subscribed[ch])
delete(c.subscribed, ch)
}
}
func trimHeader(size int, data []byte) []byte {
if data == nil || len(data) < size {
return data
}
data = data[size:]
// Remove optional leading whitespace
if len(data) > 0 && data[0] == 32 {
data = data[1:]
}
// Remove trailing new line
if len(data) > 0 && data[len(data)-1] == 10 {
data = data[:len(data)-1]
}
return data
} | }
// Apply user specified reconnection strategy or default to standard NewExponentialBackOff() reconnection method | random_line_split |
client.go | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package sse
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"sync"
"sync/atomic"
"time"
"gopkg.in/cenkalti/backoff.v1"
)
var (
headerID = []byte("id:")
headerData = []byte("data:")
headerEvent = []byte("event:")
headerRetry = []byte("retry:")
)
func ClientMaxBufferSize(s int) func(c *Client) {
return func(c *Client) {
c.maxBufferSize = s
}
}
// ConnCallback defines a function to be called on a particular connection event
type ConnCallback func(c *Client)
// ResponseValidator validates a response
type ResponseValidator func(c *Client, resp *http.Response) error
// Client handles an incoming server stream
type Client struct {
Retry time.Time
ReconnectStrategy backoff.BackOff
disconnectcb ConnCallback
connectedcb ConnCallback
subscribed map[chan *Event]chan struct{}
Headers map[string]string
ReconnectNotify backoff.Notify
ResponseValidator ResponseValidator
Connection *http.Client
URL string
LastEventID atomic.Value // []byte
maxBufferSize int
mu sync.Mutex
EncodingBase64 bool
Connected bool
}
// NewClient creates a new client
func NewClient(url string, opts ...func(c *Client)) *Client {
c := &Client{
URL: url,
Connection: &http.Client{},
Headers: make(map[string]string),
subscribed: make(map[chan *Event]chan struct{}),
maxBufferSize: 1 << 16,
}
for _, opt := range opts {
opt(c)
}
return c
}
// Subscribe to a data stream
func (c *Client) Subscribe(stream string, handler func(msg *Event)) error {
return c.SubscribeWithContext(context.Background(), stream, handler)
}
// SubscribeWithContext to a data stream with context
func (c *Client) SubscribeWithContext(ctx context.Context, stream string, handler func(msg *Event)) error {
operation := func() error {
resp, err := c.request(ctx, stream)
if err != nil {
return err
}
if validator := c.ResponseValidator; validator != nil {
err = validator(c, resp)
if err != nil {
return err
}
} else if resp.StatusCode != 200 {
resp.Body.Close()
return fmt.Errorf("could not connect to stream: %s", http.StatusText(resp.StatusCode))
}
defer resp.Body.Close()
reader := NewEventStreamReader(resp.Body, c.maxBufferSize)
eventChan, errorChan := c.startReadLoop(reader)
for {
select {
case err = <-errorChan:
return err
case msg := <-eventChan:
handler(msg)
}
}
}
// Apply user specified reconnection strategy or default to standard NewExponentialBackOff() reconnection method
var err error
if c.ReconnectStrategy != nil {
err = backoff.RetryNotify(operation, c.ReconnectStrategy, c.ReconnectNotify)
} else {
err = backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), c.ReconnectNotify)
}
return err
}
// SubscribeChan sends all events to the provided channel
func (c *Client) SubscribeChan(stream string, ch chan *Event) error {
return c.SubscribeChanWithContext(context.Background(), stream, ch)
}
// SubscribeChanWithContext sends all events to the provided channel with context
func (c *Client) SubscribeChanWithContext(ctx context.Context, stream string, ch chan *Event) error {
var connected bool
errch := make(chan error)
c.mu.Lock()
c.subscribed[ch] = make(chan struct{})
c.mu.Unlock()
operation := func() error {
resp, err := c.request(ctx, stream)
if err != nil {
return err
}
if validator := c.ResponseValidator; validator != nil {
err = validator(c, resp)
if err != nil {
return err
}
} else if resp.StatusCode != 200 {
resp.Body.Close()
return fmt.Errorf("could not connect to stream: %s", http.StatusText(resp.StatusCode))
}
defer resp.Body.Close()
if !connected {
// Notify connect
errch <- nil
connected = true
}
reader := NewEventStreamReader(resp.Body, c.maxBufferSize)
eventChan, errorChan := c.startReadLoop(reader)
for {
var msg *Event
// Wait for message to arrive or exit
select {
case <-c.subscribed[ch]:
return nil
case err = <-errorChan:
return err
case msg = <-eventChan:
}
// Wait for message to be sent or exit
if msg != nil {
select {
case <-c.subscribed[ch]:
return nil
case ch <- msg:
// message sent
}
}
}
}
go func() {
defer c.cleanup(ch)
// Apply user specified reconnection strategy or default to standard NewExponentialBackOff() reconnection method
var err error
if c.ReconnectStrategy != nil {
err = backoff.RetryNotify(operation, c.ReconnectStrategy, c.ReconnectNotify)
} else {
err = backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), c.ReconnectNotify)
}
// channel closed once connected
if err != nil && !connected {
errch <- err
}
}()
err := <-errch
close(errch)
return err
}
func (c *Client) startReadLoop(reader *EventStreamReader) (chan *Event, chan error) {
outCh := make(chan *Event)
erChan := make(chan error)
go c.readLoop(reader, outCh, erChan)
return outCh, erChan
}
func (c *Client) readLoop(reader *EventStreamReader, outCh chan *Event, erChan chan error) {
for {
// Read each new line and process the type of event
event, err := reader.ReadEvent()
if err != nil {
if err == io.EOF {
erChan <- nil
return
}
// run user specified disconnect function
if c.disconnectcb != nil {
c.Connected = false
c.disconnectcb(c)
}
erChan <- err
return
}
if !c.Connected && c.connectedcb != nil {
c.Connected = true
c.connectedcb(c)
}
// If we get an error, ignore it.
var msg *Event
if msg, err = c.processEvent(event); err == nil {
if len(msg.ID) > 0 {
c.LastEventID.Store(msg.ID)
} else {
msg.ID, _ = c.LastEventID.Load().([]byte)
}
// Send downstream if the event has something useful
if msg.hasContent() {
outCh <- msg
}
}
}
}
// SubscribeRaw to an sse endpoint
func (c *Client) SubscribeRaw(handler func(msg *Event)) error {
return c.Subscribe("", handler)
}
// SubscribeRawWithContext to an sse endpoint with context
func (c *Client) SubscribeRawWithContext(ctx context.Context, handler func(msg *Event)) error {
return c.SubscribeWithContext(ctx, "", handler)
}
// SubscribeChanRaw sends all events to the provided channel
func (c *Client) SubscribeChanRaw(ch chan *Event) error |
// SubscribeChanRawWithContext sends all events to the provided channel with context
func (c *Client) SubscribeChanRawWithContext(ctx context.Context, ch chan *Event) error {
return c.SubscribeChanWithContext(ctx, "", ch)
}
// Unsubscribe unsubscribes a channel
func (c *Client) Unsubscribe(ch chan *Event) {
c.mu.Lock()
defer c.mu.Unlock()
if c.subscribed[ch] != nil {
c.subscribed[ch] <- struct{}{}
}
}
// OnDisconnect specifies the function to run when the connection disconnects
func (c *Client) OnDisconnect(fn ConnCallback) {
c.disconnectcb = fn
}
// OnConnect specifies the function to run when the connection is successful
func (c *Client) OnConnect(fn ConnCallback) {
c.connectedcb = fn
}
func (c *Client) request(ctx context.Context, stream string) (*http.Response, error) {
req, err := http.NewRequest("GET", c.URL, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx)
// Setup request, specify stream to connect to
if stream != "" {
query := req.URL.Query()
query.Add("stream", stream)
req.URL.RawQuery = query.Encode()
}
req.Header.Set("Cache-Control", "no-cache")
req.Header.Set("Accept", "text/event-stream")
req.Header.Set("Connection", "keep-alive")
lastID, exists := c.LastEventID.Load().([]byte)
if exists && lastID != nil {
req.Header.Set("Last-Event-ID", string(lastID))
}
// Add user specified headers
for k, v := range c.Headers {
req.Header.Set(k, v)
}
return c.Connection.Do(req)
}
func (c *Client) processEvent(msg []byte) (event *Event, err error) {
var e Event
if len(msg) < 1 {
return nil, errors.New("event message was empty")
}
// Normalize the crlf to lf to make it easier to split the lines.
// Split the line by "\n" or "\r", per the spec.
for _, line := range bytes.FieldsFunc(msg, func(r rune) bool { return r == '\n' || r == '\r' }) {
switch {
case bytes.HasPrefix(line, headerID):
e.ID = append([]byte(nil), trimHeader(len(headerID), line)...)
case bytes.HasPrefix(line, headerData):
// The spec allows for multiple data fields per event, concatenated them with "\n".
e.Data = append(e.Data[:], append(trimHeader(len(headerData), line), byte('\n'))...)
// The spec says that a line that simply contains the string "data" should be treated as a data field with an empty body.
case bytes.Equal(line, bytes.TrimSuffix(headerData, []byte(":"))):
e.Data = append(e.Data, byte('\n'))
case bytes.HasPrefix(line, headerEvent):
e.Event = append([]byte(nil), trimHeader(len(headerEvent), line)...)
case bytes.HasPrefix(line, headerRetry):
e.Retry = append([]byte(nil), trimHeader(len(headerRetry), line)...)
default:
// Ignore any garbage that doesn't match what we're looking for.
}
}
// Trim the last "\n" per the spec.
e.Data = bytes.TrimSuffix(e.Data, []byte("\n"))
if c.EncodingBase64 {
buf := make([]byte, base64.StdEncoding.DecodedLen(len(e.Data)))
n, err := base64.StdEncoding.Decode(buf, e.Data)
if err != nil {
err = fmt.Errorf("failed to decode event message: %s", err)
}
e.Data = buf[:n]
}
return &e, err
}
func (c *Client) cleanup(ch chan *Event) {
c.mu.Lock()
defer c.mu.Unlock()
if c.subscribed[ch] != nil {
close(c.subscribed[ch])
delete(c.subscribed, ch)
}
}
func trimHeader(size int, data []byte) []byte {
if data == nil || len(data) < size {
return data
}
data = data[size:]
// Remove optional leading whitespace
if len(data) > 0 && data[0] == 32 {
data = data[1:]
}
// Remove trailing new line
if len(data) > 0 && data[len(data)-1] == 10 {
data = data[:len(data)-1]
}
return data
}
| {
return c.SubscribeChan("", ch)
} | identifier_body |
client.go | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package sse
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"sync"
"sync/atomic"
"time"
"gopkg.in/cenkalti/backoff.v1"
)
var (
headerID = []byte("id:")
headerData = []byte("data:")
headerEvent = []byte("event:")
headerRetry = []byte("retry:")
)
func ClientMaxBufferSize(s int) func(c *Client) {
return func(c *Client) {
c.maxBufferSize = s
}
}
// ConnCallback defines a function to be called on a particular connection event
type ConnCallback func(c *Client)
// ResponseValidator validates a response
type ResponseValidator func(c *Client, resp *http.Response) error
// Client handles an incoming server stream
type Client struct {
Retry time.Time
ReconnectStrategy backoff.BackOff
disconnectcb ConnCallback
connectedcb ConnCallback
subscribed map[chan *Event]chan struct{}
Headers map[string]string
ReconnectNotify backoff.Notify
ResponseValidator ResponseValidator
Connection *http.Client
URL string
LastEventID atomic.Value // []byte
maxBufferSize int
mu sync.Mutex
EncodingBase64 bool
Connected bool
}
// NewClient creates a new client
func NewClient(url string, opts ...func(c *Client)) *Client {
c := &Client{
URL: url,
Connection: &http.Client{},
Headers: make(map[string]string),
subscribed: make(map[chan *Event]chan struct{}),
maxBufferSize: 1 << 16,
}
for _, opt := range opts {
opt(c)
}
return c
}
// Subscribe to a data stream
func (c *Client) Subscribe(stream string, handler func(msg *Event)) error {
return c.SubscribeWithContext(context.Background(), stream, handler)
}
// SubscribeWithContext to a data stream with context
func (c *Client) SubscribeWithContext(ctx context.Context, stream string, handler func(msg *Event)) error {
operation := func() error {
resp, err := c.request(ctx, stream)
if err != nil {
return err
}
if validator := c.ResponseValidator; validator != nil | else if resp.StatusCode != 200 {
resp.Body.Close()
return fmt.Errorf("could not connect to stream: %s", http.StatusText(resp.StatusCode))
}
defer resp.Body.Close()
reader := NewEventStreamReader(resp.Body, c.maxBufferSize)
eventChan, errorChan := c.startReadLoop(reader)
for {
select {
case err = <-errorChan:
return err
case msg := <-eventChan:
handler(msg)
}
}
}
// Apply user specified reconnection strategy or default to standard NewExponentialBackOff() reconnection method
var err error
if c.ReconnectStrategy != nil {
err = backoff.RetryNotify(operation, c.ReconnectStrategy, c.ReconnectNotify)
} else {
err = backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), c.ReconnectNotify)
}
return err
}
// SubscribeChan sends all events to the provided channel
func (c *Client) SubscribeChan(stream string, ch chan *Event) error {
return c.SubscribeChanWithContext(context.Background(), stream, ch)
}
// SubscribeChanWithContext sends all events to the provided channel with context
func (c *Client) SubscribeChanWithContext(ctx context.Context, stream string, ch chan *Event) error {
var connected bool
errch := make(chan error)
c.mu.Lock()
c.subscribed[ch] = make(chan struct{})
c.mu.Unlock()
operation := func() error {
resp, err := c.request(ctx, stream)
if err != nil {
return err
}
if validator := c.ResponseValidator; validator != nil {
err = validator(c, resp)
if err != nil {
return err
}
} else if resp.StatusCode != 200 {
resp.Body.Close()
return fmt.Errorf("could not connect to stream: %s", http.StatusText(resp.StatusCode))
}
defer resp.Body.Close()
if !connected {
// Notify connect
errch <- nil
connected = true
}
reader := NewEventStreamReader(resp.Body, c.maxBufferSize)
eventChan, errorChan := c.startReadLoop(reader)
for {
var msg *Event
// Wait for message to arrive or exit
select {
case <-c.subscribed[ch]:
return nil
case err = <-errorChan:
return err
case msg = <-eventChan:
}
// Wait for message to be sent or exit
if msg != nil {
select {
case <-c.subscribed[ch]:
return nil
case ch <- msg:
// message sent
}
}
}
}
go func() {
defer c.cleanup(ch)
// Apply user specified reconnection strategy or default to standard NewExponentialBackOff() reconnection method
var err error
if c.ReconnectStrategy != nil {
err = backoff.RetryNotify(operation, c.ReconnectStrategy, c.ReconnectNotify)
} else {
err = backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), c.ReconnectNotify)
}
// channel closed once connected
if err != nil && !connected {
errch <- err
}
}()
err := <-errch
close(errch)
return err
}
func (c *Client) startReadLoop(reader *EventStreamReader) (chan *Event, chan error) {
outCh := make(chan *Event)
erChan := make(chan error)
go c.readLoop(reader, outCh, erChan)
return outCh, erChan
}
func (c *Client) readLoop(reader *EventStreamReader, outCh chan *Event, erChan chan error) {
for {
// Read each new line and process the type of event
event, err := reader.ReadEvent()
if err != nil {
if err == io.EOF {
erChan <- nil
return
}
// run user specified disconnect function
if c.disconnectcb != nil {
c.Connected = false
c.disconnectcb(c)
}
erChan <- err
return
}
if !c.Connected && c.connectedcb != nil {
c.Connected = true
c.connectedcb(c)
}
// If we get an error, ignore it.
var msg *Event
if msg, err = c.processEvent(event); err == nil {
if len(msg.ID) > 0 {
c.LastEventID.Store(msg.ID)
} else {
msg.ID, _ = c.LastEventID.Load().([]byte)
}
// Send downstream if the event has something useful
if msg.hasContent() {
outCh <- msg
}
}
}
}
// SubscribeRaw to an sse endpoint
func (c *Client) SubscribeRaw(handler func(msg *Event)) error {
return c.Subscribe("", handler)
}
// SubscribeRawWithContext to an sse endpoint with context
func (c *Client) SubscribeRawWithContext(ctx context.Context, handler func(msg *Event)) error {
return c.SubscribeWithContext(ctx, "", handler)
}
// SubscribeChanRaw sends all events to the provided channel
func (c *Client) SubscribeChanRaw(ch chan *Event) error {
return c.SubscribeChan("", ch)
}
// SubscribeChanRawWithContext sends all events to the provided channel with context
func (c *Client) SubscribeChanRawWithContext(ctx context.Context, ch chan *Event) error {
return c.SubscribeChanWithContext(ctx, "", ch)
}
// Unsubscribe unsubscribes a channel
func (c *Client) Unsubscribe(ch chan *Event) {
c.mu.Lock()
defer c.mu.Unlock()
if c.subscribed[ch] != nil {
c.subscribed[ch] <- struct{}{}
}
}
// OnDisconnect specifies the function to run when the connection disconnects
func (c *Client) OnDisconnect(fn ConnCallback) {
c.disconnectcb = fn
}
// OnConnect specifies the function to run when the connection is successful
func (c *Client) OnConnect(fn ConnCallback) {
c.connectedcb = fn
}
func (c *Client) request(ctx context.Context, stream string) (*http.Response, error) {
req, err := http.NewRequest("GET", c.URL, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx)
// Setup request, specify stream to connect to
if stream != "" {
query := req.URL.Query()
query.Add("stream", stream)
req.URL.RawQuery = query.Encode()
}
req.Header.Set("Cache-Control", "no-cache")
req.Header.Set("Accept", "text/event-stream")
req.Header.Set("Connection", "keep-alive")
lastID, exists := c.LastEventID.Load().([]byte)
if exists && lastID != nil {
req.Header.Set("Last-Event-ID", string(lastID))
}
// Add user specified headers
for k, v := range c.Headers {
req.Header.Set(k, v)
}
return c.Connection.Do(req)
}
func (c *Client) processEvent(msg []byte) (event *Event, err error) {
var e Event
if len(msg) < 1 {
return nil, errors.New("event message was empty")
}
// Normalize the crlf to lf to make it easier to split the lines.
// Split the line by "\n" or "\r", per the spec.
for _, line := range bytes.FieldsFunc(msg, func(r rune) bool { return r == '\n' || r == '\r' }) {
switch {
case bytes.HasPrefix(line, headerID):
e.ID = append([]byte(nil), trimHeader(len(headerID), line)...)
case bytes.HasPrefix(line, headerData):
// The spec allows for multiple data fields per event, concatenated them with "\n".
e.Data = append(e.Data[:], append(trimHeader(len(headerData), line), byte('\n'))...)
// The spec says that a line that simply contains the string "data" should be treated as a data field with an empty body.
case bytes.Equal(line, bytes.TrimSuffix(headerData, []byte(":"))):
e.Data = append(e.Data, byte('\n'))
case bytes.HasPrefix(line, headerEvent):
e.Event = append([]byte(nil), trimHeader(len(headerEvent), line)...)
case bytes.HasPrefix(line, headerRetry):
e.Retry = append([]byte(nil), trimHeader(len(headerRetry), line)...)
default:
// Ignore any garbage that doesn't match what we're looking for.
}
}
// Trim the last "\n" per the spec.
e.Data = bytes.TrimSuffix(e.Data, []byte("\n"))
if c.EncodingBase64 {
buf := make([]byte, base64.StdEncoding.DecodedLen(len(e.Data)))
n, err := base64.StdEncoding.Decode(buf, e.Data)
if err != nil {
err = fmt.Errorf("failed to decode event message: %s", err)
}
e.Data = buf[:n]
}
return &e, err
}
func (c *Client) cleanup(ch chan *Event) {
c.mu.Lock()
defer c.mu.Unlock()
if c.subscribed[ch] != nil {
close(c.subscribed[ch])
delete(c.subscribed, ch)
}
}
// trimHeader strips the field-name prefix of length size from data, then
// drops a single optional leading space and a single trailing newline.
// Data shorter than the prefix is returned unchanged.
func trimHeader(size int, data []byte) []byte {
	if len(data) < size {
		return data
	}
	trimmed := data[size:]
	// Remove optional leading whitespace (one space, per the SSE spec).
	if len(trimmed) > 0 && trimmed[0] == ' ' {
		trimmed = trimmed[1:]
	}
	// Remove a trailing newline if present.
	if n := len(trimmed); n > 0 && trimmed[n-1] == '\n' {
		trimmed = trimmed[:n-1]
	}
	return trimmed
}
| {
err = validator(c, resp)
if err != nil {
return err
}
} | conditional_block |
client.go | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package sse
import (
"bytes"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"sync"
"sync/atomic"
"time"
"gopkg.in/cenkalti/backoff.v1"
)
var (
headerID = []byte("id:")
headerData = []byte("data:")
headerEvent = []byte("event:")
headerRetry = []byte("retry:")
)
// ClientMaxBufferSize returns a Client option that sets the maximum read
// buffer size used when consuming the event stream.
func ClientMaxBufferSize(s int) func(c *Client) {
	return func(client *Client) {
		client.maxBufferSize = s
	}
}
// ConnCallback defines a function to be called on a particular connection event
type ConnCallback func(c *Client)
// ResponseValidator validates a response
// When nil, the client falls back to requiring an HTTP 200 status.
type ResponseValidator func(c *Client, resp *http.Response) error
// Client handles an incoming server stream
type Client struct {
Retry time.Time // NOTE(review): not referenced in the visible code — confirm before relying on it
ReconnectStrategy backoff.BackOff // retry policy; nil selects backoff.NewExponentialBackOff()
disconnectcb ConnCallback // set via OnDisconnect; run when the read loop hits a non-EOF error
connectedcb ConnCallback // set via OnConnect; run on the first successful read
subscribed map[chan *Event]chan struct{} // per-subscriber quit channels, keyed by the subscriber's event channel
Headers map[string]string // extra headers added to every request
ReconnectNotify backoff.Notify // passed to backoff.RetryNotify on each reconnection attempt
ResponseValidator ResponseValidator // optional custom response check (overrides the 200 check)
Connection *http.Client // underlying HTTP client used for all requests
URL string // SSE endpoint to connect to
LastEventID atomic.Value // []byte
maxBufferSize int // read-buffer cap passed to NewEventStreamReader
mu sync.Mutex // protects the subscribed map
EncodingBase64 bool // when true, event data payloads are base64-decoded
Connected bool // toggled by the read loop around connect/disconnect callbacks
}
// NewClient creates a new client
// for the given URL and applies any supplied functional options.
func NewClient(url string, opts ...func(c *Client)) *Client {
	client := &Client{
		URL:           url,
		Connection:    &http.Client{},
		Headers:       map[string]string{},
		subscribed:    map[chan *Event]chan struct{}{},
		maxBufferSize: 1 << 16,
	}
	for _, apply := range opts {
		apply(client)
	}
	return client
}
// Subscribe to a data stream
//
// Equivalent to SubscribeWithContext with a background context.
func (c *Client) Subscribe(stream string, handler func(msg *Event)) error {
	ctx := context.Background()
	return c.SubscribeWithContext(ctx, stream, handler)
}
// SubscribeWithContext to a data stream with context
//
// The handler is invoked for every event received on the stream. The
// connection is (re)established according to ReconnectStrategy, or a
// default exponential backoff when none is set; the call blocks until the
// stream ends cleanly or the retry strategy gives up.
func (c *Client) SubscribeWithContext(ctx context.Context, stream string, handler func(msg *Event)) error {
	operation := func() error {
		resp, err := c.request(ctx, stream)
		if err != nil {
			return err
		}
		// Close the body on every exit path. The original deferred the
		// Close only after validation, leaking the connection whenever a
		// custom ResponseValidator rejected the response.
		defer resp.Body.Close()
		// A user-supplied validator takes precedence over the default
		// "must be HTTP 200" check.
		if validator := c.ResponseValidator; validator != nil {
			if err = validator(c, resp); err != nil {
				return err
			}
		} else if resp.StatusCode != 200 {
			return fmt.Errorf("could not connect to stream: %s", http.StatusText(resp.StatusCode))
		}
		reader := NewEventStreamReader(resp.Body, c.maxBufferSize)
		eventChan, errorChan := c.startReadLoop(reader)
		// Pump events to the handler until the read loop reports a result
		// (nil on EOF; any error is handed to the retry strategy below).
		for {
			select {
			case err = <-errorChan:
				return err
			case msg := <-eventChan:
				handler(msg)
			}
		}
	}
	// Apply user specified reconnection strategy or default to standard
	// NewExponentialBackOff() reconnection method.
	if c.ReconnectStrategy != nil {
		return backoff.RetryNotify(operation, c.ReconnectStrategy, c.ReconnectNotify)
	}
	return backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), c.ReconnectNotify)
}
// SubscribeChan sends all events to the provided channel
func (c *Client) | (stream string, ch chan *Event) error {
return c.SubscribeChanWithContext(context.Background(), stream, ch)
}
// SubscribeChanWithContext sends all events to the provided channel with context
//
// Blocks until the first connection attempt resolves: it returns nil once
// connected (delivery then continues on a background goroutine until
// Unsubscribe is called or retries are exhausted), or the connection error
// if the stream could never be reached.
func (c *Client) SubscribeChanWithContext(ctx context.Context, stream string, ch chan *Event) error {
	var connected bool
	errch := make(chan error)
	c.mu.Lock()
	c.subscribed[ch] = make(chan struct{})
	c.mu.Unlock()

	operation := func() error {
		resp, err := c.request(ctx, stream)
		if err != nil {
			return err
		}
		// Close the body on every exit path. The original deferred the
		// Close only after validation, leaking the connection whenever a
		// custom ResponseValidator rejected the response.
		defer resp.Body.Close()
		if validator := c.ResponseValidator; validator != nil {
			if err = validator(c, resp); err != nil {
				return err
			}
		} else if resp.StatusCode != 200 {
			return fmt.Errorf("could not connect to stream: %s", http.StatusText(resp.StatusCode))
		}
		if !connected {
			// Notify the caller that the first connection succeeded.
			errch <- nil
			connected = true
		}
		reader := NewEventStreamReader(resp.Body, c.maxBufferSize)
		eventChan, errorChan := c.startReadLoop(reader)
		for {
			var msg *Event
			// Wait for a message to arrive, or for Unsubscribe/cleanup to
			// close the quit channel.
			select {
			case <-c.subscribed[ch]:
				return nil
			case err = <-errorChan:
				return err
			case msg = <-eventChan:
			}
			// Forward the message, still honoring Unsubscribe.
			if msg != nil {
				select {
				case <-c.subscribed[ch]:
					return nil
				case ch <- msg:
					// message sent
				}
			}
		}
	}

	go func() {
		defer c.cleanup(ch)
		// Apply user specified reconnection strategy or default to standard
		// NewExponentialBackOff() reconnection method.
		var err error
		if c.ReconnectStrategy != nil {
			err = backoff.RetryNotify(operation, c.ReconnectStrategy, c.ReconnectNotify)
		} else {
			err = backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), c.ReconnectNotify)
		}
		// errch is only written here before the first successful connect;
		// once connected the caller has already received nil and closed it.
		if err != nil && !connected {
			errch <- err
		}
	}()

	err := <-errch
	close(errch)
	return err
}
// startReadLoop spawns readLoop on its own goroutine and returns the
// channels on which parsed events and the terminal error are delivered.
func (c *Client) startReadLoop(reader *EventStreamReader) (chan *Event, chan error) {
	events := make(chan *Event)
	errs := make(chan error)
	go c.readLoop(reader, events, errs)
	return events, errs
}
// readLoop consumes raw events from reader and forwards parsed messages on
// outCh. It terminates by sending exactly one value on erChan: nil on EOF,
// or the read error otherwise.
func (c *Client) readLoop(reader *EventStreamReader, outCh chan *Event, erChan chan error) {
for {
// Read each new line and process the type of event
event, err := reader.ReadEvent()
if err != nil {
if err == io.EOF {
// Clean end of stream: report success to the subscriber loop.
erChan <- nil
return
}
// run user specified disconnect function
if c.disconnectcb != nil {
c.Connected = false
c.disconnectcb(c)
}
erChan <- err
return
}
// First successful read flips the connection state and fires the
// user's connect callback, if any.
if !c.Connected && c.connectedcb != nil {
c.Connected = true
c.connectedcb(c)
}
// If we get an error, ignore it.
var msg *Event
if msg, err = c.processEvent(event); err == nil {
if len(msg.ID) > 0 {
// Remember the most recent event ID so reconnects can resume
// via the Last-Event-ID request header.
c.LastEventID.Store(msg.ID)
} else {
msg.ID, _ = c.LastEventID.Load().([]byte)
}
// Send downstream if the event has something useful
if msg.hasContent() {
outCh <- msg
}
}
}
}
// SubscribeRaw to an sse endpoint
// (an empty stream name means no "stream" query parameter is added).
func (c *Client) SubscribeRaw(handler func(msg *Event)) error {
return c.Subscribe("", handler)
}
// SubscribeRawWithContext to an sse endpoint with context
func (c *Client) SubscribeRawWithContext(ctx context.Context, handler func(msg *Event)) error {
return c.SubscribeWithContext(ctx, "", handler)
}
// SubscribeChanRaw sends all events to the provided channel
func (c *Client) SubscribeChanRaw(ch chan *Event) error {
return c.SubscribeChan("", ch)
}
// SubscribeChanRawWithContext sends all events to the provided channel with context
func (c *Client) SubscribeChanRawWithContext(ctx context.Context, ch chan *Event) error {
return c.SubscribeChanWithContext(ctx, "", ch)
}
// Unsubscribe unsubscribes a channel
// by signalling the subscriber's quit channel; the subscriber goroutine
// performs the actual map cleanup.
func (c *Client) Unsubscribe(ch chan *Event) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if quit := c.subscribed[ch]; quit != nil {
		quit <- struct{}{}
	}
}
// OnDisconnect specifies the function to run when the connection disconnects
// The callback is invoked from the read-loop goroutine (see readLoop).
func (c *Client) OnDisconnect(fn ConnCallback) {
c.disconnectcb = fn
}
// OnConnect specifies the function to run when the connection is successful
// The callback is invoked from the read-loop goroutine after the first read.
func (c *Client) OnConnect(fn ConnCallback) {
c.connectedcb = fn
}
// request builds and performs the GET request for the given stream: it
// attaches the context, the optional "stream" query parameter, the standard
// SSE headers, the Last-Event-ID header (when a previous event ID was
// stored), and any user-supplied headers.
func (c *Client) request(ctx context.Context, stream string) (*http.Response, error) {
	req, err := http.NewRequest("GET", c.URL, nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx)
	// Setup request, specify stream to connect to.
	if stream != "" {
		q := req.URL.Query()
		q.Add("stream", stream)
		req.URL.RawQuery = q.Encode()
	}
	for name, value := range map[string]string{
		"Cache-Control": "no-cache",
		"Accept":        "text/event-stream",
		"Connection":    "keep-alive",
	} {
		req.Header.Set(name, value)
	}
	if id, ok := c.LastEventID.Load().([]byte); ok && id != nil {
		req.Header.Set("Last-Event-ID", string(id))
	}
	// Add user specified headers.
	for name, value := range c.Headers {
		req.Header.Set(name, value)
	}
	return c.Connection.Do(req)
}
// processEvent parses one raw SSE message into an Event.
//
// The message is split on "\n" / "\r" and each line is matched against the
// SSE field prefixes (id:, data:, event:, retry:). Multiple data fields are
// joined with "\n" per the spec and the trailing newline is trimmed. When
// c.EncodingBase64 is set the assembled payload is base64-decoded before
// being returned.
//
// Returns an error for an empty message or (when EncodingBase64 is set) a
// payload that fails to decode.
func (c *Client) processEvent(msg []byte) (event *Event, err error) {
	var e Event
	if len(msg) < 1 {
		return nil, errors.New("event message was empty")
	}
	// Normalize the crlf to lf to make it easier to split the lines.
	// Split the line by "\n" or "\r", per the spec.
	for _, line := range bytes.FieldsFunc(msg, func(r rune) bool { return r == '\n' || r == '\r' }) {
		switch {
		case bytes.HasPrefix(line, headerID):
			e.ID = append([]byte(nil), trimHeader(len(headerID), line)...)
		case bytes.HasPrefix(line, headerData):
			// The spec allows for multiple data fields per event, concatenated them with "\n".
			e.Data = append(e.Data, append(trimHeader(len(headerData), line), byte('\n'))...)
		// The spec says that a line that simply contains the string "data" should be treated as a data field with an empty body.
		case bytes.Equal(line, bytes.TrimSuffix(headerData, []byte(":"))):
			e.Data = append(e.Data, byte('\n'))
		case bytes.HasPrefix(line, headerEvent):
			e.Event = append([]byte(nil), trimHeader(len(headerEvent), line)...)
		case bytes.HasPrefix(line, headerRetry):
			e.Retry = append([]byte(nil), trimHeader(len(headerRetry), line)...)
		default:
			// Ignore any garbage that doesn't match what we're looking for.
		}
	}
	// Trim the last "\n" per the spec.
	e.Data = bytes.TrimSuffix(e.Data, []byte("\n"))
	if c.EncodingBase64 {
		buf := make([]byte, base64.StdEncoding.DecodedLen(len(e.Data)))
		// BUG FIX: the original used ':=' here, declaring a new 'err' scoped
		// to this if-block and shadowing the named return value, so decode
		// failures were silently dropped. Assign to the named return instead.
		var n int
		n, err = base64.StdEncoding.Decode(buf, e.Data)
		if err != nil {
			err = fmt.Errorf("failed to decode event message: %s", err)
		}
		e.Data = buf[:n]
	}
	return &e, err
}
// cleanup tears down the bookkeeping for one subscriber channel: it closes
// the subscriber's quit channel and removes its map entry, under the lock.
func (c *Client) cleanup(ch chan *Event) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if quit, ok := c.subscribed[ch]; ok && quit != nil {
		close(quit)
		delete(c.subscribed, ch)
	}
}
// trimHeader strips the field-name prefix of length size from data, then
// drops a single optional leading space and a single trailing newline.
// Data shorter than the prefix is returned unchanged.
func trimHeader(size int, data []byte) []byte {
	if len(data) < size {
		return data
	}
	trimmed := data[size:]
	// Remove optional leading whitespace (one space, per the SSE spec).
	if len(trimmed) > 0 && trimmed[0] == ' ' {
		trimmed = trimmed[1:]
	}
	// Remove a trailing newline if present.
	if n := len(trimmed); n > 0 && trimmed[n-1] == '\n' {
		trimmed = trimmed[:n-1]
	}
	return trimmed
}
| SubscribeChan | identifier_name |
sin-gen.go | ////////////////////////////////////////////////////////////////////////////
// Program: sin-gen
// Purpose: Social Insurance Number Generator
// Authors: Tong Sun (c) 2014, All rights reserved
////////////////////////////////////////////////////////////////////////////
// Style: gofmt -tabs=false -tabwidth=4 -w
package main
import (
"fmt"
"os"
//"strconv"
//"strings"
)
var progname string = "sin-gen" // program name used in the usage message; os.Args[0]
// usage prints the command synopsis to stderr and terminates the process.
// NOTE(review): it exits with status 0 even when invoked with bad
// arguments — confirm whether a non-zero exit status was intended.
func usage() {
fmt.Fprintf(os.Stderr, "Usage: %s SIN_Str\n", progname)
os.Exit(0)
}
// http://play.golang.org/p/j9gwGwr2FU
// Martin Schnabel mb0@mb0.org
func toDigits(s string) []int {
result := make([]int, 0, len(s))
// for _, c := range strings.Split(s, "") {
// n, err := strconv.Atoi(c)
for i, c := range s {
if c < '0' || c > '9' {
panic(fmt.Errorf("Character #%d from SIN_Str '%c' is invalid\n",
i, c))
}
result = append(result, int(c-'0'))
}
return result
}
/*
Validation Procedure
http://www.ryerson.ca/JavaScript/lectures/forms/textValidation/sinProject.html
Fortunately, the Canadian Government provides social insurance numbers that
can be checked using a fairly straight forward method. Here is an excerpt from
document T4127(E), Payroll Deductions Formulas for Computer Programs (71st
Edition Effective January 1, 2000) published by Revenue Canada that describes
it:
Validation of the Social Insurance Number (SIN)
Human Resources Development Canada uses the SIN and employee information we
provide them to maintain accurate records of employee contributions and
earnings. To minimize the enquiries you receive, we recommend that you include
a SIN validity check as part of your payroll program.
A SIN has nine digits. The first eight digits are the basic number while the
ninth digit is a check digit. You can check whether or not a SIN is valid by
using the following verification method.
Example
The employee provides Social Insurance Number 193-456-787. You can check the
validity of the number by calculating the check digit as follows:
Basic number (first eight digits) Check digit
193 456 78 7
Make a number from each alternate position to the left
beginning at the second digit
9 4 6 8
Add the number to itself
9 4 6 8
Sum
18 8 12 16
Cross-add the digits in the sum (1 + 8 + 8 + 1 + 2 + 1 + 6) =
27
Add each alternate digit beginning at the first digit (1 + 3 + 5 + 7) =
16
Total
43
If the total is a multiple of 10, the check digit should be 0; otherwise, subtract
the total calculated (43) from the next highest number ending in zero (50) 50
The check digit is (50 - 43)
7 = 7
Social Insurance Numbers that do not pass the validation check
If the SIN provided by an individual does not pass the verification check, the
preparer should confirm the SIN with the employer who received the original
number. If you are unable to obtain the correct number for the employee,
please do NOT leave the SIN field on the information slip blank. Instead,
report the SIN that was provided, even if it is not a valid number.
Frequently, even an incorrect number will enable us to find a match so that we
can correct the record and ensure the employee receives proper credit for the
deductions.
Instead of worrying about how the user formats the SIN number we can simply
extract the nine digits they provide and check that it is a proper SIN number
using the method described above.
Validation
http://en.wikipedia.org/wiki/Social_Insurance_Number#Validation
Social Insurance Numbers can be validated through a simple check digit process
called the Luhn Algorithm.
046 454 286 <--- A fictitious, but valid SIN
121 212 121 <--- Multiply each top number by the number below it.
In the case of a two-digit number, add the digits together and insert the
result (the digital root). Thus, in the second-to-last column, 8 multiplied by
2 is equal to 16. Add the digits (1 and 6) together (1 + 6 = 7) and insert the
result (7). So the result of the multiplication is:
086 858 276
Then, add all of the digits together:
0+8+6+8+5+8+2+7+6=50
If the SIN is valid, this number will be evenly divisible by 10.
United States Social Security number
https://sourcegraph.com/github.com/django/django-localflavor-us/symbols/python/django_localflavor_us/forms/USSocialSecurityNumberField
Checks the following rules to determine whether the number is valid:
* Conforms to the XXX-XX-XXXX format.
* No group consists entirely of zeroes.
* The leading group is not "666" (block "666" will never be allocated).
* The number is not in the promotional block 987-65-4320 through
987-65-4329, which are permanently invalid.
* The number is not one known to be invalid due to otherwise widespread
promotional use or distribution (078-05-1120 or 219-09-9999).
*/
// multiply holds the per-position Luhn weights: digits at 0-based odd
// positions are doubled before being folded into the checksum.
var multiply []int = []int{1, 2, 1, 2, 1, 2, 1, 2, 1}
/*
validate
Given the first 8 SIN digits in array,
return the last SIN digit (9th) that satisfy validation
*/
// validate panics unless da holds exactly 8 digits.
func validate(da []int) int {
	if len(da) != 8 {
		panic(fmt.Errorf("Internal error: func validate need 8 SIN digits in array as input\n"))
	}
	sum := 0
	for i, d := range da {
		// Luhn digital root of the weighted digit. Note that the shortcut
		// (d*multiply[i]) % 9 is WRONG for a doubled 9: it maps 18 to 0
		// instead of 9, which mis-computes the check digit (the worked
		// example 193-456-787 in the comment above requires 7, but the
		// %9 form yields 6). Subtracting 9 from two-digit products gives
		// the correct digital root for all inputs.
		p := d * multiply[i]
		if p > 9 {
			p -= 9
		}
		sum += p
	}
	return (10 - sum%10) % 10
}
// pow10 returns 10 raised to the power e for e >= 1, and 1 for e == 0.
// (For negative e the loop never runs and the seed value 10 is returned,
// matching the original behavior.)
func pow10(e int) int {
	if e == 0 {
		return 1
	}
	result := 10
	for i := 1; i < e; i++ {
		result *= 10
	}
	return result
}
// main enumerates all valid 9-digit SINs that start with the prefix given
// as the single command line argument: it pads the prefix to 8 digits,
// computes the Luhn check digit for each candidate, and prints one SIN per
// line to stdout.
func main() {
// There will be only one command line argument
if len(os.Args) != 2 {
usage()
}
// the first command line argument is SIN# prefix
sinStr := os.Args[1]
padlen := 8 - len(sinStr)
for ii := 0; ii < pow10(padlen); ii++ {
// Build a format string that zero-pads the counter to padlen digits.
fmtstr := fmt.Sprintf("%%s%%0%dd", padlen)
// in case the SIN# is 8 digits already, use those 8 digits
fullstr := fmt.Sprintf(fmtstr, sinStr, ii)[:8]
digits := toDigits(fullstr)
d9 := validate(digits)
fmt.Printf("%s%d\n", fullstr, d9)
}
}
/*
Ref:
http://golang.org/pkg/os/
Canadian Social Insurance Number (SIN) Validation
http://www.runnersweb.com/running/sin_check.html
function clear(str) {
var esum = 0;
var enumbers = "";
var checknum = 0;
var ch_sum = "";
var checkdigit = 0;
var sin = "";
var lastdigit = 0;
}
function isNum(text) {
if(text == "") {
alert("You left the SIN field blank.");
return false;
}
inStr = text;
sin = text;
inLen = inStr.length;
if (inLen > 11 || inLen < 11) {
alert("SIN must be 11 characters long");
return false;
}
for (var i = 0; i < text.length; i++) {
var ch = text.substring(i, i + 1)
if ((ch < "0" || "9" < ch) && (ch != "-")) {
alert("You must enter a 9 digits and two dashes.\nFormat 999-999-999.")
return false;
}
if ((i == 3 || i == 7) && (ch != "-")) {
alert("Invalid character in position 4 or 8;\nMust be a dash!");
return false;
}
}
lastdigit = text.substring(10, 10 + 1);
// add numbers in odd positions; IE 1, 3, 6, 8
var odd = ((text.substring(0,0 + 1)) * (1.0) + (text.substring(2,2 + 1)) * (1.0)
+(text.substring(5, 5+1)) * (1.0) + (text.substring(8,8 + 1)) * (1.0));
// form texting of numbers in even positions IE 2, 4, 6, 8
var enumbers = (text.substring(1,1 + 1)) + (text.substring(4,4 + 1))+
(text.substring(6,6 + 1)) + (text.substring(9,9 + 1));
// add together numbers in new text string
// take numbers in even positions; IE 2, 4, 6, 8
// and double them to form a new text string
// EG if numbers are 2,5,1,9 new text string is 410218
for (var i = 0; i < enumbers.length; i++) {
var ch = (enumbers.substring(i, i + 1) * 2);
ch_sum = ch_sum + ch;
}
for (var i = 0; i < ch_sum.length; i++) {
var ch = (ch_sum.substring(i, i + 1));
esum = ((esum * 1.0) + (ch * 1.0));
}
checknum = (odd + esum);
// subtextact checknum from next highest multiple of 10
// to give check digit which is last digit in valid SIN
if (checknum <= 10) {
(checdigit = (10 - checknum));
}
if (checknum > 10 && checknum <= 20) {
(checkdigit = (20 - checknum));
}
if (checknum > 20 && checknum <= 30) {
(checkdigit = (30 - checknum));
}
if (checknum > 30 && checknum <= 40) {
(checkdigit = (40 - checknum));
}
if (checknum > 40 && checknum <= 50) {
(checkdigit = (50 - checknum));
}
if (checknum > 50 && checknum <= 60) {
(checkdigit = (60 - checknum));
}
if (checkdigit != lastdigit) {
alert(sin + " is an invalid SIN; \nCheck digit incorrect!\nShould be: " + checkdigit);
history.go(0);
return false;
}
return true;
}
function validate(textfield) {
var esum = 0;
var enumbers = "";
var checknum = 0;
var ch_sum = "";
var checkdigit = 0;
var sin = "";
var lastdigit = 0;
if (isNum(textfield.value))
alert(textfield.value + ' is a valid SIN');
history.go(0);
}
*/
| {
sum += (d * multiply[i]) % 9
} | conditional_block |
sin-gen.go | ////////////////////////////////////////////////////////////////////////////
// Program: sin-gen
// Purpose: Social Insurance Number Generator
// Authors: Tong Sun (c) 2014, All rights reserved
////////////////////////////////////////////////////////////////////////////
// Style: gofmt -tabs=false -tabwidth=4 -w
package main
import (
"fmt"
"os"
//"strconv"
//"strings"
)
var progname string = "sin-gen" // program name used in the usage message; os.Args[0]
// usage prints the command synopsis to stderr and terminates the process.
// NOTE(review): it exits with status 0 even when invoked with bad
// arguments — confirm whether a non-zero exit status was intended.
func usage() {
fmt.Fprintf(os.Stderr, "Usage: %s SIN_Str\n", progname)
os.Exit(0)
}
// http://play.golang.org/p/j9gwGwr2FU
// Martin Schnabel mb0@mb0.org
func toDigits(s string) []int {
result := make([]int, 0, len(s))
// for _, c := range strings.Split(s, "") {
// n, err := strconv.Atoi(c)
for i, c := range s {
if c < '0' || c > '9' {
panic(fmt.Errorf("Character #%d from SIN_Str '%c' is invalid\n",
i, c))
}
result = append(result, int(c-'0'))
}
return result
}
/*
Validation Procedure
http://www.ryerson.ca/JavaScript/lectures/forms/textValidation/sinProject.html
Fortunately, the Canadian Government provides social insurance numbers that
can be checked using a fairly straight forward method. Here is an excerpt from
document T4127(E), Payroll Deductions Formulas for Computer Programs (71st
Edition Effective January 1, 2000) published by Revenue Canada that describes
it:
Validation of the Social Insurance Number (SIN)
Human Resources Development Canada uses the SIN and employee information we
provide them to maintain accurate records of employee contributions and
earnings. To minimize the enquiries you receive, we recommend that you include
a SIN validity check as part of your payroll program.
A SIN has nine digits. The first eight digits are the basic number while the
ninth digit is a check digit. You can check whether or not a SIN is valid by
using the following verification method.
Example
The employee provides Social Insurance Number 193-456-787. You can check the
validity of the number by calculating the check digit as follows:
Basic number (first eight digits) Check digit
193 456 78 7
Make a number from each alternate position to the left
beginning at the second digit
9 4 6 8
Add the number to itself
9 4 6 8
Sum
18 8 12 16
Cross-add the digits in the sum (1 + 8 + 8 + 1 + 2 + 1 + 6) =
27
Add each alternate digit beginning at the first digit (1 + 3 + 5 + 7) =
16
Total
43
If the total is a multiple of 10, the check digit should be 0; otherwise, subtract
the total calculated (43) from the next highest number ending in zero (50) 50
The check digit is (50 - 43)
7 = 7
Social Insurance Numbers that do not pass the validation check
If the SIN provided by an individual does not pass the verification check, the
preparer should confirm the SIN with the employer who received the original
number. If you are unable to obtain the correct number for the employee,
please do NOT leave the SIN field on the information slip blank. Instead,
report the SIN that was provided, even if it is not a valid number.
Frequently, even an incorrect number will enable us to find a match so that we
can correct the record and ensure the employee receives proper credit for the
deductions.
Instead of worrying about how the user formats the SIN number we can simply
extract the nine digits they provide and check that it is a proper SIN number
using the method described above.
Validation
http://en.wikipedia.org/wiki/Social_Insurance_Number#Validation
Social Insurance Numbers can be validated through a simple check digit process
called the Luhn Algorithm.
046 454 286 <--- A fictitious, but valid SIN
121 212 121 <--- Multiply each top number by the number below it.
In the case of a two-digit number, add the digits together and insert the
result (the digital root). Thus, in the second-to-last column, 8 multiplied by
2 is equal to 16. Add the digits (1 and 6) together (1 + 6 = 7) and insert the
result (7). So the result of the multiplication is:
086 858 276
Then, add all of the digits together:
0+8+6+8+5+8+2+7+6=50
If the SIN is valid, this number will be evenly divisible by 10.
United States Social Security number
https://sourcegraph.com/github.com/django/django-localflavor-us/symbols/python/django_localflavor_us/forms/USSocialSecurityNumberField
Checks the following rules to determine whether the number is valid:
* Conforms to the XXX-XX-XXXX format.
* No group consists entirely of zeroes.
* The leading group is not "666" (block "666" will never be allocated).
* The number is not in the promotional block 987-65-4320 through
987-65-4329, which are permanently invalid.
* The number is not one known to be invalid due to otherwise widespread
promotional use or distribution (078-05-1120 or 219-09-9999).
*/
// multiply holds the per-position Luhn weights: digits at 0-based odd
// positions are doubled before being folded into the checksum.
var multiply []int = []int{1, 2, 1, 2, 1, 2, 1, 2, 1}
/*
validate
Given the first 8 SIN digits in array,
return the last SIN digit (9th) that satisfy validation
*/
// validate panics unless da holds exactly 8 digits. (The function name was
// garbled in the source; it is reconstructed from the doc comment and from
// main, which calls validate(digits).)
func validate(da []int) int {
	if len(da) != 8 {
		panic(fmt.Errorf("Internal error: func validate need 8 SIN digits in array as input\n"))
	}
	sum := 0
	for i, d := range da {
		// Luhn digital root of the weighted digit. The original shortcut
		// (d*multiply[i]) % 9 is WRONG for a doubled 9: it maps 18 to 0
		// instead of 9, mis-computing the check digit (the worked example
		// 193-456-787 in the comment above requires 7, but the %9 form
		// yields 6). Subtracting 9 from two-digit products is correct for
		// all inputs.
		p := d * multiply[i]
		if p > 9 {
			p -= 9
		}
		sum += p
	}
	return (10 - sum%10) % 10
}
// pow10 returns 10 raised to the power e for e >= 1, and 1 for e == 0.
// (For negative e the loop never runs and the seed value 10 is returned,
// matching the original behavior.)
func pow10(e int) int {
	if e == 0 {
		return 1
	}
	result := 10
	for i := 1; i < e; i++ {
		result *= 10
	}
	return result
}
// main enumerates all valid 9-digit SINs that start with the prefix given
// as the single command line argument: it pads the prefix to 8 digits,
// computes the Luhn check digit for each candidate, and prints one SIN per
// line to stdout.
func main() {
// There will be only one command line argument
if len(os.Args) != 2 {
usage()
}
// the first command line argument is SIN# prefix
sinStr := os.Args[1]
padlen := 8 - len(sinStr)
for ii := 0; ii < pow10(padlen); ii++ {
// Build a format string that zero-pads the counter to padlen digits.
fmtstr := fmt.Sprintf("%%s%%0%dd", padlen)
// in case the SIN# is 8 digits already, use those 8 digits
fullstr := fmt.Sprintf(fmtstr, sinStr, ii)[:8]
digits := toDigits(fullstr)
d9 := validate(digits)
fmt.Printf("%s%d\n", fullstr, d9)
}
}
/*
Ref:
http://golang.org/pkg/os/
Canadian Social Insurance Number (SIN) Validation
http://www.runnersweb.com/running/sin_check.html
function clear(str) {
var esum = 0;
var enumbers = "";
var checknum = 0;
var ch_sum = "";
var checkdigit = 0;
var sin = "";
var lastdigit = 0;
}
function isNum(text) {
if(text == "") {
alert("You left the SIN field blank.");
return false;
}
inStr = text;
sin = text;
inLen = inStr.length;
if (inLen > 11 || inLen < 11) {
alert("SIN must be 11 characters long");
return false;
}
for (var i = 0; i < text.length; i++) {
var ch = text.substring(i, i + 1)
if ((ch < "0" || "9" < ch) && (ch != "-")) {
alert("You must enter a 9 digits and two dashes.\nFormat 999-999-999.")
return false;
}
if ((i == 3 || i == 7) && (ch != "-")) {
alert("Invalid character in position 4 or 8;\nMust be a dash!");
return false;
}
}
lastdigit = text.substring(10, 10 + 1);
// add numbers in odd positions; IE 1, 3, 6, 8
var odd = ((text.substring(0,0 + 1)) * (1.0) + (text.substring(2,2 + 1)) * (1.0)
+(text.substring(5, 5+1)) * (1.0) + (text.substring(8,8 + 1)) * (1.0));
// form texting of numbers in even positions IE 2, 4, 6, 8
var enumbers = (text.substring(1,1 + 1)) + (text.substring(4,4 + 1))+
(text.substring(6,6 + 1)) + (text.substring(9,9 + 1));
// add together numbers in new text string
// take numbers in even positions; IE 2, 4, 6, 8
// and double them to form a new text string
// EG if numbers are 2,5,1,9 new text string is 410218
for (var i = 0; i < enumbers.length; i++) {
var ch = (enumbers.substring(i, i + 1) * 2);
ch_sum = ch_sum + ch;
}
for (var i = 0; i < ch_sum.length; i++) {
var ch = (ch_sum.substring(i, i + 1));
esum = ((esum * 1.0) + (ch * 1.0));
}
checknum = (odd + esum);
// subtextact checknum from next highest multiple of 10
// to give check digit which is last digit in valid SIN
if (checknum <= 10) {
(checdigit = (10 - checknum));
}
if (checknum > 10 && checknum <= 20) {
(checkdigit = (20 - checknum));
}
if (checknum > 20 && checknum <= 30) {
(checkdigit = (30 - checknum));
}
if (checknum > 30 && checknum <= 40) {
(checkdigit = (40 - checknum));
}
if (checknum > 40 && checknum <= 50) {
(checkdigit = (50 - checknum));
}
if (checknum > 50 && checknum <= 60) {
(checkdigit = (60 - checknum));
}
if (checkdigit != lastdigit) {
alert(sin + " is an invalid SIN; \nCheck digit incorrect!\nShould be: " + checkdigit);
history.go(0);
return false;
}
return true;
}
function validate(textfield) {
var esum = 0;
var enumbers = "";
var checknum = 0;
var ch_sum = "";
var checkdigit = 0;
var sin = "";
var lastdigit = 0;
if (isNum(textfield.value))
alert(textfield.value + ' is a valid SIN');
history.go(0);
}
*/
| validate | identifier_name |
sin-gen.go | ////////////////////////////////////////////////////////////////////////////
// Program: sin-gen
// Purpose: Social Insurance Number Generator
// Authors: Tong Sun (c) 2014, All rights reserved
////////////////////////////////////////////////////////////////////////////
// Style: gofmt -tabs=false -tabwidth=4 -w
package main
import (
"fmt"
"os"
//"strconv"
//"strings"
)
var progname string = "sin-gen" // program name used in the usage message; os.Args[0]
// usage prints the command synopsis to stderr and terminates the process.
// NOTE(review): it exits with status 0 even when invoked with bad
// arguments — confirm whether a non-zero exit status was intended.
func usage() {
fmt.Fprintf(os.Stderr, "Usage: %s SIN_Str\n", progname)
os.Exit(0)
}
// http://play.golang.org/p/j9gwGwr2FU
// Martin Schnabel mb0@mb0.org
func toDigits(s string) []int {
result := make([]int, 0, len(s))
// for _, c := range strings.Split(s, "") {
// n, err := strconv.Atoi(c)
for i, c := range s {
if c < '0' || c > '9' {
panic(fmt.Errorf("Character #%d from SIN_Str '%c' is invalid\n",
i, c))
}
result = append(result, int(c-'0'))
}
return result
}
/*
Validation Procedure
http://www.ryerson.ca/JavaScript/lectures/forms/textValidation/sinProject.html
Fortunately, the Canadian Government provides social insurance numbers that
can be checked using a fairly straight forward method. Here is an excerpt from
document T4127(E), Payroll Deductions Formulas for Computer Programs (71st
Edition Effective January 1, 2000) published by Revenue Canada that describes
it:
Validation of the Social Insurance Number (SIN)
Human Resources Development Canada uses the SIN and employee information we
provide them to maintain accurate records of employee contributions and
earnings. To minimize the enquiries you receive, we recommend that you include
a SIN validity check as part of your payroll program.
A SIN has nine digits. The first eight digits are the basic number while the
ninth digit is a check digit. You can check whether or not a SIN is valid by
using the following verification method.
Example
The employee provides Social Insurance Number 193-456-787. You can check the
validity of the number by calculating the check digit as follows:
Basic number (first eight digits) Check digit
193 456 78 7
Make a number from each alternate position to the left
beginning at the second digit
9 4 6 8
Add the number to itself
9 4 6 8
Sum
18 8 12 16
Cross-add the digits in the sum (1 + 8 + 8 + 1 + 2 + 1 + 6) =
27
Add each alternate digit beginning at the first digit (1 + 3 + 5 + 7) =
16
Total
43
If the total is a multiple of 10, the check digit should be 0; otherwise, subtract
the total calculated (43) from the next highest number ending in zero (50) 50 |
The check digit is (50 - 43)
7 = 7
Social Insurance Numbers that do not pass the validation check
If the SIN provided by an individual does not pass the verification check, the
preparer should confirm the SIN with the employer who received the original
number. If you are unable to obtain the correct number for the employee,
please do NOT leave the SIN field on the information slip blank. Instead,
report the SIN that was provided, even if it is not a valid number.
Frequently, even an incorrect number will enable us to find a match so that we
can correct the record and ensure the employee receives proper credit for the
deductions.
Instead of worrying about how the user formats the SIN number we can simply
extract the nine digits they provide and check that it is a proper SIN number
using the method described above.
Validation
http://en.wikipedia.org/wiki/Social_Insurance_Number#Validation
Social Insurance Numbers can be validated through a simple check digit process
called the Luhn Algorithm.
046 454 286 <--- A fictitious, but valid SIN
121 212 121 <--- Multiply each top number by the number below it.
In the case of a two-digit number, add the digits together and insert the
result (the digital root). Thus, in the second-to-last column, 8 multiplied by
2 is equal to 16. Add the digits (1 and 6) together (1 + 6 = 7) and insert the
result (7). So the result of the multiplication is:
086 858 276
Then, add all of the digits together:
0+8+6+8+5+8+2+7+6=50
If the SIN is valid, this number will be evenly divisible by 10.
United States Social Security number
https://sourcegraph.com/github.com/django/django-localflavor-us/symbols/python/django_localflavor_us/forms/USSocialSecurityNumberField
Checks the following rules to determine whether the number is valid:
* Conforms to the XXX-XX-XXXX format.
* No group consists entirely of zeroes.
* The leading group is not "666" (block "666" will never be allocated).
* The number is not in the promotional block 987-65-4320 through
987-65-4329, which are permanently invalid.
* The number is not one known to be invalid due to otherwise widespread
promotional use or distribution (078-05-1120 or 219-09-9999).
*/
// multiply holds the per-position Luhn weights: digits at 0-based odd
// positions are doubled before being folded into the checksum.
var multiply []int = []int{1, 2, 1, 2, 1, 2, 1, 2, 1}
/*
validate
Given the first 8 SIN digits in array,
return the last SIN digit (9th) that satisfy validation
*/
// validate panics unless da holds exactly 8 digits.
func validate(da []int) int {
	if len(da) != 8 {
		panic(fmt.Errorf("Internal error: func validate need 8 SIN digits in array as input\n"))
	}
	sum := 0
	for i, d := range da {
		// Luhn digital root of the weighted digit. The original shortcut
		// (d*multiply[i]) % 9 is WRONG for a doubled 9: it maps 18 to 0
		// instead of 9, mis-computing the check digit (the worked example
		// 193-456-787 in the comment above requires 7, but the %9 form
		// yields 6). Subtracting 9 from two-digit products is correct for
		// all inputs.
		p := d * multiply[i]
		if p > 9 {
			p -= 9
		}
		sum += p
	}
	return (10 - sum%10) % 10
}
// pow10 returns 10 raised to the power e for e >= 1, and 1 for e == 0.
// (For negative e the loop never runs and the seed value 10 is returned,
// matching the original behavior.)
func pow10(e int) int {
	if e == 0 {
		return 1
	}
	result := 10
	for i := 1; i < e; i++ {
		result *= 10
	}
	return result
}
// main enumerates all valid 9-digit SINs that start with the prefix given
// as the single command line argument: it pads the prefix to 8 digits,
// computes the Luhn check digit for each candidate, and prints one SIN per
// line to stdout.
func main() {
// There will be only one command line argument
if len(os.Args) != 2 {
usage()
}
// the first command line argument is SIN# prefix
sinStr := os.Args[1]
padlen := 8 - len(sinStr)
for ii := 0; ii < pow10(padlen); ii++ {
// Build a format string that zero-pads the counter to padlen digits.
fmtstr := fmt.Sprintf("%%s%%0%dd", padlen)
// in case the SIN# is 8 digits already, use those 8 digits
fullstr := fmt.Sprintf(fmtstr, sinStr, ii)[:8]
digits := toDigits(fullstr)
d9 := validate(digits)
fmt.Printf("%s%d\n", fullstr, d9)
}
}
/*
Ref:
http://golang.org/pkg/os/
Canadian Social Insurance Number (SIN) Validation
http://www.runnersweb.com/running/sin_check.html
function clear(str) {
var esum = 0;
var enumbers = "";
var checknum = 0;
var ch_sum = "";
var checkdigit = 0;
var sin = "";
var lastdigit = 0;
}
function isNum(text) {
if(text == "") {
alert("You left the SIN field blank.");
return false;
}
inStr = text;
sin = text;
inLen = inStr.length;
if (inLen > 11 || inLen < 11) {
alert("SIN must be 11 characters long");
return false;
}
for (var i = 0; i < text.length; i++) {
var ch = text.substring(i, i + 1)
if ((ch < "0" || "9" < ch) && (ch != "-")) {
alert("You must enter a 9 digits and two dashes.\nFormat 999-999-999.")
return false;
}
if ((i == 3 || i == 7) && (ch != "-")) {
alert("Invalid character in position 4 or 8;\nMust be a dash!");
return false;
}
}
lastdigit = text.substring(10, 10 + 1);
// add numbers in odd positions; IE 1, 3, 6, 8
var odd = ((text.substring(0,0 + 1)) * (1.0) + (text.substring(2,2 + 1)) * (1.0)
+(text.substring(5, 5+1)) * (1.0) + (text.substring(8,8 + 1)) * (1.0));
// form texting of numbers in even positions IE 2, 4, 6, 8
var enumbers = (text.substring(1,1 + 1)) + (text.substring(4,4 + 1))+
(text.substring(6,6 + 1)) + (text.substring(9,9 + 1));
// add together numbers in new text string
// take numbers in even positions; IE 2, 4, 6, 8
// and double them to form a new text string
// EG if numbers are 2,5,1,9 new text string is 410218
for (var i = 0; i < enumbers.length; i++) {
var ch = (enumbers.substring(i, i + 1) * 2);
ch_sum = ch_sum + ch;
}
for (var i = 0; i < ch_sum.length; i++) {
var ch = (ch_sum.substring(i, i + 1));
esum = ((esum * 1.0) + (ch * 1.0));
}
checknum = (odd + esum);
// subtextact checknum from next highest multiple of 10
// to give check digit which is last digit in valid SIN
if (checknum <= 10) {
(checdigit = (10 - checknum));
}
if (checknum > 10 && checknum <= 20) {
(checkdigit = (20 - checknum));
}
if (checknum > 20 && checknum <= 30) {
(checkdigit = (30 - checknum));
}
if (checknum > 30 && checknum <= 40) {
(checkdigit = (40 - checknum));
}
if (checknum > 40 && checknum <= 50) {
(checkdigit = (50 - checknum));
}
if (checknum > 50 && checknum <= 60) {
(checkdigit = (60 - checknum));
}
if (checkdigit != lastdigit) {
alert(sin + " is an invalid SIN; \nCheck digit incorrect!\nShould be: " + checkdigit);
history.go(0);
return false;
}
return true;
}
function validate(textfield) {
var esum = 0;
var enumbers = "";
var checknum = 0;
var ch_sum = "";
var checkdigit = 0;
var sin = "";
var lastdigit = 0;
if (isNum(textfield.value))
alert(textfield.value + ' is a valid SIN');
history.go(0);
}
*/ | random_line_split | |
sin-gen.go | ////////////////////////////////////////////////////////////////////////////
// Porgram: sin-gen
// Purpose: Social Insurance Number Generator
// Authors: Tong Sun (c) 2014, All rights reserved
////////////////////////////////////////////////////////////////////////////
// Style: gofmt -tabs=false -tabwidth=4 -w
package main
import (
"fmt"
"os"
//"strconv"
//"strings"
)
var progname string = "sin-gen" // os.Args[0]
func usage() {
fmt.Fprintf(os.Stderr, "Usage: %s SIN_Str\n", progname)
os.Exit(0)
}
// http://play.golang.org/p/j9gwGwr2FU
// Martin Schnabel mb0@mb0.org
func toDigits(s string) []int {
result := make([]int, 0, len(s))
// for _, c := range strings.Split(s, "") {
// n, err := strconv.Atoi(c)
for i, c := range s {
if c < '0' || c > '9' {
panic(fmt.Errorf("Character #%d from SIN_Str '%c' is invalid\n",
i, c))
}
result = append(result, int(c-'0'))
}
return result
}
/*
Validation Procedure
http://www.ryerson.ca/JavaScript/lectures/forms/textValidation/sinProject.html
Fortunately, the Canadian Government provides social insurance numbers that
can be checked using a fairly straight forward method. Here is an excerpt from
document T4127(E), Payroll Deductions Formulas for Computer Programs (71st
Edition Effective January 1, 2000) published by Revenue Canada that describes
it:
Validation of the Social Insurance Number (SIN)
Human Resources Development Canada uses the SIN and employee information we
provide them to maintain accurate records of employee contributions and
earnings. To minimize the enquiries you receive, we recommend that you include
a SIN validity check as part of your payroll program.
A SIN has nine digits. The first eight digits are the basic number while the
ninth digit is a check digit. You can check whether or not a SIN is valid by
using the following verification method.
Example
The employee provides Social Insurance Number 193-456-787. You can check the
validity of the number by calculating the check digit as follows:
Basic number (first eight digits) Check digit
193 456 78 7
Make a number from each alternate position to the left
beginning at the second digit
9 4 6 8
Add the number to itself
9 4 6 8
Sum
18 8 12 16
Cross-add the digits in the sum (1 + 8 + 8 + 1 + 2 + 1 + 6) =
27
Add each alternate digit beginning at the first digit (1 + 3 + 5 + 7) =
16
Total
43
If the total is a multiple of 10, the check digit should be 0; otherwise, subtract
the total calculated (43) from the next highest number ending in zero (50) 50
The check digit is (50 - 43)
7 = 7
Social Insurance Numbers that do not pass the validation check
If the SIN provided by an individual does not pass the verification check, the
preparer should confirm the SIN with the employer who received the original
number. If you are unable to obtain the correct number for the employee,
please do NOT leave the SIN field on the information slip blank. Instead,
report the SIN that was provided, even if it is not a valid number.
Frequently, even an incorrect number will enable us to find a match so that we
can correct the record and ensure the employee receives proper credit for the
deductions.
Instead of worrying about how the user formats the SIN number we can simply
extract the nine digits they provide and check that it is a proper SIN number
using the method described above.
Validation
http://en.wikipedia.org/wiki/Social_Insurance_Number#Validation
Social Insurance Numbers can be validated through a simple check digit process
called the Luhn Algorithm.
046 454 286 <--- A fictitious, but valid SIN
121 212 121 <--- Multiply each top number by the number below it.
In the case of a two-digit number, add the digits together and insert the
result (the digital root). Thus, in the second-to-last column, 8 multiplied by
2 is equal to 16. Add the digits (1 and 6) together (1 + 6 = 7) and insert the
result (7). So the result of the multiplication is:
086 858 276
Then, add all of the digits together:
0+8+6+8+5+8+2+7+6=50
If the SIN is valid, this number will be evenly divisible by 10.
United States Social Security number
https://sourcegraph.com/github.com/django/django-localflavor-us/symbols/python/django_localflavor_us/forms/USSocialSecurityNumberField
Checks the following rules to determine whether the number is valid:
* Conforms to the XXX-XX-XXXX format.
* No group consists entirely of zeroes.
* The leading group is not "666" (block "666" will never be allocated).
* The number is not in the promotional block 987-65-4320 through
987-65-4329, which are permanently invalid.
* The number is not one known to be invalid due to otherwise widespread
promotional use or distribution (078-05-1120 or 219-09-9999).
*/
var multiply []int = []int{1, 2, 1, 2, 1, 2, 1, 2, 1}
/*
validate
Given the first 8 SIN digits in array,
return the last SIN digit (9th) that satisfy validation
*/
func validate(da []int) int |
func pow10(e int) int {
if e == 0 {
return 1
}
ret := 10
for ii := 1; ii < e; ii++ {
ret *= 10
}
return ret
}
func main() {
// There will be only one command line argument
if len(os.Args) != 2 {
usage()
}
// the first command line argument is SIN# prefix
sinStr := os.Args[1]
padlen := 8 - len(sinStr)
for ii := 0; ii < pow10(padlen); ii++ {
// Pad leading zero with the length from a varible
fmtstr := fmt.Sprintf("%%s%%0%dd", padlen)
// in case the SIN# is 8 digits already, use those 8 digits
fullstr := fmt.Sprintf(fmtstr, sinStr, ii)[:8]
digits := toDigits(fullstr)
d9 := validate(digits)
fmt.Printf("%s%d\n", fullstr, d9)
}
}
/*
Ref:
http://golang.org/pkg/os/
Canadian Social Insurance Number (SIN) Validation
http://www.runnersweb.com/running/sin_check.html
function clear(str) {
var esum = 0;
var enumbers = "";
var checknum = 0;
var ch_sum = "";
var checkdigit = 0;
var sin = "";
var lastdigit = 0;
}
function isNum(text) {
if(text == "") {
alert("You left the SIN field blank.");
return false;
}
inStr = text;
sin = text;
inLen = inStr.length;
if (inLen > 11 || inLen < 11) {
alert("SIN must be 11 characters long");
return false;
}
for (var i = 0; i < text.length; i++) {
var ch = text.substring(i, i + 1)
if ((ch < "0" || "9" < ch) && (ch != "-")) {
alert("You must enter a 9 digits and two dashes.\nFormat 999-999-999.")
return false;
}
if ((i == 3 || i == 7) && (ch != "-")) {
alert("Invalid character in position 4 or 8;\nMust be a dash!");
return false;
}
}
lastdigit = text.substring(10, 10 + 1);
// add numbers in odd positions; IE 1, 3, 6, 8
var odd = ((text.substring(0,0 + 1)) * (1.0) + (text.substring(2,2 + 1)) * (1.0)
+(text.substring(5, 5+1)) * (1.0) + (text.substring(8,8 + 1)) * (1.0));
// form texting of numbers in even positions IE 2, 4, 6, 8
var enumbers = (text.substring(1,1 + 1)) + (text.substring(4,4 + 1))+
(text.substring(6,6 + 1)) + (text.substring(9,9 + 1));
// add together numbers in new text string
// take numbers in even positions; IE 2, 4, 6, 8
// and double them to form a new text string
// EG if numbers are 2,5,1,9 new text string is 410218
for (var i = 0; i < enumbers.length; i++) {
var ch = (enumbers.substring(i, i + 1) * 2);
ch_sum = ch_sum + ch;
}
for (var i = 0; i < ch_sum.length; i++) {
var ch = (ch_sum.substring(i, i + 1));
esum = ((esum * 1.0) + (ch * 1.0));
}
checknum = (odd + esum);
// subtextact checknum from next highest multiple of 10
// to give check digit which is last digit in valid SIN
if (checknum <= 10) {
(checdigit = (10 - checknum));
}
if (checknum > 10 && checknum <= 20) {
(checkdigit = (20 - checknum));
}
if (checknum > 20 && checknum <= 30) {
(checkdigit = (30 - checknum));
}
if (checknum > 30 && checknum <= 40) {
(checkdigit = (40 - checknum));
}
if (checknum > 40 && checknum <= 50) {
(checkdigit = (50 - checknum));
}
if (checknum > 50 && checknum <= 60) {
(checkdigit = (60 - checknum));
}
if (checkdigit != lastdigit) {
alert(sin + " is an invalid SIN; \nCheck digit incorrect!\nShould be: " + checkdigit);
history.go(0);
return false;
}
return true;
}
function validate(textfield) {
var esum = 0;
var enumbers = "";
var checknum = 0;
var ch_sum = "";
var checkdigit = 0;
var sin = "";
var lastdigit = 0;
if (isNum(textfield.value))
alert(textfield.value + ' is a valid SIN');
history.go(0);
}
*/
| {
if len(da) != 8 {
panic(fmt.Errorf("Internal error: func validate need 8 SIN digits in array as input\n"))
}
sum := 0
for i, d := range da {
sum += (d * multiply[i]) % 9
}
return (10 - sum%10) % 10
} | identifier_body |
_stats.py | # Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Functionality related to scores and statistics."""
from __future__ import annotations
import random
import weakref
from typing import TYPE_CHECKING
from dataclasses import dataclass
import _ba
from ba._error import (print_exception, print_error, SessionTeamNotFoundError,
SessionPlayerNotFoundError, NotFoundError)
if TYPE_CHECKING:
import ba
from weakref import ReferenceType
from typing import Any, Dict, Optional, Sequence, Union, Tuple
@dataclass
class PlayerScoredMessage:
"""Informs something that a ba.Player scored.
Category: Message Classes
Attrs:
score
The score value.
"""
score: int
class PlayerRecord:
"""Stats for an individual player in a ba.Stats object.
Category: Gameplay Classes
This does not necessarily correspond to a ba.Player that is
still present (stats may be retained for players that leave
mid-game)
"""
character: str
def __init__(self, name: str, name_full: str,
sessionplayer: ba.SessionPlayer, stats: ba.Stats):
self.name = name
self.name_full = name_full
self.score = 0
self.accumscore = 0
self.kill_count = 0
self.accum_kill_count = 0
self.killed_count = 0
self.accum_killed_count = 0
self._multi_kill_timer: Optional[ba.Timer] = None
self._multi_kill_count = 0
self._stats = weakref.ref(stats)
self._last_sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionteam: Optional[ReferenceType[ba.SessionTeam]] = None
self.streak = 0
self.associate_with_sessionplayer(sessionplayer)
@property
def team(self) -> ba.SessionTeam:
"""The ba.SessionTeam the last associated player was last on.
This can still return a valid result even if the player is gone.
Raises a ba.SessionTeamNotFoundError if the team no longer exists.
"""
assert self._sessionteam is not None
team = self._sessionteam()
if team is None:
raise SessionTeamNotFoundError()
return team
@property
def player(self) -> ba.SessionPlayer:
"""Return the instance's associated ba.SessionPlayer.
Raises a ba.SessionPlayerNotFoundError if the player
no longer exists.
"""
if not self._sessionplayer:
raise SessionPlayerNotFoundError()
return self._sessionplayer
def getname(self, full: bool = False) -> str:
"""Return the player entry's name."""
return self.name_full if full else self.name
def get_icon(self) -> Dict[str, Any]:
"""Get the icon for this instance's player."""
player = self._last_sessionplayer
assert player is not None
return player.get_icon()
def cancel_multi_kill_timer(self) -> None:
"""Cancel any multi-kill timer for this player entry."""
self._multi_kill_timer = None
def getactivity(self) -> Optional[ba.Activity]:
"""Return the ba.Activity this instance is currently associated with.
Returns None if the activity no longer exists."""
stats = self._stats()
if stats is not None:
return stats.getactivity()
return None
def associate_with_sessionplayer(self,
sessionplayer: ba.SessionPlayer) -> None:
"""Associate this entry with a ba.SessionPlayer."""
self._sessionteam = weakref.ref(sessionplayer.sessionteam)
self.character = sessionplayer.character
self._last_sessionplayer = sessionplayer
self._sessionplayer = sessionplayer
self.streak = 0
def _end_multi_kill(self) -> None:
self._multi_kill_timer = None
self._multi_kill_count = 0
def get_last_sessionplayer(self) -> ba.SessionPlayer:
"""Return the last ba.Player we were associated with."""
assert self._last_sessionplayer is not None
return self._last_sessionplayer
def submit_kill(self, showpoints: bool = True) -> None:
"""Submit a kill for this player entry."""
# FIXME Clean this up.
# pylint: disable=too-many-statements
from ba._lang import Lstr
from ba._general import Call
self._multi_kill_count += 1
stats = self._stats()
assert stats
if self._multi_kill_count == 1:
score = 0
name = None
delay = 0.0
color = (0.0, 0.0, 0.0, 1.0)
scale = 1.0
sound = None
elif self._multi_kill_count == 2:
score = 20
name = Lstr(resource='twoKillText')
color = (0.1, 1.0, 0.0, 1)
scale = 1.0
delay = 0.0
sound = stats.orchestrahitsound1
elif self._multi_kill_count == 3:
score = 40
name = Lstr(resource='threeKillText')
color = (1.0, 0.7, 0.0, 1)
scale = 1.1
delay = 0.3
sound = stats.orchestrahitsound2
elif self._multi_kill_count == 4:
score = 60
name = Lstr(resource='fourKillText')
color = (1.0, 1.0, 0.0, 1)
scale = 1.2
delay = 0.6
sound = stats.orchestrahitsound3
elif self._multi_kill_count == 5:
score = 80
name = Lstr(resource='fiveKillText')
color = (1.0, 0.5, 0.0, 1)
scale = 1.3
delay = 0.9
sound = stats.orchestrahitsound4
else:
score = 100
name = Lstr(resource='multiKillText',
subs=[('${COUNT}', str(self._multi_kill_count))])
color = (1.0, 0.5, 0.0, 1)
scale = 1.3
delay = 1.0
sound = stats.orchestrahitsound4
def _apply(name2: Lstr, score2: int, showpoints2: bool,
color2: Tuple[float, float, float, float], scale2: float,
sound2: Optional[ba.Sound]) -> None:
from bastd.actor.popuptext import PopupText
# Only award this if they're still alive and we can get
# a current position for them.
our_pos: Optional[ba.Vec3] = None
if self._sessionplayer:
if self._sessionplayer.activityplayer is not None:
try:
our_pos = self._sessionplayer.activityplayer.position
except NotFoundError:
pass
if our_pos is None:
return
# Jitter position a bit since these often come in clusters.
our_pos = _ba.Vec3(our_pos[0] + (random.random() - 0.5) * 2.0,
our_pos[1] + (random.random() - 0.5) * 2.0,
our_pos[2] + (random.random() - 0.5) * 2.0)
activity = self.getactivity()
if activity is not None:
PopupText(Lstr(
value=(('+' + str(score2) + ' ') if showpoints2 else '') +
'${N}',
subs=[('${N}', name2)]),
color=color2,
scale=scale2,
position=our_pos).autoretain()
if sound2:
_ba.playsound(sound2)
self.score += score2
self.accumscore += score2
# Inform a running game of the score.
if score2 != 0 and activity is not None:
activity.handlemessage(PlayerScoredMessage(score=score2))
if name is not None:
_ba.timer(
0.3 + delay,
Call(_apply, name, score, showpoints, color, scale, sound))
# Keep the tally rollin'...
# set a timer for a bit in the future.
self._multi_kill_timer = _ba.Timer(1.0, self._end_multi_kill)
class Stats:
"""Manages scores and statistics for a ba.Session.
category: Gameplay Classes
"""
def __init__(self) -> None:
self._activity: Optional[ReferenceType[ba.Activity]] = None
self._player_records: Dict[str, PlayerRecord] = {}
self.orchestrahitsound1: Optional[ba.Sound] = None
self.orchestrahitsound2: Optional[ba.Sound] = None
self.orchestrahitsound3: Optional[ba.Sound] = None
self.orchestrahitsound4: Optional[ba.Sound] = None
def setactivity(self, activity: Optional[ba.Activity]) -> None:
"""Set the current activity for this instance."""
self._activity = None if activity is None else weakref.ref(activity)
# Load our media into this activity's context.
if activity is not None:
if activity.expired:
print_error('unexpected finalized activity')
else:
with _ba.Context(activity):
self._load_activity_media()
def getactivity(self) -> Optional[ba.Activity]:
"""Get the activity associated with this instance.
May return None.
"""
if self._activity is None:
return None
return self._activity()
def _load_activity_media(self) -> None:
self.orchestrahitsound1 = _ba.getsound('orchestraHit')
self.orchestrahitsound2 = _ba.getsound('orchestraHit2')
self.orchestrahitsound3 = _ba.getsound('orchestraHit3')
self.orchestrahitsound4 = _ba.getsound('orchestraHit4')
def reset(self) -> None:
"""Reset the stats instance completely."""
# Just to be safe, lets make sure no multi-kill timers are gonna go off
# for no-longer-on-the-list players.
for p_entry in list(self._player_records.values()): | for s_player in list(self._player_records.values()):
s_player.cancel_multi_kill_timer()
s_player.accumscore = 0
s_player.accum_kill_count = 0
s_player.accum_killed_count = 0
s_player.streak = 0
def register_sessionplayer(self, player: ba.SessionPlayer) -> None:
"""Register a ba.SessionPlayer with this score-set."""
assert player.exists() # Invalid refs should never be passed to funcs.
name = player.getname()
if name in self._player_records:
# If the player already exists, update his character and such as
# it may have changed.
self._player_records[name].associate_with_sessionplayer(player)
else:
name_full = player.getname(full=True)
self._player_records[name] = PlayerRecord(name, name_full, player,
self)
def get_records(self) -> Dict[str, ba.PlayerRecord]:
"""Get PlayerRecord corresponding to still-existing players."""
records = {}
# Go through our player records and return ones whose player id still
# corresponds to a player with that name.
for record_id, record in self._player_records.items():
lastplayer = record.get_last_sessionplayer()
if lastplayer and lastplayer.getname() == record_id:
records[record_id] = record
return records
def player_scored(self,
player: ba.Player,
base_points: int = 1,
target: Sequence[float] = None,
kill: bool = False,
victim_player: ba.Player = None,
scale: float = 1.0,
color: Sequence[float] = None,
title: Union[str, ba.Lstr] = None,
screenmessage: bool = True,
display: bool = True,
importance: int = 1,
showpoints: bool = True,
big_message: bool = False) -> int:
"""Register a score for the player.
Return value is actual score with multipliers and such factored in.
"""
# FIXME: Tidy this up.
# pylint: disable=cyclic-import
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
# pylint: disable=too-many-statements
from bastd.actor.popuptext import PopupText
from ba import _math
from ba._gameactivity import GameActivity
from ba._lang import Lstr
del victim_player # Currently unused.
name = player.getname()
s_player = self._player_records[name]
if kill:
s_player.submit_kill(showpoints=showpoints)
display_color: Sequence[float] = (1.0, 1.0, 1.0, 1.0)
if color is not None:
display_color = color
elif importance != 1:
display_color = (1.0, 1.0, 0.4, 1.0)
points = base_points
# If they want a big announcement, throw a zoom-text up there.
if display and big_message:
try:
assert self._activity is not None
activity = self._activity()
if isinstance(activity, GameActivity):
name_full = player.getname(full=True, icon=False)
activity.show_zoom_message(
Lstr(resource='nameScoresText',
subs=[('${NAME}', name_full)]),
color=_math.normalized_color(player.team.color))
except Exception:
print_exception('error showing big_message')
# If we currently have a actor, pop up a score over it.
if display and showpoints:
our_pos = player.node.position if player.node else None
if our_pos is not None:
if target is None:
target = our_pos
# If display-pos is *way* lower than us, raise it up
# (so we can still see scores from dudes that fell off cliffs).
display_pos = (target[0], max(target[1], our_pos[1] - 2.0),
min(target[2], our_pos[2] + 2.0))
activity = self.getactivity()
if activity is not None:
if title is not None:
sval = Lstr(value='+${A} ${B}',
subs=[('${A}', str(points)),
('${B}', title)])
else:
sval = Lstr(value='+${A}',
subs=[('${A}', str(points))])
PopupText(sval,
color=display_color,
scale=1.2 * scale,
position=display_pos).autoretain()
# Tally kills.
if kill:
s_player.accum_kill_count += 1
s_player.kill_count += 1
# Report non-kill scorings.
try:
if screenmessage and not kill:
_ba.screenmessage(Lstr(resource='nameScoresText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
except Exception:
print_exception('error announcing score')
s_player.score += points
s_player.accumscore += points
# Inform a running game of the score.
if points != 0:
activity = self._activity() if self._activity is not None else None
if activity is not None:
activity.handlemessage(PlayerScoredMessage(score=points))
return points
def player_was_killed(self,
player: ba.Player,
killed: bool = False,
killer: ba.Player = None) -> None:
"""Should be called when a player is killed."""
from ba._lang import Lstr
name = player.getname()
prec = self._player_records[name]
prec.streak = 0
if killed:
prec.accum_killed_count += 1
prec.killed_count += 1
try:
if killed and _ba.getactivity().announce_player_deaths:
if killer is player:
_ba.screenmessage(Lstr(resource='nameSuicideText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
elif killer is not None:
if killer.team is player.team:
_ba.screenmessage(Lstr(resource='nameBetrayedText',
subs=[('${NAME}',
killer.getname()),
('${VICTIM}', name)]),
top=True,
color=killer.color,
image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameKilledText',
subs=[('${NAME}',
killer.getname()),
('${VICTIM}', name)]),
top=True,
color=killer.color,
image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameDiedText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
except Exception:
print_exception('error announcing kill') | p_entry.cancel_multi_kill_timer()
self._player_records = {}
def reset_accum(self) -> None:
"""Reset per-sound sub-scores.""" | random_line_split |
_stats.py | # Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Functionality related to scores and statistics."""
from __future__ import annotations
import random
import weakref
from typing import TYPE_CHECKING
from dataclasses import dataclass
import _ba
from ba._error import (print_exception, print_error, SessionTeamNotFoundError,
SessionPlayerNotFoundError, NotFoundError)
if TYPE_CHECKING:
import ba
from weakref import ReferenceType
from typing import Any, Dict, Optional, Sequence, Union, Tuple
@dataclass
class PlayerScoredMessage:
"""Informs something that a ba.Player scored.
Category: Message Classes
Attrs:
score
The score value.
"""
score: int
class PlayerRecord:
"""Stats for an individual player in a ba.Stats object.
Category: Gameplay Classes
This does not necessarily correspond to a ba.Player that is
still present (stats may be retained for players that leave
mid-game)
"""
character: str
def __init__(self, name: str, name_full: str,
sessionplayer: ba.SessionPlayer, stats: ba.Stats):
self.name = name
self.name_full = name_full
self.score = 0
self.accumscore = 0
self.kill_count = 0
self.accum_kill_count = 0
self.killed_count = 0
self.accum_killed_count = 0
self._multi_kill_timer: Optional[ba.Timer] = None
self._multi_kill_count = 0
self._stats = weakref.ref(stats)
self._last_sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionteam: Optional[ReferenceType[ba.SessionTeam]] = None
self.streak = 0
self.associate_with_sessionplayer(sessionplayer)
@property
def team(self) -> ba.SessionTeam:
"""The ba.SessionTeam the last associated player was last on.
This can still return a valid result even if the player is gone.
Raises a ba.SessionTeamNotFoundError if the team no longer exists.
"""
assert self._sessionteam is not None
team = self._sessionteam()
if team is None:
raise SessionTeamNotFoundError()
return team
@property
def player(self) -> ba.SessionPlayer:
"""Return the instance's associated ba.SessionPlayer.
Raises a ba.SessionPlayerNotFoundError if the player
no longer exists.
"""
if not self._sessionplayer:
raise SessionPlayerNotFoundError()
return self._sessionplayer
def getname(self, full: bool = False) -> str:
"""Return the player entry's name."""
return self.name_full if full else self.name
def get_icon(self) -> Dict[str, Any]:
"""Get the icon for this instance's player."""
player = self._last_sessionplayer
assert player is not None
return player.get_icon()
def cancel_multi_kill_timer(self) -> None:
"""Cancel any multi-kill timer for this player entry."""
self._multi_kill_timer = None
def getactivity(self) -> Optional[ba.Activity]:
"""Return the ba.Activity this instance is currently associated with.
Returns None if the activity no longer exists."""
stats = self._stats()
if stats is not None:
return stats.getactivity()
return None
def associate_with_sessionplayer(self,
sessionplayer: ba.SessionPlayer) -> None:
"""Associate this entry with a ba.SessionPlayer."""
self._sessionteam = weakref.ref(sessionplayer.sessionteam)
self.character = sessionplayer.character
self._last_sessionplayer = sessionplayer
self._sessionplayer = sessionplayer
self.streak = 0
def _end_multi_kill(self) -> None:
self._multi_kill_timer = None
self._multi_kill_count = 0
def get_last_sessionplayer(self) -> ba.SessionPlayer:
"""Return the last ba.Player we were associated with."""
assert self._last_sessionplayer is not None
return self._last_sessionplayer
def submit_kill(self, showpoints: bool = True) -> None:
"""Submit a kill for this player entry."""
# FIXME Clean this up.
# pylint: disable=too-many-statements
from ba._lang import Lstr
from ba._general import Call
self._multi_kill_count += 1
stats = self._stats()
assert stats
if self._multi_kill_count == 1:
score = 0
name = None
delay = 0.0
color = (0.0, 0.0, 0.0, 1.0)
scale = 1.0
sound = None
elif self._multi_kill_count == 2:
score = 20
name = Lstr(resource='twoKillText')
color = (0.1, 1.0, 0.0, 1)
scale = 1.0
delay = 0.0
sound = stats.orchestrahitsound1
elif self._multi_kill_count == 3:
score = 40
name = Lstr(resource='threeKillText')
color = (1.0, 0.7, 0.0, 1)
scale = 1.1
delay = 0.3
sound = stats.orchestrahitsound2
elif self._multi_kill_count == 4:
score = 60
name = Lstr(resource='fourKillText')
color = (1.0, 1.0, 0.0, 1)
scale = 1.2
delay = 0.6
sound = stats.orchestrahitsound3
elif self._multi_kill_count == 5:
score = 80
name = Lstr(resource='fiveKillText')
color = (1.0, 0.5, 0.0, 1)
scale = 1.3
delay = 0.9
sound = stats.orchestrahitsound4
else:
score = 100
name = Lstr(resource='multiKillText',
subs=[('${COUNT}', str(self._multi_kill_count))])
color = (1.0, 0.5, 0.0, 1)
scale = 1.3
delay = 1.0
sound = stats.orchestrahitsound4
def _apply(name2: Lstr, score2: int, showpoints2: bool,
color2: Tuple[float, float, float, float], scale2: float,
sound2: Optional[ba.Sound]) -> None:
from bastd.actor.popuptext import PopupText
# Only award this if they're still alive and we can get
# a current position for them.
our_pos: Optional[ba.Vec3] = None
if self._sessionplayer:
if self._sessionplayer.activityplayer is not None:
try:
our_pos = self._sessionplayer.activityplayer.position
except NotFoundError:
pass
if our_pos is None:
return
# Jitter position a bit since these often come in clusters.
our_pos = _ba.Vec3(our_pos[0] + (random.random() - 0.5) * 2.0,
our_pos[1] + (random.random() - 0.5) * 2.0,
our_pos[2] + (random.random() - 0.5) * 2.0)
activity = self.getactivity()
if activity is not None:
PopupText(Lstr(
value=(('+' + str(score2) + ' ') if showpoints2 else '') +
'${N}',
subs=[('${N}', name2)]),
color=color2,
scale=scale2,
position=our_pos).autoretain()
if sound2:
_ba.playsound(sound2)
self.score += score2
self.accumscore += score2
# Inform a running game of the score.
if score2 != 0 and activity is not None:
activity.handlemessage(PlayerScoredMessage(score=score2))
if name is not None:
_ba.timer(
0.3 + delay,
Call(_apply, name, score, showpoints, color, scale, sound))
# Keep the tally rollin'...
# set a timer for a bit in the future.
self._multi_kill_timer = _ba.Timer(1.0, self._end_multi_kill)
class Stats:
"""Manages scores and statistics for a ba.Session.
category: Gameplay Classes
"""
def __init__(self) -> None:
self._activity: Optional[ReferenceType[ba.Activity]] = None
self._player_records: Dict[str, PlayerRecord] = {}
self.orchestrahitsound1: Optional[ba.Sound] = None
self.orchestrahitsound2: Optional[ba.Sound] = None
self.orchestrahitsound3: Optional[ba.Sound] = None
self.orchestrahitsound4: Optional[ba.Sound] = None
def setactivity(self, activity: Optional[ba.Activity]) -> None:
"""Set the current activity for this instance."""
self._activity = None if activity is None else weakref.ref(activity)
# Load our media into this activity's context.
if activity is not None:
if activity.expired:
print_error('unexpected finalized activity')
else:
with _ba.Context(activity):
self._load_activity_media()
def getactivity(self) -> Optional[ba.Activity]:
"""Get the activity associated with this instance.
May return None.
"""
if self._activity is None:
return None
return self._activity()
def _load_activity_media(self) -> None:
self.orchestrahitsound1 = _ba.getsound('orchestraHit')
self.orchestrahitsound2 = _ba.getsound('orchestraHit2')
self.orchestrahitsound3 = _ba.getsound('orchestraHit3')
self.orchestrahitsound4 = _ba.getsound('orchestraHit4')
def reset(self) -> None:
"""Reset the stats instance completely."""
# Just to be safe, lets make sure no multi-kill timers are gonna go off
# for no-longer-on-the-list players.
for p_entry in list(self._player_records.values()):
p_entry.cancel_multi_kill_timer()
self._player_records = {}
def reset_accum(self) -> None:
"""Reset per-sound sub-scores."""
for s_player in list(self._player_records.values()):
s_player.cancel_multi_kill_timer()
s_player.accumscore = 0
s_player.accum_kill_count = 0
s_player.accum_killed_count = 0
s_player.streak = 0
def | (self, player: ba.SessionPlayer) -> None:
"""Register a ba.SessionPlayer with this score-set."""
assert player.exists() # Invalid refs should never be passed to funcs.
name = player.getname()
if name in self._player_records:
# If the player already exists, update his character and such as
# it may have changed.
self._player_records[name].associate_with_sessionplayer(player)
else:
name_full = player.getname(full=True)
self._player_records[name] = PlayerRecord(name, name_full, player,
self)
def get_records(self) -> Dict[str, ba.PlayerRecord]:
"""Get PlayerRecord corresponding to still-existing players."""
records = {}
# Go through our player records and return ones whose player id still
# corresponds to a player with that name.
for record_id, record in self._player_records.items():
lastplayer = record.get_last_sessionplayer()
if lastplayer and lastplayer.getname() == record_id:
records[record_id] = record
return records
def player_scored(self,
player: ba.Player,
base_points: int = 1,
target: Sequence[float] = None,
kill: bool = False,
victim_player: ba.Player = None,
scale: float = 1.0,
color: Sequence[float] = None,
title: Union[str, ba.Lstr] = None,
screenmessage: bool = True,
display: bool = True,
importance: int = 1,
showpoints: bool = True,
big_message: bool = False) -> int:
"""Register a score for the player.
Return value is actual score with multipliers and such factored in.
"""
# FIXME: Tidy this up.
# pylint: disable=cyclic-import
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
# pylint: disable=too-many-statements
from bastd.actor.popuptext import PopupText
from ba import _math
from ba._gameactivity import GameActivity
from ba._lang import Lstr
del victim_player # Currently unused.
name = player.getname()
s_player = self._player_records[name]
if kill:
s_player.submit_kill(showpoints=showpoints)
display_color: Sequence[float] = (1.0, 1.0, 1.0, 1.0)
if color is not None:
display_color = color
elif importance != 1:
display_color = (1.0, 1.0, 0.4, 1.0)
points = base_points
# If they want a big announcement, throw a zoom-text up there.
if display and big_message:
try:
assert self._activity is not None
activity = self._activity()
if isinstance(activity, GameActivity):
name_full = player.getname(full=True, icon=False)
activity.show_zoom_message(
Lstr(resource='nameScoresText',
subs=[('${NAME}', name_full)]),
color=_math.normalized_color(player.team.color))
except Exception:
print_exception('error showing big_message')
# If we currently have a actor, pop up a score over it.
if display and showpoints:
our_pos = player.node.position if player.node else None
if our_pos is not None:
if target is None:
target = our_pos
# If display-pos is *way* lower than us, raise it up
# (so we can still see scores from dudes that fell off cliffs).
display_pos = (target[0], max(target[1], our_pos[1] - 2.0),
min(target[2], our_pos[2] + 2.0))
activity = self.getactivity()
if activity is not None:
if title is not None:
sval = Lstr(value='+${A} ${B}',
subs=[('${A}', str(points)),
('${B}', title)])
else:
sval = Lstr(value='+${A}',
subs=[('${A}', str(points))])
PopupText(sval,
color=display_color,
scale=1.2 * scale,
position=display_pos).autoretain()
# Tally kills.
if kill:
s_player.accum_kill_count += 1
s_player.kill_count += 1
# Report non-kill scorings.
try:
if screenmessage and not kill:
_ba.screenmessage(Lstr(resource='nameScoresText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
except Exception:
print_exception('error announcing score')
s_player.score += points
s_player.accumscore += points
# Inform a running game of the score.
if points != 0:
activity = self._activity() if self._activity is not None else None
if activity is not None:
activity.handlemessage(PlayerScoredMessage(score=points))
return points
def player_was_killed(self,
player: ba.Player,
killed: bool = False,
killer: ba.Player = None) -> None:
"""Should be called when a player is killed."""
from ba._lang import Lstr
name = player.getname()
prec = self._player_records[name]
prec.streak = 0
if killed:
prec.accum_killed_count += 1
prec.killed_count += 1
try:
if killed and _ba.getactivity().announce_player_deaths:
if killer is player:
_ba.screenmessage(Lstr(resource='nameSuicideText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
elif killer is not None:
if killer.team is player.team:
_ba.screenmessage(Lstr(resource='nameBetrayedText',
subs=[('${NAME}',
killer.getname()),
('${VICTIM}', name)]),
top=True,
color=killer.color,
image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameKilledText',
subs=[('${NAME}',
killer.getname()),
('${VICTIM}', name)]),
top=True,
color=killer.color,
image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameDiedText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
except Exception:
print_exception('error announcing kill')
| register_sessionplayer | identifier_name |
_stats.py | # Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Functionality related to scores and statistics."""
from __future__ import annotations
import random
import weakref
from typing import TYPE_CHECKING
from dataclasses import dataclass
import _ba
from ba._error import (print_exception, print_error, SessionTeamNotFoundError,
SessionPlayerNotFoundError, NotFoundError)
if TYPE_CHECKING:
import ba
from weakref import ReferenceType
from typing import Any, Dict, Optional, Sequence, Union, Tuple
@dataclass
class PlayerScoredMessage:
"""Informs something that a ba.Player scored.
Category: Message Classes
Attrs:
score
The score value.
"""
score: int
class PlayerRecord:
"""Stats for an individual player in a ba.Stats object.
Category: Gameplay Classes
This does not necessarily correspond to a ba.Player that is
still present (stats may be retained for players that leave
mid-game)
"""
character: str
def __init__(self, name: str, name_full: str,
sessionplayer: ba.SessionPlayer, stats: ba.Stats):
self.name = name
self.name_full = name_full
self.score = 0
self.accumscore = 0
self.kill_count = 0
self.accum_kill_count = 0
self.killed_count = 0
self.accum_killed_count = 0
self._multi_kill_timer: Optional[ba.Timer] = None
self._multi_kill_count = 0
self._stats = weakref.ref(stats)
self._last_sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionteam: Optional[ReferenceType[ba.SessionTeam]] = None
self.streak = 0
self.associate_with_sessionplayer(sessionplayer)
@property
def team(self) -> ba.SessionTeam:
"""The ba.SessionTeam the last associated player was last on.
This can still return a valid result even if the player is gone.
Raises a ba.SessionTeamNotFoundError if the team no longer exists.
"""
assert self._sessionteam is not None
team = self._sessionteam()
if team is None:
raise SessionTeamNotFoundError()
return team
@property
def player(self) -> ba.SessionPlayer:
"""Return the instance's associated ba.SessionPlayer.
Raises a ba.SessionPlayerNotFoundError if the player
no longer exists.
"""
if not self._sessionplayer:
raise SessionPlayerNotFoundError()
return self._sessionplayer
def getname(self, full: bool = False) -> str:
"""Return the player entry's name."""
return self.name_full if full else self.name
def get_icon(self) -> Dict[str, Any]:
"""Get the icon for this instance's player."""
player = self._last_sessionplayer
assert player is not None
return player.get_icon()
def cancel_multi_kill_timer(self) -> None:
"""Cancel any multi-kill timer for this player entry."""
self._multi_kill_timer = None
def getactivity(self) -> Optional[ba.Activity]:
"""Return the ba.Activity this instance is currently associated with.
Returns None if the activity no longer exists."""
stats = self._stats()
if stats is not None:
return stats.getactivity()
return None
def associate_with_sessionplayer(self,
sessionplayer: ba.SessionPlayer) -> None:
"""Associate this entry with a ba.SessionPlayer."""
self._sessionteam = weakref.ref(sessionplayer.sessionteam)
self.character = sessionplayer.character
self._last_sessionplayer = sessionplayer
self._sessionplayer = sessionplayer
self.streak = 0
def _end_multi_kill(self) -> None:
self._multi_kill_timer = None
self._multi_kill_count = 0
def get_last_sessionplayer(self) -> ba.SessionPlayer:
"""Return the last ba.Player we were associated with."""
assert self._last_sessionplayer is not None
return self._last_sessionplayer
def submit_kill(self, showpoints: bool = True) -> None:
"""Submit a kill for this player entry."""
# FIXME Clean this up.
# pylint: disable=too-many-statements
from ba._lang import Lstr
from ba._general import Call
self._multi_kill_count += 1
stats = self._stats()
assert stats
if self._multi_kill_count == 1:
score = 0
name = None
delay = 0.0
color = (0.0, 0.0, 0.0, 1.0)
scale = 1.0
sound = None
elif self._multi_kill_count == 2:
score = 20
name = Lstr(resource='twoKillText')
color = (0.1, 1.0, 0.0, 1)
scale = 1.0
delay = 0.0
sound = stats.orchestrahitsound1
elif self._multi_kill_count == 3:
score = 40
name = Lstr(resource='threeKillText')
color = (1.0, 0.7, 0.0, 1)
scale = 1.1
delay = 0.3
sound = stats.orchestrahitsound2
elif self._multi_kill_count == 4:
score = 60
name = Lstr(resource='fourKillText')
color = (1.0, 1.0, 0.0, 1)
scale = 1.2
delay = 0.6
sound = stats.orchestrahitsound3
elif self._multi_kill_count == 5:
score = 80
name = Lstr(resource='fiveKillText')
color = (1.0, 0.5, 0.0, 1)
scale = 1.3
delay = 0.9
sound = stats.orchestrahitsound4
else:
score = 100
name = Lstr(resource='multiKillText',
subs=[('${COUNT}', str(self._multi_kill_count))])
color = (1.0, 0.5, 0.0, 1)
scale = 1.3
delay = 1.0
sound = stats.orchestrahitsound4
def _apply(name2: Lstr, score2: int, showpoints2: bool,
color2: Tuple[float, float, float, float], scale2: float,
sound2: Optional[ba.Sound]) -> None:
|
if name is not None:
_ba.timer(
0.3 + delay,
Call(_apply, name, score, showpoints, color, scale, sound))
# Keep the tally rollin'...
# set a timer for a bit in the future.
self._multi_kill_timer = _ba.Timer(1.0, self._end_multi_kill)
class Stats:
"""Manages scores and statistics for a ba.Session.
category: Gameplay Classes
"""
def __init__(self) -> None:
self._activity: Optional[ReferenceType[ba.Activity]] = None
self._player_records: Dict[str, PlayerRecord] = {}
self.orchestrahitsound1: Optional[ba.Sound] = None
self.orchestrahitsound2: Optional[ba.Sound] = None
self.orchestrahitsound3: Optional[ba.Sound] = None
self.orchestrahitsound4: Optional[ba.Sound] = None
def setactivity(self, activity: Optional[ba.Activity]) -> None:
"""Set the current activity for this instance."""
self._activity = None if activity is None else weakref.ref(activity)
# Load our media into this activity's context.
if activity is not None:
if activity.expired:
print_error('unexpected finalized activity')
else:
with _ba.Context(activity):
self._load_activity_media()
def getactivity(self) -> Optional[ba.Activity]:
"""Get the activity associated with this instance.
May return None.
"""
if self._activity is None:
return None
return self._activity()
def _load_activity_media(self) -> None:
self.orchestrahitsound1 = _ba.getsound('orchestraHit')
self.orchestrahitsound2 = _ba.getsound('orchestraHit2')
self.orchestrahitsound3 = _ba.getsound('orchestraHit3')
self.orchestrahitsound4 = _ba.getsound('orchestraHit4')
def reset(self) -> None:
"""Reset the stats instance completely."""
# Just to be safe, lets make sure no multi-kill timers are gonna go off
# for no-longer-on-the-list players.
for p_entry in list(self._player_records.values()):
p_entry.cancel_multi_kill_timer()
self._player_records = {}
def reset_accum(self) -> None:
"""Reset per-sound sub-scores."""
for s_player in list(self._player_records.values()):
s_player.cancel_multi_kill_timer()
s_player.accumscore = 0
s_player.accum_kill_count = 0
s_player.accum_killed_count = 0
s_player.streak = 0
def register_sessionplayer(self, player: ba.SessionPlayer) -> None:
"""Register a ba.SessionPlayer with this score-set."""
assert player.exists() # Invalid refs should never be passed to funcs.
name = player.getname()
if name in self._player_records:
# If the player already exists, update his character and such as
# it may have changed.
self._player_records[name].associate_with_sessionplayer(player)
else:
name_full = player.getname(full=True)
self._player_records[name] = PlayerRecord(name, name_full, player,
self)
def get_records(self) -> Dict[str, ba.PlayerRecord]:
"""Get PlayerRecord corresponding to still-existing players."""
records = {}
# Go through our player records and return ones whose player id still
# corresponds to a player with that name.
for record_id, record in self._player_records.items():
lastplayer = record.get_last_sessionplayer()
if lastplayer and lastplayer.getname() == record_id:
records[record_id] = record
return records
def player_scored(self,
player: ba.Player,
base_points: int = 1,
target: Sequence[float] = None,
kill: bool = False,
victim_player: ba.Player = None,
scale: float = 1.0,
color: Sequence[float] = None,
title: Union[str, ba.Lstr] = None,
screenmessage: bool = True,
display: bool = True,
importance: int = 1,
showpoints: bool = True,
big_message: bool = False) -> int:
"""Register a score for the player.
Return value is actual score with multipliers and such factored in.
"""
# FIXME: Tidy this up.
# pylint: disable=cyclic-import
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
# pylint: disable=too-many-statements
from bastd.actor.popuptext import PopupText
from ba import _math
from ba._gameactivity import GameActivity
from ba._lang import Lstr
del victim_player # Currently unused.
name = player.getname()
s_player = self._player_records[name]
if kill:
s_player.submit_kill(showpoints=showpoints)
display_color: Sequence[float] = (1.0, 1.0, 1.0, 1.0)
if color is not None:
display_color = color
elif importance != 1:
display_color = (1.0, 1.0, 0.4, 1.0)
points = base_points
# If they want a big announcement, throw a zoom-text up there.
if display and big_message:
try:
assert self._activity is not None
activity = self._activity()
if isinstance(activity, GameActivity):
name_full = player.getname(full=True, icon=False)
activity.show_zoom_message(
Lstr(resource='nameScoresText',
subs=[('${NAME}', name_full)]),
color=_math.normalized_color(player.team.color))
except Exception:
print_exception('error showing big_message')
# If we currently have a actor, pop up a score over it.
if display and showpoints:
our_pos = player.node.position if player.node else None
if our_pos is not None:
if target is None:
target = our_pos
# If display-pos is *way* lower than us, raise it up
# (so we can still see scores from dudes that fell off cliffs).
display_pos = (target[0], max(target[1], our_pos[1] - 2.0),
min(target[2], our_pos[2] + 2.0))
activity = self.getactivity()
if activity is not None:
if title is not None:
sval = Lstr(value='+${A} ${B}',
subs=[('${A}', str(points)),
('${B}', title)])
else:
sval = Lstr(value='+${A}',
subs=[('${A}', str(points))])
PopupText(sval,
color=display_color,
scale=1.2 * scale,
position=display_pos).autoretain()
# Tally kills.
if kill:
s_player.accum_kill_count += 1
s_player.kill_count += 1
# Report non-kill scorings.
try:
if screenmessage and not kill:
_ba.screenmessage(Lstr(resource='nameScoresText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
except Exception:
print_exception('error announcing score')
s_player.score += points
s_player.accumscore += points
# Inform a running game of the score.
if points != 0:
activity = self._activity() if self._activity is not None else None
if activity is not None:
activity.handlemessage(PlayerScoredMessage(score=points))
return points
def player_was_killed(self,
player: ba.Player,
killed: bool = False,
killer: ba.Player = None) -> None:
"""Should be called when a player is killed."""
from ba._lang import Lstr
name = player.getname()
prec = self._player_records[name]
prec.streak = 0
if killed:
prec.accum_killed_count += 1
prec.killed_count += 1
try:
if killed and _ba.getactivity().announce_player_deaths:
if killer is player:
_ba.screenmessage(Lstr(resource='nameSuicideText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
elif killer is not None:
if killer.team is player.team:
_ba.screenmessage(Lstr(resource='nameBetrayedText',
subs=[('${NAME}',
killer.getname()),
('${VICTIM}', name)]),
top=True,
color=killer.color,
image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameKilledText',
subs=[('${NAME}',
killer.getname()),
('${VICTIM}', name)]),
top=True,
color=killer.color,
image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameDiedText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
except Exception:
print_exception('error announcing kill')
| from bastd.actor.popuptext import PopupText
# Only award this if they're still alive and we can get
# a current position for them.
our_pos: Optional[ba.Vec3] = None
if self._sessionplayer:
if self._sessionplayer.activityplayer is not None:
try:
our_pos = self._sessionplayer.activityplayer.position
except NotFoundError:
pass
if our_pos is None:
return
# Jitter position a bit since these often come in clusters.
our_pos = _ba.Vec3(our_pos[0] + (random.random() - 0.5) * 2.0,
our_pos[1] + (random.random() - 0.5) * 2.0,
our_pos[2] + (random.random() - 0.5) * 2.0)
activity = self.getactivity()
if activity is not None:
PopupText(Lstr(
value=(('+' + str(score2) + ' ') if showpoints2 else '') +
'${N}',
subs=[('${N}', name2)]),
color=color2,
scale=scale2,
position=our_pos).autoretain()
if sound2:
_ba.playsound(sound2)
self.score += score2
self.accumscore += score2
# Inform a running game of the score.
if score2 != 0 and activity is not None:
activity.handlemessage(PlayerScoredMessage(score=score2)) | identifier_body |
_stats.py | # Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Functionality related to scores and statistics."""
from __future__ import annotations
import random
import weakref
from typing import TYPE_CHECKING
from dataclasses import dataclass
import _ba
from ba._error import (print_exception, print_error, SessionTeamNotFoundError,
SessionPlayerNotFoundError, NotFoundError)
if TYPE_CHECKING:
import ba
from weakref import ReferenceType
from typing import Any, Dict, Optional, Sequence, Union, Tuple
@dataclass
class PlayerScoredMessage:
"""Informs something that a ba.Player scored.
Category: Message Classes
Attrs:
score
The score value.
"""
score: int
class PlayerRecord:
"""Stats for an individual player in a ba.Stats object.
Category: Gameplay Classes
This does not necessarily correspond to a ba.Player that is
still present (stats may be retained for players that leave
mid-game)
"""
character: str
def __init__(self, name: str, name_full: str,
sessionplayer: ba.SessionPlayer, stats: ba.Stats):
self.name = name
self.name_full = name_full
self.score = 0
self.accumscore = 0
self.kill_count = 0
self.accum_kill_count = 0
self.killed_count = 0
self.accum_killed_count = 0
self._multi_kill_timer: Optional[ba.Timer] = None
self._multi_kill_count = 0
self._stats = weakref.ref(stats)
self._last_sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionteam: Optional[ReferenceType[ba.SessionTeam]] = None
self.streak = 0
self.associate_with_sessionplayer(sessionplayer)
@property
def team(self) -> ba.SessionTeam:
"""The ba.SessionTeam the last associated player was last on.
This can still return a valid result even if the player is gone.
Raises a ba.SessionTeamNotFoundError if the team no longer exists.
"""
assert self._sessionteam is not None
team = self._sessionteam()
if team is None:
raise SessionTeamNotFoundError()
return team
@property
def player(self) -> ba.SessionPlayer:
"""Return the instance's associated ba.SessionPlayer.
Raises a ba.SessionPlayerNotFoundError if the player
no longer exists.
"""
if not self._sessionplayer:
raise SessionPlayerNotFoundError()
return self._sessionplayer
def getname(self, full: bool = False) -> str:
"""Return the player entry's name."""
return self.name_full if full else self.name
def get_icon(self) -> Dict[str, Any]:
"""Get the icon for this instance's player."""
player = self._last_sessionplayer
assert player is not None
return player.get_icon()
def cancel_multi_kill_timer(self) -> None:
"""Cancel any multi-kill timer for this player entry."""
self._multi_kill_timer = None
def getactivity(self) -> Optional[ba.Activity]:
"""Return the ba.Activity this instance is currently associated with.
Returns None if the activity no longer exists."""
stats = self._stats()
if stats is not None:
return stats.getactivity()
return None
def associate_with_sessionplayer(self,
sessionplayer: ba.SessionPlayer) -> None:
"""Associate this entry with a ba.SessionPlayer."""
self._sessionteam = weakref.ref(sessionplayer.sessionteam)
self.character = sessionplayer.character
self._last_sessionplayer = sessionplayer
self._sessionplayer = sessionplayer
self.streak = 0
def _end_multi_kill(self) -> None:
self._multi_kill_timer = None
self._multi_kill_count = 0
def get_last_sessionplayer(self) -> ba.SessionPlayer:
"""Return the last ba.Player we were associated with."""
assert self._last_sessionplayer is not None
return self._last_sessionplayer
def submit_kill(self, showpoints: bool = True) -> None:
"""Submit a kill for this player entry."""
# FIXME Clean this up.
# pylint: disable=too-many-statements
from ba._lang import Lstr
from ba._general import Call
self._multi_kill_count += 1
stats = self._stats()
assert stats
if self._multi_kill_count == 1:
score = 0
name = None
delay = 0.0
color = (0.0, 0.0, 0.0, 1.0)
scale = 1.0
sound = None
elif self._multi_kill_count == 2:
score = 20
name = Lstr(resource='twoKillText')
color = (0.1, 1.0, 0.0, 1)
scale = 1.0
delay = 0.0
sound = stats.orchestrahitsound1
elif self._multi_kill_count == 3:
score = 40
name = Lstr(resource='threeKillText')
color = (1.0, 0.7, 0.0, 1)
scale = 1.1
delay = 0.3
sound = stats.orchestrahitsound2
elif self._multi_kill_count == 4:
score = 60
name = Lstr(resource='fourKillText')
color = (1.0, 1.0, 0.0, 1)
scale = 1.2
delay = 0.6
sound = stats.orchestrahitsound3
elif self._multi_kill_count == 5:
score = 80
name = Lstr(resource='fiveKillText')
color = (1.0, 0.5, 0.0, 1)
scale = 1.3
delay = 0.9
sound = stats.orchestrahitsound4
else:
|
def _apply(name2: Lstr, score2: int, showpoints2: bool,
color2: Tuple[float, float, float, float], scale2: float,
sound2: Optional[ba.Sound]) -> None:
from bastd.actor.popuptext import PopupText
# Only award this if they're still alive and we can get
# a current position for them.
our_pos: Optional[ba.Vec3] = None
if self._sessionplayer:
if self._sessionplayer.activityplayer is not None:
try:
our_pos = self._sessionplayer.activityplayer.position
except NotFoundError:
pass
if our_pos is None:
return
# Jitter position a bit since these often come in clusters.
our_pos = _ba.Vec3(our_pos[0] + (random.random() - 0.5) * 2.0,
our_pos[1] + (random.random() - 0.5) * 2.0,
our_pos[2] + (random.random() - 0.5) * 2.0)
activity = self.getactivity()
if activity is not None:
PopupText(Lstr(
value=(('+' + str(score2) + ' ') if showpoints2 else '') +
'${N}',
subs=[('${N}', name2)]),
color=color2,
scale=scale2,
position=our_pos).autoretain()
if sound2:
_ba.playsound(sound2)
self.score += score2
self.accumscore += score2
# Inform a running game of the score.
if score2 != 0 and activity is not None:
activity.handlemessage(PlayerScoredMessage(score=score2))
if name is not None:
_ba.timer(
0.3 + delay,
Call(_apply, name, score, showpoints, color, scale, sound))
# Keep the tally rollin'...
# set a timer for a bit in the future.
self._multi_kill_timer = _ba.Timer(1.0, self._end_multi_kill)
class Stats:
"""Manages scores and statistics for a ba.Session.
category: Gameplay Classes
"""
def __init__(self) -> None:
self._activity: Optional[ReferenceType[ba.Activity]] = None
self._player_records: Dict[str, PlayerRecord] = {}
self.orchestrahitsound1: Optional[ba.Sound] = None
self.orchestrahitsound2: Optional[ba.Sound] = None
self.orchestrahitsound3: Optional[ba.Sound] = None
self.orchestrahitsound4: Optional[ba.Sound] = None
def setactivity(self, activity: Optional[ba.Activity]) -> None:
"""Set the current activity for this instance."""
self._activity = None if activity is None else weakref.ref(activity)
# Load our media into this activity's context.
if activity is not None:
if activity.expired:
print_error('unexpected finalized activity')
else:
with _ba.Context(activity):
self._load_activity_media()
def getactivity(self) -> Optional[ba.Activity]:
"""Get the activity associated with this instance.
May return None.
"""
if self._activity is None:
return None
return self._activity()
def _load_activity_media(self) -> None:
self.orchestrahitsound1 = _ba.getsound('orchestraHit')
self.orchestrahitsound2 = _ba.getsound('orchestraHit2')
self.orchestrahitsound3 = _ba.getsound('orchestraHit3')
self.orchestrahitsound4 = _ba.getsound('orchestraHit4')
def reset(self) -> None:
"""Reset the stats instance completely."""
# Just to be safe, lets make sure no multi-kill timers are gonna go off
# for no-longer-on-the-list players.
for p_entry in list(self._player_records.values()):
p_entry.cancel_multi_kill_timer()
self._player_records = {}
def reset_accum(self) -> None:
"""Reset per-sound sub-scores."""
for s_player in list(self._player_records.values()):
s_player.cancel_multi_kill_timer()
s_player.accumscore = 0
s_player.accum_kill_count = 0
s_player.accum_killed_count = 0
s_player.streak = 0
def register_sessionplayer(self, player: ba.SessionPlayer) -> None:
"""Register a ba.SessionPlayer with this score-set."""
assert player.exists() # Invalid refs should never be passed to funcs.
name = player.getname()
if name in self._player_records:
# If the player already exists, update his character and such as
# it may have changed.
self._player_records[name].associate_with_sessionplayer(player)
else:
name_full = player.getname(full=True)
self._player_records[name] = PlayerRecord(name, name_full, player,
self)
def get_records(self) -> Dict[str, ba.PlayerRecord]:
"""Get PlayerRecord corresponding to still-existing players."""
records = {}
# Go through our player records and return ones whose player id still
# corresponds to a player with that name.
for record_id, record in self._player_records.items():
lastplayer = record.get_last_sessionplayer()
if lastplayer and lastplayer.getname() == record_id:
records[record_id] = record
return records
def player_scored(self,
player: ba.Player,
base_points: int = 1,
target: Sequence[float] = None,
kill: bool = False,
victim_player: ba.Player = None,
scale: float = 1.0,
color: Sequence[float] = None,
title: Union[str, ba.Lstr] = None,
screenmessage: bool = True,
display: bool = True,
importance: int = 1,
showpoints: bool = True,
big_message: bool = False) -> int:
"""Register a score for the player.
Return value is actual score with multipliers and such factored in.
"""
# FIXME: Tidy this up.
# pylint: disable=cyclic-import
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
# pylint: disable=too-many-statements
from bastd.actor.popuptext import PopupText
from ba import _math
from ba._gameactivity import GameActivity
from ba._lang import Lstr
del victim_player # Currently unused.
name = player.getname()
s_player = self._player_records[name]
if kill:
s_player.submit_kill(showpoints=showpoints)
display_color: Sequence[float] = (1.0, 1.0, 1.0, 1.0)
if color is not None:
display_color = color
elif importance != 1:
display_color = (1.0, 1.0, 0.4, 1.0)
points = base_points
# If they want a big announcement, throw a zoom-text up there.
if display and big_message:
try:
assert self._activity is not None
activity = self._activity()
if isinstance(activity, GameActivity):
name_full = player.getname(full=True, icon=False)
activity.show_zoom_message(
Lstr(resource='nameScoresText',
subs=[('${NAME}', name_full)]),
color=_math.normalized_color(player.team.color))
except Exception:
print_exception('error showing big_message')
# If we currently have a actor, pop up a score over it.
if display and showpoints:
our_pos = player.node.position if player.node else None
if our_pos is not None:
if target is None:
target = our_pos
# If display-pos is *way* lower than us, raise it up
# (so we can still see scores from dudes that fell off cliffs).
display_pos = (target[0], max(target[1], our_pos[1] - 2.0),
min(target[2], our_pos[2] + 2.0))
activity = self.getactivity()
if activity is not None:
if title is not None:
sval = Lstr(value='+${A} ${B}',
subs=[('${A}', str(points)),
('${B}', title)])
else:
sval = Lstr(value='+${A}',
subs=[('${A}', str(points))])
PopupText(sval,
color=display_color,
scale=1.2 * scale,
position=display_pos).autoretain()
# Tally kills.
if kill:
s_player.accum_kill_count += 1
s_player.kill_count += 1
# Report non-kill scorings.
try:
if screenmessage and not kill:
_ba.screenmessage(Lstr(resource='nameScoresText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
except Exception:
print_exception('error announcing score')
s_player.score += points
s_player.accumscore += points
# Inform a running game of the score.
if points != 0:
activity = self._activity() if self._activity is not None else None
if activity is not None:
activity.handlemessage(PlayerScoredMessage(score=points))
return points
def player_was_killed(self,
player: ba.Player,
killed: bool = False,
killer: ba.Player = None) -> None:
"""Should be called when a player is killed."""
from ba._lang import Lstr
name = player.getname()
prec = self._player_records[name]
prec.streak = 0
if killed:
prec.accum_killed_count += 1
prec.killed_count += 1
try:
if killed and _ba.getactivity().announce_player_deaths:
if killer is player:
_ba.screenmessage(Lstr(resource='nameSuicideText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
elif killer is not None:
if killer.team is player.team:
_ba.screenmessage(Lstr(resource='nameBetrayedText',
subs=[('${NAME}',
killer.getname()),
('${VICTIM}', name)]),
top=True,
color=killer.color,
image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameKilledText',
subs=[('${NAME}',
killer.getname()),
('${VICTIM}', name)]),
top=True,
color=killer.color,
image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameDiedText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
except Exception:
print_exception('error announcing kill')
| score = 100
name = Lstr(resource='multiKillText',
subs=[('${COUNT}', str(self._multi_kill_count))])
color = (1.0, 0.5, 0.0, 1)
scale = 1.3
delay = 1.0
sound = stats.orchestrahitsound4 | conditional_block |
pattern.rs | use core::{cmp, fmt, mem, u16, usize};
use alloc::{string::String, vec, vec::Vec};
use crate::packed::api::MatchKind;
/// The type used for representing a pattern identifier.
///
/// We don't use `usize` here because our packed searchers don't scale to
/// huge numbers of patterns, so we keep things a bit smaller.
pub type PatternID = u16;
/// A non-empty collection of non-empty patterns to search for.
///
/// This collection of patterns is what is passed around to both execute
/// searches and to construct the searchers themselves. Namely, this permits
/// searches to avoid copying all of the patterns, and allows us to keep only
/// one copy throughout all packed searchers.
///
/// Note that this collection is not a set. The same pattern can appear more
/// than once.
#[derive(Clone, Debug)]
pub struct Patterns {
/// The match semantics supported by this collection of patterns.
///
/// The match semantics determines the order of the iterator over patterns.
/// For leftmost-first, patterns are provided in the same order as were
/// provided by the caller. For leftmost-longest, patterns are provided in
/// descending order of length, with ties broken by the order in which they
/// were provided by the caller.
kind: MatchKind,
/// The collection of patterns, indexed by their identifier.
by_id: Vec<Vec<u8>>,
/// The order of patterns defined for iteration, given by pattern
/// identifiers. The order of `by_id` and `order` is always the same for
/// leftmost-first semantics, but may be different for leftmost-longest
/// semantics.
order: Vec<PatternID>,
/// The length of the smallest pattern, in bytes.
minimum_len: usize,
/// The largest pattern identifier. This should always be equivalent to
/// the number of patterns minus one in this collection.
max_pattern_id: PatternID,
/// The total number of pattern bytes across the entire collection. This
/// is used for reporting total heap usage in constant time.
total_pattern_bytes: usize,
}
impl Patterns {
/// Create a new collection of patterns for the given match semantics. The
/// ID of each pattern is the index of the pattern at which it occurs in
/// the `by_id` slice.
///
/// If any of the patterns in the slice given are empty, then this panics.
/// Similarly, if the number of patterns given is zero, then this also
/// panics.
pub fn new() -> Patterns {
Patterns {
kind: MatchKind::default(),
by_id: vec![],
order: vec![],
minimum_len: usize::MAX,
max_pattern_id: 0,
total_pattern_bytes: 0,
}
}
/// Add a pattern to this collection.
///
/// This panics if the pattern given is empty.
pub fn add(&mut self, bytes: &[u8]) {
assert!(!bytes.is_empty());
assert!(self.by_id.len() <= u16::MAX as usize);
let id = self.by_id.len() as u16;
self.max_pattern_id = id;
self.order.push(id);
self.by_id.push(bytes.to_vec());
self.minimum_len = cmp::min(self.minimum_len, bytes.len());
self.total_pattern_bytes += bytes.len();
}
/// Set the match kind semantics for this collection of patterns.
///
/// If the kind is not set, then the default is leftmost-first.
pub fn set_match_kind(&mut self, kind: MatchKind) {
self.kind = kind;
match self.kind {
MatchKind::LeftmostFirst => {
self.order.sort();
}
MatchKind::LeftmostLongest => {
let (order, by_id) = (&mut self.order, &mut self.by_id);
order.sort_by(|&id1, &id2| {
by_id[id1 as usize]
.len()
.cmp(&by_id[id2 as usize].len())
.reverse()
});
}
}
}
/// Return the number of patterns in this collection.
///
/// This is guaranteed to be greater than zero.
pub fn len(&self) -> usize {
self.by_id.len()
}
/// Returns true if and only if this collection of patterns is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the approximate total amount of heap used by these patterns, in
/// units of bytes.
pub fn memory_usage(&self) -> usize {
self.order.len() * mem::size_of::<PatternID>()
+ self.by_id.len() * mem::size_of::<Vec<u8>>()
+ self.total_pattern_bytes
}
/// Clears all heap memory associated with this collection of patterns and
/// resets all state such that it is a valid empty collection.
pub fn reset(&mut self) {
self.kind = MatchKind::default();
self.by_id.clear();
self.order.clear();
self.minimum_len = usize::MAX;
self.max_pattern_id = 0;
}
/// Return the maximum pattern identifier in this collection. This can be
/// useful in searchers for ensuring that the collection of patterns they
/// are provided at search time and at build time have the same size.
pub fn max_pattern_id(&self) -> PatternID {
assert_eq!((self.max_pattern_id + 1) as usize, self.len());
self.max_pattern_id
}
/// Returns the length, in bytes, of the smallest pattern.
///
/// This is guaranteed to be at least one.
pub fn minimum_len(&self) -> usize {
self.minimum_len
}
/// Returns the match semantics used by these patterns.
pub fn match_kind(&self) -> &MatchKind {
&self.kind
}
/// Return the pattern with the given identifier. If such a pattern does
/// not exist, then this panics.
pub fn get(&self, id: PatternID) -> Pattern<'_> {
Pattern(&self.by_id[id as usize])
}
/// Return the pattern with the given identifier without performing bounds
/// checks.
///
/// # Safety
///
/// Callers must ensure that a pattern with the given identifier exists
/// before using this method.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub unsafe fn get_unchecked(&self, id: PatternID) -> Pattern<'_> {
Pattern(self.by_id.get_unchecked(id as usize))
}
/// Return an iterator over all the patterns in this collection, in the
/// order in which they should be matched.
///
/// Specifically, in a naive multi-pattern matcher, the following is
/// guaranteed to satisfy the match semantics of this collection of
/// patterns:
///
/// ```ignore
/// for i in 0..haystack.len():
/// for p in patterns.iter():
/// if haystack[i..].starts_with(p.bytes()):
/// return Match(p.id(), i, i + p.bytes().len())
/// ```
///
/// Namely, among the patterns in a collection, if they are matched in
/// the order provided by this iterator, then the result is guaranteed
/// to satisfy the correct match semantics. (Either leftmost-first or
/// leftmost-longest.)
pub fn iter(&self) -> PatternIter<'_> {
PatternIter { patterns: self, i: 0 }
}
}
/// An iterator over the patterns in the `Patterns` collection.
///
/// The order of the patterns provided by this iterator is consistent with the
/// match semantics of the originating collection of patterns.
///
/// The lifetime `'p` corresponds to the lifetime of the collection of patterns
/// this is iterating over.
#[derive(Debug)]
pub struct PatternIter<'p> {
patterns: &'p Patterns,
i: usize,
}
impl<'p> Iterator for PatternIter<'p> {
type Item = (PatternID, Pattern<'p>);
fn next(&mut self) -> Option<(PatternID, Pattern<'p>)> {
if self.i >= self.patterns.len() {
return None;
}
let id = self.patterns.order[self.i];
let p = self.patterns.get(id);
self.i += 1;
Some((id, p))
}
}
/// A pattern that is used in packed searching.
#[derive(Clone)]
pub struct Pattern<'a>(&'a [u8]);
impl<'a> fmt::Debug for Pattern<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Pattern")
.field("lit", &String::from_utf8_lossy(&self.0))
.finish()
}
}
impl<'p> Pattern<'p> {
/// Returns the length of this pattern, in bytes.
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns the bytes of this pattern.
pub fn bytes(&self) -> &[u8] {
&self.0
}
/// Returns the first `len` low nybbles from this pattern. If this pattern
/// is shorter than `len`, then this panics.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub fn low_nybbles(&self, len: usize) -> Vec<u8> {
let mut nybs = vec![];
for &b in self.bytes().iter().take(len) {
nybs.push(b & 0xF);
}
nybs
}
/// Returns true if this pattern is a prefix of the given bytes.
#[inline(always)]
pub fn is_prefix(&self, bytes: &[u8]) -> bool {
self.len() <= bytes.len() && self.equals(&bytes[..self.len()])
}
/// Returns true if and only if this pattern equals the given bytes.
#[inline(always)]
pub fn equals(&self, bytes: &[u8]) -> bool |
}
| {
// Why not just use memcmp for this? Well, memcmp requires calling out
// to libc, and this routine is called in fairly hot code paths. Other
// than just calling out to libc, it also seems to result in worse
// codegen. By rolling our own memcpy in pure Rust, it seems to appear
// more friendly to the optimizer.
//
// This results in an improvement in just about every benchmark. Some
// smaller than others, but in some cases, up to 30% faster.
let (x, y) = (self.bytes(), bytes);
if x.len() != y.len() {
return false;
}
// If we don't have enough bytes to do 4-byte at a time loads, then
// fall back to the naive slow version.
if x.len() < 4 {
for (&b1, &b2) in x.iter().zip(y) {
if b1 != b2 {
return false;
}
}
return true;
}
// When we have 4 or more bytes to compare, then proceed in chunks of 4
// at a time using unaligned loads.
//
// Also, why do 4 byte loads instead of, say, 8 byte loads? The reason
// is that this particular version of memcmp is likely to be called
// with tiny needles. That means that if we do 8 byte loads, then a
// higher proportion of memcmp calls will use the slower variant above.
// With that said, this is a hypothesis and is only loosely supported
// by benchmarks. There's likely some improvement that could be made
// here. The main thing here though is to optimize for latency, not
// throughput.
// SAFETY: Via the conditional above, we know that both `px` and `py`
// have the same length, so `px < pxend` implies that `py < pyend`.
// Thus, derefencing both `px` and `py` in the loop below is safe.
//
// Moreover, we set `pxend` and `pyend` to be 4 bytes before the actual
// end of of `px` and `py`. Thus, the final dereference outside of the
// loop is guaranteed to be valid. (The final comparison will overlap
// with the last comparison done in the loop for lengths that aren't
// multiples of four.)
//
// Finally, we needn't worry about alignment here, since we do
// unaligned loads.
unsafe {
let (mut px, mut py) = (x.as_ptr(), y.as_ptr());
let (pxend, pyend) = (px.add(x.len() - 4), py.add(y.len() - 4));
while px < pxend {
let vx = (px as *const u32).read_unaligned();
let vy = (py as *const u32).read_unaligned();
if vx != vy {
return false;
}
px = px.add(4);
py = py.add(4);
}
let vx = (pxend as *const u32).read_unaligned();
let vy = (pyend as *const u32).read_unaligned();
vx == vy
}
} | identifier_body |
pattern.rs | use core::{cmp, fmt, mem, u16, usize};
use alloc::{string::String, vec, vec::Vec};
use crate::packed::api::MatchKind;
/// The type used for representing a pattern identifier.
///
/// We don't use `usize` here because our packed searchers don't scale to
/// huge numbers of patterns, so we keep things a bit smaller.
pub type PatternID = u16;
/// A non-empty collection of non-empty patterns to search for.
///
/// This collection of patterns is what is passed around to both execute
/// searches and to construct the searchers themselves. Namely, this permits
/// searches to avoid copying all of the patterns, and allows us to keep only
/// one copy throughout all packed searchers.
///
/// Note that this collection is not a set. The same pattern can appear more
/// than once.
#[derive(Clone, Debug)]
pub struct Patterns {
/// The match semantics supported by this collection of patterns.
///
/// The match semantics determines the order of the iterator over patterns.
/// For leftmost-first, patterns are provided in the same order as were
/// provided by the caller. For leftmost-longest, patterns are provided in
/// descending order of length, with ties broken by the order in which they
/// were provided by the caller.
kind: MatchKind,
/// The collection of patterns, indexed by their identifier.
by_id: Vec<Vec<u8>>,
/// The order of patterns defined for iteration, given by pattern
/// identifiers. The order of `by_id` and `order` is always the same for
/// leftmost-first semantics, but may be different for leftmost-longest
/// semantics.
order: Vec<PatternID>,
/// The length of the smallest pattern, in bytes.
minimum_len: usize,
/// The largest pattern identifier. This should always be equivalent to
/// the number of patterns minus one in this collection.
max_pattern_id: PatternID,
/// The total number of pattern bytes across the entire collection. This
/// is used for reporting total heap usage in constant time.
total_pattern_bytes: usize,
}
impl Patterns {
/// Create a new collection of patterns for the given match semantics. The
/// ID of each pattern is the index of the pattern at which it occurs in
/// the `by_id` slice.
///
/// If any of the patterns in the slice given are empty, then this panics.
/// Similarly, if the number of patterns given is zero, then this also
/// panics.
pub fn new() -> Patterns {
Patterns {
kind: MatchKind::default(),
by_id: vec![],
order: vec![],
minimum_len: usize::MAX,
max_pattern_id: 0,
total_pattern_bytes: 0,
}
}
/// Add a pattern to this collection.
///
/// This panics if the pattern given is empty.
pub fn add(&mut self, bytes: &[u8]) {
assert!(!bytes.is_empty());
assert!(self.by_id.len() <= u16::MAX as usize);
let id = self.by_id.len() as u16;
self.max_pattern_id = id;
self.order.push(id);
self.by_id.push(bytes.to_vec());
self.minimum_len = cmp::min(self.minimum_len, bytes.len());
self.total_pattern_bytes += bytes.len();
}
/// Set the match kind semantics for this collection of patterns.
///
/// If the kind is not set, then the default is leftmost-first.
pub fn set_match_kind(&mut self, kind: MatchKind) {
self.kind = kind;
match self.kind {
MatchKind::LeftmostFirst => {
self.order.sort();
}
MatchKind::LeftmostLongest => {
let (order, by_id) = (&mut self.order, &mut self.by_id);
order.sort_by(|&id1, &id2| {
by_id[id1 as usize]
.len()
.cmp(&by_id[id2 as usize].len())
.reverse()
});
}
}
}
/// Return the number of patterns in this collection.
///
/// This is guaranteed to be greater than zero.
pub fn len(&self) -> usize {
self.by_id.len()
}
/// Returns true if and only if this collection of patterns is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the approximate total amount of heap used by these patterns, in
/// units of bytes.
pub fn memory_usage(&self) -> usize {
self.order.len() * mem::size_of::<PatternID>()
+ self.by_id.len() * mem::size_of::<Vec<u8>>()
+ self.total_pattern_bytes
}
/// Clears all heap memory associated with this collection of patterns and
/// resets all state such that it is a valid empty collection.
pub fn reset(&mut self) {
self.kind = MatchKind::default();
self.by_id.clear();
self.order.clear();
self.minimum_len = usize::MAX;
self.max_pattern_id = 0;
}
/// Return the maximum pattern identifier in this collection. This can be
/// useful in searchers for ensuring that the collection of patterns they
/// are provided at search time and at build time have the same size.
pub fn max_pattern_id(&self) -> PatternID {
assert_eq!((self.max_pattern_id + 1) as usize, self.len());
self.max_pattern_id
}
/// Returns the length, in bytes, of the smallest pattern.
///
/// This is guaranteed to be at least one.
pub fn minimum_len(&self) -> usize {
self.minimum_len
}
/// Returns the match semantics used by these patterns.
pub fn match_kind(&self) -> &MatchKind {
&self.kind
}
/// Return the pattern with the given identifier. If such a pattern does
/// not exist, then this panics.
pub fn get(&self, id: PatternID) -> Pattern<'_> {
Pattern(&self.by_id[id as usize])
}
/// Return the pattern with the given identifier without performing bounds
/// checks.
///
/// # Safety
///
/// Callers must ensure that a pattern with the given identifier exists
/// before using this method.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub unsafe fn get_unchecked(&self, id: PatternID) -> Pattern<'_> {
Pattern(self.by_id.get_unchecked(id as usize))
}
/// Return an iterator over all the patterns in this collection, in the
/// order in which they should be matched.
///
/// Specifically, in a naive multi-pattern matcher, the following is
/// guaranteed to satisfy the match semantics of this collection of
/// patterns:
///
/// ```ignore
/// for i in 0..haystack.len():
/// for p in patterns.iter():
/// if haystack[i..].starts_with(p.bytes()):
/// return Match(p.id(), i, i + p.bytes().len())
/// ```
///
/// Namely, among the patterns in a collection, if they are matched in
/// the order provided by this iterator, then the result is guaranteed
/// to satisfy the correct match semantics. (Either leftmost-first or
/// leftmost-longest.)
pub fn iter(&self) -> PatternIter<'_> {
PatternIter { patterns: self, i: 0 }
}
}
/// An iterator over the patterns in the `Patterns` collection.
///
/// The order of the patterns provided by this iterator is consistent with the
/// match semantics of the originating collection of patterns.
///
/// The lifetime `'p` corresponds to the lifetime of the collection of patterns
/// this is iterating over.
#[derive(Debug)]
pub struct PatternIter<'p> {
patterns: &'p Patterns,
i: usize,
}
impl<'p> Iterator for PatternIter<'p> {
type Item = (PatternID, Pattern<'p>);
fn next(&mut self) -> Option<(PatternID, Pattern<'p>)> { | self.i += 1;
Some((id, p))
}
}
/// A pattern that is used in packed searching.
#[derive(Clone)]
pub struct Pattern<'a>(&'a [u8]);
impl<'a> fmt::Debug for Pattern<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Pattern")
.field("lit", &String::from_utf8_lossy(&self.0))
.finish()
}
}
impl<'p> Pattern<'p> {
/// Returns the length of this pattern, in bytes.
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns the bytes of this pattern.
pub fn bytes(&self) -> &[u8] {
&self.0
}
/// Returns the first `len` low nybbles from this pattern. If this pattern
/// is shorter than `len`, then this panics.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub fn low_nybbles(&self, len: usize) -> Vec<u8> {
let mut nybs = vec![];
for &b in self.bytes().iter().take(len) {
nybs.push(b & 0xF);
}
nybs
}
/// Returns true if this pattern is a prefix of the given bytes.
#[inline(always)]
pub fn is_prefix(&self, bytes: &[u8]) -> bool {
self.len() <= bytes.len() && self.equals(&bytes[..self.len()])
}
/// Returns true if and only if this pattern equals the given bytes.
#[inline(always)]
pub fn equals(&self, bytes: &[u8]) -> bool {
// Why not just use memcmp for this? Well, memcmp requires calling out
// to libc, and this routine is called in fairly hot code paths. Other
// than just calling out to libc, it also seems to result in worse
// codegen. By rolling our own memcpy in pure Rust, it seems to appear
// more friendly to the optimizer.
//
// This results in an improvement in just about every benchmark. Some
// smaller than others, but in some cases, up to 30% faster.
let (x, y) = (self.bytes(), bytes);
if x.len() != y.len() {
return false;
}
// If we don't have enough bytes to do 4-byte at a time loads, then
// fall back to the naive slow version.
if x.len() < 4 {
for (&b1, &b2) in x.iter().zip(y) {
if b1 != b2 {
return false;
}
}
return true;
}
// When we have 4 or more bytes to compare, then proceed in chunks of 4
// at a time using unaligned loads.
//
// Also, why do 4 byte loads instead of, say, 8 byte loads? The reason
// is that this particular version of memcmp is likely to be called
// with tiny needles. That means that if we do 8 byte loads, then a
// higher proportion of memcmp calls will use the slower variant above.
// With that said, this is a hypothesis and is only loosely supported
// by benchmarks. There's likely some improvement that could be made
// here. The main thing here though is to optimize for latency, not
// throughput.
// SAFETY: Via the conditional above, we know that both `px` and `py`
// have the same length, so `px < pxend` implies that `py < pyend`.
// Thus, derefencing both `px` and `py` in the loop below is safe.
//
// Moreover, we set `pxend` and `pyend` to be 4 bytes before the actual
// end of of `px` and `py`. Thus, the final dereference outside of the
// loop is guaranteed to be valid. (The final comparison will overlap
// with the last comparison done in the loop for lengths that aren't
// multiples of four.)
//
// Finally, we needn't worry about alignment here, since we do
// unaligned loads.
unsafe {
let (mut px, mut py) = (x.as_ptr(), y.as_ptr());
let (pxend, pyend) = (px.add(x.len() - 4), py.add(y.len() - 4));
while px < pxend {
let vx = (px as *const u32).read_unaligned();
let vy = (py as *const u32).read_unaligned();
if vx != vy {
return false;
}
px = px.add(4);
py = py.add(4);
}
let vx = (pxend as *const u32).read_unaligned();
let vy = (pyend as *const u32).read_unaligned();
vx == vy
}
}
} | if self.i >= self.patterns.len() {
return None;
}
let id = self.patterns.order[self.i];
let p = self.patterns.get(id); | random_line_split |
pattern.rs | use core::{cmp, fmt, mem, u16, usize};
use alloc::{string::String, vec, vec::Vec};
use crate::packed::api::MatchKind;
/// The type used for representing a pattern identifier.
///
/// We don't use `usize` here because our packed searchers don't scale to
/// huge numbers of patterns, so we keep things a bit smaller.
pub type PatternID = u16;
/// A non-empty collection of non-empty patterns to search for.
///
/// This collection of patterns is what is passed around to both execute
/// searches and to construct the searchers themselves. Namely, this permits
/// searches to avoid copying all of the patterns, and allows us to keep only
/// one copy throughout all packed searchers.
///
/// Note that this collection is not a set. The same pattern can appear more
/// than once.
#[derive(Clone, Debug)]
pub struct Patterns {
/// The match semantics supported by this collection of patterns.
///
/// The match semantics determines the order of the iterator over patterns.
/// For leftmost-first, patterns are provided in the same order as were
/// provided by the caller. For leftmost-longest, patterns are provided in
/// descending order of length, with ties broken by the order in which they
/// were provided by the caller.
kind: MatchKind,
/// The collection of patterns, indexed by their identifier.
by_id: Vec<Vec<u8>>,
/// The order of patterns defined for iteration, given by pattern
/// identifiers. The order of `by_id` and `order` is always the same for
/// leftmost-first semantics, but may be different for leftmost-longest
/// semantics.
order: Vec<PatternID>,
/// The length of the smallest pattern, in bytes.
minimum_len: usize,
/// The largest pattern identifier. This should always be equivalent to
/// the number of patterns minus one in this collection.
max_pattern_id: PatternID,
/// The total number of pattern bytes across the entire collection. This
/// is used for reporting total heap usage in constant time.
total_pattern_bytes: usize,
}
impl Patterns {
/// Create a new collection of patterns for the given match semantics. The
/// ID of each pattern is the index of the pattern at which it occurs in
/// the `by_id` slice.
///
/// If any of the patterns in the slice given are empty, then this panics.
/// Similarly, if the number of patterns given is zero, then this also
/// panics.
pub fn new() -> Patterns {
Patterns {
kind: MatchKind::default(),
by_id: vec![],
order: vec![],
minimum_len: usize::MAX,
max_pattern_id: 0,
total_pattern_bytes: 0,
}
}
/// Add a pattern to this collection.
///
/// This panics if the pattern given is empty.
pub fn add(&mut self, bytes: &[u8]) {
assert!(!bytes.is_empty());
assert!(self.by_id.len() <= u16::MAX as usize);
let id = self.by_id.len() as u16;
self.max_pattern_id = id;
self.order.push(id);
self.by_id.push(bytes.to_vec());
self.minimum_len = cmp::min(self.minimum_len, bytes.len());
self.total_pattern_bytes += bytes.len();
}
/// Set the match kind semantics for this collection of patterns.
///
/// If the kind is not set, then the default is leftmost-first.
pub fn set_match_kind(&mut self, kind: MatchKind) {
self.kind = kind;
match self.kind {
MatchKind::LeftmostFirst => {
self.order.sort();
}
MatchKind::LeftmostLongest => {
let (order, by_id) = (&mut self.order, &mut self.by_id);
order.sort_by(|&id1, &id2| {
by_id[id1 as usize]
.len()
.cmp(&by_id[id2 as usize].len())
.reverse()
});
}
}
}
/// Return the number of patterns in this collection.
///
/// This is guaranteed to be greater than zero.
pub fn len(&self) -> usize {
self.by_id.len()
}
/// Returns true if and only if this collection of patterns is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the approximate total amount of heap used by these patterns, in
/// units of bytes.
pub fn memory_usage(&self) -> usize {
self.order.len() * mem::size_of::<PatternID>()
+ self.by_id.len() * mem::size_of::<Vec<u8>>()
+ self.total_pattern_bytes
}
/// Clears all heap memory associated with this collection of patterns and
/// resets all state such that it is a valid empty collection.
pub fn reset(&mut self) {
self.kind = MatchKind::default();
self.by_id.clear();
self.order.clear();
self.minimum_len = usize::MAX;
self.max_pattern_id = 0;
}
/// Return the maximum pattern identifier in this collection. This can be
/// useful in searchers for ensuring that the collection of patterns they
/// are provided at search time and at build time have the same size.
pub fn max_pattern_id(&self) -> PatternID {
assert_eq!((self.max_pattern_id + 1) as usize, self.len());
self.max_pattern_id
}
/// Returns the length, in bytes, of the smallest pattern.
///
/// This is guaranteed to be at least one.
pub fn | (&self) -> usize {
self.minimum_len
}
/// Returns the match semantics used by these patterns.
pub fn match_kind(&self) -> &MatchKind {
&self.kind
}
/// Return the pattern with the given identifier. If such a pattern does
/// not exist, then this panics.
pub fn get(&self, id: PatternID) -> Pattern<'_> {
Pattern(&self.by_id[id as usize])
}
/// Return the pattern with the given identifier without performing bounds
/// checks.
///
/// # Safety
///
/// Callers must ensure that a pattern with the given identifier exists
/// before using this method.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub unsafe fn get_unchecked(&self, id: PatternID) -> Pattern<'_> {
Pattern(self.by_id.get_unchecked(id as usize))
}
/// Return an iterator over all the patterns in this collection, in the
/// order in which they should be matched.
///
/// Specifically, in a naive multi-pattern matcher, the following is
/// guaranteed to satisfy the match semantics of this collection of
/// patterns:
///
/// ```ignore
/// for i in 0..haystack.len():
/// for p in patterns.iter():
/// if haystack[i..].starts_with(p.bytes()):
/// return Match(p.id(), i, i + p.bytes().len())
/// ```
///
/// Namely, among the patterns in a collection, if they are matched in
/// the order provided by this iterator, then the result is guaranteed
/// to satisfy the correct match semantics. (Either leftmost-first or
/// leftmost-longest.)
pub fn iter(&self) -> PatternIter<'_> {
PatternIter { patterns: self, i: 0 }
}
}
/// An iterator over the patterns in the `Patterns` collection.
///
/// The order of the patterns provided by this iterator is consistent with the
/// match semantics of the originating collection of patterns.
///
/// The lifetime `'p` corresponds to the lifetime of the collection of patterns
/// this is iterating over.
#[derive(Debug)]
pub struct PatternIter<'p> {
patterns: &'p Patterns,
i: usize,
}
impl<'p> Iterator for PatternIter<'p> {
type Item = (PatternID, Pattern<'p>);
fn next(&mut self) -> Option<(PatternID, Pattern<'p>)> {
if self.i >= self.patterns.len() {
return None;
}
let id = self.patterns.order[self.i];
let p = self.patterns.get(id);
self.i += 1;
Some((id, p))
}
}
/// A pattern that is used in packed searching.
#[derive(Clone)]
pub struct Pattern<'a>(&'a [u8]);
impl<'a> fmt::Debug for Pattern<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Pattern")
.field("lit", &String::from_utf8_lossy(&self.0))
.finish()
}
}
impl<'p> Pattern<'p> {
/// Returns the length of this pattern, in bytes.
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns the bytes of this pattern.
pub fn bytes(&self) -> &[u8] {
&self.0
}
/// Returns the first `len` low nybbles from this pattern. If this pattern
/// is shorter than `len`, then this panics.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub fn low_nybbles(&self, len: usize) -> Vec<u8> {
let mut nybs = vec![];
for &b in self.bytes().iter().take(len) {
nybs.push(b & 0xF);
}
nybs
}
/// Returns true if this pattern is a prefix of the given bytes.
#[inline(always)]
pub fn is_prefix(&self, bytes: &[u8]) -> bool {
self.len() <= bytes.len() && self.equals(&bytes[..self.len()])
}
/// Returns true if and only if this pattern equals the given bytes.
#[inline(always)]
pub fn equals(&self, bytes: &[u8]) -> bool {
// Why not just use memcmp for this? Well, memcmp requires calling out
// to libc, and this routine is called in fairly hot code paths. Other
// than just calling out to libc, it also seems to result in worse
// codegen. By rolling our own memcpy in pure Rust, it seems to appear
// more friendly to the optimizer.
//
// This results in an improvement in just about every benchmark. Some
// smaller than others, but in some cases, up to 30% faster.
let (x, y) = (self.bytes(), bytes);
if x.len() != y.len() {
return false;
}
// If we don't have enough bytes to do 4-byte at a time loads, then
// fall back to the naive slow version.
if x.len() < 4 {
for (&b1, &b2) in x.iter().zip(y) {
if b1 != b2 {
return false;
}
}
return true;
}
// When we have 4 or more bytes to compare, then proceed in chunks of 4
// at a time using unaligned loads.
//
// Also, why do 4 byte loads instead of, say, 8 byte loads? The reason
// is that this particular version of memcmp is likely to be called
// with tiny needles. That means that if we do 8 byte loads, then a
// higher proportion of memcmp calls will use the slower variant above.
// With that said, this is a hypothesis and is only loosely supported
// by benchmarks. There's likely some improvement that could be made
// here. The main thing here though is to optimize for latency, not
// throughput.
// SAFETY: Via the conditional above, we know that both `px` and `py`
// have the same length, so `px < pxend` implies that `py < pyend`.
// Thus, derefencing both `px` and `py` in the loop below is safe.
//
// Moreover, we set `pxend` and `pyend` to be 4 bytes before the actual
// end of of `px` and `py`. Thus, the final dereference outside of the
// loop is guaranteed to be valid. (The final comparison will overlap
// with the last comparison done in the loop for lengths that aren't
// multiples of four.)
//
// Finally, we needn't worry about alignment here, since we do
// unaligned loads.
unsafe {
let (mut px, mut py) = (x.as_ptr(), y.as_ptr());
let (pxend, pyend) = (px.add(x.len() - 4), py.add(y.len() - 4));
while px < pxend {
let vx = (px as *const u32).read_unaligned();
let vy = (py as *const u32).read_unaligned();
if vx != vy {
return false;
}
px = px.add(4);
py = py.add(4);
}
let vx = (pxend as *const u32).read_unaligned();
let vy = (pyend as *const u32).read_unaligned();
vx == vy
}
}
}
| minimum_len | identifier_name |
pattern.rs | use core::{cmp, fmt, mem, u16, usize};
use alloc::{string::String, vec, vec::Vec};
use crate::packed::api::MatchKind;
/// The type used for representing a pattern identifier.
///
/// We don't use `usize` here because our packed searchers don't scale to
/// huge numbers of patterns, so we keep things a bit smaller.
pub type PatternID = u16;
/// A non-empty collection of non-empty patterns to search for.
///
/// This collection of patterns is what is passed around to both execute
/// searches and to construct the searchers themselves. Namely, this permits
/// searches to avoid copying all of the patterns, and allows us to keep only
/// one copy throughout all packed searchers.
///
/// Note that this collection is not a set. The same pattern can appear more
/// than once.
#[derive(Clone, Debug)]
pub struct Patterns {
/// The match semantics supported by this collection of patterns.
///
/// The match semantics determines the order of the iterator over patterns.
/// For leftmost-first, patterns are provided in the same order as were
/// provided by the caller. For leftmost-longest, patterns are provided in
/// descending order of length, with ties broken by the order in which they
/// were provided by the caller.
kind: MatchKind,
/// The collection of patterns, indexed by their identifier.
by_id: Vec<Vec<u8>>,
/// The order of patterns defined for iteration, given by pattern
/// identifiers. The order of `by_id` and `order` is always the same for
/// leftmost-first semantics, but may be different for leftmost-longest
/// semantics.
order: Vec<PatternID>,
/// The length of the smallest pattern, in bytes.
minimum_len: usize,
/// The largest pattern identifier. This should always be equivalent to
/// the number of patterns minus one in this collection.
max_pattern_id: PatternID,
/// The total number of pattern bytes across the entire collection. This
/// is used for reporting total heap usage in constant time.
total_pattern_bytes: usize,
}
impl Patterns {
/// Create a new collection of patterns for the given match semantics. The
/// ID of each pattern is the index of the pattern at which it occurs in
/// the `by_id` slice.
///
/// If any of the patterns in the slice given are empty, then this panics.
/// Similarly, if the number of patterns given is zero, then this also
/// panics.
pub fn new() -> Patterns {
Patterns {
kind: MatchKind::default(),
by_id: vec![],
order: vec![],
minimum_len: usize::MAX,
max_pattern_id: 0,
total_pattern_bytes: 0,
}
}
/// Add a pattern to this collection.
///
/// This panics if the pattern given is empty.
pub fn add(&mut self, bytes: &[u8]) {
assert!(!bytes.is_empty());
assert!(self.by_id.len() <= u16::MAX as usize);
let id = self.by_id.len() as u16;
self.max_pattern_id = id;
self.order.push(id);
self.by_id.push(bytes.to_vec());
self.minimum_len = cmp::min(self.minimum_len, bytes.len());
self.total_pattern_bytes += bytes.len();
}
/// Set the match kind semantics for this collection of patterns.
///
/// If the kind is not set, then the default is leftmost-first.
pub fn set_match_kind(&mut self, kind: MatchKind) {
self.kind = kind;
match self.kind {
MatchKind::LeftmostFirst => |
MatchKind::LeftmostLongest => {
let (order, by_id) = (&mut self.order, &mut self.by_id);
order.sort_by(|&id1, &id2| {
by_id[id1 as usize]
.len()
.cmp(&by_id[id2 as usize].len())
.reverse()
});
}
}
}
/// Return the number of patterns in this collection.
///
/// This is guaranteed to be greater than zero.
pub fn len(&self) -> usize {
self.by_id.len()
}
/// Returns true if and only if this collection of patterns is empty.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns the approximate total amount of heap used by these patterns, in
/// units of bytes.
pub fn memory_usage(&self) -> usize {
self.order.len() * mem::size_of::<PatternID>()
+ self.by_id.len() * mem::size_of::<Vec<u8>>()
+ self.total_pattern_bytes
}
/// Clears all heap memory associated with this collection of patterns and
/// resets all state such that it is a valid empty collection.
pub fn reset(&mut self) {
self.kind = MatchKind::default();
self.by_id.clear();
self.order.clear();
self.minimum_len = usize::MAX;
self.max_pattern_id = 0;
}
/// Return the maximum pattern identifier in this collection. This can be
/// useful in searchers for ensuring that the collection of patterns they
/// are provided at search time and at build time have the same size.
pub fn max_pattern_id(&self) -> PatternID {
assert_eq!((self.max_pattern_id + 1) as usize, self.len());
self.max_pattern_id
}
/// Returns the length, in bytes, of the smallest pattern.
///
/// This is guaranteed to be at least one.
pub fn minimum_len(&self) -> usize {
self.minimum_len
}
/// Returns the match semantics used by these patterns.
pub fn match_kind(&self) -> &MatchKind {
&self.kind
}
/// Return the pattern with the given identifier. If such a pattern does
/// not exist, then this panics.
pub fn get(&self, id: PatternID) -> Pattern<'_> {
Pattern(&self.by_id[id as usize])
}
/// Return the pattern with the given identifier without performing bounds
/// checks.
///
/// # Safety
///
/// Callers must ensure that a pattern with the given identifier exists
/// before using this method.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub unsafe fn get_unchecked(&self, id: PatternID) -> Pattern<'_> {
Pattern(self.by_id.get_unchecked(id as usize))
}
/// Return an iterator over all the patterns in this collection, in the
/// order in which they should be matched.
///
/// Specifically, in a naive multi-pattern matcher, the following is
/// guaranteed to satisfy the match semantics of this collection of
/// patterns:
///
/// ```ignore
/// for i in 0..haystack.len():
/// for p in patterns.iter():
/// if haystack[i..].starts_with(p.bytes()):
/// return Match(p.id(), i, i + p.bytes().len())
/// ```
///
/// Namely, among the patterns in a collection, if they are matched in
/// the order provided by this iterator, then the result is guaranteed
/// to satisfy the correct match semantics. (Either leftmost-first or
/// leftmost-longest.)
pub fn iter(&self) -> PatternIter<'_> {
PatternIter { patterns: self, i: 0 }
}
}
/// An iterator over the patterns in the `Patterns` collection.
///
/// The order of the patterns provided by this iterator is consistent with the
/// match semantics of the originating collection of patterns.
///
/// The lifetime `'p` corresponds to the lifetime of the collection of patterns
/// this is iterating over.
#[derive(Debug)]
pub struct PatternIter<'p> {
patterns: &'p Patterns,
i: usize,
}
impl<'p> Iterator for PatternIter<'p> {
type Item = (PatternID, Pattern<'p>);
fn next(&mut self) -> Option<(PatternID, Pattern<'p>)> {
if self.i >= self.patterns.len() {
return None;
}
let id = self.patterns.order[self.i];
let p = self.patterns.get(id);
self.i += 1;
Some((id, p))
}
}
/// A pattern that is used in packed searching.
#[derive(Clone)]
pub struct Pattern<'a>(&'a [u8]);
impl<'a> fmt::Debug for Pattern<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Pattern")
.field("lit", &String::from_utf8_lossy(&self.0))
.finish()
}
}
impl<'p> Pattern<'p> {
/// Returns the length of this pattern, in bytes.
pub fn len(&self) -> usize {
self.0.len()
}
/// Returns the bytes of this pattern.
pub fn bytes(&self) -> &[u8] {
&self.0
}
/// Returns the first `len` low nybbles from this pattern. If this pattern
/// is shorter than `len`, then this panics.
#[cfg(all(feature = "std", target_arch = "x86_64"))]
pub fn low_nybbles(&self, len: usize) -> Vec<u8> {
let mut nybs = vec![];
for &b in self.bytes().iter().take(len) {
nybs.push(b & 0xF);
}
nybs
}
/// Returns true if this pattern is a prefix of the given bytes.
#[inline(always)]
pub fn is_prefix(&self, bytes: &[u8]) -> bool {
self.len() <= bytes.len() && self.equals(&bytes[..self.len()])
}
/// Returns true if and only if this pattern equals the given bytes.
#[inline(always)]
pub fn equals(&self, bytes: &[u8]) -> bool {
// Why not just use memcmp for this? Well, memcmp requires calling out
// to libc, and this routine is called in fairly hot code paths. Other
// than just calling out to libc, it also seems to result in worse
// codegen. By rolling our own memcpy in pure Rust, it seems to appear
// more friendly to the optimizer.
//
// This results in an improvement in just about every benchmark. Some
// smaller than others, but in some cases, up to 30% faster.
let (x, y) = (self.bytes(), bytes);
if x.len() != y.len() {
return false;
}
// If we don't have enough bytes to do 4-byte at a time loads, then
// fall back to the naive slow version.
if x.len() < 4 {
for (&b1, &b2) in x.iter().zip(y) {
if b1 != b2 {
return false;
}
}
return true;
}
// When we have 4 or more bytes to compare, then proceed in chunks of 4
// at a time using unaligned loads.
//
// Also, why do 4 byte loads instead of, say, 8 byte loads? The reason
// is that this particular version of memcmp is likely to be called
// with tiny needles. That means that if we do 8 byte loads, then a
// higher proportion of memcmp calls will use the slower variant above.
// With that said, this is a hypothesis and is only loosely supported
// by benchmarks. There's likely some improvement that could be made
// here. The main thing here though is to optimize for latency, not
// throughput.
// SAFETY: Via the conditional above, we know that both `px` and `py`
// have the same length, so `px < pxend` implies that `py < pyend`.
// Thus, derefencing both `px` and `py` in the loop below is safe.
//
// Moreover, we set `pxend` and `pyend` to be 4 bytes before the actual
// end of of `px` and `py`. Thus, the final dereference outside of the
// loop is guaranteed to be valid. (The final comparison will overlap
// with the last comparison done in the loop for lengths that aren't
// multiples of four.)
//
// Finally, we needn't worry about alignment here, since we do
// unaligned loads.
unsafe {
let (mut px, mut py) = (x.as_ptr(), y.as_ptr());
let (pxend, pyend) = (px.add(x.len() - 4), py.add(y.len() - 4));
while px < pxend {
let vx = (px as *const u32).read_unaligned();
let vy = (py as *const u32).read_unaligned();
if vx != vy {
return false;
}
px = px.add(4);
py = py.add(4);
}
let vx = (pxend as *const u32).read_unaligned();
let vy = (pyend as *const u32).read_unaligned();
vx == vy
}
}
}
| {
self.order.sort();
} | conditional_block |
pbb_wgangp.py | import os
import sys
import argparse
import pickle
import numpy as np
import tensorflow as tf
from tqdm import tqdm
### import tools
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'tools'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'tools/lpips_tensorflow'))
from utils import *
import lpips_tf
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../gan_models/wgangp'))
from train import *
### Hyperparameters
LAMBDA2 = 0.2
LAMBDA3 = 0.001
RANDOM_SEED = 1000
#############################################################################################################
# get and save the arguments
#############################################################################################################
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', '-name', type=str, required=True,
help='the name of the current experiment (used to set up the save_dir)')
parser.add_argument('--gan_model_dir', '-gdir', type=str, required=True,
help='directory for the Victim GAN model')
parser.add_argument('--pos_data_dir', '-posdir', type=str,
help='the directory for the positive (training) query images set')
parser.add_argument('--neg_data_dir', '-negdir', type=str,
help='the directory for the negative (testing) query images set')
parser.add_argument('--data_num', '-dnum', type=int, default=5,
help='the number of query images to be considered')
parser.add_argument('--batch_size', '-bs', type=int, default=1,
help='batch size')
parser.add_argument('--initialize_type', '-init', type=str, default='zero',
choices=['zero', # 'zero': initialize the z to be zeros
'random', # 'random': use normal distributed initialization
'nn', # 'nn' : use nearest-neighbor initialization
],
help='the initialization techniques')
parser.add_argument('--nn_dir', '-ndir', type=str,
help='directory for the fbb(KNN) results')
parser.add_argument('--distance', '-dist', type=str, default='l2-lpips', choices=['l2', 'l2-lpips'],
help='the objective function type')
parser.add_argument('--if_norm_reg', '-reg', action='store_true', default=True,
help='enable the norm regularizer')
parser.add_argument('--maxiter', type=int, default=10,
help='the maximum number of iterations')
return parser.parse_args()
def check_args(args):
'''
check and store the arguments as well as set up the save_dir
:param args: arguments
:return:
'''
## load dir
assert os.path.exists(args.gan_model_dir)
## set up save_dir
save_dir = os.path.join(os.path.dirname(__file__), 'results/pbb', args.exp_name)
check_folder(save_dir)
## store the parameters
with open(os.path.join(save_dir, 'params.txt'), 'w') as f:
for k, v in vars(args).items():
f.writelines(k + ":" + str(v) + "\n")
print(k + ":" + str(v))
pickle.dump(vars(args), open(os.path.join(save_dir, 'params.pkl'), 'wb'), protocol=2)
return args, save_dir, args.gan_model_dir
#############################################################################################################
# main optimization function
#############################################################################################################
def optimize_z(sess, z, x, x_hat,
init_val_ph, init_val,
query_imgs, save_dir,
opt, vec_loss, vec_loss_dict):
"""
z = argmin_z \lambda_1*|x_hat -x|^2 + \lambda_2 * LPIPS(x_hat,x)+ \lambda_3* L_reg
where x_hat = G(z)
:param sess: session
:param z: latent variable
:param x: query
:param x_hat: reconstruction
:param init_val_ph: placeholder for initialization value
:param init_val: dict that stores the initialization value
:param query_imgs: query data
:param save_dir: save directory
:param opt: optimization operator
:param vec_loss: full loss
:param vec_loss_dict: dict that stores each term in the objective
:return:
"""
### store results
all_loss = []
all_z = []
all_x_hat = []
### get the local variables
vars = [var for var in tf.global_variables() if
'latent_z' in var.name]
for v in vars:
print(v.name)
### callback function
global step, loss_progress
loss_progress = []
step = 0
def update(x_hat_curr, vec_loss_val):
'''
callback function for the lbfgs optimizer
:param x_hat_curr:
:param vec_loss_val:
:return:
'''
global step, loss_progress
loss_progress.append(vec_loss_val)
step += 1
### run the optimization for all query data
size = len(query_imgs)
for i in tqdm(range(size // BATCH_SIZE)):
save_dir_batch = os.path.join(save_dir, str(i))
try:
x_gt = query_imgs[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
if os.path.exists(save_dir_batch):
pass
else:
visualize_gt(x_gt, check_folder(save_dir_batch))
### initialize z
if init_val_ph is not None:
sess.run(tf.initialize_variables(vars),
feed_dict={init_val_ph: init_val[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]})
else:
sess.run(tf.initialize_variables(vars))
### optimize
loss_progress = []
step = 0
batch_idx = i
vec_loss_curr, z_curr, x_hat_curr = sess.run([vec_loss, z, x_hat], feed_dict={x: x_gt})
visualize_progress(x_hat_curr, vec_loss_curr, save_dir_batch, step) # visualize init
opt.minimize(sess, feed_dict={x: x_gt}, fetches=[x_hat, vec_loss], loss_callback=update)
vec_loss_curr, z_curr, x_hat_curr = sess.run([vec_loss, z, x_hat], feed_dict={x: x_gt})
visualize_progress(x_hat_curr, vec_loss_curr, save_dir_batch, step) # visualize final
### store results
all_loss.append(vec_loss_curr)
all_z.append(z_curr)
all_x_hat.append(x_hat_curr)
### save to disk
for key in vec_loss_dict.keys():
# each term in the objective
val = sess.run(vec_loss_dict[key], feed_dict={x: x_gt})
save_files(os.path.join(save_dir, str(i)), [key], [val])
save_files(os.path.join(save_dir, str(i)),
['full_loss', 'z', 'xhat', 'loss_progress'],
[vec_loss_curr, z_curr, x_hat_curr, np.array(loss_progress)])
except KeyboardInterrupt:
print('Stop optimization\n')
break
try:
all_loss = np.concatenate(all_loss)
all_z = np.concatenate(all_z)
all_x_hat = np.concatenate(all_x_hat)
except:
all_loss = np.array(all_loss)
all_z = np.array(all_z)
all_x_hat = np.array(all_x_hat)
return all_loss, all_z, all_x_hat
#############################################################################################################
# main
#############################################################################################################
def main():
|
if __name__ == '__main__':
main()
| args, save_dir, load_dir = check_args(parse_arguments())
config_path = os.path.join(load_dir, 'params.pkl')
if os.path.exists(config_path):
config = pickle.load(open(config_path, 'rb'))
OUTPUT_SIZE = config['OUTPUT_SIZE']
GAN_TYPE = config['Architecture']
Z_DIM = config['Z_DIM']
else:
OUTPUT_SIZE = 64
GAN_TYPE = 'good'
Z_DIM = 128
### set up the generator and the discriminator
Generator, Discriminator = GeneratorAndDiscriminator(GAN_TYPE)
### open session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
### define variables
global BATCH_SIZE
BATCH_SIZE = args.batch_size
x = tf.placeholder(tf.float32, shape=(None, OUTPUT_SIZE, OUTPUT_SIZE, 3), name='x')
### initialization
init_val_ph = None
init_val = {'pos': None, 'neg': None}
if args.initialize_type == 'zero':
z = tf.Variable(tf.zeros([BATCH_SIZE, Z_DIM], tf.float32), name='latent_z')
elif args.initialize_type == 'random':
np.random.seed(RANDOM_SEED)
init_val_np = np.random.normal(size=(Z_DIM,))
init = np.tile(init_val_np, (BATCH_SIZE, 1)).astype(np.float32)
z = tf.Variable(init, name='latent_z')
elif args.initialize_type == 'nn':
init_val['pos'] = np.load(os.path.join(args.nn_dir, 'pos_z.npy'))[:, 0, :]
init_val['neg'] = np.load(os.path.join(args.nn_dir, 'neg_z.npy'))[:, 0, :]
init_val_ph = tf.placeholder(dtype=tf.float32, name='init_ph', shape=(BATCH_SIZE, Z_DIM))
z = tf.Variable(init_val_ph, name='latent_z')
else:
raise NotImplementedError
### get the reconstruction (x_hat)
x_hat = Generator(BATCH_SIZE, noise=z, is_training=False, z_dim=Z_DIM)
x_hat = tf.reshape(x_hat, [-1, 3, OUTPUT_SIZE, OUTPUT_SIZE])
x_hat = tf.transpose(x_hat, perm=[0, 2, 3, 1])
### load model
vars = [v for v in tf.global_variables() if 'latent_z' not in v.name]
saver = tf.train.Saver(vars)
sess.run(tf.initialize_variables(vars))
if_load, counter = load_model_from_checkpoint(load_dir, saver, sess)
assert if_load is True
### loss
if args.distance == 'l2':
print('use distance: l2')
loss_l2 = tf.reduce_mean(tf.square(x_hat - x), axis=[1, 2, 3])
vec_loss = loss_l2
vec_losses = {'l2': loss_l2}
elif args.distance == 'l2-lpips':
print('use distance: lpips + l2')
loss_l2 = tf.reduce_mean(tf.square(x_hat - x), axis=[1, 2, 3])
loss_lpips = lpips_tf.lpips(x_hat, x, normalize=False, model='net-lin', net='vgg', version='0.1')
vec_losses = {'l2': loss_l2,
'lpips': loss_lpips}
vec_loss = loss_l2 + LAMBDA2 * loss_lpips
else:
raise NotImplementedError
## regularizer
norm = tf.reduce_sum(tf.square(z), axis=1)
norm_penalty = (norm - Z_DIM) ** 2
if args.if_norm_reg:
loss = tf.reduce_mean(vec_loss) + LAMBDA3 * tf.reduce_mean(norm_penalty)
vec_losses['norm'] = norm_penalty
else:
loss = tf.reduce_mean(vec_loss)
### set up optimizer
opt = tf.contrib.opt.ScipyOptimizerInterface(loss,
var_list=[z],
method='Powell',
options={'maxiter': args.maxiter})
### load query images
pos_data_paths = get_filepaths_from_dir(args.pos_data_dir, ext='png')[: args.data_num]
pos_query_imgs = np.array([read_image(f, OUTPUT_SIZE) for f in pos_data_paths])
neg_data_paths = get_filepaths_from_dir(args.neg_data_dir, ext='png')[: args.data_num]
neg_query_imgs = np.array([read_image(f, OUTPUT_SIZE) for f in neg_data_paths])
### run the optimization on query images
query_loss, query_z, query_xhat = optimize_z(sess, z, x, x_hat,
init_val_ph, init_val['pos'],
pos_query_imgs,
check_folder(os.path.join(save_dir, 'pos_results')),
opt, vec_loss, vec_losses)
save_files(save_dir, ['pos_loss'], [query_loss])
query_loss, query_z, query_xhat = optimize_z(sess, z, x, x_hat,
init_val_ph, init_val['neg'],
neg_query_imgs,
check_folder(os.path.join(save_dir, 'neg_results')),
opt, vec_loss, vec_losses)
save_files(save_dir, ['neg_loss'], [query_loss]) | identifier_body |
pbb_wgangp.py | import os
import sys
import argparse
import pickle
import numpy as np
import tensorflow as tf
from tqdm import tqdm
### import tools
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'tools'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'tools/lpips_tensorflow'))
from utils import *
import lpips_tf
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../gan_models/wgangp'))
from train import *
### Hyperparameters
LAMBDA2 = 0.2
LAMBDA3 = 0.001
RANDOM_SEED = 1000
#############################################################################################################
# get and save the arguments
#############################################################################################################
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', '-name', type=str, required=True,
help='the name of the current experiment (used to set up the save_dir)')
parser.add_argument('--gan_model_dir', '-gdir', type=str, required=True,
help='directory for the Victim GAN model')
parser.add_argument('--pos_data_dir', '-posdir', type=str,
help='the directory for the positive (training) query images set')
parser.add_argument('--neg_data_dir', '-negdir', type=str,
help='the directory for the negative (testing) query images set')
parser.add_argument('--data_num', '-dnum', type=int, default=5,
help='the number of query images to be considered')
parser.add_argument('--batch_size', '-bs', type=int, default=1,
help='batch size')
parser.add_argument('--initialize_type', '-init', type=str, default='zero',
choices=['zero', # 'zero': initialize the z to be zeros
'random', # 'random': use normal distributed initialization
'nn', # 'nn' : use nearest-neighbor initialization
],
help='the initialization techniques')
parser.add_argument('--nn_dir', '-ndir', type=str,
help='directory for the fbb(KNN) results')
parser.add_argument('--distance', '-dist', type=str, default='l2-lpips', choices=['l2', 'l2-lpips'],
help='the objective function type')
parser.add_argument('--if_norm_reg', '-reg', action='store_true', default=True,
help='enable the norm regularizer')
parser.add_argument('--maxiter', type=int, default=10,
help='the maximum number of iterations')
return parser.parse_args()
def check_args(args):
'''
check and store the arguments as well as set up the save_dir
:param args: arguments
:return:
'''
## load dir
assert os.path.exists(args.gan_model_dir)
## set up save_dir
save_dir = os.path.join(os.path.dirname(__file__), 'results/pbb', args.exp_name)
check_folder(save_dir)
## store the parameters
with open(os.path.join(save_dir, 'params.txt'), 'w') as f:
for k, v in vars(args).items():
f.writelines(k + ":" + str(v) + "\n")
print(k + ":" + str(v))
pickle.dump(vars(args), open(os.path.join(save_dir, 'params.pkl'), 'wb'), protocol=2)
return args, save_dir, args.gan_model_dir
#############################################################################################################
# main optimization function
#############################################################################################################
def optimize_z(sess, z, x, x_hat,
init_val_ph, init_val,
query_imgs, save_dir,
opt, vec_loss, vec_loss_dict):
"""
z = argmin_z \lambda_1*|x_hat -x|^2 + \lambda_2 * LPIPS(x_hat,x)+ \lambda_3* L_reg
where x_hat = G(z)
:param sess: session
:param z: latent variable
:param x: query
:param x_hat: reconstruction
:param init_val_ph: placeholder for initialization value
:param init_val: dict that stores the initialization value
:param query_imgs: query data
:param save_dir: save directory
:param opt: optimization operator
:param vec_loss: full loss
:param vec_loss_dict: dict that stores each term in the objective
:return:
"""
### store results
all_loss = []
all_z = []
all_x_hat = []
### get the local variables
vars = [var for var in tf.global_variables() if
'latent_z' in var.name]
for v in vars:
print(v.name)
### callback function
global step, loss_progress
loss_progress = []
step = 0
def update(x_hat_curr, vec_loss_val):
'''
callback function for the lbfgs optimizer
:param x_hat_curr:
:param vec_loss_val:
:return:
'''
global step, loss_progress
loss_progress.append(vec_loss_val)
step += 1
### run the optimization for all query data
size = len(query_imgs) |
try:
x_gt = query_imgs[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
if os.path.exists(save_dir_batch):
pass
else:
visualize_gt(x_gt, check_folder(save_dir_batch))
### initialize z
if init_val_ph is not None:
sess.run(tf.initialize_variables(vars),
feed_dict={init_val_ph: init_val[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]})
else:
sess.run(tf.initialize_variables(vars))
### optimize
loss_progress = []
step = 0
batch_idx = i
vec_loss_curr, z_curr, x_hat_curr = sess.run([vec_loss, z, x_hat], feed_dict={x: x_gt})
visualize_progress(x_hat_curr, vec_loss_curr, save_dir_batch, step) # visualize init
opt.minimize(sess, feed_dict={x: x_gt}, fetches=[x_hat, vec_loss], loss_callback=update)
vec_loss_curr, z_curr, x_hat_curr = sess.run([vec_loss, z, x_hat], feed_dict={x: x_gt})
visualize_progress(x_hat_curr, vec_loss_curr, save_dir_batch, step) # visualize final
### store results
all_loss.append(vec_loss_curr)
all_z.append(z_curr)
all_x_hat.append(x_hat_curr)
### save to disk
for key in vec_loss_dict.keys():
# each term in the objective
val = sess.run(vec_loss_dict[key], feed_dict={x: x_gt})
save_files(os.path.join(save_dir, str(i)), [key], [val])
save_files(os.path.join(save_dir, str(i)),
['full_loss', 'z', 'xhat', 'loss_progress'],
[vec_loss_curr, z_curr, x_hat_curr, np.array(loss_progress)])
except KeyboardInterrupt:
print('Stop optimization\n')
break
try:
all_loss = np.concatenate(all_loss)
all_z = np.concatenate(all_z)
all_x_hat = np.concatenate(all_x_hat)
except:
all_loss = np.array(all_loss)
all_z = np.array(all_z)
all_x_hat = np.array(all_x_hat)
return all_loss, all_z, all_x_hat
#############################################################################################################
# main
#############################################################################################################
def main():
args, save_dir, load_dir = check_args(parse_arguments())
config_path = os.path.join(load_dir, 'params.pkl')
if os.path.exists(config_path):
config = pickle.load(open(config_path, 'rb'))
OUTPUT_SIZE = config['OUTPUT_SIZE']
GAN_TYPE = config['Architecture']
Z_DIM = config['Z_DIM']
else:
OUTPUT_SIZE = 64
GAN_TYPE = 'good'
Z_DIM = 128
### set up the generator and the discriminator
Generator, Discriminator = GeneratorAndDiscriminator(GAN_TYPE)
### open session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
### define variables
global BATCH_SIZE
BATCH_SIZE = args.batch_size
x = tf.placeholder(tf.float32, shape=(None, OUTPUT_SIZE, OUTPUT_SIZE, 3), name='x')
### initialization
init_val_ph = None
init_val = {'pos': None, 'neg': None}
if args.initialize_type == 'zero':
z = tf.Variable(tf.zeros([BATCH_SIZE, Z_DIM], tf.float32), name='latent_z')
elif args.initialize_type == 'random':
np.random.seed(RANDOM_SEED)
init_val_np = np.random.normal(size=(Z_DIM,))
init = np.tile(init_val_np, (BATCH_SIZE, 1)).astype(np.float32)
z = tf.Variable(init, name='latent_z')
elif args.initialize_type == 'nn':
init_val['pos'] = np.load(os.path.join(args.nn_dir, 'pos_z.npy'))[:, 0, :]
init_val['neg'] = np.load(os.path.join(args.nn_dir, 'neg_z.npy'))[:, 0, :]
init_val_ph = tf.placeholder(dtype=tf.float32, name='init_ph', shape=(BATCH_SIZE, Z_DIM))
z = tf.Variable(init_val_ph, name='latent_z')
else:
raise NotImplementedError
### get the reconstruction (x_hat)
x_hat = Generator(BATCH_SIZE, noise=z, is_training=False, z_dim=Z_DIM)
x_hat = tf.reshape(x_hat, [-1, 3, OUTPUT_SIZE, OUTPUT_SIZE])
x_hat = tf.transpose(x_hat, perm=[0, 2, 3, 1])
### load model
vars = [v for v in tf.global_variables() if 'latent_z' not in v.name]
saver = tf.train.Saver(vars)
sess.run(tf.initialize_variables(vars))
if_load, counter = load_model_from_checkpoint(load_dir, saver, sess)
assert if_load is True
### loss
if args.distance == 'l2':
print('use distance: l2')
loss_l2 = tf.reduce_mean(tf.square(x_hat - x), axis=[1, 2, 3])
vec_loss = loss_l2
vec_losses = {'l2': loss_l2}
elif args.distance == 'l2-lpips':
print('use distance: lpips + l2')
loss_l2 = tf.reduce_mean(tf.square(x_hat - x), axis=[1, 2, 3])
loss_lpips = lpips_tf.lpips(x_hat, x, normalize=False, model='net-lin', net='vgg', version='0.1')
vec_losses = {'l2': loss_l2,
'lpips': loss_lpips}
vec_loss = loss_l2 + LAMBDA2 * loss_lpips
else:
raise NotImplementedError
## regularizer
norm = tf.reduce_sum(tf.square(z), axis=1)
norm_penalty = (norm - Z_DIM) ** 2
if args.if_norm_reg:
loss = tf.reduce_mean(vec_loss) + LAMBDA3 * tf.reduce_mean(norm_penalty)
vec_losses['norm'] = norm_penalty
else:
loss = tf.reduce_mean(vec_loss)
### set up optimizer
opt = tf.contrib.opt.ScipyOptimizerInterface(loss,
var_list=[z],
method='Powell',
options={'maxiter': args.maxiter})
### load query images
pos_data_paths = get_filepaths_from_dir(args.pos_data_dir, ext='png')[: args.data_num]
pos_query_imgs = np.array([read_image(f, OUTPUT_SIZE) for f in pos_data_paths])
neg_data_paths = get_filepaths_from_dir(args.neg_data_dir, ext='png')[: args.data_num]
neg_query_imgs = np.array([read_image(f, OUTPUT_SIZE) for f in neg_data_paths])
### run the optimization on query images
query_loss, query_z, query_xhat = optimize_z(sess, z, x, x_hat,
init_val_ph, init_val['pos'],
pos_query_imgs,
check_folder(os.path.join(save_dir, 'pos_results')),
opt, vec_loss, vec_losses)
save_files(save_dir, ['pos_loss'], [query_loss])
query_loss, query_z, query_xhat = optimize_z(sess, z, x, x_hat,
init_val_ph, init_val['neg'],
neg_query_imgs,
check_folder(os.path.join(save_dir, 'neg_results')),
opt, vec_loss, vec_losses)
save_files(save_dir, ['neg_loss'], [query_loss])
if __name__ == '__main__':
main() | for i in tqdm(range(size // BATCH_SIZE)):
save_dir_batch = os.path.join(save_dir, str(i)) | random_line_split |
pbb_wgangp.py | import os
import sys
import argparse
import pickle
import numpy as np
import tensorflow as tf
from tqdm import tqdm
### import tools
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'tools'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'tools/lpips_tensorflow'))
from utils import *
import lpips_tf
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../gan_models/wgangp'))
from train import *
### Hyperparameters
LAMBDA2 = 0.2
LAMBDA3 = 0.001
RANDOM_SEED = 1000
#############################################################################################################
# get and save the arguments
#############################################################################################################
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', '-name', type=str, required=True,
help='the name of the current experiment (used to set up the save_dir)')
parser.add_argument('--gan_model_dir', '-gdir', type=str, required=True,
help='directory for the Victim GAN model')
parser.add_argument('--pos_data_dir', '-posdir', type=str,
help='the directory for the positive (training) query images set')
parser.add_argument('--neg_data_dir', '-negdir', type=str,
help='the directory for the negative (testing) query images set')
parser.add_argument('--data_num', '-dnum', type=int, default=5,
help='the number of query images to be considered')
parser.add_argument('--batch_size', '-bs', type=int, default=1,
help='batch size')
parser.add_argument('--initialize_type', '-init', type=str, default='zero',
choices=['zero', # 'zero': initialize the z to be zeros
'random', # 'random': use normal distributed initialization
'nn', # 'nn' : use nearest-neighbor initialization
],
help='the initialization techniques')
parser.add_argument('--nn_dir', '-ndir', type=str,
help='directory for the fbb(KNN) results')
parser.add_argument('--distance', '-dist', type=str, default='l2-lpips', choices=['l2', 'l2-lpips'],
help='the objective function type')
parser.add_argument('--if_norm_reg', '-reg', action='store_true', default=True,
help='enable the norm regularizer')
parser.add_argument('--maxiter', type=int, default=10,
help='the maximum number of iterations')
return parser.parse_args()
def check_args(args):
'''
check and store the arguments as well as set up the save_dir
:param args: arguments
:return:
'''
## load dir
assert os.path.exists(args.gan_model_dir)
## set up save_dir
save_dir = os.path.join(os.path.dirname(__file__), 'results/pbb', args.exp_name)
check_folder(save_dir)
## store the parameters
with open(os.path.join(save_dir, 'params.txt'), 'w') as f:
for k, v in vars(args).items():
f.writelines(k + ":" + str(v) + "\n")
print(k + ":" + str(v))
pickle.dump(vars(args), open(os.path.join(save_dir, 'params.pkl'), 'wb'), protocol=2)
return args, save_dir, args.gan_model_dir
#############################################################################################################
# main optimization function
#############################################################################################################
def optimize_z(sess, z, x, x_hat,
init_val_ph, init_val,
query_imgs, save_dir,
opt, vec_loss, vec_loss_dict):
"""
z = argmin_z \lambda_1*|x_hat -x|^2 + \lambda_2 * LPIPS(x_hat,x)+ \lambda_3* L_reg
where x_hat = G(z)
:param sess: session
:param z: latent variable
:param x: query
:param x_hat: reconstruction
:param init_val_ph: placeholder for initialization value
:param init_val: dict that stores the initialization value
:param query_imgs: query data
:param save_dir: save directory
:param opt: optimization operator
:param vec_loss: full loss
:param vec_loss_dict: dict that stores each term in the objective
:return:
"""
### store results
all_loss = []
all_z = []
all_x_hat = []
### get the local variables
vars = [var for var in tf.global_variables() if
'latent_z' in var.name]
for v in vars:
print(v.name)
### callback function
global step, loss_progress
loss_progress = []
step = 0
def update(x_hat_curr, vec_loss_val):
'''
callback function for the lbfgs optimizer
:param x_hat_curr:
:param vec_loss_val:
:return:
'''
global step, loss_progress
loss_progress.append(vec_loss_val)
step += 1
### run the optimization for all query data
size = len(query_imgs)
for i in tqdm(range(size // BATCH_SIZE)):
save_dir_batch = os.path.join(save_dir, str(i))
try:
x_gt = query_imgs[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
if os.path.exists(save_dir_batch):
pass
else:
visualize_gt(x_gt, check_folder(save_dir_batch))
### initialize z
if init_val_ph is not None:
sess.run(tf.initialize_variables(vars),
feed_dict={init_val_ph: init_val[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]})
else:
sess.run(tf.initialize_variables(vars))
### optimize
loss_progress = []
step = 0
batch_idx = i
vec_loss_curr, z_curr, x_hat_curr = sess.run([vec_loss, z, x_hat], feed_dict={x: x_gt})
visualize_progress(x_hat_curr, vec_loss_curr, save_dir_batch, step) # visualize init
opt.minimize(sess, feed_dict={x: x_gt}, fetches=[x_hat, vec_loss], loss_callback=update)
vec_loss_curr, z_curr, x_hat_curr = sess.run([vec_loss, z, x_hat], feed_dict={x: x_gt})
visualize_progress(x_hat_curr, vec_loss_curr, save_dir_batch, step) # visualize final
### store results
all_loss.append(vec_loss_curr)
all_z.append(z_curr)
all_x_hat.append(x_hat_curr)
### save to disk
for key in vec_loss_dict.keys():
# each term in the objective
|
save_files(os.path.join(save_dir, str(i)),
['full_loss', 'z', 'xhat', 'loss_progress'],
[vec_loss_curr, z_curr, x_hat_curr, np.array(loss_progress)])
except KeyboardInterrupt:
print('Stop optimization\n')
break
try:
all_loss = np.concatenate(all_loss)
all_z = np.concatenate(all_z)
all_x_hat = np.concatenate(all_x_hat)
except:
all_loss = np.array(all_loss)
all_z = np.array(all_z)
all_x_hat = np.array(all_x_hat)
return all_loss, all_z, all_x_hat
#############################################################################################################
# main
#############################################################################################################
def main():
args, save_dir, load_dir = check_args(parse_arguments())
config_path = os.path.join(load_dir, 'params.pkl')
if os.path.exists(config_path):
config = pickle.load(open(config_path, 'rb'))
OUTPUT_SIZE = config['OUTPUT_SIZE']
GAN_TYPE = config['Architecture']
Z_DIM = config['Z_DIM']
else:
OUTPUT_SIZE = 64
GAN_TYPE = 'good'
Z_DIM = 128
### set up the generator and the discriminator
Generator, Discriminator = GeneratorAndDiscriminator(GAN_TYPE)
### open session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
### define variables
global BATCH_SIZE
BATCH_SIZE = args.batch_size
x = tf.placeholder(tf.float32, shape=(None, OUTPUT_SIZE, OUTPUT_SIZE, 3), name='x')
### initialization
init_val_ph = None
init_val = {'pos': None, 'neg': None}
if args.initialize_type == 'zero':
z = tf.Variable(tf.zeros([BATCH_SIZE, Z_DIM], tf.float32), name='latent_z')
elif args.initialize_type == 'random':
np.random.seed(RANDOM_SEED)
init_val_np = np.random.normal(size=(Z_DIM,))
init = np.tile(init_val_np, (BATCH_SIZE, 1)).astype(np.float32)
z = tf.Variable(init, name='latent_z')
elif args.initialize_type == 'nn':
init_val['pos'] = np.load(os.path.join(args.nn_dir, 'pos_z.npy'))[:, 0, :]
init_val['neg'] = np.load(os.path.join(args.nn_dir, 'neg_z.npy'))[:, 0, :]
init_val_ph = tf.placeholder(dtype=tf.float32, name='init_ph', shape=(BATCH_SIZE, Z_DIM))
z = tf.Variable(init_val_ph, name='latent_z')
else:
raise NotImplementedError
### get the reconstruction (x_hat)
x_hat = Generator(BATCH_SIZE, noise=z, is_training=False, z_dim=Z_DIM)
x_hat = tf.reshape(x_hat, [-1, 3, OUTPUT_SIZE, OUTPUT_SIZE])
x_hat = tf.transpose(x_hat, perm=[0, 2, 3, 1])
### load model
vars = [v for v in tf.global_variables() if 'latent_z' not in v.name]
saver = tf.train.Saver(vars)
sess.run(tf.initialize_variables(vars))
if_load, counter = load_model_from_checkpoint(load_dir, saver, sess)
assert if_load is True
### loss
if args.distance == 'l2':
print('use distance: l2')
loss_l2 = tf.reduce_mean(tf.square(x_hat - x), axis=[1, 2, 3])
vec_loss = loss_l2
vec_losses = {'l2': loss_l2}
elif args.distance == 'l2-lpips':
print('use distance: lpips + l2')
loss_l2 = tf.reduce_mean(tf.square(x_hat - x), axis=[1, 2, 3])
loss_lpips = lpips_tf.lpips(x_hat, x, normalize=False, model='net-lin', net='vgg', version='0.1')
vec_losses = {'l2': loss_l2,
'lpips': loss_lpips}
vec_loss = loss_l2 + LAMBDA2 * loss_lpips
else:
raise NotImplementedError
## regularizer
norm = tf.reduce_sum(tf.square(z), axis=1)
norm_penalty = (norm - Z_DIM) ** 2
if args.if_norm_reg:
loss = tf.reduce_mean(vec_loss) + LAMBDA3 * tf.reduce_mean(norm_penalty)
vec_losses['norm'] = norm_penalty
else:
loss = tf.reduce_mean(vec_loss)
### set up optimizer
opt = tf.contrib.opt.ScipyOptimizerInterface(loss,
var_list=[z],
method='Powell',
options={'maxiter': args.maxiter})
### load query images
pos_data_paths = get_filepaths_from_dir(args.pos_data_dir, ext='png')[: args.data_num]
pos_query_imgs = np.array([read_image(f, OUTPUT_SIZE) for f in pos_data_paths])
neg_data_paths = get_filepaths_from_dir(args.neg_data_dir, ext='png')[: args.data_num]
neg_query_imgs = np.array([read_image(f, OUTPUT_SIZE) for f in neg_data_paths])
### run the optimization on query images
query_loss, query_z, query_xhat = optimize_z(sess, z, x, x_hat,
init_val_ph, init_val['pos'],
pos_query_imgs,
check_folder(os.path.join(save_dir, 'pos_results')),
opt, vec_loss, vec_losses)
save_files(save_dir, ['pos_loss'], [query_loss])
query_loss, query_z, query_xhat = optimize_z(sess, z, x, x_hat,
init_val_ph, init_val['neg'],
neg_query_imgs,
check_folder(os.path.join(save_dir, 'neg_results')),
opt, vec_loss, vec_losses)
save_files(save_dir, ['neg_loss'], [query_loss])
if __name__ == '__main__':
main()
| val = sess.run(vec_loss_dict[key], feed_dict={x: x_gt})
save_files(os.path.join(save_dir, str(i)), [key], [val]) | conditional_block |
pbb_wgangp.py | import os
import sys
import argparse
import pickle
import numpy as np
import tensorflow as tf
from tqdm import tqdm
### import tools
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'tools'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'tools/lpips_tensorflow'))
from utils import *
import lpips_tf
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../gan_models/wgangp'))
from train import *
### Hyperparameters
LAMBDA2 = 0.2
LAMBDA3 = 0.001
RANDOM_SEED = 1000
#############################################################################################################
# get and save the arguments
#############################################################################################################
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', '-name', type=str, required=True,
help='the name of the current experiment (used to set up the save_dir)')
parser.add_argument('--gan_model_dir', '-gdir', type=str, required=True,
help='directory for the Victim GAN model')
parser.add_argument('--pos_data_dir', '-posdir', type=str,
help='the directory for the positive (training) query images set')
parser.add_argument('--neg_data_dir', '-negdir', type=str,
help='the directory for the negative (testing) query images set')
parser.add_argument('--data_num', '-dnum', type=int, default=5,
help='the number of query images to be considered')
parser.add_argument('--batch_size', '-bs', type=int, default=1,
help='batch size')
parser.add_argument('--initialize_type', '-init', type=str, default='zero',
choices=['zero', # 'zero': initialize the z to be zeros
'random', # 'random': use normal distributed initialization
'nn', # 'nn' : use nearest-neighbor initialization
],
help='the initialization techniques')
parser.add_argument('--nn_dir', '-ndir', type=str,
help='directory for the fbb(KNN) results')
parser.add_argument('--distance', '-dist', type=str, default='l2-lpips', choices=['l2', 'l2-lpips'],
help='the objective function type')
parser.add_argument('--if_norm_reg', '-reg', action='store_true', default=True,
help='enable the norm regularizer')
parser.add_argument('--maxiter', type=int, default=10,
help='the maximum number of iterations')
return parser.parse_args()
def check_args(args):
'''
check and store the arguments as well as set up the save_dir
:param args: arguments
:return:
'''
## load dir
assert os.path.exists(args.gan_model_dir)
## set up save_dir
save_dir = os.path.join(os.path.dirname(__file__), 'results/pbb', args.exp_name)
check_folder(save_dir)
## store the parameters
with open(os.path.join(save_dir, 'params.txt'), 'w') as f:
for k, v in vars(args).items():
f.writelines(k + ":" + str(v) + "\n")
print(k + ":" + str(v))
pickle.dump(vars(args), open(os.path.join(save_dir, 'params.pkl'), 'wb'), protocol=2)
return args, save_dir, args.gan_model_dir
#############################################################################################################
# main optimization function
#############################################################################################################
def | (sess, z, x, x_hat,
init_val_ph, init_val,
query_imgs, save_dir,
opt, vec_loss, vec_loss_dict):
"""
z = argmin_z \lambda_1*|x_hat -x|^2 + \lambda_2 * LPIPS(x_hat,x)+ \lambda_3* L_reg
where x_hat = G(z)
:param sess: session
:param z: latent variable
:param x: query
:param x_hat: reconstruction
:param init_val_ph: placeholder for initialization value
:param init_val: dict that stores the initialization value
:param query_imgs: query data
:param save_dir: save directory
:param opt: optimization operator
:param vec_loss: full loss
:param vec_loss_dict: dict that stores each term in the objective
:return:
"""
### store results
all_loss = []
all_z = []
all_x_hat = []
### get the local variables
vars = [var for var in tf.global_variables() if
'latent_z' in var.name]
for v in vars:
print(v.name)
### callback function
global step, loss_progress
loss_progress = []
step = 0
def update(x_hat_curr, vec_loss_val):
'''
callback function for the lbfgs optimizer
:param x_hat_curr:
:param vec_loss_val:
:return:
'''
global step, loss_progress
loss_progress.append(vec_loss_val)
step += 1
### run the optimization for all query data
size = len(query_imgs)
for i in tqdm(range(size // BATCH_SIZE)):
save_dir_batch = os.path.join(save_dir, str(i))
try:
x_gt = query_imgs[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
if os.path.exists(save_dir_batch):
pass
else:
visualize_gt(x_gt, check_folder(save_dir_batch))
### initialize z
if init_val_ph is not None:
sess.run(tf.initialize_variables(vars),
feed_dict={init_val_ph: init_val[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]})
else:
sess.run(tf.initialize_variables(vars))
### optimize
loss_progress = []
step = 0
batch_idx = i
vec_loss_curr, z_curr, x_hat_curr = sess.run([vec_loss, z, x_hat], feed_dict={x: x_gt})
visualize_progress(x_hat_curr, vec_loss_curr, save_dir_batch, step) # visualize init
opt.minimize(sess, feed_dict={x: x_gt}, fetches=[x_hat, vec_loss], loss_callback=update)
vec_loss_curr, z_curr, x_hat_curr = sess.run([vec_loss, z, x_hat], feed_dict={x: x_gt})
visualize_progress(x_hat_curr, vec_loss_curr, save_dir_batch, step) # visualize final
### store results
all_loss.append(vec_loss_curr)
all_z.append(z_curr)
all_x_hat.append(x_hat_curr)
### save to disk
for key in vec_loss_dict.keys():
# each term in the objective
val = sess.run(vec_loss_dict[key], feed_dict={x: x_gt})
save_files(os.path.join(save_dir, str(i)), [key], [val])
save_files(os.path.join(save_dir, str(i)),
['full_loss', 'z', 'xhat', 'loss_progress'],
[vec_loss_curr, z_curr, x_hat_curr, np.array(loss_progress)])
except KeyboardInterrupt:
print('Stop optimization\n')
break
try:
all_loss = np.concatenate(all_loss)
all_z = np.concatenate(all_z)
all_x_hat = np.concatenate(all_x_hat)
except:
all_loss = np.array(all_loss)
all_z = np.array(all_z)
all_x_hat = np.array(all_x_hat)
return all_loss, all_z, all_x_hat
#############################################################################################################
# main
#############################################################################################################
def main():
args, save_dir, load_dir = check_args(parse_arguments())
config_path = os.path.join(load_dir, 'params.pkl')
if os.path.exists(config_path):
config = pickle.load(open(config_path, 'rb'))
OUTPUT_SIZE = config['OUTPUT_SIZE']
GAN_TYPE = config['Architecture']
Z_DIM = config['Z_DIM']
else:
OUTPUT_SIZE = 64
GAN_TYPE = 'good'
Z_DIM = 128
### set up the generator and the discriminator
Generator, Discriminator = GeneratorAndDiscriminator(GAN_TYPE)
### open session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
### define variables
global BATCH_SIZE
BATCH_SIZE = args.batch_size
x = tf.placeholder(tf.float32, shape=(None, OUTPUT_SIZE, OUTPUT_SIZE, 3), name='x')
### initialization
init_val_ph = None
init_val = {'pos': None, 'neg': None}
if args.initialize_type == 'zero':
z = tf.Variable(tf.zeros([BATCH_SIZE, Z_DIM], tf.float32), name='latent_z')
elif args.initialize_type == 'random':
np.random.seed(RANDOM_SEED)
init_val_np = np.random.normal(size=(Z_DIM,))
init = np.tile(init_val_np, (BATCH_SIZE, 1)).astype(np.float32)
z = tf.Variable(init, name='latent_z')
elif args.initialize_type == 'nn':
init_val['pos'] = np.load(os.path.join(args.nn_dir, 'pos_z.npy'))[:, 0, :]
init_val['neg'] = np.load(os.path.join(args.nn_dir, 'neg_z.npy'))[:, 0, :]
init_val_ph = tf.placeholder(dtype=tf.float32, name='init_ph', shape=(BATCH_SIZE, Z_DIM))
z = tf.Variable(init_val_ph, name='latent_z')
else:
raise NotImplementedError
### get the reconstruction (x_hat)
x_hat = Generator(BATCH_SIZE, noise=z, is_training=False, z_dim=Z_DIM)
x_hat = tf.reshape(x_hat, [-1, 3, OUTPUT_SIZE, OUTPUT_SIZE])
x_hat = tf.transpose(x_hat, perm=[0, 2, 3, 1])
### load model
vars = [v for v in tf.global_variables() if 'latent_z' not in v.name]
saver = tf.train.Saver(vars)
sess.run(tf.initialize_variables(vars))
if_load, counter = load_model_from_checkpoint(load_dir, saver, sess)
assert if_load is True
### loss
if args.distance == 'l2':
print('use distance: l2')
loss_l2 = tf.reduce_mean(tf.square(x_hat - x), axis=[1, 2, 3])
vec_loss = loss_l2
vec_losses = {'l2': loss_l2}
elif args.distance == 'l2-lpips':
print('use distance: lpips + l2')
loss_l2 = tf.reduce_mean(tf.square(x_hat - x), axis=[1, 2, 3])
loss_lpips = lpips_tf.lpips(x_hat, x, normalize=False, model='net-lin', net='vgg', version='0.1')
vec_losses = {'l2': loss_l2,
'lpips': loss_lpips}
vec_loss = loss_l2 + LAMBDA2 * loss_lpips
else:
raise NotImplementedError
## regularizer
norm = tf.reduce_sum(tf.square(z), axis=1)
norm_penalty = (norm - Z_DIM) ** 2
if args.if_norm_reg:
loss = tf.reduce_mean(vec_loss) + LAMBDA3 * tf.reduce_mean(norm_penalty)
vec_losses['norm'] = norm_penalty
else:
loss = tf.reduce_mean(vec_loss)
### set up optimizer
opt = tf.contrib.opt.ScipyOptimizerInterface(loss,
var_list=[z],
method='Powell',
options={'maxiter': args.maxiter})
### load query images
pos_data_paths = get_filepaths_from_dir(args.pos_data_dir, ext='png')[: args.data_num]
pos_query_imgs = np.array([read_image(f, OUTPUT_SIZE) for f in pos_data_paths])
neg_data_paths = get_filepaths_from_dir(args.neg_data_dir, ext='png')[: args.data_num]
neg_query_imgs = np.array([read_image(f, OUTPUT_SIZE) for f in neg_data_paths])
### run the optimization on query images
query_loss, query_z, query_xhat = optimize_z(sess, z, x, x_hat,
init_val_ph, init_val['pos'],
pos_query_imgs,
check_folder(os.path.join(save_dir, 'pos_results')),
opt, vec_loss, vec_losses)
save_files(save_dir, ['pos_loss'], [query_loss])
query_loss, query_z, query_xhat = optimize_z(sess, z, x, x_hat,
init_val_ph, init_val['neg'],
neg_query_imgs,
check_folder(os.path.join(save_dir, 'neg_results')),
opt, vec_loss, vec_losses)
save_files(save_dir, ['neg_loss'], [query_loss])
if __name__ == '__main__':
main()
| optimize_z | identifier_name |
oci8_test.go | package oci8
import (
"bytes"
"context"
"database/sql"
"flag"
"fmt"
"os"
"reflect"
"testing"
"time"
)
// to run database tests
// go test -v github.com/mattn/go-oci8 -args -disableDatabase=false -hostValid type_hostname_here -username type_username_here -password "type_password_here"
// note minimum Go version for testing is 1.8
/* note that testing needs an Oracle user and the following:
create or replace function TYPE_USER_HERE.SLEEP_SECONDS (p_seconds number) return integer is
begin
dbms_lock.sleep(p_seconds);
return 1;
end SLEEP_SECONDS;
/
*/
var (
TestDisableDatabase bool
TestHostValid string
TestHostInvalid string
TestUsername string
TestPassword string
TestContextTimeoutString string
TestContextTimeout time.Duration
TestDatabase string
TestDisableDestructive bool
TestTimeString string
TestDB *sql.DB
TestTypeTime = reflect.TypeOf(time.Time{})
TestTypeByteSlice = reflect.TypeOf([]byte{})
testString1 string
testByteSlice1 []byte
testTimeLocUTC *time.Location
testTimeLocGMT *time.Location
testTimeLocEST *time.Location
testTimeLocMST *time.Location
testTimeLocNZ *time.Location
)
// testQueryResults is for testing a query
type testQueryResults struct {
query string
args [][]interface{}
results [][][]interface{}
}
// TestMain sets up testing
func TestMain(m *testing.M) {
code := setupForTesting()
if code != 0 {
os.Exit(code)
}
code = m.Run()
if !TestDisableDatabase {
err := TestDB.Close()
if err != nil {
fmt.Println("close error:", err)
os.Exit(2)
}
}
os.Exit(code)
}
// setupForTesting sets up flags and connects to test database
func setupForTesting() int {
flag.BoolVar(&TestDisableDatabase, "disableDatabase", true, "set to true to disable the Oracle tests")
flag.StringVar(&TestHostValid, "hostValid", "127.0.0.1", "a host where a Oracle database is running")
flag.StringVar(&TestHostInvalid, "hostInvalid", "169.254.200.200", "a host where a Oracle database is not running")
flag.StringVar(&TestUsername, "username", "", "the username for the Oracle database")
flag.StringVar(&TestPassword, "password", "", "the password for the Oracle database")
flag.StringVar(&TestContextTimeoutString, "contextTimeout", "30s", "the context timeout for queries")
flag.BoolVar(&TestDisableDestructive, "disableDestructive", false, "set to true to disable the destructive Oracle tests")
flag.Parse()
var err error
TestContextTimeout, err = time.ParseDuration(TestContextTimeoutString)
if err != nil {
fmt.Println("parse context timeout error:", err)
return 4
}
if !TestDisableDatabase {
TestDB = testGetDB()
if TestDB == nil {
return 6
}
}
TestTimeString = time.Now().UTC().Format("20060102150405")
var i int
var buffer bytes.Buffer
for i = 0; i < 1000; i++ {
buffer.WriteRune(rune(i))
}
testString1 = buffer.String()
testByteSlice1 = make([]byte, 2000)
for i = 0; i < 2000; i++ {
testByteSlice1[i] = byte(i)
}
testTimeLocUTC, _ = time.LoadLocation("UTC")
testTimeLocGMT, _ = time.LoadLocation("GMT")
testTimeLocEST, _ = time.LoadLocation("EST")
testTimeLocMST, _ = time.LoadLocation("MST")
testTimeLocNZ, _ = time.LoadLocation("NZ")
return 0
}
// TestParseDSN tests parsing the DSN
func TestParseDSN(t *testing.T) {
var (
pacific *time.Location
err error
)
if pacific, err = time.LoadLocation("America/Los_Angeles"); err != nil {
panic(err)
}
var dsnTests = []struct {
dsnString string
expectedDSN *DSN
}{
{"oracle://xxmc:xxmc@107.20.30.169:1521/ORCL?loc=America%2FLos_Angeles", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: pacific}},
{"xxmc/xxmc@107.20.30.169:1521/ORCL?loc=America%2FLos_Angeles", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: pacific}},
{"sys/syspwd@107.20.30.169:1521/ORCL?loc=America%2FLos_Angeles&as=sysdba", &DSN{Username: "sys", Password: "syspwd", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: pacific, operationMode: 0x00000002}}, // with operationMode: 0x00000002 = C.OCI_SYDBA
{"xxmc/xxmc@107.20.30.169:1521/ORCL", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: time.Local}},
{"xxmc/xxmc@107.20.30.169/ORCL", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169/ORCL", prefetch_rows: 10, Location: time.Local}},
}
for _, tt := range dsnTests {
actualDSN, err := ParseDSN(tt.dsnString)
if err != nil {
t.Errorf("ParseDSN(%s) got error: %+v", tt.dsnString, err)
}
if !reflect.DeepEqual(actualDSN, tt.expectedDSN) {
t.Errorf("ParseDSN(%s): expected %+v, actual %+v", tt.dsnString, tt.expectedDSN, actualDSN)
}
}
}
// TestIsBadConn tests bad connection error codes
func TestIsBadConn(t *testing.T) {
var errorCode = "ORA-03114"
if !isBadConnection(errorCode) {
t.Errorf("TestIsBadConn: expected %+v, actual %+v", true, isBadConnection(errorCode))
}
}
// testGetDB connects to the test database and returns the database connection
func testGetDB() *sql.DB {
os.Setenv("NLS_LANG", "American_America.AL32UTF8")
var openString string
// [username/[password]@]host[:port][/instance_name][?param1=value1&...¶mN=valueN]
if len(TestUsername) > 0 {
if len(TestPassword) > 0 {
openString = TestUsername + "/" + TestPassword + "@"
} else {
openString = TestUsername + "@"
}
}
openString += TestHostValid
db, err := sql.Open("oci8", openString)
if err != nil {
fmt.Println("open error:", err)
return nil
}
if db == nil {
fmt.Println("db is nil")
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
err = db.PingContext(ctx)
cancel()
if err != nil {
fmt.Println("ping error:", err)
return nil
}
db.Exec("drop table foo")
_, err = db.Exec(sql1)
if err != nil {
fmt.Println("sql1 error:", err)
return nil
}
_, err = db.Exec("truncate table foo")
if err != nil {
fmt.Println("truncate error:", err)
return nil
}
return db
}
// testGetRows runs a statment and returns the rows as [][]interface{}
func testGetRows(t *testing.T, stmt *sql.Stmt, args []interface{}) ([][]interface{}, error) {
// get rows
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
defer cancel()
var rows *sql.Rows
rows, err := stmt.QueryContext(ctx, args...)
if err != nil {
return nil, fmt.Errorf("query error: %v", err)
}
// get column infomration
var columns []string
columns, err = rows.Columns()
if err != nil {
rows.Close()
return nil, fmt.Errorf("columns error: %v", err)
}
// create values
values := make([][]interface{}, 0, 1)
// get values
pRowInterface := make([]interface{}, len(columns))
for rows.Next() {
rowInterface := make([]interface{}, len(columns))
for i := 0; i < len(rowInterface); i++ {
pRowInterface[i] = &rowInterface[i]
}
err = rows.Err()
if err != nil {
rows.Close()
return nil, fmt.Errorf("rows error: %v", err)
}
err = rows.Scan(pRowInterface...)
if err != nil {
rows.Close()
return nil, fmt.Errorf("scan error: %v", err)
}
values = append(values, rowInterface)
}
err = rows.Err()
if err != nil {
rows.Close()
return nil, fmt.Errorf("rows error: %v", err)
}
err = rows.Close()
if err != nil {
return nil, fmt.Errorf("close error: %v", err)
}
// return values
return values, nil
}
// testExec runs an exec query and returns error
func testExec(t *testing.T, query string, args []interface{}) error {
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
stmt, err := TestDB.PrepareContext(ctx, query)
cancel()
if err != nil {
return fmt.Errorf("prepare error: %v", err)
}
ctx, cancel = context.WithTimeout(context.Background(), TestContextTimeout)
_, err = stmt.ExecContext(ctx, args...)
cancel()
if err != nil {
stmt.Close()
return fmt.Errorf("exec error: %v", err)
}
err = stmt.Close()
if err != nil {
return fmt.Errorf("stmt close error: %v", err)
}
return nil
}
// testExecRows runs exec query for each arg row and returns error
func testExecRows(t *testing.T, query string, args [][]interface{}) error {
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
stmt, err := TestDB.PrepareContext(ctx, query)
cancel()
if err != nil {
return fmt.Errorf("prepare error: %v", err)
}
for i := 0; i < len(args); i++ {
ctx, cancel = context.WithTimeout(context.Background(), TestContextTimeout)
_, err = stmt.ExecContext(ctx, args[i]...)
cancel()
if err != nil {
stmt.Close()
return fmt.Errorf("exec - row %v - error: %v", i, err)
}
}
err = stmt.Close()
if err != nil {
return fmt.Errorf("stmt close error: %v", err)
}
return nil
}
// testRunQueryResults runs a slice of testQueryResults tests
func | (t *testing.T, queryResults []testQueryResults) {
for _, queryResult := range queryResults {
if len(queryResult.args) != len(queryResult.results) {
t.Errorf("args len %v and results len %v do not match - query: %v",
len(queryResult.args), len(queryResult.results), queryResult.query)
continue
}
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
stmt, err := TestDB.PrepareContext(ctx, queryResult.query)
cancel()
if err != nil {
t.Errorf("prepare error: %v - query: %v", err, queryResult.query)
continue
}
testRunQueryResult(t, queryResult, stmt)
err = stmt.Close()
if err != nil {
t.Errorf("close error: %v - query: %v", err, queryResult.query)
}
}
}
// testRunQueryResult runs a single testQueryResults test
func testRunQueryResult(t *testing.T, queryResult testQueryResults, stmt *sql.Stmt) {
for i := 0; i < len(queryResult.args); i++ {
result, err := testGetRows(t, stmt, queryResult.args[i])
if err != nil {
t.Errorf("get rows error: %v - query: %v", err, queryResult.query)
continue
}
if result == nil && queryResult.results[i] != nil {
t.Errorf("result is nil - query: %v", queryResult.query)
continue
}
if len(result) != len(queryResult.results[i]) {
t.Errorf("result rows len %v not equal to expected len %v - query: %v",
len(result), len(queryResult.results[i]), queryResult.query)
continue
}
for j := 0; j < len(result); j++ {
if len(result[j]) != len(queryResult.results[i][j]) {
t.Errorf("result columns len %v not equal to expected len %v - query: %v",
len(result[j]), len(queryResult.results[i][j]), queryResult.query)
continue
}
for k := 0; k < len(result[j]); k++ {
bad := false
type1 := reflect.TypeOf(result[j][k])
type2 := reflect.TypeOf(queryResult.results[i][j][k])
switch {
case type1 == nil || type2 == nil:
if type1 != type2 {
bad = true
}
case type1 == TestTypeTime || type2 == TestTypeTime:
if type1 != type2 {
bad = true
break
}
time1 := result[j][k].(time.Time)
time2 := queryResult.results[i][j][k].(time.Time)
if !time1.Equal(time2) {
bad = true
}
case type1.Kind() == reflect.Slice || type2.Kind() == reflect.Slice:
if !reflect.DeepEqual(result[j][k], queryResult.results[i][j][k]) {
bad = true
}
default:
if result[j][k] != queryResult.results[i][j][k] {
bad = true
}
}
if bad {
t.Errorf("result - %v row %v, %v - received: %T, %v - expected: %T, %v - query: %v", i, j, k,
result[j][k], result[j][k], queryResult.results[i][j][k], queryResult.results[i][j][k], queryResult.query)
}
}
}
}
}
var sql1 = `create table foo(
c1 varchar2(256),
c2 nvarchar2(256),
c3 number,
c4 float,
c6 date,
c7 BINARY_FLOAT,
c8 BINARY_DOUBLE,
c9 TIMESTAMP,
c10 TIMESTAMP WITH TIME ZONE,
c11 TIMESTAMP WITH LOCAL TIME ZONE,
c12 INTERVAL YEAR TO MONTH,
c13 INTERVAL DAY TO SECOND,
c14 RAW(80),
c15 ROWID,
c17 CHAR(15),
c18 NCHAR(20),
c19 CLOB,
c21 BLOB,
cend varchar2(12)
)`
var sql12 = `insert( c1,c2,c3,c4,c6,c7,c8,c9,c10,c11,c12,c13,c14,c17,c18,c19,c20,c21,cend) into foo values(
:1,
:2,
:3,
:4,
:6,
:7,
:8,
:9,
:10,
:11,
NUMTOYMINTERVAL( :12, 'MONTH'),
NUMTODSINTERVAL( :13 / 1000000000, 'SECOND'),
:14,
:17,
:18,
:19,
:21,
'END'
)`
| testRunQueryResults | identifier_name |
oci8_test.go | package oci8
import (
"bytes"
"context"
"database/sql"
"flag"
"fmt"
"os"
"reflect"
"testing"
"time"
)
// to run database tests
// go test -v github.com/mattn/go-oci8 -args -disableDatabase=false -hostValid type_hostname_here -username type_username_here -password "type_password_here"
// note minimum Go version for testing is 1.8
/* note that testing needs an Oracle user and the following:
create or replace function TYPE_USER_HERE.SLEEP_SECONDS (p_seconds number) return integer is
begin
dbms_lock.sleep(p_seconds);
return 1;
end SLEEP_SECONDS;
/
*/
var (
TestDisableDatabase bool
TestHostValid string
TestHostInvalid string
TestUsername string
TestPassword string
TestContextTimeoutString string
TestContextTimeout time.Duration
TestDatabase string
TestDisableDestructive bool
TestTimeString string
TestDB *sql.DB
TestTypeTime = reflect.TypeOf(time.Time{})
TestTypeByteSlice = reflect.TypeOf([]byte{})
testString1 string
testByteSlice1 []byte
testTimeLocUTC *time.Location
testTimeLocGMT *time.Location
testTimeLocEST *time.Location
testTimeLocMST *time.Location
testTimeLocNZ *time.Location
)
// testQueryResults is for testing a query
type testQueryResults struct {
query string
args [][]interface{}
results [][][]interface{}
}
// TestMain sets up testing
func TestMain(m *testing.M) {
code := setupForTesting()
if code != 0 {
os.Exit(code)
}
code = m.Run()
if !TestDisableDatabase {
err := TestDB.Close()
if err != nil {
fmt.Println("close error:", err)
os.Exit(2)
}
}
os.Exit(code)
}
// setupForTesting sets up flags and connects to test database
func setupForTesting() int {
flag.BoolVar(&TestDisableDatabase, "disableDatabase", true, "set to true to disable the Oracle tests")
flag.StringVar(&TestHostValid, "hostValid", "127.0.0.1", "a host where a Oracle database is running")
flag.StringVar(&TestHostInvalid, "hostInvalid", "169.254.200.200", "a host where a Oracle database is not running")
flag.StringVar(&TestUsername, "username", "", "the username for the Oracle database")
flag.StringVar(&TestPassword, "password", "", "the password for the Oracle database")
flag.StringVar(&TestContextTimeoutString, "contextTimeout", "30s", "the context timeout for queries")
flag.BoolVar(&TestDisableDestructive, "disableDestructive", false, "set to true to disable the destructive Oracle tests")
flag.Parse()
var err error
TestContextTimeout, err = time.ParseDuration(TestContextTimeoutString)
if err != nil {
fmt.Println("parse context timeout error:", err)
return 4
}
if !TestDisableDatabase {
TestDB = testGetDB()
if TestDB == nil {
return 6
}
}
TestTimeString = time.Now().UTC().Format("20060102150405") | var buffer bytes.Buffer
for i = 0; i < 1000; i++ {
buffer.WriteRune(rune(i))
}
testString1 = buffer.String()
testByteSlice1 = make([]byte, 2000)
for i = 0; i < 2000; i++ {
testByteSlice1[i] = byte(i)
}
testTimeLocUTC, _ = time.LoadLocation("UTC")
testTimeLocGMT, _ = time.LoadLocation("GMT")
testTimeLocEST, _ = time.LoadLocation("EST")
testTimeLocMST, _ = time.LoadLocation("MST")
testTimeLocNZ, _ = time.LoadLocation("NZ")
return 0
}
// TestParseDSN tests parsing the DSN
func TestParseDSN(t *testing.T) {
var (
pacific *time.Location
err error
)
if pacific, err = time.LoadLocation("America/Los_Angeles"); err != nil {
panic(err)
}
var dsnTests = []struct {
dsnString string
expectedDSN *DSN
}{
{"oracle://xxmc:xxmc@107.20.30.169:1521/ORCL?loc=America%2FLos_Angeles", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: pacific}},
{"xxmc/xxmc@107.20.30.169:1521/ORCL?loc=America%2FLos_Angeles", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: pacific}},
{"sys/syspwd@107.20.30.169:1521/ORCL?loc=America%2FLos_Angeles&as=sysdba", &DSN{Username: "sys", Password: "syspwd", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: pacific, operationMode: 0x00000002}}, // with operationMode: 0x00000002 = C.OCI_SYDBA
{"xxmc/xxmc@107.20.30.169:1521/ORCL", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: time.Local}},
{"xxmc/xxmc@107.20.30.169/ORCL", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169/ORCL", prefetch_rows: 10, Location: time.Local}},
}
for _, tt := range dsnTests {
actualDSN, err := ParseDSN(tt.dsnString)
if err != nil {
t.Errorf("ParseDSN(%s) got error: %+v", tt.dsnString, err)
}
if !reflect.DeepEqual(actualDSN, tt.expectedDSN) {
t.Errorf("ParseDSN(%s): expected %+v, actual %+v", tt.dsnString, tt.expectedDSN, actualDSN)
}
}
}
// TestIsBadConn tests bad connection error codes
func TestIsBadConn(t *testing.T) {
var errorCode = "ORA-03114"
if !isBadConnection(errorCode) {
t.Errorf("TestIsBadConn: expected %+v, actual %+v", true, isBadConnection(errorCode))
}
}
// testGetDB connects to the test database and returns the database connection
func testGetDB() *sql.DB {
os.Setenv("NLS_LANG", "American_America.AL32UTF8")
var openString string
// [username/[password]@]host[:port][/instance_name][?param1=value1&...¶mN=valueN]
if len(TestUsername) > 0 {
if len(TestPassword) > 0 {
openString = TestUsername + "/" + TestPassword + "@"
} else {
openString = TestUsername + "@"
}
}
openString += TestHostValid
db, err := sql.Open("oci8", openString)
if err != nil {
fmt.Println("open error:", err)
return nil
}
if db == nil {
fmt.Println("db is nil")
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
err = db.PingContext(ctx)
cancel()
if err != nil {
fmt.Println("ping error:", err)
return nil
}
db.Exec("drop table foo")
_, err = db.Exec(sql1)
if err != nil {
fmt.Println("sql1 error:", err)
return nil
}
_, err = db.Exec("truncate table foo")
if err != nil {
fmt.Println("truncate error:", err)
return nil
}
return db
}
// testGetRows runs a statment and returns the rows as [][]interface{}
func testGetRows(t *testing.T, stmt *sql.Stmt, args []interface{}) ([][]interface{}, error) {
// get rows
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
defer cancel()
var rows *sql.Rows
rows, err := stmt.QueryContext(ctx, args...)
if err != nil {
return nil, fmt.Errorf("query error: %v", err)
}
// get column infomration
var columns []string
columns, err = rows.Columns()
if err != nil {
rows.Close()
return nil, fmt.Errorf("columns error: %v", err)
}
// create values
values := make([][]interface{}, 0, 1)
// get values
pRowInterface := make([]interface{}, len(columns))
for rows.Next() {
rowInterface := make([]interface{}, len(columns))
for i := 0; i < len(rowInterface); i++ {
pRowInterface[i] = &rowInterface[i]
}
err = rows.Err()
if err != nil {
rows.Close()
return nil, fmt.Errorf("rows error: %v", err)
}
err = rows.Scan(pRowInterface...)
if err != nil {
rows.Close()
return nil, fmt.Errorf("scan error: %v", err)
}
values = append(values, rowInterface)
}
err = rows.Err()
if err != nil {
rows.Close()
return nil, fmt.Errorf("rows error: %v", err)
}
err = rows.Close()
if err != nil {
return nil, fmt.Errorf("close error: %v", err)
}
// return values
return values, nil
}
// testExec runs an exec query and returns error
func testExec(t *testing.T, query string, args []interface{}) error {
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
stmt, err := TestDB.PrepareContext(ctx, query)
cancel()
if err != nil {
return fmt.Errorf("prepare error: %v", err)
}
ctx, cancel = context.WithTimeout(context.Background(), TestContextTimeout)
_, err = stmt.ExecContext(ctx, args...)
cancel()
if err != nil {
stmt.Close()
return fmt.Errorf("exec error: %v", err)
}
err = stmt.Close()
if err != nil {
return fmt.Errorf("stmt close error: %v", err)
}
return nil
}
// testExecRows runs exec query for each arg row and returns error
func testExecRows(t *testing.T, query string, args [][]interface{}) error {
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
stmt, err := TestDB.PrepareContext(ctx, query)
cancel()
if err != nil {
return fmt.Errorf("prepare error: %v", err)
}
for i := 0; i < len(args); i++ {
ctx, cancel = context.WithTimeout(context.Background(), TestContextTimeout)
_, err = stmt.ExecContext(ctx, args[i]...)
cancel()
if err != nil {
stmt.Close()
return fmt.Errorf("exec - row %v - error: %v", i, err)
}
}
err = stmt.Close()
if err != nil {
return fmt.Errorf("stmt close error: %v", err)
}
return nil
}
// testRunQueryResults runs a slice of testQueryResults tests
func testRunQueryResults(t *testing.T, queryResults []testQueryResults) {
for _, queryResult := range queryResults {
if len(queryResult.args) != len(queryResult.results) {
t.Errorf("args len %v and results len %v do not match - query: %v",
len(queryResult.args), len(queryResult.results), queryResult.query)
continue
}
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
stmt, err := TestDB.PrepareContext(ctx, queryResult.query)
cancel()
if err != nil {
t.Errorf("prepare error: %v - query: %v", err, queryResult.query)
continue
}
testRunQueryResult(t, queryResult, stmt)
err = stmt.Close()
if err != nil {
t.Errorf("close error: %v - query: %v", err, queryResult.query)
}
}
}
// testRunQueryResult runs a single testQueryResults test
func testRunQueryResult(t *testing.T, queryResult testQueryResults, stmt *sql.Stmt) {
for i := 0; i < len(queryResult.args); i++ {
result, err := testGetRows(t, stmt, queryResult.args[i])
if err != nil {
t.Errorf("get rows error: %v - query: %v", err, queryResult.query)
continue
}
if result == nil && queryResult.results[i] != nil {
t.Errorf("result is nil - query: %v", queryResult.query)
continue
}
if len(result) != len(queryResult.results[i]) {
t.Errorf("result rows len %v not equal to expected len %v - query: %v",
len(result), len(queryResult.results[i]), queryResult.query)
continue
}
for j := 0; j < len(result); j++ {
if len(result[j]) != len(queryResult.results[i][j]) {
t.Errorf("result columns len %v not equal to expected len %v - query: %v",
len(result[j]), len(queryResult.results[i][j]), queryResult.query)
continue
}
for k := 0; k < len(result[j]); k++ {
bad := false
type1 := reflect.TypeOf(result[j][k])
type2 := reflect.TypeOf(queryResult.results[i][j][k])
switch {
case type1 == nil || type2 == nil:
if type1 != type2 {
bad = true
}
case type1 == TestTypeTime || type2 == TestTypeTime:
if type1 != type2 {
bad = true
break
}
time1 := result[j][k].(time.Time)
time2 := queryResult.results[i][j][k].(time.Time)
if !time1.Equal(time2) {
bad = true
}
case type1.Kind() == reflect.Slice || type2.Kind() == reflect.Slice:
if !reflect.DeepEqual(result[j][k], queryResult.results[i][j][k]) {
bad = true
}
default:
if result[j][k] != queryResult.results[i][j][k] {
bad = true
}
}
if bad {
t.Errorf("result - %v row %v, %v - received: %T, %v - expected: %T, %v - query: %v", i, j, k,
result[j][k], result[j][k], queryResult.results[i][j][k], queryResult.results[i][j][k], queryResult.query)
}
}
}
}
}
var sql1 = `create table foo(
c1 varchar2(256),
c2 nvarchar2(256),
c3 number,
c4 float,
c6 date,
c7 BINARY_FLOAT,
c8 BINARY_DOUBLE,
c9 TIMESTAMP,
c10 TIMESTAMP WITH TIME ZONE,
c11 TIMESTAMP WITH LOCAL TIME ZONE,
c12 INTERVAL YEAR TO MONTH,
c13 INTERVAL DAY TO SECOND,
c14 RAW(80),
c15 ROWID,
c17 CHAR(15),
c18 NCHAR(20),
c19 CLOB,
c21 BLOB,
cend varchar2(12)
)`
var sql12 = `insert( c1,c2,c3,c4,c6,c7,c8,c9,c10,c11,c12,c13,c14,c17,c18,c19,c20,c21,cend) into foo values(
:1,
:2,
:3,
:4,
:6,
:7,
:8,
:9,
:10,
:11,
NUMTOYMINTERVAL( :12, 'MONTH'),
NUMTODSINTERVAL( :13 / 1000000000, 'SECOND'),
:14,
:17,
:18,
:19,
:21,
'END'
)` |
var i int | random_line_split |
oci8_test.go | package oci8
import (
"bytes"
"context"
"database/sql"
"flag"
"fmt"
"os"
"reflect"
"testing"
"time"
)
// to run database tests
// go test -v github.com/mattn/go-oci8 -args -disableDatabase=false -hostValid type_hostname_here -username type_username_here -password "type_password_here"
// note minimum Go version for testing is 1.8
/* note that testing needs an Oracle user and the following:
create or replace function TYPE_USER_HERE.SLEEP_SECONDS (p_seconds number) return integer is
begin
dbms_lock.sleep(p_seconds);
return 1;
end SLEEP_SECONDS;
/
*/
var (
TestDisableDatabase bool
TestHostValid string
TestHostInvalid string
TestUsername string
TestPassword string
TestContextTimeoutString string
TestContextTimeout time.Duration
TestDatabase string
TestDisableDestructive bool
TestTimeString string
TestDB *sql.DB
TestTypeTime = reflect.TypeOf(time.Time{})
TestTypeByteSlice = reflect.TypeOf([]byte{})
testString1 string
testByteSlice1 []byte
testTimeLocUTC *time.Location
testTimeLocGMT *time.Location
testTimeLocEST *time.Location
testTimeLocMST *time.Location
testTimeLocNZ *time.Location
)
// testQueryResults is for testing a query
type testQueryResults struct {
query string
args [][]interface{}
results [][][]interface{}
}
// TestMain sets up testing
func TestMain(m *testing.M) {
code := setupForTesting()
if code != 0 {
os.Exit(code)
}
code = m.Run()
if !TestDisableDatabase {
err := TestDB.Close()
if err != nil {
fmt.Println("close error:", err)
os.Exit(2)
}
}
os.Exit(code)
}
// setupForTesting sets up flags and connects to test database
func setupForTesting() int {
flag.BoolVar(&TestDisableDatabase, "disableDatabase", true, "set to true to disable the Oracle tests")
flag.StringVar(&TestHostValid, "hostValid", "127.0.0.1", "a host where a Oracle database is running")
flag.StringVar(&TestHostInvalid, "hostInvalid", "169.254.200.200", "a host where a Oracle database is not running")
flag.StringVar(&TestUsername, "username", "", "the username for the Oracle database")
flag.StringVar(&TestPassword, "password", "", "the password for the Oracle database")
flag.StringVar(&TestContextTimeoutString, "contextTimeout", "30s", "the context timeout for queries")
flag.BoolVar(&TestDisableDestructive, "disableDestructive", false, "set to true to disable the destructive Oracle tests")
flag.Parse()
var err error
TestContextTimeout, err = time.ParseDuration(TestContextTimeoutString)
if err != nil {
fmt.Println("parse context timeout error:", err)
return 4
}
if !TestDisableDatabase {
TestDB = testGetDB()
if TestDB == nil {
return 6
}
}
TestTimeString = time.Now().UTC().Format("20060102150405")
var i int
var buffer bytes.Buffer
for i = 0; i < 1000; i++ {
buffer.WriteRune(rune(i))
}
testString1 = buffer.String()
testByteSlice1 = make([]byte, 2000)
for i = 0; i < 2000; i++ {
testByteSlice1[i] = byte(i)
}
testTimeLocUTC, _ = time.LoadLocation("UTC")
testTimeLocGMT, _ = time.LoadLocation("GMT")
testTimeLocEST, _ = time.LoadLocation("EST")
testTimeLocMST, _ = time.LoadLocation("MST")
testTimeLocNZ, _ = time.LoadLocation("NZ")
return 0
}
// TestParseDSN tests parsing the DSN
func TestParseDSN(t *testing.T) {
var (
pacific *time.Location
err error
)
if pacific, err = time.LoadLocation("America/Los_Angeles"); err != nil {
panic(err)
}
var dsnTests = []struct {
dsnString string
expectedDSN *DSN
}{
{"oracle://xxmc:xxmc@107.20.30.169:1521/ORCL?loc=America%2FLos_Angeles", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: pacific}},
{"xxmc/xxmc@107.20.30.169:1521/ORCL?loc=America%2FLos_Angeles", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: pacific}},
{"sys/syspwd@107.20.30.169:1521/ORCL?loc=America%2FLos_Angeles&as=sysdba", &DSN{Username: "sys", Password: "syspwd", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: pacific, operationMode: 0x00000002}}, // with operationMode: 0x00000002 = C.OCI_SYDBA
{"xxmc/xxmc@107.20.30.169:1521/ORCL", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: time.Local}},
{"xxmc/xxmc@107.20.30.169/ORCL", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169/ORCL", prefetch_rows: 10, Location: time.Local}},
}
for _, tt := range dsnTests {
actualDSN, err := ParseDSN(tt.dsnString)
if err != nil {
t.Errorf("ParseDSN(%s) got error: %+v", tt.dsnString, err)
}
if !reflect.DeepEqual(actualDSN, tt.expectedDSN) {
t.Errorf("ParseDSN(%s): expected %+v, actual %+v", tt.dsnString, tt.expectedDSN, actualDSN)
}
}
}
// TestIsBadConn tests bad connection error codes
func TestIsBadConn(t *testing.T) {
var errorCode = "ORA-03114"
if !isBadConnection(errorCode) {
t.Errorf("TestIsBadConn: expected %+v, actual %+v", true, isBadConnection(errorCode))
}
}
// testGetDB connects to the test database and returns the database connection
func testGetDB() *sql.DB |
// testGetRows runs a statment and returns the rows as [][]interface{}
func testGetRows(t *testing.T, stmt *sql.Stmt, args []interface{}) ([][]interface{}, error) {
// get rows
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
defer cancel()
var rows *sql.Rows
rows, err := stmt.QueryContext(ctx, args...)
if err != nil {
return nil, fmt.Errorf("query error: %v", err)
}
// get column infomration
var columns []string
columns, err = rows.Columns()
if err != nil {
rows.Close()
return nil, fmt.Errorf("columns error: %v", err)
}
// create values
values := make([][]interface{}, 0, 1)
// get values
pRowInterface := make([]interface{}, len(columns))
for rows.Next() {
rowInterface := make([]interface{}, len(columns))
for i := 0; i < len(rowInterface); i++ {
pRowInterface[i] = &rowInterface[i]
}
err = rows.Err()
if err != nil {
rows.Close()
return nil, fmt.Errorf("rows error: %v", err)
}
err = rows.Scan(pRowInterface...)
if err != nil {
rows.Close()
return nil, fmt.Errorf("scan error: %v", err)
}
values = append(values, rowInterface)
}
err = rows.Err()
if err != nil {
rows.Close()
return nil, fmt.Errorf("rows error: %v", err)
}
err = rows.Close()
if err != nil {
return nil, fmt.Errorf("close error: %v", err)
}
// return values
return values, nil
}
// testExec runs an exec query and returns error
func testExec(t *testing.T, query string, args []interface{}) error {
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
stmt, err := TestDB.PrepareContext(ctx, query)
cancel()
if err != nil {
return fmt.Errorf("prepare error: %v", err)
}
ctx, cancel = context.WithTimeout(context.Background(), TestContextTimeout)
_, err = stmt.ExecContext(ctx, args...)
cancel()
if err != nil {
stmt.Close()
return fmt.Errorf("exec error: %v", err)
}
err = stmt.Close()
if err != nil {
return fmt.Errorf("stmt close error: %v", err)
}
return nil
}
// testExecRows runs exec query for each arg row and returns error
func testExecRows(t *testing.T, query string, args [][]interface{}) error {
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
stmt, err := TestDB.PrepareContext(ctx, query)
cancel()
if err != nil {
return fmt.Errorf("prepare error: %v", err)
}
for i := 0; i < len(args); i++ {
ctx, cancel = context.WithTimeout(context.Background(), TestContextTimeout)
_, err = stmt.ExecContext(ctx, args[i]...)
cancel()
if err != nil {
stmt.Close()
return fmt.Errorf("exec - row %v - error: %v", i, err)
}
}
err = stmt.Close()
if err != nil {
return fmt.Errorf("stmt close error: %v", err)
}
return nil
}
// testRunQueryResults runs a slice of testQueryResults tests
func testRunQueryResults(t *testing.T, queryResults []testQueryResults) {
for _, queryResult := range queryResults {
if len(queryResult.args) != len(queryResult.results) {
t.Errorf("args len %v and results len %v do not match - query: %v",
len(queryResult.args), len(queryResult.results), queryResult.query)
continue
}
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
stmt, err := TestDB.PrepareContext(ctx, queryResult.query)
cancel()
if err != nil {
t.Errorf("prepare error: %v - query: %v", err, queryResult.query)
continue
}
testRunQueryResult(t, queryResult, stmt)
err = stmt.Close()
if err != nil {
t.Errorf("close error: %v - query: %v", err, queryResult.query)
}
}
}
// testRunQueryResult runs a single testQueryResults test
func testRunQueryResult(t *testing.T, queryResult testQueryResults, stmt *sql.Stmt) {
for i := 0; i < len(queryResult.args); i++ {
result, err := testGetRows(t, stmt, queryResult.args[i])
if err != nil {
t.Errorf("get rows error: %v - query: %v", err, queryResult.query)
continue
}
if result == nil && queryResult.results[i] != nil {
t.Errorf("result is nil - query: %v", queryResult.query)
continue
}
if len(result) != len(queryResult.results[i]) {
t.Errorf("result rows len %v not equal to expected len %v - query: %v",
len(result), len(queryResult.results[i]), queryResult.query)
continue
}
for j := 0; j < len(result); j++ {
if len(result[j]) != len(queryResult.results[i][j]) {
t.Errorf("result columns len %v not equal to expected len %v - query: %v",
len(result[j]), len(queryResult.results[i][j]), queryResult.query)
continue
}
for k := 0; k < len(result[j]); k++ {
bad := false
type1 := reflect.TypeOf(result[j][k])
type2 := reflect.TypeOf(queryResult.results[i][j][k])
switch {
case type1 == nil || type2 == nil:
if type1 != type2 {
bad = true
}
case type1 == TestTypeTime || type2 == TestTypeTime:
if type1 != type2 {
bad = true
break
}
time1 := result[j][k].(time.Time)
time2 := queryResult.results[i][j][k].(time.Time)
if !time1.Equal(time2) {
bad = true
}
case type1.Kind() == reflect.Slice || type2.Kind() == reflect.Slice:
if !reflect.DeepEqual(result[j][k], queryResult.results[i][j][k]) {
bad = true
}
default:
if result[j][k] != queryResult.results[i][j][k] {
bad = true
}
}
if bad {
t.Errorf("result - %v row %v, %v - received: %T, %v - expected: %T, %v - query: %v", i, j, k,
result[j][k], result[j][k], queryResult.results[i][j][k], queryResult.results[i][j][k], queryResult.query)
}
}
}
}
}
var sql1 = `create table foo(
c1 varchar2(256),
c2 nvarchar2(256),
c3 number,
c4 float,
c6 date,
c7 BINARY_FLOAT,
c8 BINARY_DOUBLE,
c9 TIMESTAMP,
c10 TIMESTAMP WITH TIME ZONE,
c11 TIMESTAMP WITH LOCAL TIME ZONE,
c12 INTERVAL YEAR TO MONTH,
c13 INTERVAL DAY TO SECOND,
c14 RAW(80),
c15 ROWID,
c17 CHAR(15),
c18 NCHAR(20),
c19 CLOB,
c21 BLOB,
cend varchar2(12)
)`
var sql12 = `insert( c1,c2,c3,c4,c6,c7,c8,c9,c10,c11,c12,c13,c14,c17,c18,c19,c20,c21,cend) into foo values(
:1,
:2,
:3,
:4,
:6,
:7,
:8,
:9,
:10,
:11,
NUMTOYMINTERVAL( :12, 'MONTH'),
NUMTODSINTERVAL( :13 / 1000000000, 'SECOND'),
:14,
:17,
:18,
:19,
:21,
'END'
)`
| {
os.Setenv("NLS_LANG", "American_America.AL32UTF8")
var openString string
// [username/[password]@]host[:port][/instance_name][?param1=value1&...¶mN=valueN]
if len(TestUsername) > 0 {
if len(TestPassword) > 0 {
openString = TestUsername + "/" + TestPassword + "@"
} else {
openString = TestUsername + "@"
}
}
openString += TestHostValid
db, err := sql.Open("oci8", openString)
if err != nil {
fmt.Println("open error:", err)
return nil
}
if db == nil {
fmt.Println("db is nil")
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
err = db.PingContext(ctx)
cancel()
if err != nil {
fmt.Println("ping error:", err)
return nil
}
db.Exec("drop table foo")
_, err = db.Exec(sql1)
if err != nil {
fmt.Println("sql1 error:", err)
return nil
}
_, err = db.Exec("truncate table foo")
if err != nil {
fmt.Println("truncate error:", err)
return nil
}
return db
} | identifier_body |
oci8_test.go | package oci8
import (
"bytes"
"context"
"database/sql"
"flag"
"fmt"
"os"
"reflect"
"testing"
"time"
)
// to run database tests
// go test -v github.com/mattn/go-oci8 -args -disableDatabase=false -hostValid type_hostname_here -username type_username_here -password "type_password_here"
// note minimum Go version for testing is 1.8
/* note that testing needs an Oracle user and the following:
create or replace function TYPE_USER_HERE.SLEEP_SECONDS (p_seconds number) return integer is
begin
dbms_lock.sleep(p_seconds);
return 1;
end SLEEP_SECONDS;
/
*/
var (
TestDisableDatabase bool
TestHostValid string
TestHostInvalid string
TestUsername string
TestPassword string
TestContextTimeoutString string
TestContextTimeout time.Duration
TestDatabase string
TestDisableDestructive bool
TestTimeString string
TestDB *sql.DB
TestTypeTime = reflect.TypeOf(time.Time{})
TestTypeByteSlice = reflect.TypeOf([]byte{})
testString1 string
testByteSlice1 []byte
testTimeLocUTC *time.Location
testTimeLocGMT *time.Location
testTimeLocEST *time.Location
testTimeLocMST *time.Location
testTimeLocNZ *time.Location
)
// testQueryResults is for testing a query
type testQueryResults struct {
query string
args [][]interface{}
results [][][]interface{}
}
// TestMain sets up testing
func TestMain(m *testing.M) {
code := setupForTesting()
if code != 0 {
os.Exit(code)
}
code = m.Run()
if !TestDisableDatabase {
err := TestDB.Close()
if err != nil {
fmt.Println("close error:", err)
os.Exit(2)
}
}
os.Exit(code)
}
// setupForTesting sets up flags and connects to test database
func setupForTesting() int {
flag.BoolVar(&TestDisableDatabase, "disableDatabase", true, "set to true to disable the Oracle tests")
flag.StringVar(&TestHostValid, "hostValid", "127.0.0.1", "a host where a Oracle database is running")
flag.StringVar(&TestHostInvalid, "hostInvalid", "169.254.200.200", "a host where a Oracle database is not running")
flag.StringVar(&TestUsername, "username", "", "the username for the Oracle database")
flag.StringVar(&TestPassword, "password", "", "the password for the Oracle database")
flag.StringVar(&TestContextTimeoutString, "contextTimeout", "30s", "the context timeout for queries")
flag.BoolVar(&TestDisableDestructive, "disableDestructive", false, "set to true to disable the destructive Oracle tests")
flag.Parse()
var err error
TestContextTimeout, err = time.ParseDuration(TestContextTimeoutString)
if err != nil {
fmt.Println("parse context timeout error:", err)
return 4
}
if !TestDisableDatabase {
TestDB = testGetDB()
if TestDB == nil {
return 6
}
}
TestTimeString = time.Now().UTC().Format("20060102150405")
var i int
var buffer bytes.Buffer
for i = 0; i < 1000; i++ {
buffer.WriteRune(rune(i))
}
testString1 = buffer.String()
testByteSlice1 = make([]byte, 2000)
for i = 0; i < 2000; i++ {
testByteSlice1[i] = byte(i)
}
testTimeLocUTC, _ = time.LoadLocation("UTC")
testTimeLocGMT, _ = time.LoadLocation("GMT")
testTimeLocEST, _ = time.LoadLocation("EST")
testTimeLocMST, _ = time.LoadLocation("MST")
testTimeLocNZ, _ = time.LoadLocation("NZ")
return 0
}
// TestParseDSN tests parsing the DSN
func TestParseDSN(t *testing.T) {
var (
pacific *time.Location
err error
)
if pacific, err = time.LoadLocation("America/Los_Angeles"); err != nil {
panic(err)
}
var dsnTests = []struct {
dsnString string
expectedDSN *DSN
}{
{"oracle://xxmc:xxmc@107.20.30.169:1521/ORCL?loc=America%2FLos_Angeles", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: pacific}},
{"xxmc/xxmc@107.20.30.169:1521/ORCL?loc=America%2FLos_Angeles", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: pacific}},
{"sys/syspwd@107.20.30.169:1521/ORCL?loc=America%2FLos_Angeles&as=sysdba", &DSN{Username: "sys", Password: "syspwd", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: pacific, operationMode: 0x00000002}}, // with operationMode: 0x00000002 = C.OCI_SYDBA
{"xxmc/xxmc@107.20.30.169:1521/ORCL", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169:1521/ORCL", prefetch_rows: 10, Location: time.Local}},
{"xxmc/xxmc@107.20.30.169/ORCL", &DSN{Username: "xxmc", Password: "xxmc", Connect: "107.20.30.169/ORCL", prefetch_rows: 10, Location: time.Local}},
}
for _, tt := range dsnTests {
actualDSN, err := ParseDSN(tt.dsnString)
if err != nil {
t.Errorf("ParseDSN(%s) got error: %+v", tt.dsnString, err)
}
if !reflect.DeepEqual(actualDSN, tt.expectedDSN) {
t.Errorf("ParseDSN(%s): expected %+v, actual %+v", tt.dsnString, tt.expectedDSN, actualDSN)
}
}
}
// TestIsBadConn tests bad connection error codes
func TestIsBadConn(t *testing.T) {
var errorCode = "ORA-03114"
if !isBadConnection(errorCode) {
t.Errorf("TestIsBadConn: expected %+v, actual %+v", true, isBadConnection(errorCode))
}
}
// testGetDB connects to the test database and returns the database connection
func testGetDB() *sql.DB {
os.Setenv("NLS_LANG", "American_America.AL32UTF8")
var openString string
// [username/[password]@]host[:port][/instance_name][?param1=value1&...¶mN=valueN]
if len(TestUsername) > 0 {
if len(TestPassword) > 0 {
openString = TestUsername + "/" + TestPassword + "@"
} else {
openString = TestUsername + "@"
}
}
openString += TestHostValid
db, err := sql.Open("oci8", openString)
if err != nil {
fmt.Println("open error:", err)
return nil
}
if db == nil {
fmt.Println("db is nil")
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
err = db.PingContext(ctx)
cancel()
if err != nil {
fmt.Println("ping error:", err)
return nil
}
db.Exec("drop table foo")
_, err = db.Exec(sql1)
if err != nil {
fmt.Println("sql1 error:", err)
return nil
}
_, err = db.Exec("truncate table foo")
if err != nil {
fmt.Println("truncate error:", err)
return nil
}
return db
}
// testGetRows runs a statment and returns the rows as [][]interface{}
func testGetRows(t *testing.T, stmt *sql.Stmt, args []interface{}) ([][]interface{}, error) {
// get rows
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
defer cancel()
var rows *sql.Rows
rows, err := stmt.QueryContext(ctx, args...)
if err != nil {
return nil, fmt.Errorf("query error: %v", err)
}
// get column infomration
var columns []string
columns, err = rows.Columns()
if err != nil {
rows.Close()
return nil, fmt.Errorf("columns error: %v", err)
}
// create values
values := make([][]interface{}, 0, 1)
// get values
pRowInterface := make([]interface{}, len(columns))
for rows.Next() {
rowInterface := make([]interface{}, len(columns))
for i := 0; i < len(rowInterface); i++ {
pRowInterface[i] = &rowInterface[i]
}
err = rows.Err()
if err != nil {
rows.Close()
return nil, fmt.Errorf("rows error: %v", err)
}
err = rows.Scan(pRowInterface...)
if err != nil {
rows.Close()
return nil, fmt.Errorf("scan error: %v", err)
}
values = append(values, rowInterface)
}
err = rows.Err()
if err != nil {
rows.Close()
return nil, fmt.Errorf("rows error: %v", err)
}
err = rows.Close()
if err != nil {
return nil, fmt.Errorf("close error: %v", err)
}
// return values
return values, nil
}
// testExec runs an exec query and returns error
func testExec(t *testing.T, query string, args []interface{}) error {
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
stmt, err := TestDB.PrepareContext(ctx, query)
cancel()
if err != nil {
return fmt.Errorf("prepare error: %v", err)
}
ctx, cancel = context.WithTimeout(context.Background(), TestContextTimeout)
_, err = stmt.ExecContext(ctx, args...)
cancel()
if err != nil {
stmt.Close()
return fmt.Errorf("exec error: %v", err)
}
err = stmt.Close()
if err != nil {
return fmt.Errorf("stmt close error: %v", err)
}
return nil
}
// testExecRows runs exec query for each arg row and returns error
func testExecRows(t *testing.T, query string, args [][]interface{}) error {
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
stmt, err := TestDB.PrepareContext(ctx, query)
cancel()
if err != nil {
return fmt.Errorf("prepare error: %v", err)
}
for i := 0; i < len(args); i++ {
ctx, cancel = context.WithTimeout(context.Background(), TestContextTimeout)
_, err = stmt.ExecContext(ctx, args[i]...)
cancel()
if err != nil {
stmt.Close()
return fmt.Errorf("exec - row %v - error: %v", i, err)
}
}
err = stmt.Close()
if err != nil {
return fmt.Errorf("stmt close error: %v", err)
}
return nil
}
// testRunQueryResults runs a slice of testQueryResults tests
func testRunQueryResults(t *testing.T, queryResults []testQueryResults) {
for _, queryResult := range queryResults {
if len(queryResult.args) != len(queryResult.results) {
t.Errorf("args len %v and results len %v do not match - query: %v",
len(queryResult.args), len(queryResult.results), queryResult.query)
continue
}
ctx, cancel := context.WithTimeout(context.Background(), TestContextTimeout)
stmt, err := TestDB.PrepareContext(ctx, queryResult.query)
cancel()
if err != nil {
t.Errorf("prepare error: %v - query: %v", err, queryResult.query)
continue
}
testRunQueryResult(t, queryResult, stmt)
err = stmt.Close()
if err != nil {
t.Errorf("close error: %v - query: %v", err, queryResult.query)
}
}
}
// testRunQueryResult runs a single testQueryResults test
func testRunQueryResult(t *testing.T, queryResult testQueryResults, stmt *sql.Stmt) {
for i := 0; i < len(queryResult.args); i++ {
result, err := testGetRows(t, stmt, queryResult.args[i])
if err != nil |
if result == nil && queryResult.results[i] != nil {
t.Errorf("result is nil - query: %v", queryResult.query)
continue
}
if len(result) != len(queryResult.results[i]) {
t.Errorf("result rows len %v not equal to expected len %v - query: %v",
len(result), len(queryResult.results[i]), queryResult.query)
continue
}
for j := 0; j < len(result); j++ {
if len(result[j]) != len(queryResult.results[i][j]) {
t.Errorf("result columns len %v not equal to expected len %v - query: %v",
len(result[j]), len(queryResult.results[i][j]), queryResult.query)
continue
}
for k := 0; k < len(result[j]); k++ {
bad := false
type1 := reflect.TypeOf(result[j][k])
type2 := reflect.TypeOf(queryResult.results[i][j][k])
switch {
case type1 == nil || type2 == nil:
if type1 != type2 {
bad = true
}
case type1 == TestTypeTime || type2 == TestTypeTime:
if type1 != type2 {
bad = true
break
}
time1 := result[j][k].(time.Time)
time2 := queryResult.results[i][j][k].(time.Time)
if !time1.Equal(time2) {
bad = true
}
case type1.Kind() == reflect.Slice || type2.Kind() == reflect.Slice:
if !reflect.DeepEqual(result[j][k], queryResult.results[i][j][k]) {
bad = true
}
default:
if result[j][k] != queryResult.results[i][j][k] {
bad = true
}
}
if bad {
t.Errorf("result - %v row %v, %v - received: %T, %v - expected: %T, %v - query: %v", i, j, k,
result[j][k], result[j][k], queryResult.results[i][j][k], queryResult.results[i][j][k], queryResult.query)
}
}
}
}
}
var sql1 = `create table foo(
c1 varchar2(256),
c2 nvarchar2(256),
c3 number,
c4 float,
c6 date,
c7 BINARY_FLOAT,
c8 BINARY_DOUBLE,
c9 TIMESTAMP,
c10 TIMESTAMP WITH TIME ZONE,
c11 TIMESTAMP WITH LOCAL TIME ZONE,
c12 INTERVAL YEAR TO MONTH,
c13 INTERVAL DAY TO SECOND,
c14 RAW(80),
c15 ROWID,
c17 CHAR(15),
c18 NCHAR(20),
c19 CLOB,
c21 BLOB,
cend varchar2(12)
)`
var sql12 = `insert( c1,c2,c3,c4,c6,c7,c8,c9,c10,c11,c12,c13,c14,c17,c18,c19,c20,c21,cend) into foo values(
:1,
:2,
:3,
:4,
:6,
:7,
:8,
:9,
:10,
:11,
NUMTOYMINTERVAL( :12, 'MONTH'),
NUMTODSINTERVAL( :13 / 1000000000, 'SECOND'),
:14,
:17,
:18,
:19,
:21,
'END'
)`
| {
t.Errorf("get rows error: %v - query: %v", err, queryResult.query)
continue
} | conditional_block |
segment_accountant.rs | //! The `SegmentAccountant` is an allocator for equally-
//! sized chunks of the underlying storage file (segments).
//!
//! It must maintain these critical safety properties:
//!
//! A. We must not overwrite existing segments when they
//! contain the most-recent stable state for a page.
//! B. We must not overwrite existing segments when active
//! threads may have references to LogID's that point
//! into those segments.
//!
//! To complicate matters, the `PageCache` only knows
//! when it has put a page into an IO buffer, but it
//! doesn't keep track of when that IO buffer is
//! stabilized (until write coalescing is implemented).
//!
//! To address these safety concerns, we rely on
//! these techniques:
//!
//! 1. We delay the reuse of any existing segment
//! by ensuring there are at least <# io buffers>
//! freed segments in front of the newly freed
//! segment in the free list. This ensures that
//! any pending IO buffer writes will hit
//! stable storage before we overwrite the
//! segment that may have contained the previous
//! latest stable copy of a page's state.
//! 2. we use a `SegmentDropper` that guarantees
//! any segment that has been logically freed
//! or emptied by the `PageCache` will have its
//! addition to the free segment list be delayed
//! until any active threads that were acting on
//! the shared state have checked-out.
//!
//! Another concern that arises due to the fact that
//! IO buffers may be written out-of-order is the
//! correct recovery of segments. If there is data
//! loss in recently written segments, we must be
//! careful to preserve linearizability in the log.
//! To do this, we must detect "torn segments" that
//! were not able to be fully written before a crash
//! happened. We detect torn individual segments by
//! writing a `SegmentTrailer` to the end of the
//! segment AFTER we have sync'd it. If the trailer
//! is not present during recovery, the recovery
//! process will not continue to a segment that
//! may contain logically later data.
//!
//! But what if we wrote a later segment, and its
//! trailer, before we were able to write its
//! immediate predecessor segment, and then a
//! crash happened? We must preserve linearizability,
//! so we can not accidentally recover the later
//! segment when its predecessor was lost in the crash.
//!
//! 3. This case is solved again by having used
//! <# io buffers> segments before reuse. We guarantee
//! that the last <# io buffers> segments will be
//! present, which means we can write a "previous
//! log sequence number pointer" to the header of
//! each segment. During recovery, if these previous
//! segment Lsn pointers don't match up, we know we
//! have encountered a lost segment, and we will not
//! continue the recovery past the detected gap.
use std::collections::{BTreeMap, BTreeSet, HashSet, VecDeque};
use std::fs::File;
use std::sync::{Arc, Mutex};
use std::mem;
use coco::epoch::{Owned, pin};
use super::*;
/// The segment accountant keeps track of the logical blocks
/// of storage. It scans through all segments quickly during
/// recovery and attempts to locate torn segments.
#[derive(Default, Debug)]
pub struct SegmentAccountant {
// static or one-time set
config: Config,
recovered_lsn: Lsn,
recovered_lid: LogID,
// TODO these should be sharded to improve performance
segments: Vec<Segment>,
pending_clean: HashSet<PageID>,
// TODO put behind a single mutex
// NB MUST group pause_rewriting with ordering
// and free!
free: Arc<Mutex<VecDeque<LogID>>>,
tip: LogID,
to_clean: BTreeSet<LogID>,
pause_rewriting: bool,
last_given: LogID,
ordering: BTreeMap<Lsn, LogID>,
}
// We use a `SegmentDropper` to ensure that we never
// add a segment's LogID to the free deque while any
// active thread could be acting on it. This is necessary
// despite the "safe buffer" in the free queue because
// the safe buffer only prevents the sole remaining
// copy of a page from being overwritten. This prevents
// dangling references to segments that were rewritten after
// the `LogID` was read.
struct SegmentDropper(LogID, Arc<Mutex<VecDeque<LogID>>>);
impl Drop for SegmentDropper {
fn drop(&mut self) {
let mut deque = self.1.lock().unwrap();
deque.push_back(self.0);
}
}
/// A `Segment` holds the bookkeeping information for
/// a contiguous block of the disk. It may contain many
/// fragments from different pages. Over time, we track
/// when segments become reusable and allow them to be
/// overwritten for new data.
#[derive(Default, Debug, PartialEq, Clone, Serialize, Deserialize)]
pub struct Segment {
present: BTreeSet<PageID>,
removed: HashSet<PageID>,
deferred_remove: HashSet<PageID>,
lsn: Option<Lsn>,
state: SegmentState,
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum SegmentState {
/// the segment is marked for reuse, should never receive
/// new pids,
/// TODO consider: but may receive removals for pids that were
/// already removed?
Free,
/// the segment is being written to or actively recovered, and
/// will have pages assigned to it
Active,
/// the segment is no longer being written to or recovered, and
/// will have pages marked as relocated from it
Inactive,
/// the segment is having its resident pages relocated before
/// becoming free
Draining,
}
use self::SegmentState::*;
impl Default for SegmentState {
fn default() -> SegmentState {
Free
}
}
impl Segment {
fn _is_free(&self) -> bool {
match self.state {
Free => true,
_ => false,
}
}
fn is_inactive(&self) -> bool {
match self.state {
Inactive => true,
_ => false,
}
}
fn _is_active(&self) -> bool {
match self.state {
Active => true,
_ => false,
}
}
fn is_draining(&self) -> bool {
match self.state {
Draining => true,
_ => false,
}
}
fn free_to_active(&mut self, new_lsn: Lsn) {
trace!(
"setting Segment to Active with new lsn {:?}, was {:?}",
new_lsn,
self.lsn
);
assert_eq!(self.state, Free);
self.present.clear();
self.removed.clear();
self.lsn = Some(new_lsn);
self.state = Active;
}
/// Transitions a segment to being in the Inactive state.
/// Called in:
///
/// PageCache::advance_snapshot for marking when a
/// segment has been completely read
///
/// SegmentAccountant::recover for when
pub fn active_to_inactive(&mut self, lsn: Lsn, from_recovery: bool) {
trace!("setting Segment with lsn {:?} to Inactive", self.lsn());
assert_eq!(self.state, Active);
if from_recovery {
assert!(lsn >= self.lsn());
} else {
assert_eq!(self.lsn.unwrap(), lsn);
}
self.state = Inactive;
// now we can push any deferred removals to the removed set
let deferred = mem::replace(&mut self.deferred_remove, HashSet::new());
for pid in deferred {
self.remove_pid(pid, lsn);
}
}
pub fn inactive_to_draining(&mut self, lsn: Lsn) {
trace!("setting Segment with lsn {:?} to Draining", self.lsn());
assert_eq!(self.state, Inactive);
assert!(lsn >= self.lsn());
self.state = Draining;
}
pub fn draining_to_free(&mut self, lsn: Lsn) {
trace!("setting Segment with lsn {:?} to Free", self.lsn());
assert!(self.is_draining());
assert!(lsn >= self.lsn());
self.present.clear();
self.removed.clear();
self.state = Free;
}
pub fn recovery_ensure_initialized(&mut self, lsn: Lsn) {
if let Some(current_lsn) = self.lsn {
if current_lsn != lsn {
assert!(lsn > current_lsn);
trace!("(snapshot) resetting segment to have lsn {}", lsn);
self.state = Free;
self.free_to_active(lsn);
}
} else {
trace!("(snapshot) resetting segment to have lsn {}", lsn);
self.free_to_active(lsn);
}
}
fn lsn(&self) -> Lsn {
self.lsn.unwrap()
}
/// Add a pid to the Segment. The caller must provide
/// the Segment's LSN.
pub fn insert_pid(&mut self, pid: PageID, lsn: Lsn) {
assert_eq!(lsn, self.lsn.unwrap());
// if this breaks, we didn't implement the transition
// logic right in write_to_log, and maybe a thread is
// using the SA to add pids AFTER their calls to
// res.complete() worked.
assert_eq!(self.state, Active);
assert!(!self.removed.contains(&pid));
self.present.insert(pid);
}
/// Mark that a pid in this Segment has been relocated.
/// The caller must provide the LSN of the removal.
pub fn remove_pid(&mut self, pid: PageID, lsn: Lsn) {
// TODO this could be racy?
assert!(lsn >= self.lsn.unwrap());
match self.state {
Active => {
// we have received a removal before
// transferring this segment to Inactive, so
// we defer this pid's removal until the transfer.
self.deferred_remove.insert(pid);
}
Inactive | Draining => {
self.present.remove(&pid);
self.removed.insert(pid);
}
Free => panic!("remove_pid called on a Free Segment"),
}
}
fn live_pct(&self) -> f64 {
let total = self.present.len() + self.removed.len();
self.present.len() as f64 / total as f64
}
fn can_free(&self) -> bool {
self.state == Draining && self.is_empty()
}
pub fn is_empty(&self) -> bool {
self.present.is_empty()
}
}
impl SegmentAccountant {
pub fn new(config: Config) -> SegmentAccountant {
let mut ret = SegmentAccountant::default();
ret.config = config;
ret.scan_segment_lsns();
ret
}
/// Called from the `PageCache` recovery logic, this initializes the
/// `SegmentAccountant` based on recovered segment information.
pub fn initialize_from_segments(&mut self, mut segments: Vec<Segment>) {
let safety_buffer = self.config.get_io_bufs();
let logical_tail: Vec<LogID> = self.ordering
.iter()
.rev()
.take(safety_buffer)
.map(|(_lsn, lid)| *lid)
.collect();
for (idx, ref mut segment) in segments.iter_mut().enumerate() {
if segment.lsn.is_none() {
continue;
}
let segment_start = idx as LogID *
self.config.get_io_buf_size() as LogID;
let lsn = segment.lsn();
// populate free and to_clean if the segment has seen
if segment.is_empty() {
// can be reused immediately
if segment.state == Active {
segment.active_to_inactive(lsn, true);
}
if segment.state == Inactive {
segment.inactive_to_draining(lsn);
}
self.to_clean.remove(&segment_start);
trace!("pid {} freed @initialize_from_segments", segment_start);
if logical_tail.contains(&segment_start) {
// we depend on the invariant that the last segments
// always link together, so that we can detect torn
// segments during recovery.
self.ensure_safe_free_distance();
}
segment.draining_to_free(lsn);
if self.tip != segment_start &&
!self.free.lock().unwrap().contains(&segment_start)
{
// don't give out this segment twice
trace!(
"freeing segment {} from initialize_from_segments, tip: {}",
segment_start,
self.tip
);
self.free_segment(segment_start, true);
}
} else if segment.live_pct() <=
self.config.get_segment_cleanup_threshold()
{
// can be cleaned
trace!(
"setting segment {} to Draining from initialize_from_segments",
segment_start
);
if segment.state == Active {
segment.active_to_inactive(lsn, true);
}
segment.inactive_to_draining(lsn);
self.to_clean.insert(segment_start);
self.free.lock().unwrap().retain(|&s| s != segment_start);
} else {
self.free.lock().unwrap().retain(|&s| s != segment_start);
}
}
self.set_last_given();
if !segments.is_empty() {
trace!("initialized self.segments to {:?}", segments);
for (i, segment) in segments.into_iter().enumerate() {
// we should not forget about segments that we've added
// during the initial segment scan, but freed for being
// empty, as that's where we set an LSN for them.
self.segments[i] = segment;
}
} else {
// this is basically just for when we recover with a single
// empty-yet-initialized segment
debug!(
"pagecache recovered no segments so not initializing from any"
);
}
}
fn set_last_given(&mut self) {
let new_max = self.ordering
.iter()
.rev()
.nth(0)
.map(|(lsn, lid)| (*lsn, *lid))
.clone();
if let Some((_lsn, lid)) = new_max {
trace!("setting last_given to {}", lid);
self.last_given = lid;
}
}
// Mark a specific segment as being present at a particular
// file offset.
fn recover(&mut self, lsn: Lsn, lid: LogID) {
trace!("recovered segment lsn {} at lid {}", lsn, lid);
let io_buf_size = self.config.get_io_buf_size() as LogID;
let idx = self.lid_to_idx(lid);
assert!(!(lsn == 0 && lid != 0), "lsn 0 provided with non-zero lid");
if !self.segments[idx].is_empty() {
self.segments[idx].free_to_active(lsn);
let segment_lsn = lsn / io_buf_size * io_buf_size;
self.segments[idx].active_to_inactive(segment_lsn, true);
} else {
// this is necessary for properly removing the ordering
// info later on, if this segment is found to be empty
// during recovery.
self.segments[idx].lsn = Some(lsn);
}
assert!(!self.ordering.contains_key(&lsn));
self.ordering.insert(lsn, lid);
}
// Scan the log file if we don't know of any Lsn offsets yet, and recover
// the order of segments, and the highest Lsn.
fn scan_segment_lsns(&mut self) {
assert!(self.segments.is_empty());
let segment_len = self.config.get_io_buf_size() as LogID;
let mut cursor = 0;
let cached_f = self.config.cached_file();
let mut f = cached_f.borrow_mut();
while let Ok(segment) = f.read_segment_header(cursor) {
// in the future this can be optimized to just read
// the initial header at that position... but we need to
// make sure the segment is not torn
trace!("SA scanned header during startup {:?}", segment);
if segment.ok && (segment.lsn != 0 || cursor == 0) {
// if lsn is 0, this is free
self.recover(segment.lsn, cursor);
} else {
// this segment was skipped or is free
trace!(
"freeing segment {} from scan_segment_lsns",
cursor,
);
self.free_segment(cursor, true);
}
cursor += segment_len;
}
// Check that the last <# io buffers> segments properly
// link their previous segment pointers.
self.clean_tail_tears(&mut f);
// Drop the file so that the `Iter` below is able to borrow
// the thread's file handle.
drop(f);
let mut empty_tip = true;
let max = self.ordering
.iter()
.rev()
.nth(0)
.map(|(lsn, lid)| (*lsn, *lid))
.clone();
if let Some((base_lsn, lid)) = max {
let segment_base = lid / segment_len * segment_len;
assert_eq!(lid, segment_base);
let mut tip = lid + SEG_HEADER_LEN as LogID;
let cur_lsn = base_lsn + SEG_HEADER_LEN as Lsn;
let segment_ceiling = base_lsn + segment_len -
SEG_TRAILER_LEN as LogID -
MSG_HEADER_LEN as LogID;
trace!(
"segment accountant recovering segment at lsn: {} \
read_offset: {}, ceiling: {}, cur_lsn: {}",
base_lsn,
lid,
segment_ceiling,
cur_lsn
);
let iter = Iter {
config: &self.config,
max_lsn: segment_ceiling,
cur_lsn: cur_lsn,
segment_base: None,
segment_iter: Box::new(vec![(base_lsn, lid)].into_iter()),
segment_len: segment_len as usize,
use_compression: self.config.get_use_compression(),
trailer: None,
};
for (_lsn, lid, _buf) in iter {
empty_tip = false;
tip = lid;
assert!(tip <= segment_ceiling);
}
if !empty_tip {
// if we found any later
let mut f = cached_f.borrow_mut();
let (_, _, len) = f.read_message(
tip,
segment_len as usize,
self.config.get_use_compression(),
).unwrap()
.flush()
.unwrap();
tip += MSG_HEADER_LEN as LogID + len as LogID;
self.recovered_lid = tip;
}
let segment_overhang = self.recovered_lid %
self.config.get_io_buf_size() as LogID;
self.recovered_lsn = base_lsn + segment_overhang;
} else {
assert!(
self.ordering.is_empty(),
"should have found recovered lsn {} in ordering {:?}",
self.recovered_lsn,
self.ordering
);
}
// determine the end of our valid entries
for &lid in self.ordering.values() {
if lid >= self.tip {
let new_tip = lid + self.config.get_io_buf_size() as LogID;
self.tip = new_tip;
}
}
if empty_tip && max.is_some() {
let (_lsn, lid) = max.unwrap();
debug!("freed empty tip segment {} while recovering segments", lid);
self.free_segment(lid, true);
}
// make sure we don't double-allocate a segment
while self.free.lock().unwrap().contains(&self.tip) {
self.tip += self.config.get_io_buf_size() as LogID;
}
debug!(
"segment accountant recovered max lsn:{}, lid: {}",
self.recovered_lsn,
self.recovered_lid
);
}
fn free_segment(&mut self, lid: LogID, in_recovery: bool) {
debug!("freeing segment {}", lid);
let idx = self.lid_to_idx(lid);
assert_eq!(self.segments[idx].state, Free);
assert!(
!self.free.lock().unwrap().contains(&lid),
"double-free of a segment occurred"
);
if in_recovery {
self.free.lock().unwrap().push_front(lid);
// We only want to immediately remove the segment
// mapping if we're in recovery because otherwise
// we may be acting on updates relating to things
// in IO buffers, before they have been flushed.
// The latter will be removed from the mapping
// before being reused, in the next() method.
if let Some(old_lsn) = self.segments[idx].lsn {
trace!(
"removing segment {} with lsn {} from ordering",
lid,
old_lsn
);
self.ordering.remove(&old_lsn);
}
} else {
self.ensure_safe_free_distance();
pin(|scope| {
let pd = Owned::new(SegmentDropper(lid, self.free.clone()));
let ptr = pd.into_ptr(scope);
unsafe {
scope.defer_drop(ptr);
scope.flush();
}
});
}
}
// This ensures that the last <# io buffers> segments on
// disk connect via their previous segment pointers in
// the header. This is important because we expect that
// the last <# io buffers> segments will join up, and we
// never reuse buffers within this safety range.
fn | (&mut self, f: &mut File) {
let safety_buffer = self.config.get_io_bufs();
let logical_tail: Vec<(Lsn, LogID)> = self.ordering
.iter()
.rev()
.take(safety_buffer)
.map(|(lsn, lid)| (*lsn, *lid))
.collect();
let mut tear_at = None;
for (i, &(_lsn, lid)) in logical_tail.iter().enumerate() {
if i + 1 == logical_tail.len() {
// we've reached the end, nothing to check after
break;
}
// check link
let segment_header = f.read_segment_header(lid).unwrap();
if !segment_header.ok {
error!(
"read corrupted segment header during recovery of segment {}",
lid
);
tear_at = Some(i);
continue;
}
let expected_prev = segment_header.prev;
let actual_prev = logical_tail[i + 1].1;
if expected_prev != actual_prev {
// detected a tear, everything after
error!(
"detected corruption during recovery for segment at {}! \
expected prev lid: {} actual: {} in last chain {:?}",
lid,
expected_prev,
actual_prev,
logical_tail
);
tear_at = Some(i);
}
}
if let Some(i) = tear_at {
// we need to chop off the elements after the tear
for &(_lsn_to_chop, lid_to_chop) in &logical_tail[0..i] {
error!("clearing corrupted segment at lid {}", lid_to_chop);
self.free_segment(lid_to_chop, true);
// TODO write zeroes to these segments to reduce
// false recovery.
}
}
}
pub fn recovered_lid(&self) -> LogID {
self.recovered_lid
}
pub fn recovered_lsn(&self) -> Lsn {
self.recovered_lsn
}
/// Causes all new allocations to occur at the end of the file, which
/// is necessary to preserve consistency while concurrently iterating through
/// the log during snapshot creation.
pub fn pause_rewriting(&mut self) {
self.pause_rewriting = true;
}
/// Re-enables segment rewriting after iteration is complete.
pub fn resume_rewriting(&mut self) {
self.pause_rewriting = false;
}
/// Called by the `PageCache` when a page has been rewritten completely.
/// We mark all of the old segments that contained the previous state
/// from the page, and if the old segments are empty or clear enough to
/// begin accelerated cleaning we mark them as so.
pub fn mark_replace(
&mut self,
pid: PageID,
lsn: Lsn,
old_lids: Vec<LogID>,
new_lid: LogID,
) {
trace!("mark_replace pid {} at lid {} with lsn {}", pid, new_lid, lsn);
self.pending_clean.remove(&pid);
let new_idx = new_lid as usize / self.config.get_io_buf_size();
// make sure we're not actively trying to replace the destination
let new_segment_start = new_idx as LogID *
self.config.get_io_buf_size() as LogID;
self.to_clean.remove(&new_segment_start);
for old_lid in old_lids {
let old_idx = self.lid_to_idx(old_lid);
let segment_start = (old_idx * self.config.get_io_buf_size()) as
LogID;
if new_idx == old_idx {
// we probably haven't flushed this segment yet, so don't
// mark the pid as being removed from it
continue;
}
if self.segments[old_idx].lsn() > lsn {
// has been replaced after this call already,
// quite a big race happened
// TODO think about how this happens with our segment delay
continue;
}
if self.segments[old_idx].state == Free {
// this segment is already reused
// TODO should this be a panic?
continue;
}
self.segments[old_idx].remove_pid(pid, lsn);
if self.segments[old_idx].can_free() {
// can be reused immediately
self.segments[old_idx].draining_to_free(lsn);
self.to_clean.remove(&segment_start);
trace!("freed segment {} in replace", segment_start);
self.free_segment(segment_start, false);
} else if self.segments[old_idx].is_inactive() &&
self.segments[old_idx].live_pct() <=
self.config.get_segment_cleanup_threshold()
{
// can be cleaned
trace!(
"SA inserting {} into to_clean from mark_replace",
segment_start
);
self.segments[old_idx].inactive_to_draining(lsn);
self.to_clean.insert(segment_start);
}
}
self.mark_link(pid, lsn, new_lid);
}
/// Called by the `PageCache` to find useful pages
/// it should try to rewrite.
pub fn clean(&mut self, ignore: Option<PageID>) -> Option<PageID> {
// try to maintain about twice the number of necessary
// on-deck segments, to reduce the amount of log growth.
if self.free.lock().unwrap().len() >=
self.config.get_min_free_segments() * 2
{
return None;
}
let to_clean = self.to_clean.clone();
for lid in to_clean {
let idx = self.lid_to_idx(lid);
let segment = &self.segments[idx];
assert_eq!(segment.state, Draining);
for pid in &segment.present {
if self.pending_clean.contains(pid) || ignore == Some(*pid) {
continue;
}
self.pending_clean.insert(*pid);
trace!(
"telling caller to clean {} from segment at {}",
pid,
lid,
);
return Some(*pid);
}
}
None
}
/// Called from `PageCache` when some state has been added
/// to a logical page at a particular offset. We ensure the
/// page is present in the segment's page set.
pub fn mark_link(&mut self, pid: PageID, lsn: Lsn, lid: LogID) {
trace!("mark_link pid {} at lid {}", pid, lid);
self.pending_clean.remove(&pid);
let idx = self.lid_to_idx(lid);
// make sure we're not actively trying to replace the destination
let new_segment_start = idx as LogID *
self.config.get_io_buf_size() as LogID;
self.to_clean.remove(&new_segment_start);
let segment = &mut self.segments[idx];
if segment.lsn() > lsn {
// a race happened, and our Lsn does not apply anymore
// TODO think about how this happens with segment delay
return;
}
let segment_lsn = lsn / self.config.get_io_buf_size() as Lsn *
self.config.get_io_buf_size() as Lsn;
segment.insert_pid(pid, segment_lsn);
}
/// Called after the trailer of a segment has been written to disk,
/// indicating that no more pids will be added to a segment. Moves
/// the segment into the Inactive state.
///
/// # Panics
/// The provided lsn and lid must exactly match the existing segment.
pub fn deactivate_segment(&mut self, lsn: Lsn, lid: LogID) {
let idx = self.lid_to_idx(lid);
self.segments[idx].active_to_inactive(lsn, false);
}
fn bump_tip(&mut self) -> LogID {
let lid = self.tip;
self.tip += self.config.get_io_buf_size() as LogID;
trace!("advancing file tip from {} to {}", lid, self.tip);
lid
}
fn ensure_safe_free_distance(&mut self) {
// NB we must maintain a queue of free segments that
// is at least as long as the number of io buffers.
// This is so that we will never give out a segment
// that has been placed on the free queue after its
// contained pages have all had updates added to an
// IO buffer during a PageCache replace, but whose
// replacing updates have not actually landed on disk
// yet. If updates always have to wait in a queue
// at least as long as the number of IO buffers, it
// guarantees that the old updates are actually safe
// somewhere else first. Note that we push_front here
// so that the log tip is used first.
while self.free.lock().unwrap().len() < self.config.get_io_bufs() {
let new_lid = self.bump_tip();
trace!(
"pushing segment {} to free from ensure_safe_free_distance",
new_lid
);
self.free.lock().unwrap().push_front(new_lid);
}
}
/// Returns the next offset to write a new segment in,
/// as well as the offset of the previous segment that
/// was allocated, so that we can detect missing
/// out-of-order segments during recovery.
pub fn next(&mut self, lsn: Lsn) -> (LogID, LogID) {
assert_eq!(
lsn % self.config.get_io_buf_size() as Lsn,
0,
"unaligned Lsn provided to next!"
);
// pop free or add to end
let lid = if self.pause_rewriting {
self.bump_tip()
} else {
let res = self.free.lock().unwrap().pop_front();
if res.is_none() {
self.bump_tip()
} else {
res.unwrap()
}
};
let last_given = self.last_given;
// pin lsn to this segment
let idx = self.lid_to_idx(lid);
assert_eq!(self.segments[idx].state, Free);
// remove the ordering from our list
if let Some(old_lsn) = self.segments[idx].lsn {
self.ordering.remove(&old_lsn);
}
self.segments[idx].free_to_active(lsn);
self.ordering.insert(lsn, lid);
debug!(
"segment accountant returning offset: {} paused: {} last: {} on deck: {:?}",
lid,
self.pause_rewriting,
last_given,
self.free
);
self.last_given = lid;
if last_given != 0 {
assert_ne!(last_given, lid);
}
(lid, last_given)
}
/// Returns an iterator over a snapshot of current segment
/// log sequence numbers and their corresponding file offsets.
pub fn segment_snapshot_iter_from(
&mut self,
lsn: Lsn,
) -> Box<Iterator<Item = (Lsn, LogID)>> {
// assert!( self.pause_rewriting, "must pause rewriting before iterating over segments");
let segment_len = self.config.get_io_buf_size() as Lsn;
let normalized_lsn = lsn / segment_len * segment_len;
Box::new(self.ordering.clone().into_iter().filter(move |&(l, _)| {
l >= normalized_lsn
}))
}
fn lid_to_idx(&mut self, lid: LogID) -> usize {
let idx = lid as usize / self.config.get_io_buf_size();
if self.segments.len() < idx + 1 {
trace!(
"expanding self.segments to cover segment at {}",
(idx + 1) * self.config.get_io_buf_size()
);
self.segments.resize(idx + 1, Segment::default());
}
idx
}
}
| clean_tail_tears | identifier_name |
segment_accountant.rs | //! The `SegmentAccountant` is an allocator for equally-
//! sized chunks of the underlying storage file (segments).
//!
//! It must maintain these critical safety properties:
//!
//! A. We must not overwrite existing segments when they
//! contain the most-recent stable state for a page.
//! B. We must not overwrite existing segments when active
//! threads may have references to LogID's that point
//! into those segments.
//!
//! To complicate matters, the `PageCache` only knows
//! when it has put a page into an IO buffer, but it
//! doesn't keep track of when that IO buffer is
//! stabilized (until write coalescing is implemented).
//!
//! To address these safety concerns, we rely on
//! these techniques:
//!
//! 1. We delay the reuse of any existing segment
//! by ensuring there are at least <# io buffers>
//! freed segments in front of the newly freed
//! segment in the free list. This ensures that
//! any pending IO buffer writes will hit
//! stable storage before we overwrite the
//! segment that may have contained the previous
//! latest stable copy of a page's state.
//! 2. we use a `SegmentDropper` that guarantees
//! any segment that has been logically freed
//! or emptied by the `PageCache` will have its
//! addition to the free segment list be delayed
//! until any active threads that were acting on
//! the shared state have checked-out.
//!
//! Another concern that arises due to the fact that
//! IO buffers may be written out-of-order is the
//! correct recovery of segments. If there is data
//! loss in recently written segments, we must be
//! careful to preserve linearizability in the log.
//! To do this, we must detect "torn segments" that
//! were not able to be fully written before a crash
//! happened. We detect torn individual segments by
//! writing a `SegmentTrailer` to the end of the
//! segment AFTER we have sync'd it. If the trailer
//! is not present during recovery, the recovery
//! process will not continue to a segment that
//! may contain logically later data.
//!
//! But what if we wrote a later segment, and its
//! trailer, before we were able to write its
//! immediate predecessor segment, and then a
//! crash happened? We must preserve linearizability,
//! so we can not accidentally recover the later
//! segment when its predecessor was lost in the crash.
//!
//! 3. This case is solved again by having used
//! <# io buffers> segments before reuse. We guarantee
//! that the last <# io buffers> segments will be
//! present, which means we can write a "previous
//! log sequence number pointer" to the header of
//! each segment. During recovery, if these previous
//! segment Lsn pointers don't match up, we know we
//! have encountered a lost segment, and we will not
//! continue the recovery past the detected gap.
use std::collections::{BTreeMap, BTreeSet, HashSet, VecDeque};
use std::fs::File;
use std::sync::{Arc, Mutex};
use std::mem;
use coco::epoch::{Owned, pin};
use super::*;
/// The segment accountant keeps track of the logical blocks
/// of storage. It scans through all segments quickly during
/// recovery and attempts to locate torn segments.
#[derive(Default, Debug)]
pub struct SegmentAccountant {
// static or one-time set
config: Config,
recovered_lsn: Lsn,
recovered_lid: LogID,
// TODO these should be sharded to improve performance
segments: Vec<Segment>,
pending_clean: HashSet<PageID>,
// TODO put behind a single mutex
// NB MUST group pause_rewriting with ordering
// and free!
free: Arc<Mutex<VecDeque<LogID>>>,
tip: LogID,
to_clean: BTreeSet<LogID>,
pause_rewriting: bool,
last_given: LogID,
ordering: BTreeMap<Lsn, LogID>,
}
// We use a `SegmentDropper` to ensure that we never
// add a segment's LogID to the free deque while any
// active thread could be acting on it. This is necessary
// despite the "safe buffer" in the free queue because
// the safe buffer only prevents the sole remaining
// copy of a page from being overwritten. This prevents
// dangling references to segments that were rewritten after
// the `LogID` was read.
struct SegmentDropper(LogID, Arc<Mutex<VecDeque<LogID>>>);
impl Drop for SegmentDropper {
fn drop(&mut self) {
let mut deque = self.1.lock().unwrap();
deque.push_back(self.0);
}
}
/// A `Segment` holds the bookkeeping information for
/// a contiguous block of the disk. It may contain many
/// fragments from different pages. Over time, we track
/// when segments become reusable and allow them to be
/// overwritten for new data.
#[derive(Default, Debug, PartialEq, Clone, Serialize, Deserialize)]
pub struct Segment {
present: BTreeSet<PageID>,
removed: HashSet<PageID>,
deferred_remove: HashSet<PageID>,
lsn: Option<Lsn>,
state: SegmentState,
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum SegmentState {
/// the segment is marked for reuse, should never receive
/// new pids,
/// TODO consider: but may receive removals for pids that were
/// already removed?
Free,
/// the segment is being written to or actively recovered, and
/// will have pages assigned to it
Active,
/// the segment is no longer being written to or recovered, and
/// will have pages marked as relocated from it
Inactive,
/// the segment is having its resident pages relocated before
/// becoming free
Draining,
}
use self::SegmentState::*;
impl Default for SegmentState {
fn default() -> SegmentState {
Free
}
}
impl Segment {
fn _is_free(&self) -> bool {
match self.state {
Free => true,
_ => false,
}
}
fn is_inactive(&self) -> bool {
match self.state {
Inactive => true,
_ => false,
}
}
fn _is_active(&self) -> bool {
match self.state {
Active => true,
_ => false,
}
}
fn is_draining(&self) -> bool {
match self.state {
Draining => true,
_ => false,
}
}
fn free_to_active(&mut self, new_lsn: Lsn) {
trace!(
"setting Segment to Active with new lsn {:?}, was {:?}",
new_lsn,
self.lsn | self.lsn = Some(new_lsn);
self.state = Active;
}
/// Transitions a segment to being in the Inactive state.
/// Called in:
///
/// PageCache::advance_snapshot for marking when a
/// segment has been completely read
///
/// SegmentAccountant::recover for when
pub fn active_to_inactive(&mut self, lsn: Lsn, from_recovery: bool) {
trace!("setting Segment with lsn {:?} to Inactive", self.lsn());
assert_eq!(self.state, Active);
if from_recovery {
assert!(lsn >= self.lsn());
} else {
assert_eq!(self.lsn.unwrap(), lsn);
}
self.state = Inactive;
// now we can push any deferred removals to the removed set
let deferred = mem::replace(&mut self.deferred_remove, HashSet::new());
for pid in deferred {
self.remove_pid(pid, lsn);
}
}
pub fn inactive_to_draining(&mut self, lsn: Lsn) {
trace!("setting Segment with lsn {:?} to Draining", self.lsn());
assert_eq!(self.state, Inactive);
assert!(lsn >= self.lsn());
self.state = Draining;
}
pub fn draining_to_free(&mut self, lsn: Lsn) {
trace!("setting Segment with lsn {:?} to Free", self.lsn());
assert!(self.is_draining());
assert!(lsn >= self.lsn());
self.present.clear();
self.removed.clear();
self.state = Free;
}
pub fn recovery_ensure_initialized(&mut self, lsn: Lsn) {
if let Some(current_lsn) = self.lsn {
if current_lsn != lsn {
assert!(lsn > current_lsn);
trace!("(snapshot) resetting segment to have lsn {}", lsn);
self.state = Free;
self.free_to_active(lsn);
}
} else {
trace!("(snapshot) resetting segment to have lsn {}", lsn);
self.free_to_active(lsn);
}
}
fn lsn(&self) -> Lsn {
self.lsn.unwrap()
}
/// Add a pid to the Segment. The caller must provide
/// the Segment's LSN.
pub fn insert_pid(&mut self, pid: PageID, lsn: Lsn) {
assert_eq!(lsn, self.lsn.unwrap());
// if this breaks, we didn't implement the transition
// logic right in write_to_log, and maybe a thread is
// using the SA to add pids AFTER their calls to
// res.complete() worked.
assert_eq!(self.state, Active);
assert!(!self.removed.contains(&pid));
self.present.insert(pid);
}
/// Mark that a pid in this Segment has been relocated.
/// The caller must provide the LSN of the removal.
pub fn remove_pid(&mut self, pid: PageID, lsn: Lsn) {
// TODO this could be racy?
assert!(lsn >= self.lsn.unwrap());
match self.state {
Active => {
// we have received a removal before
// transferring this segment to Inactive, so
// we defer this pid's removal until the transfer.
self.deferred_remove.insert(pid);
}
Inactive | Draining => {
self.present.remove(&pid);
self.removed.insert(pid);
}
Free => panic!("remove_pid called on a Free Segment"),
}
}
fn live_pct(&self) -> f64 {
let total = self.present.len() + self.removed.len();
self.present.len() as f64 / total as f64
}
fn can_free(&self) -> bool {
self.state == Draining && self.is_empty()
}
pub fn is_empty(&self) -> bool {
self.present.is_empty()
}
}
impl SegmentAccountant {
pub fn new(config: Config) -> SegmentAccountant {
let mut ret = SegmentAccountant::default();
ret.config = config;
ret.scan_segment_lsns();
ret
}
/// Called from the `PageCache` recovery logic, this initializes the
/// `SegmentAccountant` based on recovered segment information.
pub fn initialize_from_segments(&mut self, mut segments: Vec<Segment>) {
let safety_buffer = self.config.get_io_bufs();
let logical_tail: Vec<LogID> = self.ordering
.iter()
.rev()
.take(safety_buffer)
.map(|(_lsn, lid)| *lid)
.collect();
for (idx, ref mut segment) in segments.iter_mut().enumerate() {
if segment.lsn.is_none() {
continue;
}
let segment_start = idx as LogID *
self.config.get_io_buf_size() as LogID;
let lsn = segment.lsn();
// populate free and to_clean if the segment has seen
if segment.is_empty() {
// can be reused immediately
if segment.state == Active {
segment.active_to_inactive(lsn, true);
}
if segment.state == Inactive {
segment.inactive_to_draining(lsn);
}
self.to_clean.remove(&segment_start);
trace!("pid {} freed @initialize_from_segments", segment_start);
if logical_tail.contains(&segment_start) {
// we depend on the invariant that the last segments
// always link together, so that we can detect torn
// segments during recovery.
self.ensure_safe_free_distance();
}
segment.draining_to_free(lsn);
if self.tip != segment_start &&
!self.free.lock().unwrap().contains(&segment_start)
{
// don't give out this segment twice
trace!(
"freeing segment {} from initialize_from_segments, tip: {}",
segment_start,
self.tip
);
self.free_segment(segment_start, true);
}
} else if segment.live_pct() <=
self.config.get_segment_cleanup_threshold()
{
// can be cleaned
trace!(
"setting segment {} to Draining from initialize_from_segments",
segment_start
);
if segment.state == Active {
segment.active_to_inactive(lsn, true);
}
segment.inactive_to_draining(lsn);
self.to_clean.insert(segment_start);
self.free.lock().unwrap().retain(|&s| s != segment_start);
} else {
self.free.lock().unwrap().retain(|&s| s != segment_start);
}
}
self.set_last_given();
if !segments.is_empty() {
trace!("initialized self.segments to {:?}", segments);
for (i, segment) in segments.into_iter().enumerate() {
// we should not forget about segments that we've added
// during the initial segment scan, but freed for being
// empty, as that's where we set an LSN for them.
self.segments[i] = segment;
}
} else {
// this is basically just for when we recover with a single
// empty-yet-initialized segment
debug!(
"pagecache recovered no segments so not initializing from any"
);
}
}
fn set_last_given(&mut self) {
let new_max = self.ordering
.iter()
.rev()
.nth(0)
.map(|(lsn, lid)| (*lsn, *lid))
.clone();
if let Some((_lsn, lid)) = new_max {
trace!("setting last_given to {}", lid);
self.last_given = lid;
}
}
// Mark a specific segment as being present at a particular
// file offset.
fn recover(&mut self, lsn: Lsn, lid: LogID) {
trace!("recovered segment lsn {} at lid {}", lsn, lid);
let io_buf_size = self.config.get_io_buf_size() as LogID;
let idx = self.lid_to_idx(lid);
assert!(!(lsn == 0 && lid != 0), "lsn 0 provided with non-zero lid");
if !self.segments[idx].is_empty() {
self.segments[idx].free_to_active(lsn);
let segment_lsn = lsn / io_buf_size * io_buf_size;
self.segments[idx].active_to_inactive(segment_lsn, true);
} else {
// this is necessary for properly removing the ordering
// info later on, if this segment is found to be empty
// during recovery.
self.segments[idx].lsn = Some(lsn);
}
assert!(!self.ordering.contains_key(&lsn));
self.ordering.insert(lsn, lid);
}
// Scan the log file if we don't know of any Lsn offsets yet, and recover
// the order of segments, and the highest Lsn.
fn scan_segment_lsns(&mut self) {
assert!(self.segments.is_empty());
let segment_len = self.config.get_io_buf_size() as LogID;
let mut cursor = 0;
let cached_f = self.config.cached_file();
let mut f = cached_f.borrow_mut();
while let Ok(segment) = f.read_segment_header(cursor) {
// in the future this can be optimized to just read
// the initial header at that position... but we need to
// make sure the segment is not torn
trace!("SA scanned header during startup {:?}", segment);
if segment.ok && (segment.lsn != 0 || cursor == 0) {
// if lsn is 0, this is free
self.recover(segment.lsn, cursor);
} else {
// this segment was skipped or is free
trace!(
"freeing segment {} from scan_segment_lsns",
cursor,
);
self.free_segment(cursor, true);
}
cursor += segment_len;
}
// Check that the last <# io buffers> segments properly
// link their previous segment pointers.
self.clean_tail_tears(&mut f);
// Drop the file so that the `Iter` below is able to borrow
// the thread's file handle.
drop(f);
let mut empty_tip = true;
let max = self.ordering
.iter()
.rev()
.nth(0)
.map(|(lsn, lid)| (*lsn, *lid))
.clone();
if let Some((base_lsn, lid)) = max {
let segment_base = lid / segment_len * segment_len;
assert_eq!(lid, segment_base);
let mut tip = lid + SEG_HEADER_LEN as LogID;
let cur_lsn = base_lsn + SEG_HEADER_LEN as Lsn;
let segment_ceiling = base_lsn + segment_len -
SEG_TRAILER_LEN as LogID -
MSG_HEADER_LEN as LogID;
trace!(
"segment accountant recovering segment at lsn: {} \
read_offset: {}, ceiling: {}, cur_lsn: {}",
base_lsn,
lid,
segment_ceiling,
cur_lsn
);
let iter = Iter {
config: &self.config,
max_lsn: segment_ceiling,
cur_lsn: cur_lsn,
segment_base: None,
segment_iter: Box::new(vec![(base_lsn, lid)].into_iter()),
segment_len: segment_len as usize,
use_compression: self.config.get_use_compression(),
trailer: None,
};
for (_lsn, lid, _buf) in iter {
empty_tip = false;
tip = lid;
assert!(tip <= segment_ceiling);
}
if !empty_tip {
// if we found any later
let mut f = cached_f.borrow_mut();
let (_, _, len) = f.read_message(
tip,
segment_len as usize,
self.config.get_use_compression(),
).unwrap()
.flush()
.unwrap();
tip += MSG_HEADER_LEN as LogID + len as LogID;
self.recovered_lid = tip;
}
let segment_overhang = self.recovered_lid %
self.config.get_io_buf_size() as LogID;
self.recovered_lsn = base_lsn + segment_overhang;
} else {
assert!(
self.ordering.is_empty(),
"should have found recovered lsn {} in ordering {:?}",
self.recovered_lsn,
self.ordering
);
}
// determine the end of our valid entries
for &lid in self.ordering.values() {
if lid >= self.tip {
let new_tip = lid + self.config.get_io_buf_size() as LogID;
self.tip = new_tip;
}
}
if empty_tip && max.is_some() {
let (_lsn, lid) = max.unwrap();
debug!("freed empty tip segment {} while recovering segments", lid);
self.free_segment(lid, true);
}
// make sure we don't double-allocate a segment
while self.free.lock().unwrap().contains(&self.tip) {
self.tip += self.config.get_io_buf_size() as LogID;
}
debug!(
"segment accountant recovered max lsn:{}, lid: {}",
self.recovered_lsn,
self.recovered_lid
);
}
fn free_segment(&mut self, lid: LogID, in_recovery: bool) {
debug!("freeing segment {}", lid);
let idx = self.lid_to_idx(lid);
assert_eq!(self.segments[idx].state, Free);
assert!(
!self.free.lock().unwrap().contains(&lid),
"double-free of a segment occurred"
);
if in_recovery {
self.free.lock().unwrap().push_front(lid);
// We only want to immediately remove the segment
// mapping if we're in recovery because otherwise
// we may be acting on updates relating to things
// in IO buffers, before they have been flushed.
// The latter will be removed from the mapping
// before being reused, in the next() method.
if let Some(old_lsn) = self.segments[idx].lsn {
trace!(
"removing segment {} with lsn {} from ordering",
lid,
old_lsn
);
self.ordering.remove(&old_lsn);
}
} else {
self.ensure_safe_free_distance();
pin(|scope| {
let pd = Owned::new(SegmentDropper(lid, self.free.clone()));
let ptr = pd.into_ptr(scope);
unsafe {
scope.defer_drop(ptr);
scope.flush();
}
});
}
}
// This ensures that the last <# io buffers> segments on
// disk connect via their previous segment pointers in
// the header. This is important because we expect that
// the last <# io buffers> segments will join up, and we
// never reuse buffers within this safety range.
fn clean_tail_tears(&mut self, f: &mut File) {
let safety_buffer = self.config.get_io_bufs();
let logical_tail: Vec<(Lsn, LogID)> = self.ordering
.iter()
.rev()
.take(safety_buffer)
.map(|(lsn, lid)| (*lsn, *lid))
.collect();
let mut tear_at = None;
for (i, &(_lsn, lid)) in logical_tail.iter().enumerate() {
if i + 1 == logical_tail.len() {
// we've reached the end, nothing to check after
break;
}
// check link
let segment_header = f.read_segment_header(lid).unwrap();
if !segment_header.ok {
error!(
"read corrupted segment header during recovery of segment {}",
lid
);
tear_at = Some(i);
continue;
}
let expected_prev = segment_header.prev;
let actual_prev = logical_tail[i + 1].1;
if expected_prev != actual_prev {
// detected a tear, everything after
error!(
"detected corruption during recovery for segment at {}! \
expected prev lid: {} actual: {} in last chain {:?}",
lid,
expected_prev,
actual_prev,
logical_tail
);
tear_at = Some(i);
}
}
if let Some(i) = tear_at {
// we need to chop off the elements after the tear
for &(_lsn_to_chop, lid_to_chop) in &logical_tail[0..i] {
error!("clearing corrupted segment at lid {}", lid_to_chop);
self.free_segment(lid_to_chop, true);
// TODO write zeroes to these segments to reduce
// false recovery.
}
}
}
pub fn recovered_lid(&self) -> LogID {
self.recovered_lid
}
pub fn recovered_lsn(&self) -> Lsn {
self.recovered_lsn
}
/// Causes all new allocations to occur at the end of the file, which
/// is necessary to preserve consistency while concurrently iterating through
/// the log during snapshot creation.
pub fn pause_rewriting(&mut self) {
self.pause_rewriting = true;
}
/// Re-enables segment rewriting after iteration is complete.
pub fn resume_rewriting(&mut self) {
self.pause_rewriting = false;
}
/// Called by the `PageCache` when a page has been rewritten completely.
/// We mark all of the old segments that contained the previous state
/// from the page, and if the old segments are empty or clear enough to
/// begin accelerated cleaning we mark them as so.
pub fn mark_replace(
&mut self,
pid: PageID,
lsn: Lsn,
old_lids: Vec<LogID>,
new_lid: LogID,
) {
trace!("mark_replace pid {} at lid {} with lsn {}", pid, new_lid, lsn);
self.pending_clean.remove(&pid);
let new_idx = new_lid as usize / self.config.get_io_buf_size();
// make sure we're not actively trying to replace the destination
let new_segment_start = new_idx as LogID *
self.config.get_io_buf_size() as LogID;
self.to_clean.remove(&new_segment_start);
for old_lid in old_lids {
let old_idx = self.lid_to_idx(old_lid);
let segment_start = (old_idx * self.config.get_io_buf_size()) as
LogID;
if new_idx == old_idx {
// we probably haven't flushed this segment yet, so don't
// mark the pid as being removed from it
continue;
}
if self.segments[old_idx].lsn() > lsn {
// has been replaced after this call already,
// quite a big race happened
// TODO think about how this happens with our segment delay
continue;
}
if self.segments[old_idx].state == Free {
// this segment is already reused
// TODO should this be a panic?
continue;
}
self.segments[old_idx].remove_pid(pid, lsn);
if self.segments[old_idx].can_free() {
// can be reused immediately
self.segments[old_idx].draining_to_free(lsn);
self.to_clean.remove(&segment_start);
trace!("freed segment {} in replace", segment_start);
self.free_segment(segment_start, false);
} else if self.segments[old_idx].is_inactive() &&
self.segments[old_idx].live_pct() <=
self.config.get_segment_cleanup_threshold()
{
// can be cleaned
trace!(
"SA inserting {} into to_clean from mark_replace",
segment_start
);
self.segments[old_idx].inactive_to_draining(lsn);
self.to_clean.insert(segment_start);
}
}
self.mark_link(pid, lsn, new_lid);
}
/// Called by the `PageCache` to find useful pages
/// it should try to rewrite.
pub fn clean(&mut self, ignore: Option<PageID>) -> Option<PageID> {
// try to maintain about twice the number of necessary
// on-deck segments, to reduce the amount of log growth.
if self.free.lock().unwrap().len() >=
self.config.get_min_free_segments() * 2
{
return None;
}
let to_clean = self.to_clean.clone();
for lid in to_clean {
let idx = self.lid_to_idx(lid);
let segment = &self.segments[idx];
assert_eq!(segment.state, Draining);
for pid in &segment.present {
if self.pending_clean.contains(pid) || ignore == Some(*pid) {
continue;
}
self.pending_clean.insert(*pid);
trace!(
"telling caller to clean {} from segment at {}",
pid,
lid,
);
return Some(*pid);
}
}
None
}
/// Called from `PageCache` when some state has been added
/// to a logical page at a particular offset. We ensure the
/// page is present in the segment's page set.
pub fn mark_link(&mut self, pid: PageID, lsn: Lsn, lid: LogID) {
trace!("mark_link pid {} at lid {}", pid, lid);
self.pending_clean.remove(&pid);
let idx = self.lid_to_idx(lid);
// make sure we're not actively trying to replace the destination
let new_segment_start = idx as LogID *
self.config.get_io_buf_size() as LogID;
self.to_clean.remove(&new_segment_start);
let segment = &mut self.segments[idx];
if segment.lsn() > lsn {
// a race happened, and our Lsn does not apply anymore
// TODO think about how this happens with segment delay
return;
}
let segment_lsn = lsn / self.config.get_io_buf_size() as Lsn *
self.config.get_io_buf_size() as Lsn;
segment.insert_pid(pid, segment_lsn);
}
/// Called after the trailer of a segment has been written to disk,
/// indicating that no more pids will be added to a segment. Moves
/// the segment into the Inactive state.
///
/// # Panics
/// The provided lsn and lid must exactly match the existing segment.
pub fn deactivate_segment(&mut self, lsn: Lsn, lid: LogID) {
let idx = self.lid_to_idx(lid);
self.segments[idx].active_to_inactive(lsn, false);
}
fn bump_tip(&mut self) -> LogID {
let lid = self.tip;
self.tip += self.config.get_io_buf_size() as LogID;
trace!("advancing file tip from {} to {}", lid, self.tip);
lid
}
fn ensure_safe_free_distance(&mut self) {
// NB we must maintain a queue of free segments that
// is at least as long as the number of io buffers.
// This is so that we will never give out a segment
// that has been placed on the free queue after its
// contained pages have all had updates added to an
// IO buffer during a PageCache replace, but whose
// replacing updates have not actually landed on disk
// yet. If updates always have to wait in a queue
// at least as long as the number of IO buffers, it
// guarantees that the old updates are actually safe
// somewhere else first. Note that we push_front here
// so that the log tip is used first.
while self.free.lock().unwrap().len() < self.config.get_io_bufs() {
let new_lid = self.bump_tip();
trace!(
"pushing segment {} to free from ensure_safe_free_distance",
new_lid
);
self.free.lock().unwrap().push_front(new_lid);
}
}
/// Returns the next offset to write a new segment in,
/// as well as the offset of the previous segment that
/// was allocated, so that we can detect missing
/// out-of-order segments during recovery.
pub fn next(&mut self, lsn: Lsn) -> (LogID, LogID) {
assert_eq!(
lsn % self.config.get_io_buf_size() as Lsn,
0,
"unaligned Lsn provided to next!"
);
// pop free or add to end
let lid = if self.pause_rewriting {
self.bump_tip()
} else {
let res = self.free.lock().unwrap().pop_front();
if res.is_none() {
self.bump_tip()
} else {
res.unwrap()
}
};
let last_given = self.last_given;
// pin lsn to this segment
let idx = self.lid_to_idx(lid);
assert_eq!(self.segments[idx].state, Free);
// remove the ordering from our list
if let Some(old_lsn) = self.segments[idx].lsn {
self.ordering.remove(&old_lsn);
}
self.segments[idx].free_to_active(lsn);
self.ordering.insert(lsn, lid);
debug!(
"segment accountant returning offset: {} paused: {} last: {} on deck: {:?}",
lid,
self.pause_rewriting,
last_given,
self.free
);
self.last_given = lid;
if last_given != 0 {
assert_ne!(last_given, lid);
}
(lid, last_given)
}
/// Returns an iterator over a snapshot of current segment
/// log sequence numbers and their corresponding file offsets.
pub fn segment_snapshot_iter_from(
&mut self,
lsn: Lsn,
) -> Box<Iterator<Item = (Lsn, LogID)>> {
// assert!( self.pause_rewriting, "must pause rewriting before iterating over segments");
let segment_len = self.config.get_io_buf_size() as Lsn;
let normalized_lsn = lsn / segment_len * segment_len;
Box::new(self.ordering.clone().into_iter().filter(move |&(l, _)| {
l >= normalized_lsn
}))
}
fn lid_to_idx(&mut self, lid: LogID) -> usize {
let idx = lid as usize / self.config.get_io_buf_size();
if self.segments.len() < idx + 1 {
trace!(
"expanding self.segments to cover segment at {}",
(idx + 1) * self.config.get_io_buf_size()
);
self.segments.resize(idx + 1, Segment::default());
}
idx
}
} | );
assert_eq!(self.state, Free);
self.present.clear();
self.removed.clear(); | random_line_split |
segment_accountant.rs | //! The `SegmentAccountant` is an allocator for equally-
//! sized chunks of the underlying storage file (segments).
//!
//! It must maintain these critical safety properties:
//!
//! A. We must not overwrite existing segments when they
//! contain the most-recent stable state for a page.
//! B. We must not overwrite existing segments when active
//! threads may have references to LogID's that point
//! into those segments.
//!
//! To complicate matters, the `PageCache` only knows
//! when it has put a page into an IO buffer, but it
//! doesn't keep track of when that IO buffer is
//! stabilized (until write coalescing is implemented).
//!
//! To address these safety concerns, we rely on
//! these techniques:
//!
//! 1. We delay the reuse of any existing segment
//! by ensuring there are at least <# io buffers>
//! freed segments in front of the newly freed
//! segment in the free list. This ensures that
//! any pending IO buffer writes will hit
//! stable storage before we overwrite the
//! segment that may have contained the previous
//! latest stable copy of a page's state.
//! 2. we use a `SegmentDropper` that guarantees
//! any segment that has been logically freed
//! or emptied by the `PageCache` will have its
//! addition to the free segment list be delayed
//! until any active threads that were acting on
//! the shared state have checked-out.
//!
//! Another concern that arises due to the fact that
//! IO buffers may be written out-of-order is the
//! correct recovery of segments. If there is data
//! loss in recently written segments, we must be
//! careful to preserve linearizability in the log.
//! To do this, we must detect "torn segments" that
//! were not able to be fully written before a crash
//! happened. We detect torn individual segments by
//! writing a `SegmentTrailer` to the end of the
//! segment AFTER we have sync'd it. If the trailer
//! is not present during recovery, the recovery
//! process will not continue to a segment that
//! may contain logically later data.
//!
//! But what if we wrote a later segment, and its
//! trailer, before we were able to write its
//! immediate predecessor segment, and then a
//! crash happened? We must preserve linearizability,
//! so we can not accidentally recover the later
//! segment when its predecessor was lost in the crash.
//!
//! 3. This case is solved again by having used
//! <# io buffers> segments before reuse. We guarantee
//! that the last <# io buffers> segments will be
//! present, which means we can write a "previous
//! log sequence number pointer" to the header of
//! each segment. During recovery, if these previous
//! segment Lsn pointers don't match up, we know we
//! have encountered a lost segment, and we will not
//! continue the recovery past the detected gap.
use std::collections::{BTreeMap, BTreeSet, HashSet, VecDeque};
use std::fs::File;
use std::sync::{Arc, Mutex};
use std::mem;
use coco::epoch::{Owned, pin};
use super::*;
/// The segment accountant keeps track of the logical blocks
/// of storage. It scans through all segments quickly during
/// recovery and attempts to locate torn segments.
#[derive(Default, Debug)]
pub struct SegmentAccountant {
// static or one-time set
config: Config,
recovered_lsn: Lsn,
recovered_lid: LogID,
// TODO these should be sharded to improve performance
segments: Vec<Segment>,
pending_clean: HashSet<PageID>,
// TODO put behind a single mutex
// NB MUST group pause_rewriting with ordering
// and free!
free: Arc<Mutex<VecDeque<LogID>>>,
tip: LogID,
to_clean: BTreeSet<LogID>,
pause_rewriting: bool,
last_given: LogID,
ordering: BTreeMap<Lsn, LogID>,
}
// We use a `SegmentDropper` to ensure that we never
// add a segment's LogID to the free deque while any
// active thread could be acting on it. This is necessary
// despite the "safe buffer" in the free queue because
// the safe buffer only prevents the sole remaining
// copy of a page from being overwritten. This prevents
// dangling references to segments that were rewritten after
// the `LogID` was read.
struct SegmentDropper(LogID, Arc<Mutex<VecDeque<LogID>>>);
impl Drop for SegmentDropper {
fn drop(&mut self) {
let mut deque = self.1.lock().unwrap();
deque.push_back(self.0);
}
}
/// A `Segment` holds the bookkeeping information for
/// a contiguous block of the disk. It may contain many
/// fragments from different pages. Over time, we track
/// when segments become reusable and allow them to be
/// overwritten for new data.
#[derive(Default, Debug, PartialEq, Clone, Serialize, Deserialize)]
pub struct Segment {
present: BTreeSet<PageID>,
removed: HashSet<PageID>,
deferred_remove: HashSet<PageID>,
lsn: Option<Lsn>,
state: SegmentState,
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum SegmentState {
/// the segment is marked for reuse, should never receive
/// new pids,
/// TODO consider: but may receive removals for pids that were
/// already removed?
Free,
/// the segment is being written to or actively recovered, and
/// will have pages assigned to it
Active,
/// the segment is no longer being written to or recovered, and
/// will have pages marked as relocated from it
Inactive,
/// the segment is having its resident pages relocated before
/// becoming free
Draining,
}
use self::SegmentState::*;
impl Default for SegmentState {
fn default() -> SegmentState {
Free
}
}
impl Segment {
fn _is_free(&self) -> bool {
match self.state {
Free => true,
_ => false,
}
}
fn is_inactive(&self) -> bool {
match self.state {
Inactive => true,
_ => false,
}
}
fn _is_active(&self) -> bool {
match self.state {
Active => true,
_ => false,
}
}
fn is_draining(&self) -> bool {
match self.state {
Draining => true,
_ => false,
}
}
fn free_to_active(&mut self, new_lsn: Lsn) {
trace!(
"setting Segment to Active with new lsn {:?}, was {:?}",
new_lsn,
self.lsn
);
assert_eq!(self.state, Free);
self.present.clear();
self.removed.clear();
self.lsn = Some(new_lsn);
self.state = Active;
}
/// Transitions a segment to being in the Inactive state.
/// Called in:
///
/// PageCache::advance_snapshot for marking when a
/// segment has been completely read
///
/// SegmentAccountant::recover for when
pub fn active_to_inactive(&mut self, lsn: Lsn, from_recovery: bool) {
trace!("setting Segment with lsn {:?} to Inactive", self.lsn());
assert_eq!(self.state, Active);
if from_recovery {
assert!(lsn >= self.lsn());
} else {
assert_eq!(self.lsn.unwrap(), lsn);
}
self.state = Inactive;
// now we can push any deferred removals to the removed set
let deferred = mem::replace(&mut self.deferred_remove, HashSet::new());
for pid in deferred {
self.remove_pid(pid, lsn);
}
}
pub fn inactive_to_draining(&mut self, lsn: Lsn) {
trace!("setting Segment with lsn {:?} to Draining", self.lsn());
assert_eq!(self.state, Inactive);
assert!(lsn >= self.lsn());
self.state = Draining;
}
pub fn draining_to_free(&mut self, lsn: Lsn) {
trace!("setting Segment with lsn {:?} to Free", self.lsn());
assert!(self.is_draining());
assert!(lsn >= self.lsn());
self.present.clear();
self.removed.clear();
self.state = Free;
}
pub fn recovery_ensure_initialized(&mut self, lsn: Lsn) {
if let Some(current_lsn) = self.lsn {
if current_lsn != lsn {
assert!(lsn > current_lsn);
trace!("(snapshot) resetting segment to have lsn {}", lsn);
self.state = Free;
self.free_to_active(lsn);
}
} else {
trace!("(snapshot) resetting segment to have lsn {}", lsn);
self.free_to_active(lsn);
}
}
fn lsn(&self) -> Lsn {
self.lsn.unwrap()
}
/// Add a pid to the Segment. The caller must provide
/// the Segment's LSN.
pub fn insert_pid(&mut self, pid: PageID, lsn: Lsn) {
assert_eq!(lsn, self.lsn.unwrap());
// if this breaks, we didn't implement the transition
// logic right in write_to_log, and maybe a thread is
// using the SA to add pids AFTER their calls to
// res.complete() worked.
assert_eq!(self.state, Active);
assert!(!self.removed.contains(&pid));
self.present.insert(pid);
}
/// Mark that a pid in this Segment has been relocated.
/// The caller must provide the LSN of the removal.
pub fn remove_pid(&mut self, pid: PageID, lsn: Lsn) {
// TODO this could be racy?
assert!(lsn >= self.lsn.unwrap());
match self.state {
Active => {
// we have received a removal before
// transferring this segment to Inactive, so
// we defer this pid's removal until the transfer.
self.deferred_remove.insert(pid);
}
Inactive | Draining => {
self.present.remove(&pid);
self.removed.insert(pid);
}
Free => panic!("remove_pid called on a Free Segment"),
}
}
fn live_pct(&self) -> f64 {
let total = self.present.len() + self.removed.len();
self.present.len() as f64 / total as f64
}
fn can_free(&self) -> bool {
self.state == Draining && self.is_empty()
}
pub fn is_empty(&self) -> bool {
self.present.is_empty()
}
}
impl SegmentAccountant {
pub fn new(config: Config) -> SegmentAccountant {
let mut ret = SegmentAccountant::default();
ret.config = config;
ret.scan_segment_lsns();
ret
}
/// Called from the `PageCache` recovery logic, this initializes the
/// `SegmentAccountant` based on recovered segment information.
pub fn initialize_from_segments(&mut self, mut segments: Vec<Segment>) {
let safety_buffer = self.config.get_io_bufs();
let logical_tail: Vec<LogID> = self.ordering
.iter()
.rev()
.take(safety_buffer)
.map(|(_lsn, lid)| *lid)
.collect();
for (idx, ref mut segment) in segments.iter_mut().enumerate() {
if segment.lsn.is_none() {
continue;
}
let segment_start = idx as LogID *
self.config.get_io_buf_size() as LogID;
let lsn = segment.lsn();
// populate free and to_clean if the segment has seen
if segment.is_empty() {
// can be reused immediately
if segment.state == Active {
segment.active_to_inactive(lsn, true);
}
if segment.state == Inactive {
segment.inactive_to_draining(lsn);
}
self.to_clean.remove(&segment_start);
trace!("pid {} freed @initialize_from_segments", segment_start);
if logical_tail.contains(&segment_start) {
// we depend on the invariant that the last segments
// always link together, so that we can detect torn
// segments during recovery.
self.ensure_safe_free_distance();
}
segment.draining_to_free(lsn);
if self.tip != segment_start &&
!self.free.lock().unwrap().contains(&segment_start)
{
// don't give out this segment twice
trace!(
"freeing segment {} from initialize_from_segments, tip: {}",
segment_start,
self.tip
);
self.free_segment(segment_start, true);
}
} else if segment.live_pct() <=
self.config.get_segment_cleanup_threshold()
{
// can be cleaned
trace!(
"setting segment {} to Draining from initialize_from_segments",
segment_start
);
if segment.state == Active {
segment.active_to_inactive(lsn, true);
}
segment.inactive_to_draining(lsn);
self.to_clean.insert(segment_start);
self.free.lock().unwrap().retain(|&s| s != segment_start);
} else {
self.free.lock().unwrap().retain(|&s| s != segment_start);
}
}
self.set_last_given();
if !segments.is_empty() {
trace!("initialized self.segments to {:?}", segments);
for (i, segment) in segments.into_iter().enumerate() {
// we should not forget about segments that we've added
// during the initial segment scan, but freed for being
// empty, as that's where we set an LSN for them.
self.segments[i] = segment;
}
} else {
// this is basically just for when we recover with a single
// empty-yet-initialized segment
debug!(
"pagecache recovered no segments so not initializing from any"
);
}
}
fn set_last_given(&mut self) {
let new_max = self.ordering
.iter()
.rev()
.nth(0)
.map(|(lsn, lid)| (*lsn, *lid))
.clone();
if let Some((_lsn, lid)) = new_max {
trace!("setting last_given to {}", lid);
self.last_given = lid;
}
}
// Mark a specific segment as being present at a particular
// file offset.
fn recover(&mut self, lsn: Lsn, lid: LogID) {
trace!("recovered segment lsn {} at lid {}", lsn, lid);
let io_buf_size = self.config.get_io_buf_size() as LogID;
let idx = self.lid_to_idx(lid);
assert!(!(lsn == 0 && lid != 0), "lsn 0 provided with non-zero lid");
if !self.segments[idx].is_empty() {
self.segments[idx].free_to_active(lsn);
let segment_lsn = lsn / io_buf_size * io_buf_size;
self.segments[idx].active_to_inactive(segment_lsn, true);
} else {
// this is necessary for properly removing the ordering
// info later on, if this segment is found to be empty
// during recovery.
self.segments[idx].lsn = Some(lsn);
}
assert!(!self.ordering.contains_key(&lsn));
self.ordering.insert(lsn, lid);
}
// Scan the log file if we don't know of any Lsn offsets yet, and recover
// the order of segments, and the highest Lsn.
fn scan_segment_lsns(&mut self) {
assert!(self.segments.is_empty());
let segment_len = self.config.get_io_buf_size() as LogID;
let mut cursor = 0;
let cached_f = self.config.cached_file();
let mut f = cached_f.borrow_mut();
while let Ok(segment) = f.read_segment_header(cursor) {
// in the future this can be optimized to just read
// the initial header at that position... but we need to
// make sure the segment is not torn
trace!("SA scanned header during startup {:?}", segment);
if segment.ok && (segment.lsn != 0 || cursor == 0) {
// if lsn is 0, this is free
self.recover(segment.lsn, cursor);
} else {
// this segment was skipped or is free
trace!(
"freeing segment {} from scan_segment_lsns",
cursor,
);
self.free_segment(cursor, true);
}
cursor += segment_len;
}
// Check that the last <# io buffers> segments properly
// link their previous segment pointers.
self.clean_tail_tears(&mut f);
// Drop the file so that the `Iter` below is able to borrow
// the thread's file handle.
drop(f);
let mut empty_tip = true;
let max = self.ordering
.iter()
.rev()
.nth(0)
.map(|(lsn, lid)| (*lsn, *lid))
.clone();
if let Some((base_lsn, lid)) = max {
let segment_base = lid / segment_len * segment_len;
assert_eq!(lid, segment_base);
let mut tip = lid + SEG_HEADER_LEN as LogID;
let cur_lsn = base_lsn + SEG_HEADER_LEN as Lsn;
let segment_ceiling = base_lsn + segment_len -
SEG_TRAILER_LEN as LogID -
MSG_HEADER_LEN as LogID;
trace!(
"segment accountant recovering segment at lsn: {} \
read_offset: {}, ceiling: {}, cur_lsn: {}",
base_lsn,
lid,
segment_ceiling,
cur_lsn
);
let iter = Iter {
config: &self.config,
max_lsn: segment_ceiling,
cur_lsn: cur_lsn,
segment_base: None,
segment_iter: Box::new(vec![(base_lsn, lid)].into_iter()),
segment_len: segment_len as usize,
use_compression: self.config.get_use_compression(),
trailer: None,
};
for (_lsn, lid, _buf) in iter {
empty_tip = false;
tip = lid;
assert!(tip <= segment_ceiling);
}
if !empty_tip {
// if we found any later
let mut f = cached_f.borrow_mut();
let (_, _, len) = f.read_message(
tip,
segment_len as usize,
self.config.get_use_compression(),
).unwrap()
.flush()
.unwrap();
tip += MSG_HEADER_LEN as LogID + len as LogID;
self.recovered_lid = tip;
}
let segment_overhang = self.recovered_lid %
self.config.get_io_buf_size() as LogID;
self.recovered_lsn = base_lsn + segment_overhang;
} else {
assert!(
self.ordering.is_empty(),
"should have found recovered lsn {} in ordering {:?}",
self.recovered_lsn,
self.ordering
);
}
// determine the end of our valid entries
for &lid in self.ordering.values() {
if lid >= self.tip {
let new_tip = lid + self.config.get_io_buf_size() as LogID;
self.tip = new_tip;
}
}
if empty_tip && max.is_some() {
let (_lsn, lid) = max.unwrap();
debug!("freed empty tip segment {} while recovering segments", lid);
self.free_segment(lid, true);
}
// make sure we don't double-allocate a segment
while self.free.lock().unwrap().contains(&self.tip) {
self.tip += self.config.get_io_buf_size() as LogID;
}
debug!(
"segment accountant recovered max lsn:{}, lid: {}",
self.recovered_lsn,
self.recovered_lid
);
}
fn free_segment(&mut self, lid: LogID, in_recovery: bool) {
debug!("freeing segment {}", lid);
let idx = self.lid_to_idx(lid);
assert_eq!(self.segments[idx].state, Free);
assert!(
!self.free.lock().unwrap().contains(&lid),
"double-free of a segment occurred"
);
if in_recovery {
self.free.lock().unwrap().push_front(lid);
// We only want to immediately remove the segment
// mapping if we're in recovery because otherwise
// we may be acting on updates relating to things
// in IO buffers, before they have been flushed.
// The latter will be removed from the mapping
// before being reused, in the next() method.
if let Some(old_lsn) = self.segments[idx].lsn {
trace!(
"removing segment {} with lsn {} from ordering",
lid,
old_lsn
);
self.ordering.remove(&old_lsn);
}
} else {
self.ensure_safe_free_distance();
pin(|scope| {
let pd = Owned::new(SegmentDropper(lid, self.free.clone()));
let ptr = pd.into_ptr(scope);
unsafe {
scope.defer_drop(ptr);
scope.flush();
}
});
}
}
// This ensures that the last <# io buffers> segments on
// disk connect via their previous segment pointers in
// the header. This is important because we expect that
// the last <# io buffers> segments will join up, and we
// never reuse buffers within this safety range.
fn clean_tail_tears(&mut self, f: &mut File) {
let safety_buffer = self.config.get_io_bufs();
let logical_tail: Vec<(Lsn, LogID)> = self.ordering
.iter()
.rev()
.take(safety_buffer)
.map(|(lsn, lid)| (*lsn, *lid))
.collect();
let mut tear_at = None;
for (i, &(_lsn, lid)) in logical_tail.iter().enumerate() {
if i + 1 == logical_tail.len() {
// we've reached the end, nothing to check after
break;
}
// check link
let segment_header = f.read_segment_header(lid).unwrap();
if !segment_header.ok {
error!(
"read corrupted segment header during recovery of segment {}",
lid
);
tear_at = Some(i);
continue;
}
let expected_prev = segment_header.prev;
let actual_prev = logical_tail[i + 1].1;
if expected_prev != actual_prev {
// detected a tear, everything after
error!(
"detected corruption during recovery for segment at {}! \
expected prev lid: {} actual: {} in last chain {:?}",
lid,
expected_prev,
actual_prev,
logical_tail
);
tear_at = Some(i);
}
}
if let Some(i) = tear_at {
// we need to chop off the elements after the tear
for &(_lsn_to_chop, lid_to_chop) in &logical_tail[0..i] {
error!("clearing corrupted segment at lid {}", lid_to_chop);
self.free_segment(lid_to_chop, true);
// TODO write zeroes to these segments to reduce
// false recovery.
}
}
}
pub fn recovered_lid(&self) -> LogID {
self.recovered_lid
}
pub fn recovered_lsn(&self) -> Lsn {
self.recovered_lsn
}
/// Causes all new allocations to occur at the end of the file, which
/// is necessary to preserve consistency while concurrently iterating through
/// the log during snapshot creation.
pub fn pause_rewriting(&mut self) {
self.pause_rewriting = true;
}
/// Re-enables segment rewriting after iteration is complete.
pub fn resume_rewriting(&mut self) {
self.pause_rewriting = false;
}
/// Called by the `PageCache` when a page has been rewritten completely.
/// We mark all of the old segments that contained the previous state
/// from the page, and if the old segments are empty or clear enough to
/// begin accelerated cleaning we mark them as so.
pub fn mark_replace(
&mut self,
pid: PageID,
lsn: Lsn,
old_lids: Vec<LogID>,
new_lid: LogID,
) {
trace!("mark_replace pid {} at lid {} with lsn {}", pid, new_lid, lsn);
self.pending_clean.remove(&pid);
let new_idx = new_lid as usize / self.config.get_io_buf_size();
// make sure we're not actively trying to replace the destination
let new_segment_start = new_idx as LogID *
self.config.get_io_buf_size() as LogID;
self.to_clean.remove(&new_segment_start);
for old_lid in old_lids {
let old_idx = self.lid_to_idx(old_lid);
let segment_start = (old_idx * self.config.get_io_buf_size()) as
LogID;
if new_idx == old_idx {
// we probably haven't flushed this segment yet, so don't
// mark the pid as being removed from it
continue;
}
if self.segments[old_idx].lsn() > lsn {
// has been replaced after this call already,
// quite a big race happened
// TODO think about how this happens with our segment delay
continue;
}
if self.segments[old_idx].state == Free {
// this segment is already reused
// TODO should this be a panic?
continue;
}
self.segments[old_idx].remove_pid(pid, lsn);
if self.segments[old_idx].can_free() {
// can be reused immediately
self.segments[old_idx].draining_to_free(lsn);
self.to_clean.remove(&segment_start);
trace!("freed segment {} in replace", segment_start);
self.free_segment(segment_start, false);
} else if self.segments[old_idx].is_inactive() &&
self.segments[old_idx].live_pct() <=
self.config.get_segment_cleanup_threshold()
{
// can be cleaned
trace!(
"SA inserting {} into to_clean from mark_replace",
segment_start
);
self.segments[old_idx].inactive_to_draining(lsn);
self.to_clean.insert(segment_start);
}
}
self.mark_link(pid, lsn, new_lid);
}
/// Called by the `PageCache` to find useful pages
/// it should try to rewrite.
pub fn clean(&mut self, ignore: Option<PageID>) -> Option<PageID> {
// try to maintain about twice the number of necessary
// on-deck segments, to reduce the amount of log growth.
if self.free.lock().unwrap().len() >=
self.config.get_min_free_segments() * 2
{
return None;
}
let to_clean = self.to_clean.clone();
for lid in to_clean {
let idx = self.lid_to_idx(lid);
let segment = &self.segments[idx];
assert_eq!(segment.state, Draining);
for pid in &segment.present {
if self.pending_clean.contains(pid) || ignore == Some(*pid) {
continue;
}
self.pending_clean.insert(*pid);
trace!(
"telling caller to clean {} from segment at {}",
pid,
lid,
);
return Some(*pid);
}
}
None
}
/// Called from `PageCache` when some state has been added
/// to a logical page at a particular offset. We ensure the
/// page is present in the segment's page set.
pub fn mark_link(&mut self, pid: PageID, lsn: Lsn, lid: LogID) {
trace!("mark_link pid {} at lid {}", pid, lid);
self.pending_clean.remove(&pid);
let idx = self.lid_to_idx(lid);
// make sure we're not actively trying to replace the destination
let new_segment_start = idx as LogID *
self.config.get_io_buf_size() as LogID;
self.to_clean.remove(&new_segment_start);
let segment = &mut self.segments[idx];
if segment.lsn() > lsn {
// a race happened, and our Lsn does not apply anymore
// TODO think about how this happens with segment delay
return;
}
let segment_lsn = lsn / self.config.get_io_buf_size() as Lsn *
self.config.get_io_buf_size() as Lsn;
segment.insert_pid(pid, segment_lsn);
}
/// Called after the trailer of a segment has been written to disk,
/// indicating that no more pids will be added to a segment. Moves
/// the segment into the Inactive state.
///
/// # Panics
/// The provided lsn and lid must exactly match the existing segment.
pub fn deactivate_segment(&mut self, lsn: Lsn, lid: LogID) {
let idx = self.lid_to_idx(lid);
self.segments[idx].active_to_inactive(lsn, false);
}
fn bump_tip(&mut self) -> LogID {
let lid = self.tip;
self.tip += self.config.get_io_buf_size() as LogID;
trace!("advancing file tip from {} to {}", lid, self.tip);
lid
}
fn ensure_safe_free_distance(&mut self) {
// NB we must maintain a queue of free segments that
// is at least as long as the number of io buffers.
// This is so that we will never give out a segment
// that has been placed on the free queue after its
// contained pages have all had updates added to an
// IO buffer during a PageCache replace, but whose
// replacing updates have not actually landed on disk
// yet. If updates always have to wait in a queue
// at least as long as the number of IO buffers, it
// guarantees that the old updates are actually safe
// somewhere else first. Note that we push_front here
// so that the log tip is used first.
while self.free.lock().unwrap().len() < self.config.get_io_bufs() {
let new_lid = self.bump_tip();
trace!(
"pushing segment {} to free from ensure_safe_free_distance",
new_lid
);
self.free.lock().unwrap().push_front(new_lid);
}
}
/// Returns the next offset to write a new segment in,
/// as well as the offset of the previous segment that
/// was allocated, so that we can detect missing
/// out-of-order segments during recovery.
pub fn next(&mut self, lsn: Lsn) -> (LogID, LogID) {
assert_eq!(
lsn % self.config.get_io_buf_size() as Lsn,
0,
"unaligned Lsn provided to next!"
);
// pop free or add to end
let lid = if self.pause_rewriting {
self.bump_tip()
} else {
let res = self.free.lock().unwrap().pop_front();
if res.is_none() {
self.bump_tip()
} else {
res.unwrap()
}
};
let last_given = self.last_given;
// pin lsn to this segment
let idx = self.lid_to_idx(lid);
assert_eq!(self.segments[idx].state, Free);
// remove the ordering from our list
if let Some(old_lsn) = self.segments[idx].lsn |
self.segments[idx].free_to_active(lsn);
self.ordering.insert(lsn, lid);
debug!(
"segment accountant returning offset: {} paused: {} last: {} on deck: {:?}",
lid,
self.pause_rewriting,
last_given,
self.free
);
self.last_given = lid;
if last_given != 0 {
assert_ne!(last_given, lid);
}
(lid, last_given)
}
/// Returns an iterator over a snapshot of current segment
/// log sequence numbers and their corresponding file offsets.
pub fn segment_snapshot_iter_from(
&mut self,
lsn: Lsn,
) -> Box<Iterator<Item = (Lsn, LogID)>> {
// assert!( self.pause_rewriting, "must pause rewriting before iterating over segments");
let segment_len = self.config.get_io_buf_size() as Lsn;
let normalized_lsn = lsn / segment_len * segment_len;
Box::new(self.ordering.clone().into_iter().filter(move |&(l, _)| {
l >= normalized_lsn
}))
}
fn lid_to_idx(&mut self, lid: LogID) -> usize {
let idx = lid as usize / self.config.get_io_buf_size();
if self.segments.len() < idx + 1 {
trace!(
"expanding self.segments to cover segment at {}",
(idx + 1) * self.config.get_io_buf_size()
);
self.segments.resize(idx + 1, Segment::default());
}
idx
}
}
| {
self.ordering.remove(&old_lsn);
} | conditional_block |
Detail.controller.js | /*global location */
sap.ui.define([
"ru/teamidea/odatapractice/WebShop/controller/BaseController",
"sap/ui/model/json/JSONModel",
"ru/teamidea/odatapractice/WebShop/model/formatter",
"sap/m/MessageBox",
"sap/m/MessageToast"
], function (BaseController, JSONModel, formatter, MessageBox, MessageToast) {
"use strict";
return BaseController.extend("ru.teamidea.odatapractice.WebShop.controller.Detail", {
formatter: formatter,
/* =========================================================== */
/* lifecycle methods */
/* =========================================================== */
onInit: function () {
// Model used to manipulate control states. The chosen values make sure,
// detail page is busy indication immediately so there is no break in
// between the busy indication for loading the view's meta data
var oViewModel = new JSONModel({
busy: false,
delay: 0
});
this.getRouter().getRoute("object").attachPatternMatched(this._onObjectMatched, this);
this.setModel(oViewModel, "detailView");
this.getOwnerComponent().getModel().metadataLoaded().then(this._onMetadataLoaded.bind(this));
this._oODataModel = this.getOwnerComponent().getModel();
this._oResourceBundle = this.getResourceBundle();
},
/* =========================================================== */
/* event handlers */
/* =========================================================== */
/* =========================================================== */
/* begin: internal methods */
/* =========================================================== */
/**
* Binds the view to the object path and expands the aggregated line items.
* @function
* @param {sap.ui.base.Event} oEvent pattern match event in route 'object'
* @private
*/
_onObjectMatched: function (oEvent) {
var oParameter = oEvent.getParameter("arguments");
for (var value in oParameter) {
oParameter[value] = decodeURIComponent(oParameter[value]);
}
this.getModel().metadataLoaded().then(function () {
var sObjectPath = this.getModel().createKey("Products", oParameter);
this._bindView("/" + sObjectPath);
}.bind(this));
},
/**
* Binds the view to the object path. Makes sure that detail view displays
* a busy indicator while data for the corresponding element binding is loaded.
* @function
* @param {string} sObjectPath path to the object to be bound to the view.
* @private
*/
_bindView: function (sObjectPath) {
// Set busy indicator during view binding
var oViewModel = this.getModel("detailView");
// If the view was not bound yet its not busy, only if the binding requests data it is set to busy again
oViewModel.setProperty("/busy", false);
this.getView().bindElement({
path: sObjectPath,
events: {
change: this._onBindingChange.bind(this),
dataRequested: function () {
oViewModel.setProperty("/busy", true);
},
dataReceived: function () {
oViewModel.setProperty("/busy", false);
}
}
});
},
onGoToShopCart: function () {},
onBtnAddToCart: function (oEvt) {
var sQuant = this.getView().byId("input2").getValue();
var sProdId;
if (Number(sQuant) > 0) | ll);
MessageToast.show( this.getView().getModel("i18n").getResourceBundle().getText("msgItemAdded"));
},
parseProductId: function (sProdId) {
return sProdId.split("(")[1].split(")")[0];
},
getProductFromCache: function (sId, sProdPath) {
var aOrders = this.getView().getModel("appView").getProperty("/tempOrder");
var aNeededOrders = aOrders.filter(function (oProd) {
return oProd.ProductId === sId;
});
if (aNeededOrders.length === 0) {
return {
"ProductId": sId,
"Name": this.getModel().getObject(sProdPath).Name,
"Quantity": 0,
"Id": Math.round(Math.random() * 1000000).toString()
};
} else {
return aNeededOrders[0];
}
},
onPressDelFromCart: function (oEvt) {
var sProdId = oEvt.getSource().getBindingContext().getPath();
var sId = this.parseProductId(sProdId);
var aOrders = this.getView().getModel("appView").getProperty("/tempOrder");
aOrders = aOrders.filter(function (oProd) { //убираем дубликат продукта
return oProd.ProductId != sId;
});
this.getView().getModel("appView").setProperty("/tempOrder", aOrders);
MessageToast.show( this.getView().getModel("i18n").getResourceBundle().getText("msgItemDeleted"));
},
/**
* Event handler for binding change event
* @function
* @private
*/
_onBindingChange: function () {
var oView = this.getView(),
oElementBinding = oView.getElementBinding(),
oViewModel = this.getModel("detailView"),
oAppViewModel = this.getModel("appView");
// No data for the binding
if (!oElementBinding.getBoundContext()) {
this.getRouter().getTargets().display("detailObjectNotFound");
// if object could not be found, the selection in the master list
// does not make sense anymore.
this.getOwnerComponent().oListSelector.clearMasterListSelection();
return;
}
var sPath = oElementBinding.getBoundContext().getPath(),
oResourceBundle = this.getResourceBundle(),
oObject = oView.getModel().getObject(sPath),
sObjectId = oObject.Id,
sObjectName = oObject.Name;
oViewModel.setProperty("/sObjectId", sObjectId);
oViewModel.setProperty("/sObjectPath", sPath);
oAppViewModel.setProperty("/itemToSelect", sPath);
this.getOwnerComponent().oListSelector.selectAListItem(sPath);
oViewModel.setProperty("/saveAsTileTitle", oResourceBundle.getText("shareSaveTileAppTitle", [sObjectName]));
oViewModel.setProperty("/shareOnJamTitle", sObjectName);
oViewModel.setProperty("/shareSendEmailSubject", oResourceBundle.getText("shareSendEmailObjectSubject", [sObjectId]));
oViewModel.setProperty("/shareSendEmailMessage", oResourceBundle.getText("shareSendEmailObjectMessage", [
sObjectName,
sObjectId,
location.href
]));
},
/**
* Event handler for metadata loaded event
* @function
* @private
*/
_onMetadataLoaded: function () {
// Store original busy indicator delay for the detail view
var iOriginalViewBusyDelay = this.getView().getBusyIndicatorDelay(),
oViewModel = this.getModel("detailView");
// Make sure busy indicator is displayed immediately when
// detail view is displayed for the first time
oViewModel.setProperty("/delay", 0);
// Binding the view will set it to not busy - so the view is always busy if it is not bound
oViewModel.setProperty("/busy", true);
// Restore original busy indicator delay for the detail view
oViewModel.setProperty("/delay", iOriginalViewBusyDelay);
},
/**
* Opens a dialog letting the user either confirm or cancel the deletion of a list of entities
* @param {object} oConfirmation - Possesses up to two attributes: question (obligatory) is a string providing the statement presented to the user.
* title (optional) may be a string defining the title of the popup.
* @param {object} oConfirmation - Possesses up to two attributes: question (obligatory) is a string providing the statement presented to the user.
* @param {array} aPaths - Array of strings representing the context paths to the entities to be deleted. Currently only one is supported.
* @param {callback} fnAfterDeleted (optional) - called after deletion is done.
* @param {callback} fnDeleteCanceled (optional) - called when the user decides not to perform the deletion
* @param {callback} fnDeleteConfirmed (optional) - called when the user decides to perform the deletion. A Promise will be passed
* @function
* @private
*/
/* eslint-disable */
// using more then 4 parameters for a function is justified here
_confirmDeletionByUser: function (oConfirmation, aPaths, fnAfterDeleted, fnDeleteCanceled, fnDeleteConfirmed) {
/* eslint-enable */
// Callback function for when the user decides to perform the deletion
var fnDelete = function () {
// Calls the oData Delete service
this._callDelete(aPaths, fnAfterDeleted);
}.bind(this);
// Opens the confirmation dialog
MessageBox.show(oConfirmation.question, {
icon: oConfirmation.icon || MessageBox.Icon.WARNING,
title: oConfirmation.title || this._oResourceBundle.getText("delete"),
actions: [
MessageBox.Action.OK,
MessageBox.Action.CANCEL
],
onClose: function (oAction) {
if (oAction === MessageBox.Action.OK) {
fnDelete();
} else if (fnDeleteCanceled) {
fnDeleteCanceled();
}
}
});
},
/**
* Performs the deletion of a list of entities.
* @param {array} aPaths - Array of strings representing the context paths to the entities to be deleted. Currently only one is supported.
* @param {callback} fnAfterDeleted (optional) - called after deletion is done.
* @return a Promise that will be resolved as soon as the deletion process ended successfully.
* @function
* @private
*/
_callDelete: function (aPaths, fnAfterDeleted) {
var oViewModel = this.getModel("detailView");
oViewModel.setProperty("/busy", true);
var fnFailed = function () {
this._oODataModel.setUseBatch(true);
}.bind(this);
var fnSuccess = function () {
if (fnAfterDeleted) {
fnAfterDeleted();
this._oODataModel.setUseBatch(true);
}
oViewModel.setProperty("/busy", false);
}.bind(this);
return this._deleteOneEntity(aPaths[0], fnSuccess, fnFailed);
},
/**
* Deletes the entity from the odata model
* @param {array} aPaths - Array of strings representing the context paths to the entities to be deleted. Currently only one is supported.
* @param {callback} fnSuccess - Event handler for success operation.
* @param {callback} fnFailed - Event handler for failure operation.
* @function
* @private
*/
_deleteOneEntity: function (sPath, fnSuccess, fnFailed) {
var oPromise = new Promise(function (fnResolve, fnReject) {
this._oODataModel.setUseBatch(false);
this._oODataModel.remove(sPath, {
success: fnResolve,
error: fnReject,
async: true
});
}.bind(this));
oPromise.then(fnSuccess, fnFailed);
return oPromise;
},
/**
*@memberOf ru.teamidea.odatapractice.WebShop.controller.Detail
*/
action: function (oEvent) {
var that = this;
var actionParameters = JSON.parse(oEvent.getSource().data("wiring").replace(/'/g, "\""));
var eventType = oEvent.getId();
var aTargets = actionParameters[eventType].targets || [];
aTargets.forEach(function (oTarget) {
var oControl = that.byId(oTarget.id);
if (oControl) {
var oParams = {};
for (var prop in oTarget.parameters) {
oParams[prop] = oEvent.getParameter(oTarget.parameters[prop]);
}
oControl[oTarget.action](oParams);
}
});
var oNavigation = actionParameters[eventType].navigation;
if (oNavigation) {
var oParams = {};
(oNavigation.keys || []).forEach(function (prop) {
oParams[prop.name] = encodeURIComponent(JSON.stringify({
value: oEvent.getSource().getBindingContext(oNavigation.model).getProperty(prop.name),
type: prop.type
}));
});
if (Object.getOwnPropertyNames(oParams).length !== 0) {
this.getOwnerComponent().getRouter().navTo(oNavigation.routeName, oParams);
} else {
this.getOwnerComponent().getRouter().navTo(oNavigation.routeName);
}
}
}
});
}); | {
sProdId = oEvt.getSource().getBindingContext().getPath();
var sId = this.parseProductId(sProdId);
var aOrders = this.getView().getModel("appView").getProperty("/tempOrder");
var oProduct = this.getProductFromCache(sId, sProdId);
oProduct.Quantity = Number(sQuant);
aOrders = aOrders.filter(function (oProd) { //убираем дубликат продукта
return oProd.ProductId != sId;
});
aOrders = aOrders.concat(oProduct); // добавляем с обновл кол-вом
this.getView().getModel("appView").setProperty("/tempOrder", aOrders);
MessageToast.show( this.getView().getModel("i18n").getResourceBundle().getText("msgItemAdded"));
}
this.getView().byId("input2").setValue(nu | conditional_block |
Detail.controller.js | /*global location */
sap.ui.define([
"ru/teamidea/odatapractice/WebShop/controller/BaseController",
"sap/ui/model/json/JSONModel",
"ru/teamidea/odatapractice/WebShop/model/formatter",
"sap/m/MessageBox",
"sap/m/MessageToast"
], function (BaseController, JSONModel, formatter, MessageBox, MessageToast) {
"use strict";
return BaseController.extend("ru.teamidea.odatapractice.WebShop.controller.Detail", {
formatter: formatter,
/* =========================================================== */
/* lifecycle methods */
/* =========================================================== */
onInit: function () {
// Model used to manipulate control states. The chosen values make sure,
// detail page is busy indication immediately so there is no break in
// between the busy indication for loading the view's meta data
var oViewModel = new JSONModel({
busy: false,
delay: 0
});
this.getRouter().getRoute("object").attachPatternMatched(this._onObjectMatched, this);
this.setModel(oViewModel, "detailView");
this.getOwnerComponent().getModel().metadataLoaded().then(this._onMetadataLoaded.bind(this));
this._oODataModel = this.getOwnerComponent().getModel();
this._oResourceBundle = this.getResourceBundle();
},
/* =========================================================== */
/* event handlers */
/* =========================================================== */
/* =========================================================== */
/* begin: internal methods */
/* =========================================================== */
/**
* Binds the view to the object path and expands the aggregated line items.
* @function
* @param {sap.ui.base.Event} oEvent pattern match event in route 'object'
* @private
*/
_onObjectMatched: function (oEvent) {
var oParameter = oEvent.getParameter("arguments");
for (var value in oParameter) {
oParameter[value] = decodeURIComponent(oParameter[value]);
}
this.getModel().metadataLoaded().then(function () {
var sObjectPath = this.getModel().createKey("Products", oParameter);
this._bindView("/" + sObjectPath);
}.bind(this));
},
/**
* Binds the view to the object path. Makes sure that detail view displays
* a busy indicator while data for the corresponding element binding is loaded.
* @function
* @param {string} sObjectPath path to the object to be bound to the view.
* @private
*/
_bindView: function (sObjectPath) {
// Set busy indicator during view binding
var oViewModel = this.getModel("detailView");
// If the view was not bound yet its not busy, only if the binding requests data it is set to busy again
oViewModel.setProperty("/busy", false);
this.getView().bindElement({
path: sObjectPath,
events: {
change: this._onBindingChange.bind(this),
dataRequested: function () {
oViewModel.setProperty("/busy", true);
},
dataReceived: function () {
oViewModel.setProperty("/busy", false);
}
}
});
},
onGoToShopCart: function () {},
onBtnAddToCart: function (oEvt) {
var sQuant = this.getView().byId("input2").getValue();
var sProdId;
if (Number(sQuant) > 0) {
sProdId = oEvt.getSource().getBindingContext().getPath();
var sId = this.parseProductId(sProdId);
var aOrders = this.getView().getModel("appView").getProperty("/tempOrder");
var oProduct = this.getProductFromCache(sId, sProdId);
oProduct.Quantity = Number(sQuant);
aOrders = aOrders.filter(function (oProd) { //убираем дубликат продукта
return oProd.ProductId != sId;
});
aOrders = aOrders.concat(oProduct); // добавляем с обновл кол-вом
this.getView().getModel("appView").setProperty("/tempOrder", aOrders);
MessageToast.show( this.getView().getModel("i18n").getResourceBundle().getText("msgItemAdded"));
}
this.getView().byId("input2").setValue(null);
MessageToast.show( this.getView().getModel("i18n").getResourceBundle().getText("msgItemAdded"));
},
parseProductId: function (sProdId) {
return sProdId.split("(")[1].split(")")[0];
},
getProductFromCache: function (sId, sProdPath) {
var aOrders = this.getView().getModel("appView").getProperty("/tempOrder");
var aNeededOrders = aOrders.filter(function (oProd) {
return oProd.ProductId === sId;
});
if (aNeededOrders.length === 0) {
return {
"ProductId": sId,
"Name": this.getModel().getObject(sProdPath).Name,
"Quantity": 0,
"Id": Math.round(Math.random() * 1000000).toString()
};
} else {
return aNeededOrders[0];
}
},
onPressDelFromCart: function (oEvt) {
var sProdId = oEvt.getSource().getBindingContext().getPath();
var sId = this.parseProductId(sProdId);
var aOrders = this.getView().getModel("appView").getProperty("/tempOrder");
aOrders = aOrders.filter(function (oProd) { //убираем дубликат продукта
return oProd.ProductId != sId;
});
this.getView().getModel("appView").setProperty("/tempOrder", aOrders);
MessageToast.show( this.getView().getModel("i18n").getResourceBundle().getText("msgItemDeleted"));
},
/**
* Event handler for binding change event
* @function
* @private
*/
_onBindingChange: function () {
var oView = this.getView(),
oElementBinding = oView.getElementBinding(),
oViewModel = this.getModel("detailView"),
oAppViewModel = this.getModel("appView");
// No data for the binding
if (!oElementBinding.getBoundContext()) {
this.getRouter().getTargets().display("detailObjectNotFound");
// if object could not be found, the selection in the master list
// does not make sense anymore.
this.getOwnerComponent().oListSelector.clearMasterListSelection();
return;
}
var sPath = oElementBinding.getBoundContext().getPath(),
oResourceBundle = this.getResourceBundle(),
oObject = oView.getModel().getObject(sPath),
sObjectId = oObject.Id,
sObjectName = oObject.Name;
oViewModel.setProperty("/sObjectId", sObjectId);
oViewModel.setProperty("/sObjectPath", sPath);
oAppViewModel.setProperty("/itemToSelect", sPath);
this.getOwnerComponent().oListSelector.selectAListItem(sPath);
oViewModel.setProperty("/saveAsTileTitle", oResourceBundle.getText("shareSaveTileAppTitle", [sObjectName]));
oViewModel.setProperty("/shareOnJamTitle", sObjectName);
oViewModel.setProperty("/shareSendEmailSubject", oResourceBundle.getText("shareSendEmailObjectSubject", [sObjectId]));
oViewModel.setProperty("/shareSendEmailMessage", oResourceBundle.getText("shareSendEmailObjectMessage", [
sObjectName,
sObjectId,
location.href
]));
},
/**
* Event handler for metadata loaded event
* @function
* @private
*/
_onMetadataLoaded: function () {
// Store original busy indicator delay for the detail view | oViewModel = this.getModel("detailView");
// Make sure busy indicator is displayed immediately when
// detail view is displayed for the first time
oViewModel.setProperty("/delay", 0);
// Binding the view will set it to not busy - so the view is always busy if it is not bound
oViewModel.setProperty("/busy", true);
// Restore original busy indicator delay for the detail view
oViewModel.setProperty("/delay", iOriginalViewBusyDelay);
},
/**
* Opens a dialog letting the user either confirm or cancel the deletion of a list of entities
* @param {object} oConfirmation - Possesses up to two attributes: question (obligatory) is a string providing the statement presented to the user.
* title (optional) may be a string defining the title of the popup.
* @param {object} oConfirmation - Possesses up to two attributes: question (obligatory) is a string providing the statement presented to the user.
* @param {array} aPaths - Array of strings representing the context paths to the entities to be deleted. Currently only one is supported.
* @param {callback} fnAfterDeleted (optional) - called after deletion is done.
* @param {callback} fnDeleteCanceled (optional) - called when the user decides not to perform the deletion
* @param {callback} fnDeleteConfirmed (optional) - called when the user decides to perform the deletion. A Promise will be passed
* @function
* @private
*/
/* eslint-disable */
// using more then 4 parameters for a function is justified here
_confirmDeletionByUser: function (oConfirmation, aPaths, fnAfterDeleted, fnDeleteCanceled, fnDeleteConfirmed) {
/* eslint-enable */
// Callback function for when the user decides to perform the deletion
var fnDelete = function () {
// Calls the oData Delete service
this._callDelete(aPaths, fnAfterDeleted);
}.bind(this);
// Opens the confirmation dialog
MessageBox.show(oConfirmation.question, {
icon: oConfirmation.icon || MessageBox.Icon.WARNING,
title: oConfirmation.title || this._oResourceBundle.getText("delete"),
actions: [
MessageBox.Action.OK,
MessageBox.Action.CANCEL
],
onClose: function (oAction) {
if (oAction === MessageBox.Action.OK) {
fnDelete();
} else if (fnDeleteCanceled) {
fnDeleteCanceled();
}
}
});
},
/**
* Performs the deletion of a list of entities.
* @param {array} aPaths - Array of strings representing the context paths to the entities to be deleted. Currently only one is supported.
* @param {callback} fnAfterDeleted (optional) - called after deletion is done.
* @return a Promise that will be resolved as soon as the deletion process ended successfully.
* @function
* @private
*/
_callDelete: function (aPaths, fnAfterDeleted) {
var oViewModel = this.getModel("detailView");
oViewModel.setProperty("/busy", true);
var fnFailed = function () {
this._oODataModel.setUseBatch(true);
}.bind(this);
var fnSuccess = function () {
if (fnAfterDeleted) {
fnAfterDeleted();
this._oODataModel.setUseBatch(true);
}
oViewModel.setProperty("/busy", false);
}.bind(this);
return this._deleteOneEntity(aPaths[0], fnSuccess, fnFailed);
},
/**
* Deletes the entity from the odata model
* @param {array} aPaths - Array of strings representing the context paths to the entities to be deleted. Currently only one is supported.
* @param {callback} fnSuccess - Event handler for success operation.
* @param {callback} fnFailed - Event handler for failure operation.
* @function
* @private
*/
_deleteOneEntity: function (sPath, fnSuccess, fnFailed) {
var oPromise = new Promise(function (fnResolve, fnReject) {
this._oODataModel.setUseBatch(false);
this._oODataModel.remove(sPath, {
success: fnResolve,
error: fnReject,
async: true
});
}.bind(this));
oPromise.then(fnSuccess, fnFailed);
return oPromise;
},
/**
*@memberOf ru.teamidea.odatapractice.WebShop.controller.Detail
*/
action: function (oEvent) {
var that = this;
var actionParameters = JSON.parse(oEvent.getSource().data("wiring").replace(/'/g, "\""));
var eventType = oEvent.getId();
var aTargets = actionParameters[eventType].targets || [];
aTargets.forEach(function (oTarget) {
var oControl = that.byId(oTarget.id);
if (oControl) {
var oParams = {};
for (var prop in oTarget.parameters) {
oParams[prop] = oEvent.getParameter(oTarget.parameters[prop]);
}
oControl[oTarget.action](oParams);
}
});
var oNavigation = actionParameters[eventType].navigation;
if (oNavigation) {
var oParams = {};
(oNavigation.keys || []).forEach(function (prop) {
oParams[prop.name] = encodeURIComponent(JSON.stringify({
value: oEvent.getSource().getBindingContext(oNavigation.model).getProperty(prop.name),
type: prop.type
}));
});
if (Object.getOwnPropertyNames(oParams).length !== 0) {
this.getOwnerComponent().getRouter().navTo(oNavigation.routeName, oParams);
} else {
this.getOwnerComponent().getRouter().navTo(oNavigation.routeName);
}
}
}
});
}); | var iOriginalViewBusyDelay = this.getView().getBusyIndicatorDelay(), | random_line_split |
Map.js | import React, {
// useState,4
Component,
PureComponent,
} from "react";
import { compose } from "recompose";
import _ from "lodash";
import {
Polyline,
GoogleMap,
withGoogleMap,
withScriptjs,
Marker,
InfoWindow,
DirectionsRenderer,
TrafficLayer,
// DirectionsService
} from "react-google-maps";
import markerimg from "../components/marker.svg";
import HomeIcon from "../components/Homeicon.svg";
import { ORDER_DELIVERED } from "./Constants/Order/Constants";
const {
SearchBox,
} = require("react-google-maps/lib/components/places/SearchBox");
class Map extends PureComponent {
constructor(props) {
super(props);
this.origin = null;
this.firstOrigin = null;
this.markerPositions = [];
this.markerCounts = 0;
this.orignmarker = null;
this.destmarker = null;
this.destination = null;
this.Mode = null;
this.google = window.google;
this.polylinescoords = [];
this.state = {
selectedOrder: null,
directions: [],
routes: null,
markers: { url: markerimg },
vehiclesdata: null,
vehiclesdesc: null,
loading: true,
wayPoints: null,
linescoords: [],
ordercaltime: [],
};
}
createOrderObject = (originorder, destinationorder) => {
let originaddress = {
lat: parseFloat(originorder.address.latitude),
lng: parseFloat(originorder.address.longitude),
};
let destinationaddress = {
lat: parseFloat(destinationorder.address.latitude),
lng: parseFloat(destinationorder.address.longitude),
};
let orderpoints = {
origin: new this.google.maps.LatLng(originaddress.lat, originaddress.lng),
destination: new this.google.maps.LatLng(
destinationaddress.lat,
destinationaddress.lng
),
order_id: destinationorder.order_id,
};
return orderpoints;
};
getCalcultedTimeOfWaypoints = (routelist, store_address) => {
// routelist=routelist.map(({order})=> )
let update_store_address = { address: store_address };
let allorders = routelist.map(({ order }) => order);
allorders = [update_store_address, ...allorders];
let count = 0;
for (let i = 0; i < allorders.length; i++) {
let originorder = allorders[i];
if (typeof allorders[i + 1] !== "undefined") {
let destinationorder = allorders[i + 1];
count++;
let orderpoints = this.createOrderObject(originorder, destinationorder);
this.getDistanceBetweenPoints(orderpoints);
}
}
};
getDistanceBetweenPoints = (orderpoints) => {
const service = new this.google.maps.DistanceMatrixService();
service.getDistanceMatrix(
{
origins: [orderpoints.origin],
destinations: [orderpoints.destination],
// waypoints: wayPoints,
// optimizeWaypoints: true,
avoidHighways: false,
travelMode: this.google.maps.TravelMode.DRIVING,
},
(result, status) => {
let general = result.rows[0].elements[0];
let distance_in_km = general.distance.text;
let time_required = general.duration.text;
let finalobject = {
order_id: orderpoints.order_id,
distance: distance_in_km,
time: time_required,
};
let ordertime = [...this.state.ordercaltime];
ordertime.push(finalobject);
this.setState({ ordercaltime: [...ordertime] });
}
);
};
setDirections(wayPoints) {
const DirectionsService = new this.google.maps.DirectionsService();
DirectionsService.route(
{
origin: this.origin,
destination: this.destination,
provideRouteAlternatives: false,
waypoints: wayPoints,
// optimizeWaypoints: true,
avoidHighways: false,
travelMode: this.google.maps.TravelMode.DRIVING,
},
(result, status) => {
// console.log("Check Direction Services", result);
if (status === this.google.maps.DirectionsStatus.OK) {
const overViewCoords = result.routes[0].overview_path;
this.setState({
directions: [...this.state.directions, result],
linescoords:
typeof overViewCoords !== "undefined"
? [...this.state.linescoords, overViewCoords]
: null,
});
} else {
// console.error(`error fetching directions ${result}`);
}
}
);
} | lng: null,
};
if (store_address) {
origin.lat = parseFloat(store_address.latitude);
origin.lng = parseFloat(store_address.longitude);
}
let pointlength = 0;
if (type === "multiple") {
let firstpoint = point[0];
if (this.markerPositions.length > 0) {
this.markerPositions.push(point);
} else {
this.markerPositions.push(point);
}
if (this.destination) {
pointlength = point.length - 2;
pointlength = point.length;
} else {
pointlength = point.length - 1;
}
if (this.origin) {
this.origin = this.destination;
} else {
this.origin = new this.google.maps.LatLng(origin.lat, origin.lng);
this.orignmarker = this.origin;
}
} else {
this.markerPositions.push(point);
pointlength = point.length - 2;
this.origin = new this.google.maps.LatLng(origin.lat, origin.lng);
this.orignmarker = this.origin;
}
let dest = { lat: null, lng: null };
dest.lat = point[point.length - 1].order.address.latitude;
dest.lng = point[point.length - 1].order.address.longitude;
this.destination = new this.google.maps.LatLng(dest.lat, dest.lng);
this.destmarker = this.dest;
let wayPoints = [];
const filteredpoints = point.filter(({ order }, key) => {
return order.address.latitude !== null;
});
filteredpoints.map(({ order }, key) =>
wayPoints.push({
// location: new this.google.maps.LatLng(data.lat, data.lng),
location: {
lat: parseFloat(order.address.latitude),
lng: parseFloat(order.address.longitude),
},
stopover: false,
})
);
return wayPoints;
}
static getDerivedStateFromProps(props, state) {
if (props.routelist !== null) {
if (props.routelist.deliveries.length > 0) {
if (state.ordercaltime.length > 0) {
if (state.ordercaltime.length === props.routelist.deliveries.length) {
console.log(state.ordercaltime.length);
}
}
}
} else {
return {
directions: [],
markerPositions: [],
routes: null,
};
}
return state;
}
sendOrderRouteDistanceAndTime = (orderlist) => {
console.log("send");
console.log([...orderlist]);
console.log(JSON.stringify([...orderlist]));
};
componentDidUpdate(prevProps, prevState) {
if (this.props.routelist !== prevProps.routelist) {
// await this.getCalcultedTimeOfWaypoints(this.props.routelist.deliveries, this.props.routelist.store_address);
// // prevProps
if (this.props.routelist && this.props.routelist.deliveries.length > 0) {
this.setState({
routes: this.props.routelist.deliveries,
ordercaltime: [],
});
let wayPoints = [];
let routelist;
// let wayPointsmax = 25;
routelist = this.props.routelist.deliveries;
let store_address = this.props.routelist.store_address;
// this.getCalcultedTimeOfWaypoints(
// this.props.routelist.deliveries,
// this.props.routelist.store_address
// );
let chunkarray = _.chunk(routelist, 24);
for (let i = 0; i < chunkarray.length; i++) {
if (i !== 0) {
let firstchunkitem = chunkarray[i][0];
chunkarray[i - 1].push(firstchunkitem);
}
}
this.polylinescoords = [];
this.origin = null;
this.destination = null;
this.markerPositions = [];
if (this.state.directions.length > 0) {
this.setState({
directions: [],
linescoords: [],
});
}
if (chunkarray.length > 1) {
for (let i = 0; i < chunkarray.length; i++) {
let lat = chunkarray[0][0].latitude;
let lng = chunkarray[0][0].longitude;
this.firstOrigin = {
lat: lat,
lng: lng,
};
let filteredchunk = chunkarray[i].filter(
({ order }) => order.latitude !== 0 || order.langitude !== null
);
if (filteredchunk.length > 0) {
let type = "multiple";
wayPoints = this.makewayPoints(
filteredchunk,
type,
store_address
);
this.setDirections(wayPoints);
}
}
} else {
let filteredchunk = chunkarray[0].filter((order) => order.lat !== 0);
wayPoints = this.makewayPoints(
filteredchunk,
routelist,
store_address
);
this.setDirections(wayPoints);
}
// this.sendOrderRouteDistanceAndTime(this.state.ordercaltime);
}
}
}
// shouldComponentUpdate(nextProps, nextState) {
// // if (nextProps.routelist !== this.props.routelist) {
// // return true;
// // } else {
// // return false;
// // }
// }
showDirectionRendrer = () => {
const alternatingColor = ["#FFFF00", "#0000fd"];
const strokeColor = ["#ff9900", "#6a0dad"];
return this.state.directions.map((direcrray, key) => (
<DirectionsRenderer
key={key}
places={this.state.markers}
directions={direcrray}
options={{
polylineOptions: {
icons: [
{
color: "#00ff00",
icon: {
// strokeColor:strokeColor[key],
path: this.google.maps.SymbolPath.FORWARD_CLOSED_ARROW,
},
offset: "100%",
scaledSize: new this.google.maps.Size(2, 2),
repeat: "100px",
},
],
// strokeColor:strokeColor[key],
// strokeColor: '#0000fd',
// strokeOpacity: 0.9,
// strokeWeight: `${key + 2}`,
strokeWeight: 3.5,
},
suppressMarkers: true,
markerOptions: {},
}}
></DirectionsRenderer>
));
};
renderMarkers = () => {
const markerColors = { green: "#008000", red: "#FF0000" };
return (
this.markerPositions &&
this.state.directions &&
[].concat.apply([], this.markerPositions).map(({ order }, i) => (
<Marker
animation={`BOUNCE`}
key={i}
label={{
// color: this.BLACK,
fontWeight: "bold",
text: `${i + 1}`,
}}
position={
new this.google.maps.LatLng(
order.address.latitude,
order.address.longitude
)
}
onClick={(e) => {
this.setState({
selectedOrder: order,
});
}}
// icon={{
// url: `http://maps.google.com/mapfiles/ms/icons/green.png`,
// color:
// order.order_status === ORDER_DELIVERED
// ? markerColors.green
// : markerColors.red,
// }}
></Marker>
))
);
};
renderInfoWindow = () => {
let order = null;
if (this.state.selectedOrder) {
order = this.state.selectedOrder;
}
return order ? (
<InfoWindow
key={order.order_id + order.order_number}
position={
new this.google.maps.LatLng(
order.address.latitude,
order.address.longitude
)
}
onCloseClick={() => {
this.setState({ selectedOrder: null });
}}
>
<div
style={{
fontSize: "11px",
background: `white`,
borderRadius: null,
padding: 15,
}}
id={order.order_id + order.order_number}
key={order.order_id + order.order_number}
>
<h5>Order Id: {order.order_id}</h5>
{order.items.map((item, key) => (
<div key={key}>
<strong>Products Name:</strong>
{item.product_name.en}
<br />
<strong>Product Quantitity:</strong> {item.quantity}
</div>
))}
<div>
<strong>Delivery Resource Name:</strong> {order.customer.name}
<br />
<strong>Phone Number:</strong> {order.customer.phone}
<br />
<strong>Coordinates:</strong>{" "}
{`${order.address.latitude} , ${order.address.longitude}`}
<br />
<strong>Location:</strong> {order.address.address_detail}
</div>
</div>
</InfoWindow>
) : null;
};
renderOriginMarker = () => {
return (
<Marker
position={this.orignmarker}
icon={{
url: HomeIcon,
// color: '#ff0000',
scaledSize: new this.google.maps.Size(25, 25),
offset: "100%",
}}
></Marker>
);
};
renderPolyLines = () => {
return (
<Polyline
path={this.state.linescoords ? this.state.linescoords[0] : []}
geodesic={true}
options={{
icons: [
{
color: "#00ff00",
icon: {
path: this.google.maps.SymbolPath.FORWARD_CLOSED_ARROW,
},
offset: "100%",
scaledSize: new this.google.maps.Size(2, 2),
repeat: "100px",
},
],
clickable: true,
strokeColor: "#FF5733",
strokeOpacity: 1,
strokeWeight: 1,
}}
/>
);
};
render() {
return (
<React.Fragment>
{" "}
<GoogleMap
style={{
position: "absolute",
top: "0",
left: "0",
right: "0",
bottom: "0",
}}
key={this.state.routes ? this.state.routes : null}
defaultZoom={11}
mapContainerStyle={{
height: "100vh",
width: "100%",
}}
defaultCenter={
this.state.routes
? new this.google.maps.LatLng(
this.state.routes[0].order.address.latitude,
this.state.routes[0].order.address.longitude
)
: new this.google.maps.LatLng(23.8859, 45.0792)
}
>
{/*this.renderGoogleMap()*/}
{this.renderInfoWindow()}
{this.renderMarkers()}
{/*this.renderPolyLines*/}
{this.showDirectionRendrer()}
{this.renderOriginMarker()}
<TrafficLayer autoUpdate />
</GoogleMap>
</React.Fragment>
);
}
}
export default compose(withScriptjs, withGoogleMap)(Map); |
makewayPoints(point, type, store_address) {
let origin = {
lat: null, | random_line_split |
Map.js | import React, {
// useState,4
Component,
PureComponent,
} from "react";
import { compose } from "recompose";
import _ from "lodash";
import {
Polyline,
GoogleMap,
withGoogleMap,
withScriptjs,
Marker,
InfoWindow,
DirectionsRenderer,
TrafficLayer,
// DirectionsService
} from "react-google-maps";
import markerimg from "../components/marker.svg";
import HomeIcon from "../components/Homeicon.svg";
import { ORDER_DELIVERED } from "./Constants/Order/Constants";
const {
SearchBox,
} = require("react-google-maps/lib/components/places/SearchBox");
class | extends PureComponent {
constructor(props) {
super(props);
this.origin = null;
this.firstOrigin = null;
this.markerPositions = [];
this.markerCounts = 0;
this.orignmarker = null;
this.destmarker = null;
this.destination = null;
this.Mode = null;
this.google = window.google;
this.polylinescoords = [];
this.state = {
selectedOrder: null,
directions: [],
routes: null,
markers: { url: markerimg },
vehiclesdata: null,
vehiclesdesc: null,
loading: true,
wayPoints: null,
linescoords: [],
ordercaltime: [],
};
}
createOrderObject = (originorder, destinationorder) => {
let originaddress = {
lat: parseFloat(originorder.address.latitude),
lng: parseFloat(originorder.address.longitude),
};
let destinationaddress = {
lat: parseFloat(destinationorder.address.latitude),
lng: parseFloat(destinationorder.address.longitude),
};
let orderpoints = {
origin: new this.google.maps.LatLng(originaddress.lat, originaddress.lng),
destination: new this.google.maps.LatLng(
destinationaddress.lat,
destinationaddress.lng
),
order_id: destinationorder.order_id,
};
return orderpoints;
};
getCalcultedTimeOfWaypoints = (routelist, store_address) => {
// routelist=routelist.map(({order})=> )
let update_store_address = { address: store_address };
let allorders = routelist.map(({ order }) => order);
allorders = [update_store_address, ...allorders];
let count = 0;
for (let i = 0; i < allorders.length; i++) {
let originorder = allorders[i];
if (typeof allorders[i + 1] !== "undefined") {
let destinationorder = allorders[i + 1];
count++;
let orderpoints = this.createOrderObject(originorder, destinationorder);
this.getDistanceBetweenPoints(orderpoints);
}
}
};
getDistanceBetweenPoints = (orderpoints) => {
const service = new this.google.maps.DistanceMatrixService();
service.getDistanceMatrix(
{
origins: [orderpoints.origin],
destinations: [orderpoints.destination],
// waypoints: wayPoints,
// optimizeWaypoints: true,
avoidHighways: false,
travelMode: this.google.maps.TravelMode.DRIVING,
},
(result, status) => {
let general = result.rows[0].elements[0];
let distance_in_km = general.distance.text;
let time_required = general.duration.text;
let finalobject = {
order_id: orderpoints.order_id,
distance: distance_in_km,
time: time_required,
};
let ordertime = [...this.state.ordercaltime];
ordertime.push(finalobject);
this.setState({ ordercaltime: [...ordertime] });
}
);
};
setDirections(wayPoints) {
const DirectionsService = new this.google.maps.DirectionsService();
DirectionsService.route(
{
origin: this.origin,
destination: this.destination,
provideRouteAlternatives: false,
waypoints: wayPoints,
// optimizeWaypoints: true,
avoidHighways: false,
travelMode: this.google.maps.TravelMode.DRIVING,
},
(result, status) => {
// console.log("Check Direction Services", result);
if (status === this.google.maps.DirectionsStatus.OK) {
const overViewCoords = result.routes[0].overview_path;
this.setState({
directions: [...this.state.directions, result],
linescoords:
typeof overViewCoords !== "undefined"
? [...this.state.linescoords, overViewCoords]
: null,
});
} else {
// console.error(`error fetching directions ${result}`);
}
}
);
}
makewayPoints(point, type, store_address) {
let origin = {
lat: null,
lng: null,
};
if (store_address) {
origin.lat = parseFloat(store_address.latitude);
origin.lng = parseFloat(store_address.longitude);
}
let pointlength = 0;
if (type === "multiple") {
let firstpoint = point[0];
if (this.markerPositions.length > 0) {
this.markerPositions.push(point);
} else {
this.markerPositions.push(point);
}
if (this.destination) {
pointlength = point.length - 2;
pointlength = point.length;
} else {
pointlength = point.length - 1;
}
if (this.origin) {
this.origin = this.destination;
} else {
this.origin = new this.google.maps.LatLng(origin.lat, origin.lng);
this.orignmarker = this.origin;
}
} else {
this.markerPositions.push(point);
pointlength = point.length - 2;
this.origin = new this.google.maps.LatLng(origin.lat, origin.lng);
this.orignmarker = this.origin;
}
let dest = { lat: null, lng: null };
dest.lat = point[point.length - 1].order.address.latitude;
dest.lng = point[point.length - 1].order.address.longitude;
this.destination = new this.google.maps.LatLng(dest.lat, dest.lng);
this.destmarker = this.dest;
let wayPoints = [];
const filteredpoints = point.filter(({ order }, key) => {
return order.address.latitude !== null;
});
filteredpoints.map(({ order }, key) =>
wayPoints.push({
// location: new this.google.maps.LatLng(data.lat, data.lng),
location: {
lat: parseFloat(order.address.latitude),
lng: parseFloat(order.address.longitude),
},
stopover: false,
})
);
return wayPoints;
}
static getDerivedStateFromProps(props, state) {
if (props.routelist !== null) {
if (props.routelist.deliveries.length > 0) {
if (state.ordercaltime.length > 0) {
if (state.ordercaltime.length === props.routelist.deliveries.length) {
console.log(state.ordercaltime.length);
}
}
}
} else {
return {
directions: [],
markerPositions: [],
routes: null,
};
}
return state;
}
sendOrderRouteDistanceAndTime = (orderlist) => {
console.log("send");
console.log([...orderlist]);
console.log(JSON.stringify([...orderlist]));
};
componentDidUpdate(prevProps, prevState) {
if (this.props.routelist !== prevProps.routelist) {
// await this.getCalcultedTimeOfWaypoints(this.props.routelist.deliveries, this.props.routelist.store_address);
// // prevProps
if (this.props.routelist && this.props.routelist.deliveries.length > 0) {
this.setState({
routes: this.props.routelist.deliveries,
ordercaltime: [],
});
let wayPoints = [];
let routelist;
// let wayPointsmax = 25;
routelist = this.props.routelist.deliveries;
let store_address = this.props.routelist.store_address;
// this.getCalcultedTimeOfWaypoints(
// this.props.routelist.deliveries,
// this.props.routelist.store_address
// );
let chunkarray = _.chunk(routelist, 24);
for (let i = 0; i < chunkarray.length; i++) {
if (i !== 0) {
let firstchunkitem = chunkarray[i][0];
chunkarray[i - 1].push(firstchunkitem);
}
}
this.polylinescoords = [];
this.origin = null;
this.destination = null;
this.markerPositions = [];
if (this.state.directions.length > 0) {
this.setState({
directions: [],
linescoords: [],
});
}
if (chunkarray.length > 1) {
for (let i = 0; i < chunkarray.length; i++) {
let lat = chunkarray[0][0].latitude;
let lng = chunkarray[0][0].longitude;
this.firstOrigin = {
lat: lat,
lng: lng,
};
let filteredchunk = chunkarray[i].filter(
({ order }) => order.latitude !== 0 || order.langitude !== null
);
if (filteredchunk.length > 0) {
let type = "multiple";
wayPoints = this.makewayPoints(
filteredchunk,
type,
store_address
);
this.setDirections(wayPoints);
}
}
} else {
let filteredchunk = chunkarray[0].filter((order) => order.lat !== 0);
wayPoints = this.makewayPoints(
filteredchunk,
routelist,
store_address
);
this.setDirections(wayPoints);
}
// this.sendOrderRouteDistanceAndTime(this.state.ordercaltime);
}
}
}
// shouldComponentUpdate(nextProps, nextState) {
// // if (nextProps.routelist !== this.props.routelist) {
// // return true;
// // } else {
// // return false;
// // }
// }
showDirectionRendrer = () => {
const alternatingColor = ["#FFFF00", "#0000fd"];
const strokeColor = ["#ff9900", "#6a0dad"];
return this.state.directions.map((direcrray, key) => (
<DirectionsRenderer
key={key}
places={this.state.markers}
directions={direcrray}
options={{
polylineOptions: {
icons: [
{
color: "#00ff00",
icon: {
// strokeColor:strokeColor[key],
path: this.google.maps.SymbolPath.FORWARD_CLOSED_ARROW,
},
offset: "100%",
scaledSize: new this.google.maps.Size(2, 2),
repeat: "100px",
},
],
// strokeColor:strokeColor[key],
// strokeColor: '#0000fd',
// strokeOpacity: 0.9,
// strokeWeight: `${key + 2}`,
strokeWeight: 3.5,
},
suppressMarkers: true,
markerOptions: {},
}}
></DirectionsRenderer>
));
};
renderMarkers = () => {
const markerColors = { green: "#008000", red: "#FF0000" };
return (
this.markerPositions &&
this.state.directions &&
[].concat.apply([], this.markerPositions).map(({ order }, i) => (
<Marker
animation={`BOUNCE`}
key={i}
label={{
// color: this.BLACK,
fontWeight: "bold",
text: `${i + 1}`,
}}
position={
new this.google.maps.LatLng(
order.address.latitude,
order.address.longitude
)
}
onClick={(e) => {
this.setState({
selectedOrder: order,
});
}}
// icon={{
// url: `http://maps.google.com/mapfiles/ms/icons/green.png`,
// color:
// order.order_status === ORDER_DELIVERED
// ? markerColors.green
// : markerColors.red,
// }}
></Marker>
))
);
};
renderInfoWindow = () => {
let order = null;
if (this.state.selectedOrder) {
order = this.state.selectedOrder;
}
return order ? (
<InfoWindow
key={order.order_id + order.order_number}
position={
new this.google.maps.LatLng(
order.address.latitude,
order.address.longitude
)
}
onCloseClick={() => {
this.setState({ selectedOrder: null });
}}
>
<div
style={{
fontSize: "11px",
background: `white`,
borderRadius: null,
padding: 15,
}}
id={order.order_id + order.order_number}
key={order.order_id + order.order_number}
>
<h5>Order Id: {order.order_id}</h5>
{order.items.map((item, key) => (
<div key={key}>
<strong>Products Name:</strong>
{item.product_name.en}
<br />
<strong>Product Quantitity:</strong> {item.quantity}
</div>
))}
<div>
<strong>Delivery Resource Name:</strong> {order.customer.name}
<br />
<strong>Phone Number:</strong> {order.customer.phone}
<br />
<strong>Coordinates:</strong>{" "}
{`${order.address.latitude} , ${order.address.longitude}`}
<br />
<strong>Location:</strong> {order.address.address_detail}
</div>
</div>
</InfoWindow>
) : null;
};
renderOriginMarker = () => {
return (
<Marker
position={this.orignmarker}
icon={{
url: HomeIcon,
// color: '#ff0000',
scaledSize: new this.google.maps.Size(25, 25),
offset: "100%",
}}
></Marker>
);
};
renderPolyLines = () => {
return (
<Polyline
path={this.state.linescoords ? this.state.linescoords[0] : []}
geodesic={true}
options={{
icons: [
{
color: "#00ff00",
icon: {
path: this.google.maps.SymbolPath.FORWARD_CLOSED_ARROW,
},
offset: "100%",
scaledSize: new this.google.maps.Size(2, 2),
repeat: "100px",
},
],
clickable: true,
strokeColor: "#FF5733",
strokeOpacity: 1,
strokeWeight: 1,
}}
/>
);
};
render() {
return (
<React.Fragment>
{" "}
<GoogleMap
style={{
position: "absolute",
top: "0",
left: "0",
right: "0",
bottom: "0",
}}
key={this.state.routes ? this.state.routes : null}
defaultZoom={11}
mapContainerStyle={{
height: "100vh",
width: "100%",
}}
defaultCenter={
this.state.routes
? new this.google.maps.LatLng(
this.state.routes[0].order.address.latitude,
this.state.routes[0].order.address.longitude
)
: new this.google.maps.LatLng(23.8859, 45.0792)
}
>
{/*this.renderGoogleMap()*/}
{this.renderInfoWindow()}
{this.renderMarkers()}
{/*this.renderPolyLines*/}
{this.showDirectionRendrer()}
{this.renderOriginMarker()}
<TrafficLayer autoUpdate />
</GoogleMap>
</React.Fragment>
);
}
}
export default compose(withScriptjs, withGoogleMap)(Map);
| Map | identifier_name |
Map.js | import React, {
// useState,4
Component,
PureComponent,
} from "react";
import { compose } from "recompose";
import _ from "lodash";
import {
Polyline,
GoogleMap,
withGoogleMap,
withScriptjs,
Marker,
InfoWindow,
DirectionsRenderer,
TrafficLayer,
// DirectionsService
} from "react-google-maps";
import markerimg from "../components/marker.svg";
import HomeIcon from "../components/Homeicon.svg";
import { ORDER_DELIVERED } from "./Constants/Order/Constants";
const {
SearchBox,
} = require("react-google-maps/lib/components/places/SearchBox");
class Map extends PureComponent {
constructor(props) {
super(props);
this.origin = null;
this.firstOrigin = null;
this.markerPositions = [];
this.markerCounts = 0;
this.orignmarker = null;
this.destmarker = null;
this.destination = null;
this.Mode = null;
this.google = window.google;
this.polylinescoords = [];
this.state = {
selectedOrder: null,
directions: [],
routes: null,
markers: { url: markerimg },
vehiclesdata: null,
vehiclesdesc: null,
loading: true,
wayPoints: null,
linescoords: [],
ordercaltime: [],
};
}
createOrderObject = (originorder, destinationorder) => {
let originaddress = {
lat: parseFloat(originorder.address.latitude),
lng: parseFloat(originorder.address.longitude),
};
let destinationaddress = {
lat: parseFloat(destinationorder.address.latitude),
lng: parseFloat(destinationorder.address.longitude),
};
let orderpoints = {
origin: new this.google.maps.LatLng(originaddress.lat, originaddress.lng),
destination: new this.google.maps.LatLng(
destinationaddress.lat,
destinationaddress.lng
),
order_id: destinationorder.order_id,
};
return orderpoints;
};
getCalcultedTimeOfWaypoints = (routelist, store_address) => {
// routelist=routelist.map(({order})=> )
let update_store_address = { address: store_address };
let allorders = routelist.map(({ order }) => order);
allorders = [update_store_address, ...allorders];
let count = 0;
for (let i = 0; i < allorders.length; i++) {
let originorder = allorders[i];
if (typeof allorders[i + 1] !== "undefined") {
let destinationorder = allorders[i + 1];
count++;
let orderpoints = this.createOrderObject(originorder, destinationorder);
this.getDistanceBetweenPoints(orderpoints);
}
}
};
getDistanceBetweenPoints = (orderpoints) => {
const service = new this.google.maps.DistanceMatrixService();
service.getDistanceMatrix(
{
origins: [orderpoints.origin],
destinations: [orderpoints.destination],
// waypoints: wayPoints,
// optimizeWaypoints: true,
avoidHighways: false,
travelMode: this.google.maps.TravelMode.DRIVING,
},
(result, status) => {
let general = result.rows[0].elements[0];
let distance_in_km = general.distance.text;
let time_required = general.duration.text;
let finalobject = {
order_id: orderpoints.order_id,
distance: distance_in_km,
time: time_required,
};
let ordertime = [...this.state.ordercaltime];
ordertime.push(finalobject);
this.setState({ ordercaltime: [...ordertime] });
}
);
};
setDirections(wayPoints) {
const DirectionsService = new this.google.maps.DirectionsService();
DirectionsService.route(
{
origin: this.origin,
destination: this.destination,
provideRouteAlternatives: false,
waypoints: wayPoints,
// optimizeWaypoints: true,
avoidHighways: false,
travelMode: this.google.maps.TravelMode.DRIVING,
},
(result, status) => {
// console.log("Check Direction Services", result);
if (status === this.google.maps.DirectionsStatus.OK) {
const overViewCoords = result.routes[0].overview_path;
this.setState({
directions: [...this.state.directions, result],
linescoords:
typeof overViewCoords !== "undefined"
? [...this.state.linescoords, overViewCoords]
: null,
});
} else {
// console.error(`error fetching directions ${result}`);
}
}
);
}
makewayPoints(point, type, store_address) {
let origin = {
lat: null,
lng: null,
};
if (store_address) {
origin.lat = parseFloat(store_address.latitude);
origin.lng = parseFloat(store_address.longitude);
}
let pointlength = 0;
if (type === "multiple") | else {
this.markerPositions.push(point);
pointlength = point.length - 2;
this.origin = new this.google.maps.LatLng(origin.lat, origin.lng);
this.orignmarker = this.origin;
}
let dest = { lat: null, lng: null };
dest.lat = point[point.length - 1].order.address.latitude;
dest.lng = point[point.length - 1].order.address.longitude;
this.destination = new this.google.maps.LatLng(dest.lat, dest.lng);
this.destmarker = this.dest;
let wayPoints = [];
const filteredpoints = point.filter(({ order }, key) => {
return order.address.latitude !== null;
});
filteredpoints.map(({ order }, key) =>
wayPoints.push({
// location: new this.google.maps.LatLng(data.lat, data.lng),
location: {
lat: parseFloat(order.address.latitude),
lng: parseFloat(order.address.longitude),
},
stopover: false,
})
);
return wayPoints;
}
static getDerivedStateFromProps(props, state) {
if (props.routelist !== null) {
if (props.routelist.deliveries.length > 0) {
if (state.ordercaltime.length > 0) {
if (state.ordercaltime.length === props.routelist.deliveries.length) {
console.log(state.ordercaltime.length);
}
}
}
} else {
return {
directions: [],
markerPositions: [],
routes: null,
};
}
return state;
}
sendOrderRouteDistanceAndTime = (orderlist) => {
console.log("send");
console.log([...orderlist]);
console.log(JSON.stringify([...orderlist]));
};
componentDidUpdate(prevProps, prevState) {
if (this.props.routelist !== prevProps.routelist) {
// await this.getCalcultedTimeOfWaypoints(this.props.routelist.deliveries, this.props.routelist.store_address);
// // prevProps
if (this.props.routelist && this.props.routelist.deliveries.length > 0) {
this.setState({
routes: this.props.routelist.deliveries,
ordercaltime: [],
});
let wayPoints = [];
let routelist;
// let wayPointsmax = 25;
routelist = this.props.routelist.deliveries;
let store_address = this.props.routelist.store_address;
// this.getCalcultedTimeOfWaypoints(
// this.props.routelist.deliveries,
// this.props.routelist.store_address
// );
let chunkarray = _.chunk(routelist, 24);
for (let i = 0; i < chunkarray.length; i++) {
if (i !== 0) {
let firstchunkitem = chunkarray[i][0];
chunkarray[i - 1].push(firstchunkitem);
}
}
this.polylinescoords = [];
this.origin = null;
this.destination = null;
this.markerPositions = [];
if (this.state.directions.length > 0) {
this.setState({
directions: [],
linescoords: [],
});
}
if (chunkarray.length > 1) {
for (let i = 0; i < chunkarray.length; i++) {
let lat = chunkarray[0][0].latitude;
let lng = chunkarray[0][0].longitude;
this.firstOrigin = {
lat: lat,
lng: lng,
};
let filteredchunk = chunkarray[i].filter(
({ order }) => order.latitude !== 0 || order.langitude !== null
);
if (filteredchunk.length > 0) {
let type = "multiple";
wayPoints = this.makewayPoints(
filteredchunk,
type,
store_address
);
this.setDirections(wayPoints);
}
}
} else {
let filteredchunk = chunkarray[0].filter((order) => order.lat !== 0);
wayPoints = this.makewayPoints(
filteredchunk,
routelist,
store_address
);
this.setDirections(wayPoints);
}
// this.sendOrderRouteDistanceAndTime(this.state.ordercaltime);
}
}
}
// shouldComponentUpdate(nextProps, nextState) {
// // if (nextProps.routelist !== this.props.routelist) {
// // return true;
// // } else {
// // return false;
// // }
// }
showDirectionRendrer = () => {
const alternatingColor = ["#FFFF00", "#0000fd"];
const strokeColor = ["#ff9900", "#6a0dad"];
return this.state.directions.map((direcrray, key) => (
<DirectionsRenderer
key={key}
places={this.state.markers}
directions={direcrray}
options={{
polylineOptions: {
icons: [
{
color: "#00ff00",
icon: {
// strokeColor:strokeColor[key],
path: this.google.maps.SymbolPath.FORWARD_CLOSED_ARROW,
},
offset: "100%",
scaledSize: new this.google.maps.Size(2, 2),
repeat: "100px",
},
],
// strokeColor:strokeColor[key],
// strokeColor: '#0000fd',
// strokeOpacity: 0.9,
// strokeWeight: `${key + 2}`,
strokeWeight: 3.5,
},
suppressMarkers: true,
markerOptions: {},
}}
></DirectionsRenderer>
));
};
renderMarkers = () => {
const markerColors = { green: "#008000", red: "#FF0000" };
return (
this.markerPositions &&
this.state.directions &&
[].concat.apply([], this.markerPositions).map(({ order }, i) => (
<Marker
animation={`BOUNCE`}
key={i}
label={{
// color: this.BLACK,
fontWeight: "bold",
text: `${i + 1}`,
}}
position={
new this.google.maps.LatLng(
order.address.latitude,
order.address.longitude
)
}
onClick={(e) => {
this.setState({
selectedOrder: order,
});
}}
// icon={{
// url: `http://maps.google.com/mapfiles/ms/icons/green.png`,
// color:
// order.order_status === ORDER_DELIVERED
// ? markerColors.green
// : markerColors.red,
// }}
></Marker>
))
);
};
renderInfoWindow = () => {
let order = null;
if (this.state.selectedOrder) {
order = this.state.selectedOrder;
}
return order ? (
<InfoWindow
key={order.order_id + order.order_number}
position={
new this.google.maps.LatLng(
order.address.latitude,
order.address.longitude
)
}
onCloseClick={() => {
this.setState({ selectedOrder: null });
}}
>
<div
style={{
fontSize: "11px",
background: `white`,
borderRadius: null,
padding: 15,
}}
id={order.order_id + order.order_number}
key={order.order_id + order.order_number}
>
<h5>Order Id: {order.order_id}</h5>
{order.items.map((item, key) => (
<div key={key}>
<strong>Products Name:</strong>
{item.product_name.en}
<br />
<strong>Product Quantitity:</strong> {item.quantity}
</div>
))}
<div>
<strong>Delivery Resource Name:</strong> {order.customer.name}
<br />
<strong>Phone Number:</strong> {order.customer.phone}
<br />
<strong>Coordinates:</strong>{" "}
{`${order.address.latitude} , ${order.address.longitude}`}
<br />
<strong>Location:</strong> {order.address.address_detail}
</div>
</div>
</InfoWindow>
) : null;
};
renderOriginMarker = () => {
return (
<Marker
position={this.orignmarker}
icon={{
url: HomeIcon,
// color: '#ff0000',
scaledSize: new this.google.maps.Size(25, 25),
offset: "100%",
}}
></Marker>
);
};
renderPolyLines = () => {
return (
<Polyline
path={this.state.linescoords ? this.state.linescoords[0] : []}
geodesic={true}
options={{
icons: [
{
color: "#00ff00",
icon: {
path: this.google.maps.SymbolPath.FORWARD_CLOSED_ARROW,
},
offset: "100%",
scaledSize: new this.google.maps.Size(2, 2),
repeat: "100px",
},
],
clickable: true,
strokeColor: "#FF5733",
strokeOpacity: 1,
strokeWeight: 1,
}}
/>
);
};
render() {
return (
<React.Fragment>
{" "}
<GoogleMap
style={{
position: "absolute",
top: "0",
left: "0",
right: "0",
bottom: "0",
}}
key={this.state.routes ? this.state.routes : null}
defaultZoom={11}
mapContainerStyle={{
height: "100vh",
width: "100%",
}}
defaultCenter={
this.state.routes
? new this.google.maps.LatLng(
this.state.routes[0].order.address.latitude,
this.state.routes[0].order.address.longitude
)
: new this.google.maps.LatLng(23.8859, 45.0792)
}
>
{/*this.renderGoogleMap()*/}
{this.renderInfoWindow()}
{this.renderMarkers()}
{/*this.renderPolyLines*/}
{this.showDirectionRendrer()}
{this.renderOriginMarker()}
<TrafficLayer autoUpdate />
</GoogleMap>
</React.Fragment>
);
}
}
export default compose(withScriptjs, withGoogleMap)(Map);
| {
let firstpoint = point[0];
if (this.markerPositions.length > 0) {
this.markerPositions.push(point);
} else {
this.markerPositions.push(point);
}
if (this.destination) {
pointlength = point.length - 2;
pointlength = point.length;
} else {
pointlength = point.length - 1;
}
if (this.origin) {
this.origin = this.destination;
} else {
this.origin = new this.google.maps.LatLng(origin.lat, origin.lng);
this.orignmarker = this.origin;
}
} | conditional_block |
db.go | //go:build linux && amd64 && rocksdballowed
// +build linux,amd64,rocksdballowed
// ^ Only build this file if this computer linux AND it's AMD64 AND rocksdb is allowed
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package rocksdb
import (
"bytes"
"errors"
"os"
"runtime"
"sync"
"sync/atomic"
"github.com/linxGnu/grocksdb"
"github.com/ava-labs/avalanchego/database"
"github.com/ava-labs/avalanchego/database/nodb"
"github.com/ava-labs/avalanchego/utils"
"github.com/ava-labs/avalanchego/utils/logging"
"github.com/ava-labs/avalanchego/utils/perms"
"github.com/ava-labs/avalanchego/utils/units"
)
const (
MemoryBudget = 512 * units.MiB // 512 MiB
BitsPerKey = 10 // 10 bits
BlockCacheSize = 12 * units.MiB // 12 MiB
BlockSize = 8 * units.KiB // 8 KiB
// rocksDBByteOverhead is the number of bytes of constant overhead that
// should be added to a batch size per operation.
rocksDBByteOverhead = 8
)
var (
errFailedToCreateIterator = errors.New("failed to create iterator")
_ database.Database = &Database{}
_ database.Batch = &batch{}
_ database.Iterator = &iterator{}
)
// Database is a persistent key-value store. Apart from basic data storage
// functionality it also supports batch writes and iterating over the keyspace
// in binary-alphabetical order.
type Database struct {
lock sync.RWMutex
db *grocksdb.DB
readOptions *grocksdb.ReadOptions
iteratorOptions *grocksdb.ReadOptions
writeOptions *grocksdb.WriteOptions
log logging.Logger
// 1 if there was previously an error other than "not found" or "closed"
// while performing a db operation. If [errored] == 1, Has, Get, Put,
// Delete and batch writes fail with ErrAvoidCorruption.
errored uint64
}
// New returns a wrapped RocksDB object.
// TODO: use configBytes to config the database options
func New(file string, configBytes []byte, log logging.Logger) (database.Database, error) {
filter := grocksdb.NewBloomFilter(BitsPerKey)
blockOptions := grocksdb.NewDefaultBlockBasedTableOptions()
blockOptions.SetBlockCache(grocksdb.NewLRUCache(BlockCacheSize))
blockOptions.SetBlockSize(BlockSize)
blockOptions.SetFilterPolicy(filter)
options := grocksdb.NewDefaultOptions()
options.SetCreateIfMissing(true)
options.OptimizeUniversalStyleCompaction(MemoryBudget)
options.SetBlockBasedTableFactory(blockOptions)
if err := os.MkdirAll(file, perms.ReadWriteExecute); err != nil {
return nil, err
}
db, err := grocksdb.OpenDb(options, file)
if err != nil |
iteratorOptions := grocksdb.NewDefaultReadOptions()
iteratorOptions.SetFillCache(false)
return &Database{
db: db,
readOptions: grocksdb.NewDefaultReadOptions(),
iteratorOptions: iteratorOptions,
writeOptions: grocksdb.NewDefaultWriteOptions(),
log: log,
}, nil
}
// Has returns if the key is set in the database
func (db *Database) Has(key []byte) (bool, error) {
_, err := db.Get(key)
switch err {
case nil:
return true, nil
case database.ErrNotFound:
return false, nil
default:
return false, err
}
}
// Get returns the value the key maps to in the database
func (db *Database) Get(key []byte) ([]byte, error) {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return nil, database.ErrClosed
case db.corrupted():
return nil, database.ErrAvoidCorruption
}
value, err := db.db.GetBytes(db.readOptions, key)
if err != nil {
atomic.StoreUint64(&db.errored, 1)
return nil, err
}
if value != nil {
return value, nil
}
return nil, database.ErrNotFound
}
// Put sets the value of the provided key to the provided value
func (db *Database) Put(key []byte, value []byte) error {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return database.ErrClosed
case db.corrupted():
return database.ErrAvoidCorruption
}
err := db.db.Put(db.writeOptions, key, value)
if err != nil {
atomic.StoreUint64(&db.errored, 1)
}
return err
}
// Delete removes the key from the database
func (db *Database) Delete(key []byte) error {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return database.ErrClosed
case db.corrupted():
return database.ErrAvoidCorruption
}
err := db.db.Delete(db.writeOptions, key)
if err != nil {
atomic.StoreUint64(&db.errored, 1)
}
return err
}
// NewBatch creates a write/delete-only buffer that is atomically committed to
// the database when write is called
func (db *Database) NewBatch() database.Batch {
b := grocksdb.NewWriteBatch()
runtime.SetFinalizer(b, func(b *grocksdb.WriteBatch) {
b.Destroy()
})
return &batch{
batch: b,
db: db,
}
}
// Inner returns itself
func (b *batch) Inner() database.Batch { return b }
// NewIterator creates a lexicographically ordered iterator over the database
func (db *Database) NewIterator() database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
it.Seek(nil)
return &iterator{
it: it,
db: db,
}
}
// NewIteratorWithStart creates a lexicographically ordered iterator over the
// database starting at the provided key
func (db *Database) NewIteratorWithStart(start []byte) database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
it.Seek(start)
return &iterator{
it: it,
db: db,
}
}
// NewIteratorWithPrefix creates a lexicographically ordered iterator over the
// database ignoring keys that do not start with the provided prefix
func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
it.Seek(prefix)
return &iterator{
it: it,
db: db,
prefix: prefix,
}
}
// NewIteratorWithStartAndPrefix creates a lexicographically ordered iterator
// over the database starting at start and ignoring keys that do not start with
// the provided prefix
func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
if bytes.Compare(start, prefix) == 1 {
it.Seek(start)
} else {
it.Seek(prefix)
}
return &iterator{
it: it,
db: db,
prefix: prefix,
}
}
// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
return "", database.ErrNotFound
}
// Compact the underlying DB for the given key range.
// Specifically, deleted and overwritten versions are discarded,
// and the data is rearranged to reduce the cost of operations
// needed to access the data. This operation should typically only
// be invoked by users who understand the underlying implementation.
//
// A nil start is treated as a key before all keys in the DB.
// And a nil limit is treated as a key after all keys in the DB.
// Therefore if both are nil then it will compact entire DB.
func (db *Database) Compact(start []byte, limit []byte) error {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return database.ErrClosed
case db.corrupted():
return database.ErrAvoidCorruption
}
db.db.CompactRange(grocksdb.Range{Start: start, Limit: limit})
return nil
}
// Close implements the Database interface
func (db *Database) Close() error {
db.lock.Lock()
defer db.lock.Unlock()
if db.db == nil {
return database.ErrClosed
}
db.readOptions.Destroy()
db.iteratorOptions.Destroy()
db.writeOptions.Destroy()
db.db.Close()
db.db = nil
return nil
}
func (db *Database) corrupted() bool {
return atomic.LoadUint64(&db.errored) == 1
}
// batch is a wrapper around a levelDB batch to contain sizes.
type batch struct {
batch *grocksdb.WriteBatch
db *Database
size int
}
// Put the value into the batch for later writing
func (b *batch) Put(key, value []byte) error {
b.batch.Put(key, value)
b.size += len(key) + len(value) + rocksDBByteOverhead
return nil
}
// Delete the key during writing
func (b *batch) Delete(key []byte) error {
b.batch.Delete(key)
b.size += len(key) + rocksDBByteOverhead
return nil
}
// Size retrieves the amount of data queued up for writing.
func (b *batch) Size() int { return b.size }
// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
b.db.lock.RLock()
defer b.db.lock.RUnlock()
switch {
case b.db.db == nil:
return database.ErrClosed
case b.db.corrupted():
return database.ErrAvoidCorruption
}
return b.db.db.Write(b.db.writeOptions, b.batch)
}
// Reset resets the batch for reuse.
func (b *batch) Reset() {
b.batch.Clear()
b.size = 0
}
// Replay the batch contents.
func (b *batch) Replay(w database.KeyValueWriter) error {
it := b.batch.NewIterator()
for it.Next() {
rec := it.Record()
switch rec.Type {
case
grocksdb.WriteBatchDeletionRecord,
grocksdb.WriteBatchSingleDeletionRecord:
if err := w.Delete(rec.Key); err != nil {
return err
}
case grocksdb.WriteBatchValueRecord:
if err := w.Put(rec.Key, rec.Value); err != nil {
return err
}
}
}
return nil
}
type iterator struct {
it *grocksdb.Iterator
db *Database
prefix []byte
started bool
key []byte
value []byte
}
// Error implements the Iterator interface
func (it *iterator) Error() error { return it.it.Err() }
// Key implements the Iterator interface
func (it *iterator) Key() []byte {
return utils.CopyBytes(it.key)
}
// Value implements the Iterator interface
func (it *iterator) Value() []byte {
return utils.CopyBytes(it.value)
}
func (it *iterator) Release() {
it.db.lock.RLock()
defer it.db.lock.RUnlock()
if it.db.db != nil {
it.it.Close()
}
}
func (it *iterator) Next() bool {
if it.started {
it.it.Next()
}
it.started = true
if valid := it.it.Valid(); !valid {
it.key = nil
it.value = nil
return false
}
it.key = it.it.Key().Data()
it.value = it.it.Value().Data()
if !bytes.HasPrefix(it.key, it.prefix) {
it.key = nil
it.value = nil
return false
}
return true
}
| {
return nil, err
} | conditional_block |
db.go | //go:build linux && amd64 && rocksdballowed
// +build linux,amd64,rocksdballowed
// ^ Only build this file if this computer linux AND it's AMD64 AND rocksdb is allowed
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package rocksdb
import (
"bytes"
"errors"
"os"
"runtime"
"sync"
"sync/atomic"
"github.com/linxGnu/grocksdb"
"github.com/ava-labs/avalanchego/database"
"github.com/ava-labs/avalanchego/database/nodb"
"github.com/ava-labs/avalanchego/utils"
"github.com/ava-labs/avalanchego/utils/logging"
"github.com/ava-labs/avalanchego/utils/perms"
"github.com/ava-labs/avalanchego/utils/units"
)
const (
MemoryBudget = 512 * units.MiB // 512 MiB
BitsPerKey = 10 // 10 bits
BlockCacheSize = 12 * units.MiB // 12 MiB
BlockSize = 8 * units.KiB // 8 KiB
// rocksDBByteOverhead is the number of bytes of constant overhead that
// should be added to a batch size per operation.
rocksDBByteOverhead = 8
)
var (
errFailedToCreateIterator = errors.New("failed to create iterator")
_ database.Database = &Database{}
_ database.Batch = &batch{}
_ database.Iterator = &iterator{}
)
// Database is a persistent key-value store. Apart from basic data storage
// functionality it also supports batch writes and iterating over the keyspace
// in binary-alphabetical order.
type Database struct {
lock sync.RWMutex
db *grocksdb.DB
readOptions *grocksdb.ReadOptions
iteratorOptions *grocksdb.ReadOptions
writeOptions *grocksdb.WriteOptions
log logging.Logger
// 1 if there was previously an error other than "not found" or "closed"
// while performing a db operation. If [errored] == 1, Has, Get, Put,
// Delete and batch writes fail with ErrAvoidCorruption.
errored uint64
}
// New returns a wrapped RocksDB object.
// TODO: use configBytes to config the database options
func New(file string, configBytes []byte, log logging.Logger) (database.Database, error) {
filter := grocksdb.NewBloomFilter(BitsPerKey)
blockOptions := grocksdb.NewDefaultBlockBasedTableOptions()
blockOptions.SetBlockCache(grocksdb.NewLRUCache(BlockCacheSize))
blockOptions.SetBlockSize(BlockSize)
blockOptions.SetFilterPolicy(filter)
options := grocksdb.NewDefaultOptions()
options.SetCreateIfMissing(true)
options.OptimizeUniversalStyleCompaction(MemoryBudget)
options.SetBlockBasedTableFactory(blockOptions)
if err := os.MkdirAll(file, perms.ReadWriteExecute); err != nil {
return nil, err
}
db, err := grocksdb.OpenDb(options, file)
if err != nil {
return nil, err
}
iteratorOptions := grocksdb.NewDefaultReadOptions()
iteratorOptions.SetFillCache(false)
return &Database{
db: db,
readOptions: grocksdb.NewDefaultReadOptions(),
iteratorOptions: iteratorOptions,
writeOptions: grocksdb.NewDefaultWriteOptions(),
log: log,
}, nil
}
// Has returns if the key is set in the database
func (db *Database) Has(key []byte) (bool, error) {
_, err := db.Get(key)
switch err {
case nil:
return true, nil
case database.ErrNotFound:
return false, nil
default:
return false, err
}
}
// Get returns the value the key maps to in the database
func (db *Database) Get(key []byte) ([]byte, error) {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return nil, database.ErrClosed
case db.corrupted():
return nil, database.ErrAvoidCorruption
}
value, err := db.db.GetBytes(db.readOptions, key)
if err != nil {
atomic.StoreUint64(&db.errored, 1)
return nil, err
}
if value != nil {
return value, nil
}
return nil, database.ErrNotFound
}
// Put sets the value of the provided key to the provided value
func (db *Database) Put(key []byte, value []byte) error {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return database.ErrClosed
case db.corrupted():
return database.ErrAvoidCorruption
}
err := db.db.Put(db.writeOptions, key, value)
if err != nil {
atomic.StoreUint64(&db.errored, 1)
}
return err
}
// Delete removes the key from the database
func (db *Database) Delete(key []byte) error {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return database.ErrClosed
case db.corrupted():
return database.ErrAvoidCorruption
}
err := db.db.Delete(db.writeOptions, key)
if err != nil {
atomic.StoreUint64(&db.errored, 1)
}
return err
}
// NewBatch creates a write/delete-only buffer that is atomically committed to
// the database when write is called
func (db *Database) NewBatch() database.Batch {
b := grocksdb.NewWriteBatch()
runtime.SetFinalizer(b, func(b *grocksdb.WriteBatch) {
b.Destroy()
})
return &batch{
batch: b,
db: db,
}
}
// Inner returns itself
func (b *batch) Inner() database.Batch { return b }
// NewIterator creates a lexicographically ordered iterator over the database
func (db *Database) NewIterator() database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
it.Seek(nil)
return &iterator{
it: it,
db: db,
}
}
// NewIteratorWithStart creates a lexicographically ordered iterator over the
// database starting at the provided key
func (db *Database) NewIteratorWithStart(start []byte) database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
it.Seek(start)
return &iterator{
it: it,
db: db,
}
}
// NewIteratorWithPrefix creates a lexicographically ordered iterator over the
// database ignoring keys that do not start with the provided prefix
func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
it.Seek(prefix)
return &iterator{
it: it,
db: db,
prefix: prefix,
}
}
// NewIteratorWithStartAndPrefix creates a lexicographically ordered iterator
// over the database starting at start and ignoring keys that do not start with
// the provided prefix
func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
if bytes.Compare(start, prefix) == 1 {
it.Seek(start)
} else {
it.Seek(prefix)
}
return &iterator{
it: it,
db: db,
prefix: prefix,
}
}
// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
return "", database.ErrNotFound
}
// Compact the underlying DB for the given key range.
// Specifically, deleted and overwritten versions are discarded,
// and the data is rearranged to reduce the cost of operations
// needed to access the data. This operation should typically only
// be invoked by users who understand the underlying implementation.
//
// A nil start is treated as a key before all keys in the DB.
// And a nil limit is treated as a key after all keys in the DB.
// Therefore if both are nil then it will compact entire DB.
func (db *Database) Compact(start []byte, limit []byte) error {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return database.ErrClosed
case db.corrupted():
return database.ErrAvoidCorruption
}
db.db.CompactRange(grocksdb.Range{Start: start, Limit: limit})
return nil
}
// Close implements the Database interface
func (db *Database) Close() error {
db.lock.Lock()
defer db.lock.Unlock()
if db.db == nil {
return database.ErrClosed
}
db.readOptions.Destroy()
db.iteratorOptions.Destroy()
db.writeOptions.Destroy()
db.db.Close()
db.db = nil
return nil
}
func (db *Database) corrupted() bool {
return atomic.LoadUint64(&db.errored) == 1
}
// batch is a wrapper around a levelDB batch to contain sizes.
type batch struct {
batch *grocksdb.WriteBatch
db *Database
size int
}
// Put the value into the batch for later writing
func (b *batch) Put(key, value []byte) error {
b.batch.Put(key, value)
b.size += len(key) + len(value) + rocksDBByteOverhead
return nil
}
// Delete the key during writing
func (b *batch) Delete(key []byte) error {
b.batch.Delete(key)
b.size += len(key) + rocksDBByteOverhead
return nil
}
// Size retrieves the amount of data queued up for writing.
func (b *batch) Size() int { return b.size }
// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
b.db.lock.RLock()
defer b.db.lock.RUnlock()
switch {
case b.db.db == nil:
return database.ErrClosed
case b.db.corrupted():
return database.ErrAvoidCorruption
}
return b.db.db.Write(b.db.writeOptions, b.batch)
}
// Reset resets the batch for reuse.
func (b *batch) Reset() {
b.batch.Clear()
b.size = 0
}
// Replay the batch contents.
func (b *batch) Replay(w database.KeyValueWriter) error {
it := b.batch.NewIterator()
for it.Next() {
rec := it.Record()
switch rec.Type {
case
grocksdb.WriteBatchDeletionRecord,
grocksdb.WriteBatchSingleDeletionRecord:
if err := w.Delete(rec.Key); err != nil {
return err
}
case grocksdb.WriteBatchValueRecord:
if err := w.Put(rec.Key, rec.Value); err != nil {
return err
}
}
}
return nil
}
type iterator struct {
it *grocksdb.Iterator
db *Database
prefix []byte
started bool
key []byte
value []byte
}
// Error implements the Iterator interface
func (it *iterator) Error() error |
// Key implements the Iterator interface
func (it *iterator) Key() []byte {
return utils.CopyBytes(it.key)
}
// Value implements the Iterator interface
func (it *iterator) Value() []byte {
return utils.CopyBytes(it.value)
}
func (it *iterator) Release() {
it.db.lock.RLock()
defer it.db.lock.RUnlock()
if it.db.db != nil {
it.it.Close()
}
}
func (it *iterator) Next() bool {
if it.started {
it.it.Next()
}
it.started = true
if valid := it.it.Valid(); !valid {
it.key = nil
it.value = nil
return false
}
it.key = it.it.Key().Data()
it.value = it.it.Value().Data()
if !bytes.HasPrefix(it.key, it.prefix) {
it.key = nil
it.value = nil
return false
}
return true
}
| { return it.it.Err() } | identifier_body |
db.go | //go:build linux && amd64 && rocksdballowed
// +build linux,amd64,rocksdballowed
// ^ Only build this file if this computer linux AND it's AMD64 AND rocksdb is allowed
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package rocksdb
import (
"bytes"
"errors"
"os"
"runtime"
"sync"
"sync/atomic"
"github.com/linxGnu/grocksdb"
"github.com/ava-labs/avalanchego/database"
"github.com/ava-labs/avalanchego/database/nodb"
"github.com/ava-labs/avalanchego/utils"
"github.com/ava-labs/avalanchego/utils/logging"
"github.com/ava-labs/avalanchego/utils/perms"
"github.com/ava-labs/avalanchego/utils/units"
)
const (
MemoryBudget = 512 * units.MiB // 512 MiB
BitsPerKey = 10 // 10 bits
BlockCacheSize = 12 * units.MiB // 12 MiB
BlockSize = 8 * units.KiB // 8 KiB
// rocksDBByteOverhead is the number of bytes of constant overhead that
// should be added to a batch size per operation.
rocksDBByteOverhead = 8
)
var (
errFailedToCreateIterator = errors.New("failed to create iterator")
_ database.Database = &Database{}
_ database.Batch = &batch{}
_ database.Iterator = &iterator{}
)
// Database is a persistent key-value store. Apart from basic data storage
// functionality it also supports batch writes and iterating over the keyspace
// in binary-alphabetical order.
type Database struct {
lock sync.RWMutex
db *grocksdb.DB
readOptions *grocksdb.ReadOptions
iteratorOptions *grocksdb.ReadOptions
writeOptions *grocksdb.WriteOptions
log logging.Logger
// 1 if there was previously an error other than "not found" or "closed"
// while performing a db operation. If [errored] == 1, Has, Get, Put,
// Delete and batch writes fail with ErrAvoidCorruption.
errored uint64
}
// New returns a wrapped RocksDB object.
// TODO: use configBytes to config the database options
func New(file string, configBytes []byte, log logging.Logger) (database.Database, error) {
filter := grocksdb.NewBloomFilter(BitsPerKey)
blockOptions := grocksdb.NewDefaultBlockBasedTableOptions()
blockOptions.SetBlockCache(grocksdb.NewLRUCache(BlockCacheSize))
blockOptions.SetBlockSize(BlockSize)
blockOptions.SetFilterPolicy(filter)
options := grocksdb.NewDefaultOptions()
options.SetCreateIfMissing(true)
options.OptimizeUniversalStyleCompaction(MemoryBudget)
options.SetBlockBasedTableFactory(blockOptions)
if err := os.MkdirAll(file, perms.ReadWriteExecute); err != nil {
return nil, err
}
db, err := grocksdb.OpenDb(options, file)
if err != nil {
return nil, err
}
iteratorOptions := grocksdb.NewDefaultReadOptions()
iteratorOptions.SetFillCache(false)
return &Database{
db: db,
readOptions: grocksdb.NewDefaultReadOptions(),
iteratorOptions: iteratorOptions,
writeOptions: grocksdb.NewDefaultWriteOptions(),
log: log,
}, nil
}
// Has returns if the key is set in the database
func (db *Database) Has(key []byte) (bool, error) {
_, err := db.Get(key)
switch err {
case nil:
return true, nil
case database.ErrNotFound:
return false, nil
default:
return false, err
}
}
// Get returns the value the key maps to in the database
func (db *Database) Get(key []byte) ([]byte, error) {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return nil, database.ErrClosed
case db.corrupted():
return nil, database.ErrAvoidCorruption
}
value, err := db.db.GetBytes(db.readOptions, key)
if err != nil {
atomic.StoreUint64(&db.errored, 1)
return nil, err
}
if value != nil {
return value, nil
}
return nil, database.ErrNotFound
}
// Put sets the value of the provided key to the provided value
func (db *Database) Put(key []byte, value []byte) error {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return database.ErrClosed
case db.corrupted():
return database.ErrAvoidCorruption
}
err := db.db.Put(db.writeOptions, key, value)
if err != nil {
atomic.StoreUint64(&db.errored, 1)
}
return err
}
// Delete removes the key from the database
func (db *Database) Delete(key []byte) error {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return database.ErrClosed
case db.corrupted():
return database.ErrAvoidCorruption
}
err := db.db.Delete(db.writeOptions, key)
if err != nil {
atomic.StoreUint64(&db.errored, 1)
}
return err
}
// NewBatch creates a write/delete-only buffer that is atomically committed to
// the database when write is called
func (db *Database) NewBatch() database.Batch {
b := grocksdb.NewWriteBatch()
runtime.SetFinalizer(b, func(b *grocksdb.WriteBatch) {
b.Destroy()
})
return &batch{
batch: b,
db: db,
}
}
// Inner returns itself
func (b *batch) Inner() database.Batch { return b }
// NewIterator creates a lexicographically ordered iterator over the database
func (db *Database) NewIterator() database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
it.Seek(nil)
return &iterator{
it: it,
db: db,
}
}
// NewIteratorWithStart creates a lexicographically ordered iterator over the
// database starting at the provided key
func (db *Database) NewIteratorWithStart(start []byte) database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
it.Seek(start)
return &iterator{
it: it,
db: db,
}
}
// NewIteratorWithPrefix creates a lexicographically ordered iterator over the
// database ignoring keys that do not start with the provided prefix
func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
it.Seek(prefix)
return &iterator{
it: it,
db: db,
prefix: prefix,
}
}
// NewIteratorWithStartAndPrefix creates a lexicographically ordered iterator
// over the database starting at start and ignoring keys that do not start with
// the provided prefix
func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
if bytes.Compare(start, prefix) == 1 {
it.Seek(start)
} else {
it.Seek(prefix)
}
return &iterator{
it: it,
db: db,
prefix: prefix,
}
}
// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
return "", database.ErrNotFound
}
// Compact the underlying DB for the given key range.
// Specifically, deleted and overwritten versions are discarded,
// and the data is rearranged to reduce the cost of operations
// needed to access the data. This operation should typically only
// be invoked by users who understand the underlying implementation.
//
// A nil start is treated as a key before all keys in the DB.
// And a nil limit is treated as a key after all keys in the DB.
// Therefore if both are nil then it will compact entire DB.
func (db *Database) Compact(start []byte, limit []byte) error {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return database.ErrClosed
case db.corrupted():
return database.ErrAvoidCorruption
}
db.db.CompactRange(grocksdb.Range{Start: start, Limit: limit})
return nil
}
// Close implements the Database interface
func (db *Database) Close() error {
db.lock.Lock()
defer db.lock.Unlock()
if db.db == nil {
return database.ErrClosed
}
db.readOptions.Destroy()
db.iteratorOptions.Destroy()
db.writeOptions.Destroy()
db.db.Close()
db.db = nil
return nil
}
func (db *Database) corrupted() bool {
return atomic.LoadUint64(&db.errored) == 1
}
// batch is a wrapper around a levelDB batch to contain sizes.
type batch struct {
batch *grocksdb.WriteBatch
db *Database
size int
}
// Put the value into the batch for later writing
func (b *batch) Put(key, value []byte) error {
b.batch.Put(key, value)
b.size += len(key) + len(value) + rocksDBByteOverhead
return nil
}
// Delete the key during writing
func (b *batch) Delete(key []byte) error {
b.batch.Delete(key)
b.size += len(key) + rocksDBByteOverhead
return nil
}
// Size retrieves the amount of data queued up for writing.
func (b *batch) Size() int { return b.size }
// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
b.db.lock.RLock()
defer b.db.lock.RUnlock()
switch {
case b.db.db == nil:
return database.ErrClosed
case b.db.corrupted():
return database.ErrAvoidCorruption
}
return b.db.db.Write(b.db.writeOptions, b.batch)
}
// Reset resets the batch for reuse.
func (b *batch) Reset() {
b.batch.Clear()
b.size = 0
}
// Replay the batch contents.
func (b *batch) Replay(w database.KeyValueWriter) error {
it := b.batch.NewIterator() | switch rec.Type {
case
grocksdb.WriteBatchDeletionRecord,
grocksdb.WriteBatchSingleDeletionRecord:
if err := w.Delete(rec.Key); err != nil {
return err
}
case grocksdb.WriteBatchValueRecord:
if err := w.Put(rec.Key, rec.Value); err != nil {
return err
}
}
}
return nil
}
type iterator struct {
it *grocksdb.Iterator
db *Database
prefix []byte
started bool
key []byte
value []byte
}
// Error implements the Iterator interface
func (it *iterator) Error() error { return it.it.Err() }
// Key implements the Iterator interface
func (it *iterator) Key() []byte {
return utils.CopyBytes(it.key)
}
// Value implements the Iterator interface
func (it *iterator) Value() []byte {
return utils.CopyBytes(it.value)
}
func (it *iterator) Release() {
it.db.lock.RLock()
defer it.db.lock.RUnlock()
if it.db.db != nil {
it.it.Close()
}
}
func (it *iterator) Next() bool {
if it.started {
it.it.Next()
}
it.started = true
if valid := it.it.Valid(); !valid {
it.key = nil
it.value = nil
return false
}
it.key = it.it.Key().Data()
it.value = it.it.Value().Data()
if !bytes.HasPrefix(it.key, it.prefix) {
it.key = nil
it.value = nil
return false
}
return true
} | for it.Next() {
rec := it.Record() | random_line_split |
db.go | //go:build linux && amd64 && rocksdballowed
// +build linux,amd64,rocksdballowed
// ^ Only build this file if this computer linux AND it's AMD64 AND rocksdb is allowed
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package rocksdb
import (
"bytes"
"errors"
"os"
"runtime"
"sync"
"sync/atomic"
"github.com/linxGnu/grocksdb"
"github.com/ava-labs/avalanchego/database"
"github.com/ava-labs/avalanchego/database/nodb"
"github.com/ava-labs/avalanchego/utils"
"github.com/ava-labs/avalanchego/utils/logging"
"github.com/ava-labs/avalanchego/utils/perms"
"github.com/ava-labs/avalanchego/utils/units"
)
const (
MemoryBudget = 512 * units.MiB // 512 MiB
BitsPerKey = 10 // 10 bits
BlockCacheSize = 12 * units.MiB // 12 MiB
BlockSize = 8 * units.KiB // 8 KiB
// rocksDBByteOverhead is the number of bytes of constant overhead that
// should be added to a batch size per operation.
rocksDBByteOverhead = 8
)
var (
errFailedToCreateIterator = errors.New("failed to create iterator")
_ database.Database = &Database{}
_ database.Batch = &batch{}
_ database.Iterator = &iterator{}
)
// Database is a persistent key-value store. Apart from basic data storage
// functionality it also supports batch writes and iterating over the keyspace
// in binary-alphabetical order.
type Database struct {
lock sync.RWMutex
db *grocksdb.DB
readOptions *grocksdb.ReadOptions
iteratorOptions *grocksdb.ReadOptions
writeOptions *grocksdb.WriteOptions
log logging.Logger
// 1 if there was previously an error other than "not found" or "closed"
// while performing a db operation. If [errored] == 1, Has, Get, Put,
// Delete and batch writes fail with ErrAvoidCorruption.
errored uint64
}
// New returns a wrapped RocksDB object.
// TODO: use configBytes to config the database options
func New(file string, configBytes []byte, log logging.Logger) (database.Database, error) {
filter := grocksdb.NewBloomFilter(BitsPerKey)
blockOptions := grocksdb.NewDefaultBlockBasedTableOptions()
blockOptions.SetBlockCache(grocksdb.NewLRUCache(BlockCacheSize))
blockOptions.SetBlockSize(BlockSize)
blockOptions.SetFilterPolicy(filter)
options := grocksdb.NewDefaultOptions()
options.SetCreateIfMissing(true)
options.OptimizeUniversalStyleCompaction(MemoryBudget)
options.SetBlockBasedTableFactory(blockOptions)
if err := os.MkdirAll(file, perms.ReadWriteExecute); err != nil {
return nil, err
}
db, err := grocksdb.OpenDb(options, file)
if err != nil {
return nil, err
}
iteratorOptions := grocksdb.NewDefaultReadOptions()
iteratorOptions.SetFillCache(false)
return &Database{
db: db,
readOptions: grocksdb.NewDefaultReadOptions(),
iteratorOptions: iteratorOptions,
writeOptions: grocksdb.NewDefaultWriteOptions(),
log: log,
}, nil
}
// Has returns if the key is set in the database
func (db *Database) Has(key []byte) (bool, error) {
_, err := db.Get(key)
switch err {
case nil:
return true, nil
case database.ErrNotFound:
return false, nil
default:
return false, err
}
}
// Get returns the value the key maps to in the database
func (db *Database) Get(key []byte) ([]byte, error) {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return nil, database.ErrClosed
case db.corrupted():
return nil, database.ErrAvoidCorruption
}
value, err := db.db.GetBytes(db.readOptions, key)
if err != nil {
atomic.StoreUint64(&db.errored, 1)
return nil, err
}
if value != nil {
return value, nil
}
return nil, database.ErrNotFound
}
// Put sets the value of the provided key to the provided value
func (db *Database) Put(key []byte, value []byte) error {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return database.ErrClosed
case db.corrupted():
return database.ErrAvoidCorruption
}
err := db.db.Put(db.writeOptions, key, value)
if err != nil {
atomic.StoreUint64(&db.errored, 1)
}
return err
}
// Delete removes the key from the database
func (db *Database) Delete(key []byte) error {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return database.ErrClosed
case db.corrupted():
return database.ErrAvoidCorruption
}
err := db.db.Delete(db.writeOptions, key)
if err != nil {
atomic.StoreUint64(&db.errored, 1)
}
return err
}
// NewBatch creates a write/delete-only buffer that is atomically committed to
// the database when write is called
func (db *Database) NewBatch() database.Batch {
b := grocksdb.NewWriteBatch()
runtime.SetFinalizer(b, func(b *grocksdb.WriteBatch) {
b.Destroy()
})
return &batch{
batch: b,
db: db,
}
}
// Inner returns itself
func (b *batch) Inner() database.Batch { return b }
// NewIterator creates a lexicographically ordered iterator over the database
func (db *Database) NewIterator() database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
it.Seek(nil)
return &iterator{
it: it,
db: db,
}
}
// NewIteratorWithStart creates a lexicographically ordered iterator over the
// database starting at the provided key
func (db *Database) NewIteratorWithStart(start []byte) database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
it.Seek(start)
return &iterator{
it: it,
db: db,
}
}
// NewIteratorWithPrefix creates a lexicographically ordered iterator over the
// database ignoring keys that do not start with the provided prefix
func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
it.Seek(prefix)
return &iterator{
it: it,
db: db,
prefix: prefix,
}
}
// NewIteratorWithStartAndPrefix creates a lexicographically ordered iterator
// over the database starting at start and ignoring keys that do not start with
// the provided prefix
func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return &nodb.Iterator{Err: database.ErrClosed}
case db.corrupted():
return &nodb.Iterator{Err: database.ErrAvoidCorruption}
}
it := db.db.NewIterator(db.iteratorOptions)
if it == nil {
return &nodb.Iterator{Err: errFailedToCreateIterator}
}
if bytes.Compare(start, prefix) == 1 {
it.Seek(start)
} else {
it.Seek(prefix)
}
return &iterator{
it: it,
db: db,
prefix: prefix,
}
}
// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
return "", database.ErrNotFound
}
// Compact the underlying DB for the given key range.
// Specifically, deleted and overwritten versions are discarded,
// and the data is rearranged to reduce the cost of operations
// needed to access the data. This operation should typically only
// be invoked by users who understand the underlying implementation.
//
// A nil start is treated as a key before all keys in the DB.
// And a nil limit is treated as a key after all keys in the DB.
// Therefore if both are nil then it will compact entire DB.
func (db *Database) Compact(start []byte, limit []byte) error {
db.lock.RLock()
defer db.lock.RUnlock()
switch {
case db.db == nil:
return database.ErrClosed
case db.corrupted():
return database.ErrAvoidCorruption
}
db.db.CompactRange(grocksdb.Range{Start: start, Limit: limit})
return nil
}
// Close implements the Database interface
func (db *Database) Close() error {
db.lock.Lock()
defer db.lock.Unlock()
if db.db == nil {
return database.ErrClosed
}
db.readOptions.Destroy()
db.iteratorOptions.Destroy()
db.writeOptions.Destroy()
db.db.Close()
db.db = nil
return nil
}
func (db *Database) corrupted() bool {
return atomic.LoadUint64(&db.errored) == 1
}
// batch is a wrapper around a levelDB batch to contain sizes.
type batch struct {
batch *grocksdb.WriteBatch
db *Database
size int
}
// Put the value into the batch for later writing
func (b *batch) Put(key, value []byte) error {
b.batch.Put(key, value)
b.size += len(key) + len(value) + rocksDBByteOverhead
return nil
}
// Delete the key during writing
func (b *batch) Delete(key []byte) error {
b.batch.Delete(key)
b.size += len(key) + rocksDBByteOverhead
return nil
}
// Size retrieves the amount of data queued up for writing.
func (b *batch) Size() int { return b.size }
// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
b.db.lock.RLock()
defer b.db.lock.RUnlock()
switch {
case b.db.db == nil:
return database.ErrClosed
case b.db.corrupted():
return database.ErrAvoidCorruption
}
return b.db.db.Write(b.db.writeOptions, b.batch)
}
// Reset resets the batch for reuse.
func (b *batch) Reset() {
b.batch.Clear()
b.size = 0
}
// Replay the batch contents.
func (b *batch) | (w database.KeyValueWriter) error {
it := b.batch.NewIterator()
for it.Next() {
rec := it.Record()
switch rec.Type {
case
grocksdb.WriteBatchDeletionRecord,
grocksdb.WriteBatchSingleDeletionRecord:
if err := w.Delete(rec.Key); err != nil {
return err
}
case grocksdb.WriteBatchValueRecord:
if err := w.Put(rec.Key, rec.Value); err != nil {
return err
}
}
}
return nil
}
type iterator struct {
it *grocksdb.Iterator
db *Database
prefix []byte
started bool
key []byte
value []byte
}
// Error implements the Iterator interface
func (it *iterator) Error() error { return it.it.Err() }
// Key implements the Iterator interface
func (it *iterator) Key() []byte {
return utils.CopyBytes(it.key)
}
// Value implements the Iterator interface
func (it *iterator) Value() []byte {
return utils.CopyBytes(it.value)
}
func (it *iterator) Release() {
it.db.lock.RLock()
defer it.db.lock.RUnlock()
if it.db.db != nil {
it.it.Close()
}
}
func (it *iterator) Next() bool {
if it.started {
it.it.Next()
}
it.started = true
if valid := it.it.Valid(); !valid {
it.key = nil
it.value = nil
return false
}
it.key = it.it.Key().Data()
it.value = it.it.Value().Data()
if !bytes.HasPrefix(it.key, it.prefix) {
it.key = nil
it.value = nil
return false
}
return true
}
| Replay | identifier_name |
history.rs | use super::*;
use std::{
collections::{vec_deque, VecDeque},
fs::File,
io::{self, Write},
io::{BufRead, BufReader, BufWriter},
iter::IntoIterator,
ops::Index,
ops::IndexMut,
path::Path,
//time::Duration,
};
const DEFAULT_MAX_SIZE: usize = 1000;
/// Structure encapsulating command history
pub struct History {
// TODO: this should eventually be private
/// Vector of buffers to store history in
pub buffers: VecDeque<Buffer>,
/// Store a filename to save history into; if None don't save history
file_name: Option<String>,
/// Maximal number of buffers stored in the memory
/// TODO: just make this public?
max_buffers_size: usize,
/// Maximal number of lines stored in the file
// TODO: just make this public?
max_file_size: usize,
// TODO set from environment variable?
pub append_duplicate_entries: bool,
/// Append each entry to history file as entered?
pub inc_append: bool,
/// Share history across ion's with the same history file (combine with inc_append).
pub share: bool,
/// Last filesize of history file, used to optimize history sharing.
pub file_size: u64,
/// Allow loading duplicate entries, need to know this for loading history files.
pub load_duplicates: bool,
/// Writes between history compaction.
compaction_writes: usize,
}
impl Default for History {
fn default() -> Self {
Self::new()
}
}
impl History {
/// Create new History structure.
pub fn new() -> History {
History {
buffers: VecDeque::with_capacity(DEFAULT_MAX_SIZE),
file_name: None,
max_buffers_size: DEFAULT_MAX_SIZE,
max_file_size: DEFAULT_MAX_SIZE,
append_duplicate_entries: false,
inc_append: false,
share: false,
file_size: 0,
load_duplicates: true,
compaction_writes: 0,
}
}
/// Clears out the history.
pub fn clear_history(&mut self) {
self.buffers.clear();
}
/// Loads the history file from the saved path and appends it to the end of the history if append
/// is true otherwise replace history.
pub fn load_history(&mut self, append: bool) -> io::Result<u64> {
if let Some(path) = self.file_name.clone() {
let file_size = self.file_size;
self.load_history_file_test(&path, file_size, append)
.map(|l| {
self.file_size = l;
l
})
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"History filename not set!",
))
}
}
/// Loads the history file from path and appends it to the end of the history if append is true.
pub fn load_history_file<P: AsRef<Path>>(&mut self, path: P, append: bool) -> io::Result<u64> {
self.load_history_file_test(path, 0, append)
}
/// Loads the history file from path and appends it to the end of the history.f append is true
/// (replaces if false). Only loads if length is not equal to current file size.
fn load_history_file_test<P: AsRef<Path>>(
&mut self,
path: P,
length: u64,
append: bool,
) -> io::Result<u64> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let new_length = file.metadata()?.len();
if new_length == 0 && length == 0 && !append {
// Special case, trying to load nothing and not appending- just clear.
self.clear_history();
}
if new_length != length {
if !append {
self.clear_history();
}
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
self.buffers.push_back(Buffer::from(line));
}
}
Err(_) => break,
}
}
self.truncate();
if !self.load_duplicates {
let mut tmp_buffers: Vec<Buffer> = Vec::with_capacity(self.buffers.len());
// Remove duplicates from loaded history if we do not want it.
while let Some(buf) = self.buffers.pop_back() {
self.remove_duplicates(&buf.to_string()[..]);
tmp_buffers.push(buf);
}
while let Some(buf) = tmp_buffers.pop() {
self.buffers.push_back(buf);
}
}
}
Ok(new_length)
}
/// Removes duplicates and trims a history file to max_file_size.
/// Primarily if inc_append is set without shared history.
/// Static because it should have no side effects on a history object.
fn deduplicate_history_file<P: AsRef<Path>>(
path: P,
max_file_size: usize,
) -> io::Result<String> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let mut buf: VecDeque<String> = VecDeque::new();
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
buf.push_back(line);
}
}
Err(_) => break,
}
}
let org_length = buf.len();
if buf.len() >= max_file_size {
let pop_out = buf.len() - max_file_size;
for _ in 0..pop_out {
buf.pop_front();
}
}
let mut tmp_buffers: Vec<String> = Vec::with_capacity(buf.len());
// Remove duplicates from loaded history if we do not want it.
while let Some(line) = buf.pop_back() {
buf.retain(|buffer| *buffer != line);
tmp_buffers.push(line);
}
while let Some(line) = tmp_buffers.pop() {
buf.push_back(line);
}
if org_length != buf.len() {
// Overwrite the history file with the deduplicated version if it changed.
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in buf.into_iter() {
let _ = file.write_all(&command.as_bytes());
let _ = file.write_all(b"\n");
}
}
Ok("De-duplicated history file.".to_string())
}
/// Set history file name and at the same time load the history.
pub fn set_file_name_and_load_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<u64> {
let path = path.as_ref();
self.file_name = path.to_str().map(|s| s.to_owned());
self.file_size = 0;
if path.exists() {
self.load_history_file(path, false).map(|l| {
self.file_size = l;
l
})
} else {
File::create(path)?;
Ok(0)
}
}
/// Set maximal number of buffers stored in memory
pub fn set_max_buffers_size(&mut self, size: usize) {
self.max_buffers_size = size;
}
/// Set maximal number of entries in history file
pub fn set_max_file_size(&mut self, size: usize) {
self.max_file_size = size;
}
/// Number of items in history. | pub fn len(&self) -> usize {
self.buffers.len()
}
/// Is the history empty
pub fn is_empty(&self) -> bool {
self.buffers.is_empty()
}
/// Add a command to the history buffer and remove the oldest commands when the max history
/// size has been met. If writing to the disk is enabled, this function will be used for
/// logging history to the designated history file.
pub fn push(&mut self, new_item: Buffer) -> io::Result<()> {
// buffers[0] is the oldest entry
// the new entry goes to the end
if !self.append_duplicate_entries
&& self.buffers.back().map(|b| b.to_string()) == Some(new_item.to_string())
{
return Ok(());
}
let item_str = String::from(new_item.clone());
self.buffers.push_back(new_item);
//self.to_max_size();
while self.buffers.len() > self.max_buffers_size {
self.buffers.pop_front();
}
if self.inc_append && self.file_name.is_some() {
if !self.load_duplicates {
// Do not want duplicates so periodically compact the history file.
self.compaction_writes += 1;
// Every 30 writes "compact" the history file by writing just in memory history. This
// is to keep the history file clean and at a reasonable size (not much over max
// history size at it's worst).
if self.compaction_writes > 29 {
if self.share {
// Reload history, we may be out of sync.
let _ = self.load_history(false);
// Commit the duplicated history.
if let Some(file_name) = self.file_name.clone() {
let _ = self.overwrite_history(file_name);
}
} else {
// Not using shared history so just de-dup the file without messing with
// our history.
if let Some(file_name) = self.file_name.clone() {
let _ =
History::deduplicate_history_file(file_name, self.max_file_size);
}
}
self.compaction_writes = 0;
}
} else {
// If allowing duplicates then no need for compaction.
self.compaction_writes = 1;
}
let file_name = self.file_name.clone().unwrap();
if let Ok(inner_file) = std::fs::OpenOptions::new().append(true).open(&file_name) {
// Leave file size alone, if it is not right trigger a reload later.
if self.compaction_writes > 0 {
// If 0 we "compacted" and nothing to write.
let mut file = BufWriter::new(inner_file);
let _ = file.write_all(&item_str.as_bytes());
let _ = file.write_all(b"\n");
// Save the filesize after each append so we do not reload when we do not need to.
self.file_size += item_str.len() as u64 + 1;
}
}
}
Ok(())
}
/// Removes duplicate entries in the history
pub fn remove_duplicates(&mut self, input: &str) {
self.buffers.retain(|buffer| {
let command = buffer.lines().concat();
command != input
});
}
fn get_match<I>(&self, vals: I, search_term: &Buffer) -> Option<usize>
where
I: Iterator<Item = usize>,
{
vals.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.find(|(_i, tested)| tested.starts_with(search_term))
.map(|(i, _)| i)
}
/// Go through the history and try to find an index (newest to oldest) which starts the same
/// as the new buffer given to this function as argument. Starts at curr_position. Does no wrap.
pub fn get_newest_match(
&self,
curr_position: Option<usize>,
new_buff: &Buffer,
) -> Option<usize> {
let pos = curr_position.unwrap_or_else(|| self.buffers.len());
if pos > 0 {
self.get_match((0..pos).rev(), new_buff)
} else {
None
}
}
pub fn get_history_subset(&self, search_term: &Buffer) -> Vec<usize> {
let mut v: Vec<usize> = Vec::new();
let mut ret: Vec<usize> = (0..self.len())
.filter(|i| {
if let Some(tested) = self.buffers.get(*i) {
let starts = tested.starts_with(search_term);
let contains = tested.contains(search_term);
if starts {
v.push(*i);
}
contains && !starts && tested != search_term
} else {
false
}
})
.collect();
ret.append(&mut v);
ret
}
pub fn search_index(&self, search_term: &Buffer) -> Vec<usize> {
(0..self.len())
.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.filter(|(_i, tested)| tested.contains(search_term))
.map(|(i, _)| i)
.collect()
}
/// Get the history file name.
#[inline(always)]
pub fn file_name(&self) -> Option<&str> {
self.file_name.as_ref().map(|s| s.as_str())
}
fn truncate(&mut self) {
// Find how many lines we need to move backwards
// in the file to remove all the old commands.
if self.buffers.len() >= self.max_file_size {
let pop_out = self.buffers.len() - self.max_file_size;
for _ in 0..pop_out {
self.buffers.pop_front();
}
}
}
fn overwrite_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
self.truncate();
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in self.buffers.iter().cloned() {
let _ = file.write_all(&String::from(command).as_bytes());
let _ = file.write_all(b"\n");
}
Ok("Wrote history to file.".to_string())
}
pub fn commit_to_file_path<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
if self.inc_append {
Ok("Nothing to commit.".to_string())
} else {
self.overwrite_history(path)
}
}
pub fn commit_to_file(&mut self) {
if let Some(file_name) = self.file_name.clone() {
let _ = self.commit_to_file_path(file_name);
}
}
}
impl<'a> IntoIterator for &'a History {
type Item = &'a Buffer;
type IntoIter = vec_deque::Iter<'a, Buffer>;
fn into_iter(self) -> Self::IntoIter {
self.buffers.iter()
}
}
impl Index<usize> for History {
type Output = Buffer;
fn index(&self, index: usize) -> &Buffer {
&self.buffers[index]
}
}
impl IndexMut<usize> for History {
fn index_mut(&mut self, index: usize) -> &mut Buffer {
&mut self.buffers[index]
}
} | #[inline(always)] | random_line_split |
history.rs | use super::*;
use std::{
collections::{vec_deque, VecDeque},
fs::File,
io::{self, Write},
io::{BufRead, BufReader, BufWriter},
iter::IntoIterator,
ops::Index,
ops::IndexMut,
path::Path,
//time::Duration,
};
const DEFAULT_MAX_SIZE: usize = 1000;
/// Structure encapsulating command history
pub struct History {
// TODO: this should eventually be private
/// Vector of buffers to store history in
pub buffers: VecDeque<Buffer>,
/// Store a filename to save history into; if None don't save history
file_name: Option<String>,
/// Maximal number of buffers stored in the memory
/// TODO: just make this public?
max_buffers_size: usize,
/// Maximal number of lines stored in the file
// TODO: just make this public?
max_file_size: usize,
// TODO set from environment variable?
pub append_duplicate_entries: bool,
/// Append each entry to history file as entered?
pub inc_append: bool,
/// Share history across ion's with the same history file (combine with inc_append).
pub share: bool,
/// Last filesize of history file, used to optimize history sharing.
pub file_size: u64,
/// Allow loading duplicate entries, need to know this for loading history files.
pub load_duplicates: bool,
/// Writes between history compaction.
compaction_writes: usize,
}
impl Default for History {
fn default() -> Self {
Self::new()
}
}
impl History {
/// Create new History structure.
pub fn new() -> History {
History {
buffers: VecDeque::with_capacity(DEFAULT_MAX_SIZE),
file_name: None,
max_buffers_size: DEFAULT_MAX_SIZE,
max_file_size: DEFAULT_MAX_SIZE,
append_duplicate_entries: false,
inc_append: false,
share: false,
file_size: 0,
load_duplicates: true,
compaction_writes: 0,
}
}
/// Clears out the history.
pub fn clear_history(&mut self) {
self.buffers.clear();
}
/// Loads the history file from the saved path and appends it to the end of the history if append
/// is true otherwise replace history.
pub fn load_history(&mut self, append: bool) -> io::Result<u64> {
if let Some(path) = self.file_name.clone() {
let file_size = self.file_size;
self.load_history_file_test(&path, file_size, append)
.map(|l| {
self.file_size = l;
l
})
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"History filename not set!",
))
}
}
/// Loads the history file from path and appends it to the end of the history if append is true.
pub fn load_history_file<P: AsRef<Path>>(&mut self, path: P, append: bool) -> io::Result<u64> {
self.load_history_file_test(path, 0, append)
}
/// Loads the history file from path and appends it to the end of the history.f append is true
/// (replaces if false). Only loads if length is not equal to current file size.
fn load_history_file_test<P: AsRef<Path>>(
&mut self,
path: P,
length: u64,
append: bool,
) -> io::Result<u64> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let new_length = file.metadata()?.len();
if new_length == 0 && length == 0 && !append {
// Special case, trying to load nothing and not appending- just clear.
self.clear_history();
}
if new_length != length {
if !append {
self.clear_history();
}
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
self.buffers.push_back(Buffer::from(line));
}
}
Err(_) => break,
}
}
self.truncate();
if !self.load_duplicates {
let mut tmp_buffers: Vec<Buffer> = Vec::with_capacity(self.buffers.len());
// Remove duplicates from loaded history if we do not want it.
while let Some(buf) = self.buffers.pop_back() {
self.remove_duplicates(&buf.to_string()[..]);
tmp_buffers.push(buf);
}
while let Some(buf) = tmp_buffers.pop() {
self.buffers.push_back(buf);
}
}
}
Ok(new_length)
}
/// Removes duplicates and trims a history file to max_file_size.
/// Primarily if inc_append is set without shared history.
/// Static because it should have no side effects on a history object.
fn deduplicate_history_file<P: AsRef<Path>>(
path: P,
max_file_size: usize,
) -> io::Result<String> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let mut buf: VecDeque<String> = VecDeque::new();
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
buf.push_back(line);
}
}
Err(_) => break,
}
}
let org_length = buf.len();
if buf.len() >= max_file_size {
let pop_out = buf.len() - max_file_size;
for _ in 0..pop_out {
buf.pop_front();
}
}
let mut tmp_buffers: Vec<String> = Vec::with_capacity(buf.len());
// Remove duplicates from loaded history if we do not want it.
while let Some(line) = buf.pop_back() {
buf.retain(|buffer| *buffer != line);
tmp_buffers.push(line);
}
while let Some(line) = tmp_buffers.pop() {
buf.push_back(line);
}
if org_length != buf.len() {
// Overwrite the history file with the deduplicated version if it changed.
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in buf.into_iter() {
let _ = file.write_all(&command.as_bytes());
let _ = file.write_all(b"\n");
}
}
Ok("De-duplicated history file.".to_string())
}
/// Set history file name and at the same time load the history.
pub fn set_file_name_and_load_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<u64> {
let path = path.as_ref();
self.file_name = path.to_str().map(|s| s.to_owned());
self.file_size = 0;
if path.exists() {
self.load_history_file(path, false).map(|l| {
self.file_size = l;
l
})
} else {
File::create(path)?;
Ok(0)
}
}
/// Set maximal number of buffers stored in memory
pub fn set_max_buffers_size(&mut self, size: usize) {
self.max_buffers_size = size;
}
/// Set maximal number of entries in history file
pub fn set_max_file_size(&mut self, size: usize) {
self.max_file_size = size;
}
/// Number of items in history.
#[inline(always)]
pub fn len(&self) -> usize {
self.buffers.len()
}
/// Is the history empty
pub fn is_empty(&self) -> bool {
self.buffers.is_empty()
}
/// Add a command to the history buffer and remove the oldest commands when the max history
/// size has been met. If writing to the disk is enabled, this function will be used for
/// logging history to the designated history file.
pub fn push(&mut self, new_item: Buffer) -> io::Result<()> |
/// Removes duplicate entries in the history
pub fn remove_duplicates(&mut self, input: &str) {
self.buffers.retain(|buffer| {
let command = buffer.lines().concat();
command != input
});
}
fn get_match<I>(&self, vals: I, search_term: &Buffer) -> Option<usize>
where
I: Iterator<Item = usize>,
{
vals.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.find(|(_i, tested)| tested.starts_with(search_term))
.map(|(i, _)| i)
}
/// Go through the history and try to find an index (newest to oldest) which starts the same
/// as the new buffer given to this function as argument. Starts at curr_position. Does no wrap.
pub fn get_newest_match(
&self,
curr_position: Option<usize>,
new_buff: &Buffer,
) -> Option<usize> {
let pos = curr_position.unwrap_or_else(|| self.buffers.len());
if pos > 0 {
self.get_match((0..pos).rev(), new_buff)
} else {
None
}
}
pub fn get_history_subset(&self, search_term: &Buffer) -> Vec<usize> {
let mut v: Vec<usize> = Vec::new();
let mut ret: Vec<usize> = (0..self.len())
.filter(|i| {
if let Some(tested) = self.buffers.get(*i) {
let starts = tested.starts_with(search_term);
let contains = tested.contains(search_term);
if starts {
v.push(*i);
}
contains && !starts && tested != search_term
} else {
false
}
})
.collect();
ret.append(&mut v);
ret
}
pub fn search_index(&self, search_term: &Buffer) -> Vec<usize> {
(0..self.len())
.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.filter(|(_i, tested)| tested.contains(search_term))
.map(|(i, _)| i)
.collect()
}
/// Get the history file name.
#[inline(always)]
pub fn file_name(&self) -> Option<&str> {
self.file_name.as_ref().map(|s| s.as_str())
}
fn truncate(&mut self) {
// Find how many lines we need to move backwards
// in the file to remove all the old commands.
if self.buffers.len() >= self.max_file_size {
let pop_out = self.buffers.len() - self.max_file_size;
for _ in 0..pop_out {
self.buffers.pop_front();
}
}
}
fn overwrite_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
self.truncate();
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in self.buffers.iter().cloned() {
let _ = file.write_all(&String::from(command).as_bytes());
let _ = file.write_all(b"\n");
}
Ok("Wrote history to file.".to_string())
}
pub fn commit_to_file_path<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
if self.inc_append {
Ok("Nothing to commit.".to_string())
} else {
self.overwrite_history(path)
}
}
pub fn commit_to_file(&mut self) {
if let Some(file_name) = self.file_name.clone() {
let _ = self.commit_to_file_path(file_name);
}
}
}
impl<'a> IntoIterator for &'a History {
type Item = &'a Buffer;
type IntoIter = vec_deque::Iter<'a, Buffer>;
fn into_iter(self) -> Self::IntoIter {
self.buffers.iter()
}
}
impl Index<usize> for History {
type Output = Buffer;
fn index(&self, index: usize) -> &Buffer {
&self.buffers[index]
}
}
impl IndexMut<usize> for History {
fn index_mut(&mut self, index: usize) -> &mut Buffer {
&mut self.buffers[index]
}
}
| {
// buffers[0] is the oldest entry
// the new entry goes to the end
if !self.append_duplicate_entries
&& self.buffers.back().map(|b| b.to_string()) == Some(new_item.to_string())
{
return Ok(());
}
let item_str = String::from(new_item.clone());
self.buffers.push_back(new_item);
//self.to_max_size();
while self.buffers.len() > self.max_buffers_size {
self.buffers.pop_front();
}
if self.inc_append && self.file_name.is_some() {
if !self.load_duplicates {
// Do not want duplicates so periodically compact the history file.
self.compaction_writes += 1;
// Every 30 writes "compact" the history file by writing just in memory history. This
// is to keep the history file clean and at a reasonable size (not much over max
// history size at it's worst).
if self.compaction_writes > 29 {
if self.share {
// Reload history, we may be out of sync.
let _ = self.load_history(false);
// Commit the duplicated history.
if let Some(file_name) = self.file_name.clone() {
let _ = self.overwrite_history(file_name);
}
} else {
// Not using shared history so just de-dup the file without messing with
// our history.
if let Some(file_name) = self.file_name.clone() {
let _ =
History::deduplicate_history_file(file_name, self.max_file_size);
}
}
self.compaction_writes = 0;
}
} else {
// If allowing duplicates then no need for compaction.
self.compaction_writes = 1;
}
let file_name = self.file_name.clone().unwrap();
if let Ok(inner_file) = std::fs::OpenOptions::new().append(true).open(&file_name) {
// Leave file size alone, if it is not right trigger a reload later.
if self.compaction_writes > 0 {
// If 0 we "compacted" and nothing to write.
let mut file = BufWriter::new(inner_file);
let _ = file.write_all(&item_str.as_bytes());
let _ = file.write_all(b"\n");
// Save the filesize after each append so we do not reload when we do not need to.
self.file_size += item_str.len() as u64 + 1;
}
}
}
Ok(())
} | identifier_body |
history.rs | use super::*;
use std::{
collections::{vec_deque, VecDeque},
fs::File,
io::{self, Write},
io::{BufRead, BufReader, BufWriter},
iter::IntoIterator,
ops::Index,
ops::IndexMut,
path::Path,
//time::Duration,
};
const DEFAULT_MAX_SIZE: usize = 1000;
/// Structure encapsulating command history
pub struct History {
// TODO: this should eventually be private
/// Vector of buffers to store history in
pub buffers: VecDeque<Buffer>,
/// Store a filename to save history into; if None don't save history
file_name: Option<String>,
/// Maximal number of buffers stored in the memory
/// TODO: just make this public?
max_buffers_size: usize,
/// Maximal number of lines stored in the file
// TODO: just make this public?
max_file_size: usize,
// TODO set from environment variable?
pub append_duplicate_entries: bool,
/// Append each entry to history file as entered?
pub inc_append: bool,
/// Share history across ion's with the same history file (combine with inc_append).
pub share: bool,
/// Last filesize of history file, used to optimize history sharing.
pub file_size: u64,
/// Allow loading duplicate entries, need to know this for loading history files.
pub load_duplicates: bool,
/// Writes between history compaction.
compaction_writes: usize,
}
impl Default for History {
fn default() -> Self {
Self::new()
}
}
impl History {
/// Create new History structure.
pub fn new() -> History {
History {
buffers: VecDeque::with_capacity(DEFAULT_MAX_SIZE),
file_name: None,
max_buffers_size: DEFAULT_MAX_SIZE,
max_file_size: DEFAULT_MAX_SIZE,
append_duplicate_entries: false,
inc_append: false,
share: false,
file_size: 0,
load_duplicates: true,
compaction_writes: 0,
}
}
/// Clears out the history.
pub fn clear_history(&mut self) {
self.buffers.clear();
}
/// Loads the history file from the saved path and appends it to the end of the history if append
/// is true otherwise replace history.
pub fn load_history(&mut self, append: bool) -> io::Result<u64> {
if let Some(path) = self.file_name.clone() {
let file_size = self.file_size;
self.load_history_file_test(&path, file_size, append)
.map(|l| {
self.file_size = l;
l
})
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"History filename not set!",
))
}
}
/// Loads the history file from path and appends it to the end of the history if append is true.
pub fn load_history_file<P: AsRef<Path>>(&mut self, path: P, append: bool) -> io::Result<u64> {
self.load_history_file_test(path, 0, append)
}
/// Loads the history file from path and appends it to the end of the history.f append is true
/// (replaces if false). Only loads if length is not equal to current file size.
fn load_history_file_test<P: AsRef<Path>>(
&mut self,
path: P,
length: u64,
append: bool,
) -> io::Result<u64> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let new_length = file.metadata()?.len();
if new_length == 0 && length == 0 && !append {
// Special case, trying to load nothing and not appending- just clear.
self.clear_history();
}
if new_length != length {
if !append {
self.clear_history();
}
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
self.buffers.push_back(Buffer::from(line));
}
}
Err(_) => break,
}
}
self.truncate();
if !self.load_duplicates {
let mut tmp_buffers: Vec<Buffer> = Vec::with_capacity(self.buffers.len());
// Remove duplicates from loaded history if we do not want it.
while let Some(buf) = self.buffers.pop_back() {
self.remove_duplicates(&buf.to_string()[..]);
tmp_buffers.push(buf);
}
while let Some(buf) = tmp_buffers.pop() {
self.buffers.push_back(buf);
}
}
}
Ok(new_length)
}
/// Removes duplicates and trims a history file to max_file_size.
/// Primarily if inc_append is set without shared history.
/// Static because it should have no side effects on a history object.
fn deduplicate_history_file<P: AsRef<Path>>(
path: P,
max_file_size: usize,
) -> io::Result<String> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let mut buf: VecDeque<String> = VecDeque::new();
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
buf.push_back(line);
}
}
Err(_) => break,
}
}
let org_length = buf.len();
if buf.len() >= max_file_size {
let pop_out = buf.len() - max_file_size;
for _ in 0..pop_out {
buf.pop_front();
}
}
let mut tmp_buffers: Vec<String> = Vec::with_capacity(buf.len());
// Remove duplicates from loaded history if we do not want it.
while let Some(line) = buf.pop_back() {
buf.retain(|buffer| *buffer != line);
tmp_buffers.push(line);
}
while let Some(line) = tmp_buffers.pop() {
buf.push_back(line);
}
if org_length != buf.len() {
// Overwrite the history file with the deduplicated version if it changed.
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in buf.into_iter() {
let _ = file.write_all(&command.as_bytes());
let _ = file.write_all(b"\n");
}
}
Ok("De-duplicated history file.".to_string())
}
/// Set history file name and at the same time load the history.
pub fn set_file_name_and_load_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<u64> {
let path = path.as_ref();
self.file_name = path.to_str().map(|s| s.to_owned());
self.file_size = 0;
if path.exists() {
self.load_history_file(path, false).map(|l| {
self.file_size = l;
l
})
} else {
File::create(path)?;
Ok(0)
}
}
/// Set maximal number of buffers stored in memory
pub fn set_max_buffers_size(&mut self, size: usize) {
self.max_buffers_size = size;
}
/// Set maximal number of entries in history file
pub fn set_max_file_size(&mut self, size: usize) {
self.max_file_size = size;
}
/// Number of items in history.
#[inline(always)]
pub fn len(&self) -> usize {
self.buffers.len()
}
/// Is the history empty
pub fn is_empty(&self) -> bool {
self.buffers.is_empty()
}
/// Add a command to the history buffer and remove the oldest commands when the max history
/// size has been met. If writing to the disk is enabled, this function will be used for
/// logging history to the designated history file.
pub fn push(&mut self, new_item: Buffer) -> io::Result<()> {
// buffers[0] is the oldest entry
// the new entry goes to the end
if !self.append_duplicate_entries
&& self.buffers.back().map(|b| b.to_string()) == Some(new_item.to_string())
{
return Ok(());
}
let item_str = String::from(new_item.clone());
self.buffers.push_back(new_item);
//self.to_max_size();
while self.buffers.len() > self.max_buffers_size {
self.buffers.pop_front();
}
if self.inc_append && self.file_name.is_some() {
if !self.load_duplicates {
// Do not want duplicates so periodically compact the history file.
self.compaction_writes += 1;
// Every 30 writes "compact" the history file by writing just in memory history. This
// is to keep the history file clean and at a reasonable size (not much over max
// history size at it's worst).
if self.compaction_writes > 29 {
if self.share {
// Reload history, we may be out of sync.
let _ = self.load_history(false);
// Commit the duplicated history.
if let Some(file_name) = self.file_name.clone() {
let _ = self.overwrite_history(file_name);
}
} else {
// Not using shared history so just de-dup the file without messing with
// our history.
if let Some(file_name) = self.file_name.clone() {
let _ =
History::deduplicate_history_file(file_name, self.max_file_size);
}
}
self.compaction_writes = 0;
}
} else {
// If allowing duplicates then no need for compaction.
self.compaction_writes = 1;
}
let file_name = self.file_name.clone().unwrap();
if let Ok(inner_file) = std::fs::OpenOptions::new().append(true).open(&file_name) {
// Leave file size alone, if it is not right trigger a reload later.
if self.compaction_writes > 0 {
// If 0 we "compacted" and nothing to write.
let mut file = BufWriter::new(inner_file);
let _ = file.write_all(&item_str.as_bytes());
let _ = file.write_all(b"\n");
// Save the filesize after each append so we do not reload when we do not need to.
self.file_size += item_str.len() as u64 + 1;
}
}
}
Ok(())
}
/// Removes duplicate entries in the history
pub fn remove_duplicates(&mut self, input: &str) {
self.buffers.retain(|buffer| {
let command = buffer.lines().concat();
command != input
});
}
fn get_match<I>(&self, vals: I, search_term: &Buffer) -> Option<usize>
where
I: Iterator<Item = usize>,
{
vals.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.find(|(_i, tested)| tested.starts_with(search_term))
.map(|(i, _)| i)
}
/// Go through the history and try to find an index (newest to oldest) which starts the same
/// as the new buffer given to this function as argument. Starts at curr_position. Does no wrap.
pub fn get_newest_match(
&self,
curr_position: Option<usize>,
new_buff: &Buffer,
) -> Option<usize> {
let pos = curr_position.unwrap_or_else(|| self.buffers.len());
if pos > 0 {
self.get_match((0..pos).rev(), new_buff)
} else {
None
}
}
pub fn get_history_subset(&self, search_term: &Buffer) -> Vec<usize> {
let mut v: Vec<usize> = Vec::new();
let mut ret: Vec<usize> = (0..self.len())
.filter(|i| {
if let Some(tested) = self.buffers.get(*i) {
let starts = tested.starts_with(search_term);
let contains = tested.contains(search_term);
if starts {
v.push(*i);
}
contains && !starts && tested != search_term
} else {
false
}
})
.collect();
ret.append(&mut v);
ret
}
pub fn search_index(&self, search_term: &Buffer) -> Vec<usize> {
(0..self.len())
.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.filter(|(_i, tested)| tested.contains(search_term))
.map(|(i, _)| i)
.collect()
}
/// Get the history file name.
#[inline(always)]
pub fn file_name(&self) -> Option<&str> {
self.file_name.as_ref().map(|s| s.as_str())
}
fn truncate(&mut self) {
// Find how many lines we need to move backwards
// in the file to remove all the old commands.
if self.buffers.len() >= self.max_file_size {
let pop_out = self.buffers.len() - self.max_file_size;
for _ in 0..pop_out {
self.buffers.pop_front();
}
}
}
fn overwrite_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
self.truncate();
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in self.buffers.iter().cloned() {
let _ = file.write_all(&String::from(command).as_bytes());
let _ = file.write_all(b"\n");
}
Ok("Wrote history to file.".to_string())
}
pub fn commit_to_file_path<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
if self.inc_append {
Ok("Nothing to commit.".to_string())
} else {
self.overwrite_history(path)
}
}
pub fn commit_to_file(&mut self) {
if let Some(file_name) = self.file_name.clone() {
let _ = self.commit_to_file_path(file_name);
}
}
}
impl<'a> IntoIterator for &'a History {
type Item = &'a Buffer;
type IntoIter = vec_deque::Iter<'a, Buffer>;
fn | (self) -> Self::IntoIter {
self.buffers.iter()
}
}
impl Index<usize> for History {
type Output = Buffer;
fn index(&self, index: usize) -> &Buffer {
&self.buffers[index]
}
}
impl IndexMut<usize> for History {
fn index_mut(&mut self, index: usize) -> &mut Buffer {
&mut self.buffers[index]
}
}
| into_iter | identifier_name |
history.rs | use super::*;
use std::{
collections::{vec_deque, VecDeque},
fs::File,
io::{self, Write},
io::{BufRead, BufReader, BufWriter},
iter::IntoIterator,
ops::Index,
ops::IndexMut,
path::Path,
//time::Duration,
};
const DEFAULT_MAX_SIZE: usize = 1000;
/// Structure encapsulating command history
pub struct History {
// TODO: this should eventually be private
/// Vector of buffers to store history in
pub buffers: VecDeque<Buffer>,
/// Store a filename to save history into; if None don't save history
file_name: Option<String>,
/// Maximal number of buffers stored in the memory
/// TODO: just make this public?
max_buffers_size: usize,
/// Maximal number of lines stored in the file
// TODO: just make this public?
max_file_size: usize,
// TODO set from environment variable?
pub append_duplicate_entries: bool,
/// Append each entry to history file as entered?
pub inc_append: bool,
/// Share history across ion's with the same history file (combine with inc_append).
pub share: bool,
/// Last filesize of history file, used to optimize history sharing.
pub file_size: u64,
/// Allow loading duplicate entries, need to know this for loading history files.
pub load_duplicates: bool,
/// Writes between history compaction.
compaction_writes: usize,
}
impl Default for History {
fn default() -> Self {
Self::new()
}
}
impl History {
/// Create new History structure.
pub fn new() -> History {
History {
buffers: VecDeque::with_capacity(DEFAULT_MAX_SIZE),
file_name: None,
max_buffers_size: DEFAULT_MAX_SIZE,
max_file_size: DEFAULT_MAX_SIZE,
append_duplicate_entries: false,
inc_append: false,
share: false,
file_size: 0,
load_duplicates: true,
compaction_writes: 0,
}
}
/// Clears out the history.
pub fn clear_history(&mut self) {
self.buffers.clear();
}
/// Loads the history file from the saved path and appends it to the end of the history if append
/// is true otherwise replace history.
pub fn load_history(&mut self, append: bool) -> io::Result<u64> {
if let Some(path) = self.file_name.clone() {
let file_size = self.file_size;
self.load_history_file_test(&path, file_size, append)
.map(|l| {
self.file_size = l;
l
})
} else {
Err(io::Error::new(
io::ErrorKind::Other,
"History filename not set!",
))
}
}
/// Loads the history file from path and appends it to the end of the history if append is true.
pub fn load_history_file<P: AsRef<Path>>(&mut self, path: P, append: bool) -> io::Result<u64> {
self.load_history_file_test(path, 0, append)
}
/// Loads the history file from path and appends it to the end of the history.f append is true
/// (replaces if false). Only loads if length is not equal to current file size.
fn load_history_file_test<P: AsRef<Path>>(
&mut self,
path: P,
length: u64,
append: bool,
) -> io::Result<u64> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let new_length = file.metadata()?.len();
if new_length == 0 && length == 0 && !append {
// Special case, trying to load nothing and not appending- just clear.
self.clear_history();
}
if new_length != length {
if !append {
self.clear_history();
}
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
self.buffers.push_back(Buffer::from(line));
}
}
Err(_) => break,
}
}
self.truncate();
if !self.load_duplicates {
let mut tmp_buffers: Vec<Buffer> = Vec::with_capacity(self.buffers.len());
// Remove duplicates from loaded history if we do not want it.
while let Some(buf) = self.buffers.pop_back() {
self.remove_duplicates(&buf.to_string()[..]);
tmp_buffers.push(buf);
}
while let Some(buf) = tmp_buffers.pop() {
self.buffers.push_back(buf);
}
}
}
Ok(new_length)
}
/// Removes duplicates and trims a history file to max_file_size.
/// Primarily if inc_append is set without shared history.
/// Static because it should have no side effects on a history object.
fn deduplicate_history_file<P: AsRef<Path>>(
path: P,
max_file_size: usize,
) -> io::Result<String> {
let path = path.as_ref();
let file = if path.exists() {
File::open(path)?
} else {
let status = format!("File not found {:?}", path);
return Err(io::Error::new(io::ErrorKind::Other, status));
};
let mut buf: VecDeque<String> = VecDeque::new();
let reader = BufReader::new(file);
for line in reader.lines() {
match line {
Ok(line) => {
if !line.starts_with('#') {
buf.push_back(line);
}
}
Err(_) => break,
}
}
let org_length = buf.len();
if buf.len() >= max_file_size {
let pop_out = buf.len() - max_file_size;
for _ in 0..pop_out {
buf.pop_front();
}
}
let mut tmp_buffers: Vec<String> = Vec::with_capacity(buf.len());
// Remove duplicates from loaded history if we do not want it.
while let Some(line) = buf.pop_back() {
buf.retain(|buffer| *buffer != line);
tmp_buffers.push(line);
}
while let Some(line) = tmp_buffers.pop() {
buf.push_back(line);
}
if org_length != buf.len() {
// Overwrite the history file with the deduplicated version if it changed.
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in buf.into_iter() {
let _ = file.write_all(&command.as_bytes());
let _ = file.write_all(b"\n");
}
}
Ok("De-duplicated history file.".to_string())
}
/// Set history file name and at the same time load the history.
pub fn set_file_name_and_load_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<u64> {
let path = path.as_ref();
self.file_name = path.to_str().map(|s| s.to_owned());
self.file_size = 0;
if path.exists() {
self.load_history_file(path, false).map(|l| {
self.file_size = l;
l
})
} else {
File::create(path)?;
Ok(0)
}
}
/// Set maximal number of buffers stored in memory
pub fn set_max_buffers_size(&mut self, size: usize) {
self.max_buffers_size = size;
}
/// Set maximal number of entries in history file
pub fn set_max_file_size(&mut self, size: usize) {
self.max_file_size = size;
}
/// Number of items in history.
#[inline(always)]
pub fn len(&self) -> usize {
self.buffers.len()
}
/// Is the history empty
pub fn is_empty(&self) -> bool {
self.buffers.is_empty()
}
/// Add a command to the history buffer and remove the oldest commands when the max history
/// size has been met. If writing to the disk is enabled, this function will be used for
/// logging history to the designated history file.
pub fn push(&mut self, new_item: Buffer) -> io::Result<()> {
// buffers[0] is the oldest entry
// the new entry goes to the end
if !self.append_duplicate_entries
&& self.buffers.back().map(|b| b.to_string()) == Some(new_item.to_string())
{
return Ok(());
}
let item_str = String::from(new_item.clone());
self.buffers.push_back(new_item);
//self.to_max_size();
while self.buffers.len() > self.max_buffers_size {
self.buffers.pop_front();
}
if self.inc_append && self.file_name.is_some() {
if !self.load_duplicates {
// Do not want duplicates so periodically compact the history file.
self.compaction_writes += 1;
// Every 30 writes "compact" the history file by writing just in memory history. This
// is to keep the history file clean and at a reasonable size (not much over max
// history size at it's worst).
if self.compaction_writes > 29 {
if self.share {
// Reload history, we may be out of sync.
let _ = self.load_history(false);
// Commit the duplicated history.
if let Some(file_name) = self.file_name.clone() {
let _ = self.overwrite_history(file_name);
}
} else {
// Not using shared history so just de-dup the file without messing with
// our history.
if let Some(file_name) = self.file_name.clone() {
let _ =
History::deduplicate_history_file(file_name, self.max_file_size);
}
}
self.compaction_writes = 0;
}
} else {
// If allowing duplicates then no need for compaction.
self.compaction_writes = 1;
}
let file_name = self.file_name.clone().unwrap();
if let Ok(inner_file) = std::fs::OpenOptions::new().append(true).open(&file_name) {
// Leave file size alone, if it is not right trigger a reload later.
if self.compaction_writes > 0 {
// If 0 we "compacted" and nothing to write.
let mut file = BufWriter::new(inner_file);
let _ = file.write_all(&item_str.as_bytes());
let _ = file.write_all(b"\n");
// Save the filesize after each append so we do not reload when we do not need to.
self.file_size += item_str.len() as u64 + 1;
}
}
}
Ok(())
}
/// Removes duplicate entries in the history
pub fn remove_duplicates(&mut self, input: &str) {
self.buffers.retain(|buffer| {
let command = buffer.lines().concat();
command != input
});
}
fn get_match<I>(&self, vals: I, search_term: &Buffer) -> Option<usize>
where
I: Iterator<Item = usize>,
{
vals.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.find(|(_i, tested)| tested.starts_with(search_term))
.map(|(i, _)| i)
}
/// Go through the history and try to find an index (newest to oldest) which starts the same
/// as the new buffer given to this function as argument. Starts at curr_position. Does no wrap.
pub fn get_newest_match(
&self,
curr_position: Option<usize>,
new_buff: &Buffer,
) -> Option<usize> {
let pos = curr_position.unwrap_or_else(|| self.buffers.len());
if pos > 0 {
self.get_match((0..pos).rev(), new_buff)
} else {
None
}
}
pub fn get_history_subset(&self, search_term: &Buffer) -> Vec<usize> {
let mut v: Vec<usize> = Vec::new();
let mut ret: Vec<usize> = (0..self.len())
.filter(|i| {
if let Some(tested) = self.buffers.get(*i) {
let starts = tested.starts_with(search_term);
let contains = tested.contains(search_term);
if starts |
contains && !starts && tested != search_term
} else {
false
}
})
.collect();
ret.append(&mut v);
ret
}
pub fn search_index(&self, search_term: &Buffer) -> Vec<usize> {
(0..self.len())
.filter_map(|i| self.buffers.get(i).map(|t| (i, t)))
.filter(|(_i, tested)| tested.contains(search_term))
.map(|(i, _)| i)
.collect()
}
/// Get the history file name.
#[inline(always)]
pub fn file_name(&self) -> Option<&str> {
self.file_name.as_ref().map(|s| s.as_str())
}
fn truncate(&mut self) {
// Find how many lines we need to move backwards
// in the file to remove all the old commands.
if self.buffers.len() >= self.max_file_size {
let pop_out = self.buffers.len() - self.max_file_size;
for _ in 0..pop_out {
self.buffers.pop_front();
}
}
}
fn overwrite_history<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
self.truncate();
let mut file = BufWriter::new(File::create(&path)?);
// Write the commands to the history file.
for command in self.buffers.iter().cloned() {
let _ = file.write_all(&String::from(command).as_bytes());
let _ = file.write_all(b"\n");
}
Ok("Wrote history to file.".to_string())
}
pub fn commit_to_file_path<P: AsRef<Path>>(&mut self, path: P) -> io::Result<String> {
if self.inc_append {
Ok("Nothing to commit.".to_string())
} else {
self.overwrite_history(path)
}
}
pub fn commit_to_file(&mut self) {
if let Some(file_name) = self.file_name.clone() {
let _ = self.commit_to_file_path(file_name);
}
}
}
impl<'a> IntoIterator for &'a History {
type Item = &'a Buffer;
type IntoIter = vec_deque::Iter<'a, Buffer>;
fn into_iter(self) -> Self::IntoIter {
self.buffers.iter()
}
}
impl Index<usize> for History {
type Output = Buffer;
fn index(&self, index: usize) -> &Buffer {
&self.buffers[index]
}
}
impl IndexMut<usize> for History {
fn index_mut(&mut self, index: usize) -> &mut Buffer {
&mut self.buffers[index]
}
}
| {
v.push(*i);
} | conditional_block |
source.py | # Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE.txt
""" """ | from itertools import islice
from rowgenerators.util import md5_file
from .appurl.web.download import Downloader
class RowGenerator(object):
"""Main class for accessing row generators"""
def __init__(self, url, *args, downloader=None, **kwargs):
from .appurl.url import parse_app_url
self._url_text = url
self._downloader = downloader or Downloader.get_instance()
self.url = parse_app_url(self._url_text, *args, downloader=self._downloader, **kwargs)
def registered_urls(self):
"""Return an array of registered Urls. The first row is the header"""
from pkg_resources import iter_entry_points
entries = ['Priority', 'EP Name', 'Module', 'Class']
for ep in iter_entry_points('appurl.urls'):
c = ep.load()
entries.append([c.match_priority, ep.name, ep.module_name, c.__name__, ])
return entries
def __iter__(self):
"""Yields first the header, then each of the data rows. """
yield from self.url.generator
@property
def iter_dict(self):
"""Iterate over dicts"""
yield from self.url.generator.iter_dict
@property
def iter_row(self):
"""Iterate, yielding row proxy objects. DOes not first yield a header"""
yield from self.url.generator.iter_rp
@property
def generator(self):
"""Return the data generating object"""
return self.url
@property
def headers(self):
"""Return the columns headers"""
return self.generator.headers
def dataframe(self, *args, **kwargs):
"""Return a pandas dataframe"""
try:
return self.url.generator.dataframe(*args, **kwargs)
except AttributeError:
pass
try:
return self.url.dataframe(*args, **kwargs)
except AttributeError:
pass
raise NotImplementedError("Url '{}' of type '{}' can't generate a dataframe ".format(self.url, type(self.url)))
def geoframe(self, *args, **kwargs):
"""Return a Geopandas dataframe"""
try:
return self.url.geoframe(*args, **kwargs)
except AttributeError:
pass
try:
return self.url.geo_generator.geoframe(*args, **kwargs)
except AttributeError:
pass
try:
return self.url.generator.geoframe(*args, **kwargs)
except AttributeError:
pass
raise NotImplementedError("Url '{}' of type '{}' can't generate a dataframe ".format(self.url, type(self.url)))
def intuit(self):
"""Return information about the columns, based on guessing data types"""
raise NotImplemented()
def statistics(self):
"""Return summary statistics for the columns"""
raise NotImplemented()
def set_row_processor(self):
"""Register a row processor, which will transform rows as they are iterated"""
raise NotImplemented()
class Source(object):
"""Base class for accessors that generate rows from any source. This is the class returned from
parse_app_url().generator
Subclasses of Source must override at least _get_row_gen method.
"""
priority = 100
def __init__(self, ref, cache=None, working_dir=None, env=None, **kwargs):
self.ref = ref
self.cache = cache
self._meta = {}
@property
def headers(self):
"""Return a list of the names of the columns of this file, or None if the header is not defined.
This should *only* return headers if the headers are unambiguous, such as for database tables,
or shapefiles. For other files, like CSV and Excel, the header row can not be determined without analysis
or specification."""
return None
@headers.setter
def headers(self, v):
"""Catch attempts to set"""
raise NotImplementedError
@property
def columns(self):
""" Returns columns for the file accessed by accessor.
"""
return None
@property
def meta(self):
return self._meta
@property
def hash(self):
with open(self.url.fspath, 'rb') as f:
return md5_file(f)
def __iter__(self):
"""Iterate over all of the lines in the file"""
raise NotImplementedError()
@property
def iter_rp(self):
"""Iterate, yielding row proxy objects rather than rows"""
from .rowproxy import RowProxy
itr = iter(self)
headers = next(itr)
row_proxy = RowProxy(headers)
for row in itr:
yield row_proxy.set_row(row)
@property
def iter_dict(self):
"""Iterate, yielding dicts rather than rows"""
itr = iter(self)
headers = next(itr)
for row in itr:
yield dict(zip(headers, row))
def dataframe(self, *args, **kwargs):
"""Return a pandas dataframe from the resource"""
from pandas import DataFrame
# Just normal data, so use the iterator in this object.
headers = next(islice(self, 0, 1))
data = islice(self, 1, None)
return DataFrame(list(data), columns=headers)
def start(self):
pass
def finish(self):
pass
class SelectiveRowGenerator(object):
"""Proxies an iterator to remove headers, comments, blank lines from the row stream.
The header will be emitted first, and comments are available from properties """
def __init__(self, seq, start=0, header_lines=[], comments=[], end=[], load_headers=True, **kwargs):
"""
An iteratable wrapper that coalesces headers and skips comments
:param seq: An iterable
:param start: The start of data row
:param header_lines: An array of row numbers that should be coalesced into the header line, which is yieled first
:param comments: An array of comment row numbers
:param end: The last row number for data
:param kwargs: Ignored. Sucks up extra parameters.
:return:
"""
self.iter = iter(seq)
self.start = start if (start or start == 0) else 1
self.header_lines = header_lines if isinstance(header_lines, (tuple, list)) else [int(e) for e in
header_lines.split(',') if e]
self.comment_lines = comments
self.end = end
self.load_headers = load_headers
self.headers = []
self.comments = []
int(self.start) # Throw error if it is not an int
@property
def coalesce_headers(self):
"""Collects headers that are spread across multiple lines into a single row"""
import re
if not self.headers:
return None
header_lines = [list(hl) for hl in self.headers if bool(hl)]
if len(header_lines) == 0:
return []
if len(header_lines) == 1:
return header_lines[0]
# If there are gaps in the values of a line, copy them forward, so there
# is some value in every position
for hl in header_lines:
last = None
for i in range(len(hl)):
hli = str(hl[i])
if not hli.strip():
hl[i] = last
else:
last = hli
headers = [' '.join(str(col_val).strip() if col_val else '' for col_val in col_set)
for col_set in zip(*header_lines)]
headers = [re.sub(r'\s+', ' ', h.strip()) for h in headers]
return headers
def __iter__(self):
row = []
for i, row in enumerate(self.iter):
if i in self.header_lines:
if self.load_headers:
self.headers.append(row)
elif i in self.comment_lines:
self.comments.append(row)
elif i == self.start:
break
if self.headers:
headers = self.coalesce_headers
yield headers
else:
# There is no header, so fake it
headers = ['col' + str(i) for i, _ in enumerate(row)]
yield headers
yield row
for row in self.iter:
yield row
class ReorderRowGenerator(object):
"""A row generator that remaps columns. The row-generator must be in standard format,
with the first row being the header, and all others being data.
The column map maps dest_header->source_header ( it's inverted from what you might expect ). It can be
a dict with dest values in keys and source values in values, or an object with `source' and 'dest' attributes,
or a sequence of dicts with 'source' and 'dest' keys
"""
def __init__(self, row_gen, colmap) -> None:
self.row_gen = row_gen
self.colmap = None
if colmap:
try:
self.colmap = {e.dest: e.source for e in colmap}
except AttributeError:
pass
if colmap and not self.colmap:
try:
self.colmap = {e['dest']: e['source'] for e in colmap}
except (KeyError, TypeError):
pass
if colmap and not self.colmap:
self.colmap = colmap
def __iter__(self):
itr = iter(self.row_gen)
source_headers = next(itr)
indexers = []
if self.colmap:
for dh, sh in self.colmap.items():
try:
i = source_headers.index(sh)
indexers.append(f'row[{i}]')
except ValueError:
indexers.append('None')
code = 'lambda row: ({})'.format(','.join(indexers))
f = eval(code)
yield list(self.colmap.keys())
else:
f = lambda row: row
yield source_headers
for row in itr:
yield f(row) | random_line_split | |
source.py | # Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE.txt
""" """
from itertools import islice
from rowgenerators.util import md5_file
from .appurl.web.download import Downloader
class RowGenerator(object):
"""Main class for accessing row generators"""
def __init__(self, url, *args, downloader=None, **kwargs):
from .appurl.url import parse_app_url
self._url_text = url
self._downloader = downloader or Downloader.get_instance()
self.url = parse_app_url(self._url_text, *args, downloader=self._downloader, **kwargs)
def registered_urls(self):
"""Return an array of registered Urls. The first row is the header"""
from pkg_resources import iter_entry_points
entries = ['Priority', 'EP Name', 'Module', 'Class']
for ep in iter_entry_points('appurl.urls'):
c = ep.load()
entries.append([c.match_priority, ep.name, ep.module_name, c.__name__, ])
return entries
def __iter__(self):
"""Yields first the header, then each of the data rows. """
yield from self.url.generator
@property
def iter_dict(self):
"""Iterate over dicts"""
yield from self.url.generator.iter_dict
@property
def iter_row(self):
"""Iterate, yielding row proxy objects. DOes not first yield a header"""
yield from self.url.generator.iter_rp
@property
def generator(self):
"""Return the data generating object"""
return self.url
@property
def headers(self):
"""Return the columns headers"""
return self.generator.headers
def dataframe(self, *args, **kwargs):
"""Return a pandas dataframe"""
try:
return self.url.generator.dataframe(*args, **kwargs)
except AttributeError:
pass
try:
return self.url.dataframe(*args, **kwargs)
except AttributeError:
pass
raise NotImplementedError("Url '{}' of type '{}' can't generate a dataframe ".format(self.url, type(self.url)))
def geoframe(self, *args, **kwargs):
"""Return a Geopandas dataframe"""
try:
return self.url.geoframe(*args, **kwargs)
except AttributeError:
pass
try:
return self.url.geo_generator.geoframe(*args, **kwargs)
except AttributeError:
pass
try:
return self.url.generator.geoframe(*args, **kwargs)
except AttributeError:
pass
raise NotImplementedError("Url '{}' of type '{}' can't generate a dataframe ".format(self.url, type(self.url)))
def intuit(self):
"""Return information about the columns, based on guessing data types"""
raise NotImplemented()
def statistics(self):
"""Return summary statistics for the columns"""
raise NotImplemented()
def set_row_processor(self):
"""Register a row processor, which will transform rows as they are iterated"""
raise NotImplemented()
class Source(object):
"""Base class for accessors that generate rows from any source. This is the class returned from
parse_app_url().generator
Subclasses of Source must override at least _get_row_gen method.
"""
priority = 100
def __init__(self, ref, cache=None, working_dir=None, env=None, **kwargs):
self.ref = ref
self.cache = cache
self._meta = {}
@property
def headers(self):
"""Return a list of the names of the columns of this file, or None if the header is not defined.
This should *only* return headers if the headers are unambiguous, such as for database tables,
or shapefiles. For other files, like CSV and Excel, the header row can not be determined without analysis
or specification."""
return None
@headers.setter
def headers(self, v):
"""Catch attempts to set"""
raise NotImplementedError
@property
def columns(self):
""" Returns columns for the file accessed by accessor.
"""
return None
@property
def meta(self):
return self._meta
@property
def hash(self):
with open(self.url.fspath, 'rb') as f:
return md5_file(f)
def __iter__(self):
"""Iterate over all of the lines in the file"""
raise NotImplementedError()
@property
def iter_rp(self):
"""Iterate, yielding row proxy objects rather than rows"""
from .rowproxy import RowProxy
itr = iter(self)
headers = next(itr)
row_proxy = RowProxy(headers)
for row in itr:
yield row_proxy.set_row(row)
@property
def iter_dict(self):
"""Iterate, yielding dicts rather than rows"""
itr = iter(self)
headers = next(itr)
for row in itr:
yield dict(zip(headers, row))
def dataframe(self, *args, **kwargs):
"""Return a pandas dataframe from the resource"""
from pandas import DataFrame
# Just normal data, so use the iterator in this object.
headers = next(islice(self, 0, 1))
data = islice(self, 1, None)
return DataFrame(list(data), columns=headers)
def start(self):
pass
def finish(self):
pass
class SelectiveRowGenerator(object):
"""Proxies an iterator to remove headers, comments, blank lines from the row stream.
The header will be emitted first, and comments are available from properties """
def __init__(self, seq, start=0, header_lines=[], comments=[], end=[], load_headers=True, **kwargs):
"""
An iteratable wrapper that coalesces headers and skips comments
:param seq: An iterable
:param start: The start of data row
:param header_lines: An array of row numbers that should be coalesced into the header line, which is yieled first
:param comments: An array of comment row numbers
:param end: The last row number for data
:param kwargs: Ignored. Sucks up extra parameters.
:return:
"""
self.iter = iter(seq)
self.start = start if (start or start == 0) else 1
self.header_lines = header_lines if isinstance(header_lines, (tuple, list)) else [int(e) for e in
header_lines.split(',') if e]
self.comment_lines = comments
self.end = end
self.load_headers = load_headers
self.headers = []
self.comments = []
int(self.start) # Throw error if it is not an int
@property
def coalesce_headers(self):
"""Collects headers that are spread across multiple lines into a single row"""
import re
if not self.headers:
return None
header_lines = [list(hl) for hl in self.headers if bool(hl)]
if len(header_lines) == 0:
|
if len(header_lines) == 1:
return header_lines[0]
# If there are gaps in the values of a line, copy them forward, so there
# is some value in every position
for hl in header_lines:
last = None
for i in range(len(hl)):
hli = str(hl[i])
if not hli.strip():
hl[i] = last
else:
last = hli
headers = [' '.join(str(col_val).strip() if col_val else '' for col_val in col_set)
for col_set in zip(*header_lines)]
headers = [re.sub(r'\s+', ' ', h.strip()) for h in headers]
return headers
def __iter__(self):
row = []
for i, row in enumerate(self.iter):
if i in self.header_lines:
if self.load_headers:
self.headers.append(row)
elif i in self.comment_lines:
self.comments.append(row)
elif i == self.start:
break
if self.headers:
headers = self.coalesce_headers
yield headers
else:
# There is no header, so fake it
headers = ['col' + str(i) for i, _ in enumerate(row)]
yield headers
yield row
for row in self.iter:
yield row
class ReorderRowGenerator(object):
"""A row generator that remaps columns. The row-generator must be in standard format,
with the first row being the header, and all others being data.
The column map maps dest_header->source_header ( it's inverted from what you might expect ). It can be
a dict with dest values in keys and source values in values, or an object with `source' and 'dest' attributes,
or a sequence of dicts with 'source' and 'dest' keys
"""
def __init__(self, row_gen, colmap) -> None:
self.row_gen = row_gen
self.colmap = None
if colmap:
try:
self.colmap = {e.dest: e.source for e in colmap}
except AttributeError:
pass
if colmap and not self.colmap:
try:
self.colmap = {e['dest']: e['source'] for e in colmap}
except (KeyError, TypeError):
pass
if colmap and not self.colmap:
self.colmap = colmap
def __iter__(self):
itr = iter(self.row_gen)
source_headers = next(itr)
indexers = []
if self.colmap:
for dh, sh in self.colmap.items():
try:
i = source_headers.index(sh)
indexers.append(f'row[{i}]')
except ValueError:
indexers.append('None')
code = 'lambda row: ({})'.format(','.join(indexers))
f = eval(code)
yield list(self.colmap.keys())
else:
f = lambda row: row
yield source_headers
for row in itr:
yield f(row)
| return [] | conditional_block |
source.py | # Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE.txt
""" """
from itertools import islice
from rowgenerators.util import md5_file
from .appurl.web.download import Downloader
class RowGenerator(object):
"""Main class for accessing row generators"""
def __init__(self, url, *args, downloader=None, **kwargs):
from .appurl.url import parse_app_url
self._url_text = url
self._downloader = downloader or Downloader.get_instance()
self.url = parse_app_url(self._url_text, *args, downloader=self._downloader, **kwargs)
def registered_urls(self):
"""Return an array of registered Urls. The first row is the header"""
from pkg_resources import iter_entry_points
entries = ['Priority', 'EP Name', 'Module', 'Class']
for ep in iter_entry_points('appurl.urls'):
c = ep.load()
entries.append([c.match_priority, ep.name, ep.module_name, c.__name__, ])
return entries
def __iter__(self):
"""Yields first the header, then each of the data rows. """
yield from self.url.generator
@property
def iter_dict(self):
"""Iterate over dicts"""
yield from self.url.generator.iter_dict
@property
def iter_row(self):
"""Iterate, yielding row proxy objects. DOes not first yield a header"""
yield from self.url.generator.iter_rp
@property
def generator(self):
"""Return the data generating object"""
return self.url
@property
def headers(self):
"""Return the columns headers"""
return self.generator.headers
def dataframe(self, *args, **kwargs):
|
def geoframe(self, *args, **kwargs):
"""Return a Geopandas dataframe"""
try:
return self.url.geoframe(*args, **kwargs)
except AttributeError:
pass
try:
return self.url.geo_generator.geoframe(*args, **kwargs)
except AttributeError:
pass
try:
return self.url.generator.geoframe(*args, **kwargs)
except AttributeError:
pass
raise NotImplementedError("Url '{}' of type '{}' can't generate a dataframe ".format(self.url, type(self.url)))
def intuit(self):
"""Return information about the columns, based on guessing data types"""
raise NotImplemented()
def statistics(self):
"""Return summary statistics for the columns"""
raise NotImplemented()
def set_row_processor(self):
"""Register a row processor, which will transform rows as they are iterated"""
raise NotImplemented()
class Source(object):
"""Base class for accessors that generate rows from any source. This is the class returned from
parse_app_url().generator
Subclasses of Source must override at least _get_row_gen method.
"""
priority = 100
def __init__(self, ref, cache=None, working_dir=None, env=None, **kwargs):
self.ref = ref
self.cache = cache
self._meta = {}
@property
def headers(self):
"""Return a list of the names of the columns of this file, or None if the header is not defined.
This should *only* return headers if the headers are unambiguous, such as for database tables,
or shapefiles. For other files, like CSV and Excel, the header row can not be determined without analysis
or specification."""
return None
@headers.setter
def headers(self, v):
"""Catch attempts to set"""
raise NotImplementedError
@property
def columns(self):
""" Returns columns for the file accessed by accessor.
"""
return None
@property
def meta(self):
return self._meta
@property
def hash(self):
with open(self.url.fspath, 'rb') as f:
return md5_file(f)
def __iter__(self):
"""Iterate over all of the lines in the file"""
raise NotImplementedError()
@property
def iter_rp(self):
"""Iterate, yielding row proxy objects rather than rows"""
from .rowproxy import RowProxy
itr = iter(self)
headers = next(itr)
row_proxy = RowProxy(headers)
for row in itr:
yield row_proxy.set_row(row)
@property
def iter_dict(self):
"""Iterate, yielding dicts rather than rows"""
itr = iter(self)
headers = next(itr)
for row in itr:
yield dict(zip(headers, row))
def dataframe(self, *args, **kwargs):
"""Return a pandas dataframe from the resource"""
from pandas import DataFrame
# Just normal data, so use the iterator in this object.
headers = next(islice(self, 0, 1))
data = islice(self, 1, None)
return DataFrame(list(data), columns=headers)
def start(self):
pass
def finish(self):
pass
class SelectiveRowGenerator(object):
"""Proxies an iterator to remove headers, comments, blank lines from the row stream.
The header will be emitted first, and comments are available from properties """
def __init__(self, seq, start=0, header_lines=[], comments=[], end=[], load_headers=True, **kwargs):
"""
An iteratable wrapper that coalesces headers and skips comments
:param seq: An iterable
:param start: The start of data row
:param header_lines: An array of row numbers that should be coalesced into the header line, which is yieled first
:param comments: An array of comment row numbers
:param end: The last row number for data
:param kwargs: Ignored. Sucks up extra parameters.
:return:
"""
self.iter = iter(seq)
self.start = start if (start or start == 0) else 1
self.header_lines = header_lines if isinstance(header_lines, (tuple, list)) else [int(e) for e in
header_lines.split(',') if e]
self.comment_lines = comments
self.end = end
self.load_headers = load_headers
self.headers = []
self.comments = []
int(self.start) # Throw error if it is not an int
@property
def coalesce_headers(self):
"""Collects headers that are spread across multiple lines into a single row"""
import re
if not self.headers:
return None
header_lines = [list(hl) for hl in self.headers if bool(hl)]
if len(header_lines) == 0:
return []
if len(header_lines) == 1:
return header_lines[0]
# If there are gaps in the values of a line, copy them forward, so there
# is some value in every position
for hl in header_lines:
last = None
for i in range(len(hl)):
hli = str(hl[i])
if not hli.strip():
hl[i] = last
else:
last = hli
headers = [' '.join(str(col_val).strip() if col_val else '' for col_val in col_set)
for col_set in zip(*header_lines)]
headers = [re.sub(r'\s+', ' ', h.strip()) for h in headers]
return headers
def __iter__(self):
row = []
for i, row in enumerate(self.iter):
if i in self.header_lines:
if self.load_headers:
self.headers.append(row)
elif i in self.comment_lines:
self.comments.append(row)
elif i == self.start:
break
if self.headers:
headers = self.coalesce_headers
yield headers
else:
# There is no header, so fake it
headers = ['col' + str(i) for i, _ in enumerate(row)]
yield headers
yield row
for row in self.iter:
yield row
class ReorderRowGenerator(object):
"""A row generator that remaps columns. The row-generator must be in standard format,
with the first row being the header, and all others being data.
The column map maps dest_header->source_header ( it's inverted from what you might expect ). It can be
a dict with dest values in keys and source values in values, or an object with `source' and 'dest' attributes,
or a sequence of dicts with 'source' and 'dest' keys
"""
def __init__(self, row_gen, colmap) -> None:
self.row_gen = row_gen
self.colmap = None
if colmap:
try:
self.colmap = {e.dest: e.source for e in colmap}
except AttributeError:
pass
if colmap and not self.colmap:
try:
self.colmap = {e['dest']: e['source'] for e in colmap}
except (KeyError, TypeError):
pass
if colmap and not self.colmap:
self.colmap = colmap
def __iter__(self):
itr = iter(self.row_gen)
source_headers = next(itr)
indexers = []
if self.colmap:
for dh, sh in self.colmap.items():
try:
i = source_headers.index(sh)
indexers.append(f'row[{i}]')
except ValueError:
indexers.append('None')
code = 'lambda row: ({})'.format(','.join(indexers))
f = eval(code)
yield list(self.colmap.keys())
else:
f = lambda row: row
yield source_headers
for row in itr:
yield f(row)
| """Return a pandas dataframe"""
try:
return self.url.generator.dataframe(*args, **kwargs)
except AttributeError:
pass
try:
return self.url.dataframe(*args, **kwargs)
except AttributeError:
pass
raise NotImplementedError("Url '{}' of type '{}' can't generate a dataframe ".format(self.url, type(self.url))) | identifier_body |
source.py | # Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE.txt
""" """
from itertools import islice
from rowgenerators.util import md5_file
from .appurl.web.download import Downloader
class RowGenerator(object):
"""Main class for accessing row generators"""
def __init__(self, url, *args, downloader=None, **kwargs):
from .appurl.url import parse_app_url
self._url_text = url
self._downloader = downloader or Downloader.get_instance()
self.url = parse_app_url(self._url_text, *args, downloader=self._downloader, **kwargs)
def registered_urls(self):
"""Return an array of registered Urls. The first row is the header"""
from pkg_resources import iter_entry_points
entries = ['Priority', 'EP Name', 'Module', 'Class']
for ep in iter_entry_points('appurl.urls'):
c = ep.load()
entries.append([c.match_priority, ep.name, ep.module_name, c.__name__, ])
return entries
def __iter__(self):
"""Yields first the header, then each of the data rows. """
yield from self.url.generator
@property
def iter_dict(self):
"""Iterate over dicts"""
yield from self.url.generator.iter_dict
@property
def iter_row(self):
"""Iterate, yielding row proxy objects. DOes not first yield a header"""
yield from self.url.generator.iter_rp
@property
def generator(self):
"""Return the data generating object"""
return self.url
@property
def headers(self):
"""Return the columns headers"""
return self.generator.headers
def dataframe(self, *args, **kwargs):
"""Return a pandas dataframe"""
try:
return self.url.generator.dataframe(*args, **kwargs)
except AttributeError:
pass
try:
return self.url.dataframe(*args, **kwargs)
except AttributeError:
pass
raise NotImplementedError("Url '{}' of type '{}' can't generate a dataframe ".format(self.url, type(self.url)))
def geoframe(self, *args, **kwargs):
"""Return a Geopandas dataframe"""
try:
return self.url.geoframe(*args, **kwargs)
except AttributeError:
pass
try:
return self.url.geo_generator.geoframe(*args, **kwargs)
except AttributeError:
pass
try:
return self.url.generator.geoframe(*args, **kwargs)
except AttributeError:
pass
raise NotImplementedError("Url '{}' of type '{}' can't generate a dataframe ".format(self.url, type(self.url)))
def intuit(self):
"""Return information about the columns, based on guessing data types"""
raise NotImplemented()
def statistics(self):
"""Return summary statistics for the columns"""
raise NotImplemented()
def set_row_processor(self):
"""Register a row processor, which will transform rows as they are iterated"""
raise NotImplemented()
class Source(object):
"""Base class for accessors that generate rows from any source. This is the class returned from
parse_app_url().generator
Subclasses of Source must override at least _get_row_gen method.
"""
priority = 100
def __init__(self, ref, cache=None, working_dir=None, env=None, **kwargs):
self.ref = ref
self.cache = cache
self._meta = {}
@property
def headers(self):
"""Return a list of the names of the columns of this file, or None if the header is not defined.
This should *only* return headers if the headers are unambiguous, such as for database tables,
or shapefiles. For other files, like CSV and Excel, the header row can not be determined without analysis
or specification."""
return None
@headers.setter
def headers(self, v):
"""Catch attempts to set"""
raise NotImplementedError
@property
def columns(self):
""" Returns columns for the file accessed by accessor.
"""
return None
@property
def meta(self):
return self._meta
@property
def hash(self):
with open(self.url.fspath, 'rb') as f:
return md5_file(f)
def __iter__(self):
"""Iterate over all of the lines in the file"""
raise NotImplementedError()
@property
def iter_rp(self):
"""Iterate, yielding row proxy objects rather than rows"""
from .rowproxy import RowProxy
itr = iter(self)
headers = next(itr)
row_proxy = RowProxy(headers)
for row in itr:
yield row_proxy.set_row(row)
@property
def iter_dict(self):
"""Iterate, yielding dicts rather than rows"""
itr = iter(self)
headers = next(itr)
for row in itr:
yield dict(zip(headers, row))
def dataframe(self, *args, **kwargs):
"""Return a pandas dataframe from the resource"""
from pandas import DataFrame
# Just normal data, so use the iterator in this object.
headers = next(islice(self, 0, 1))
data = islice(self, 1, None)
return DataFrame(list(data), columns=headers)
def start(self):
pass
def finish(self):
pass
class | (object):
"""Proxies an iterator to remove headers, comments, blank lines from the row stream.
The header will be emitted first, and comments are available from properties """
def __init__(self, seq, start=0, header_lines=[], comments=[], end=[], load_headers=True, **kwargs):
"""
An iteratable wrapper that coalesces headers and skips comments
:param seq: An iterable
:param start: The start of data row
:param header_lines: An array of row numbers that should be coalesced into the header line, which is yieled first
:param comments: An array of comment row numbers
:param end: The last row number for data
:param kwargs: Ignored. Sucks up extra parameters.
:return:
"""
self.iter = iter(seq)
self.start = start if (start or start == 0) else 1
self.header_lines = header_lines if isinstance(header_lines, (tuple, list)) else [int(e) for e in
header_lines.split(',') if e]
self.comment_lines = comments
self.end = end
self.load_headers = load_headers
self.headers = []
self.comments = []
int(self.start) # Throw error if it is not an int
@property
def coalesce_headers(self):
"""Collects headers that are spread across multiple lines into a single row"""
import re
if not self.headers:
return None
header_lines = [list(hl) for hl in self.headers if bool(hl)]
if len(header_lines) == 0:
return []
if len(header_lines) == 1:
return header_lines[0]
# If there are gaps in the values of a line, copy them forward, so there
# is some value in every position
for hl in header_lines:
last = None
for i in range(len(hl)):
hli = str(hl[i])
if not hli.strip():
hl[i] = last
else:
last = hli
headers = [' '.join(str(col_val).strip() if col_val else '' for col_val in col_set)
for col_set in zip(*header_lines)]
headers = [re.sub(r'\s+', ' ', h.strip()) for h in headers]
return headers
def __iter__(self):
row = []
for i, row in enumerate(self.iter):
if i in self.header_lines:
if self.load_headers:
self.headers.append(row)
elif i in self.comment_lines:
self.comments.append(row)
elif i == self.start:
break
if self.headers:
headers = self.coalesce_headers
yield headers
else:
# There is no header, so fake it
headers = ['col' + str(i) for i, _ in enumerate(row)]
yield headers
yield row
for row in self.iter:
yield row
class ReorderRowGenerator(object):
"""A row generator that remaps columns. The row-generator must be in standard format,
with the first row being the header, and all others being data.
The column map maps dest_header->source_header ( it's inverted from what you might expect ). It can be
a dict with dest values in keys and source values in values, or an object with `source' and 'dest' attributes,
or a sequence of dicts with 'source' and 'dest' keys
"""
def __init__(self, row_gen, colmap) -> None:
self.row_gen = row_gen
self.colmap = None
if colmap:
try:
self.colmap = {e.dest: e.source for e in colmap}
except AttributeError:
pass
if colmap and not self.colmap:
try:
self.colmap = {e['dest']: e['source'] for e in colmap}
except (KeyError, TypeError):
pass
if colmap and not self.colmap:
self.colmap = colmap
def __iter__(self):
itr = iter(self.row_gen)
source_headers = next(itr)
indexers = []
if self.colmap:
for dh, sh in self.colmap.items():
try:
i = source_headers.index(sh)
indexers.append(f'row[{i}]')
except ValueError:
indexers.append('None')
code = 'lambda row: ({})'.format(','.join(indexers))
f = eval(code)
yield list(self.colmap.keys())
else:
f = lambda row: row
yield source_headers
for row in itr:
yield f(row)
| SelectiveRowGenerator | identifier_name |
rxcb.rs | //! Objective XCB Wrapper
#![allow(dead_code)]
extern crate univstring; use self::univstring::UnivString;
extern crate xcb;
use self::xcb::ffi::*;
use std::ptr::{null, null_mut};
use std::marker::PhantomData;
use std::io::{Error as IOError, ErrorKind};
#[repr(C)] pub enum WindowIOClass
{
InputOnly = XCB_WINDOW_CLASS_INPUT_ONLY as _,
InputOutput = XCB_WINDOW_CLASS_INPUT_OUTPUT as _,
FromParent = XCB_WINDOW_CLASS_COPY_FROM_PARENT as _
}
pub struct Connection(*mut xcb_connection_t);
impl Connection
{
pub fn new<S: UnivString + ?Sized>(display: Option<&S>) -> Option<Self>
{
let display_name = display.map(|s| s.to_cstr().unwrap());
let p = unsafe
{
xcb_connect(display_name.as_ref().map(|p| p.as_ptr()).unwrap_or(null()), null_mut())
};
if p.is_null() { None } else { Some(Connection(p)) }
}
#[cfg(feature = "with_ferrite")]
pub(crate) fn inner(&self) -> *mut xcb_connection_t { self.0 }
pub fn setup(&self) -> &Setup { unsafe { &*(xcb_get_setup(self.0) as *mut _) } }
pub fn new_id(&self) -> u32 { unsafe { xcb_generate_id(self.0) } }
pub fn new_window_id(&self) -> Window { Window(self.new_id()) }
/*pub fn try_intern(&self, name: &str) -> AtomCookie
{
AtomCookie(unsafe { xcb_intern_atom(self.0, 0, name.len() as _, name.as_ptr()) }, self)
}*/
pub fn intern(&self, name: &str) -> AtomCookie
{
AtomCookie(unsafe { xcb_intern_atom(self.0, 1, name.len() as _, name.as_ptr() as _) }, self)
}
pub fn flush(&self) { unsafe { xcb_flush(self.0); } }
pub fn create_window(&self, depth: Option<u8>, id: &Window, parent: Option<xcb_window_t>,
x: i16, y: i16, width: u16, height: u16, border_width: u16, class: WindowIOClass,
visual: Option<VisualID>, valuelist: &WindowValueList) -> Result<(), GenericError>
{
let serialized = valuelist.serialize();
unsafe
{
CheckedCookie(xcb_create_window_checked(self.0, depth.unwrap_or(XCB_COPY_FROM_PARENT as _), id.0,
parent.unwrap_or_else(|| self.setup().iter_roots().next().unwrap().root()),
x, y, width, height, border_width, class as _, visual.unwrap_or(XCB_COPY_FROM_PARENT as _),
valuelist.0, serialized.0 as *const _), self).check()
}
}
pub fn map_window(&self, w: &Window) -> Result<(), GenericError>
{
unsafe { CheckedCookie(xcb_map_window_checked(self.0, w.0), self).check() }
}
pub fn destroy_window(&self, w: &Window) -> Result<(), GenericError>
{
unsafe { CheckedCookie(xcb_destroy_window_checked(self.0, w.0), self).check() }
}
}
impl Drop for Connection { fn drop(&mut self) { unsafe { xcb_disconnect(self.0) } } }
pub struct Setup(xcb_setup_t);
impl Setup
{
pub fn iter_roots(&self) -> IterRootScreen { IterRootScreen(unsafe { xcb_setup_roots_iterator(&self.0) }) }
}
#[repr(C)] pub struct Screen(xcb_screen_t);
impl Screen
{
pub fn root(&self) -> xcb_window_t { self.0.root }
// pub fn default_colormap(&self) -> xcb_colormap_t { self.0.default_colormap }
}
pub struct IterRootScreen<'s>(xcb_screen_iterator_t<'s>);
impl<'s> Iterator for IterRootScreen<'s>
{
type Item = &'s Screen;
fn next(&mut self) -> Option<&'s Screen>
{
if self.0.rem <= 0 { None }
else |
}
}
pub type WindowID = xcb_window_t;
pub struct Window(WindowID);
impl Window
{
pub(crate) fn id(&self) -> WindowID { self.0 }
pub fn replace_property<T: PropertyType + ?Sized>(&self, con: &Connection, property: Atom, value: &T)
{
value.change_property_of(con, self, property, XCB_PROP_MODE_REPLACE)
}
}
pub trait PropertyType
{
const TYPE_ATOM: Atom; const DATA_STRIDE: u32;
fn change_property_of(&self, connection: &Connection, window: &Window, property: Atom, mode: u32);
}
impl PropertyType for str
{
const TYPE_ATOM: Atom = XCB_ATOM_STRING; const DATA_STRIDE: u32 = 8;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, XCB_ATOM_STRING, 8,
self.len() as _, self.as_ptr() as _);
}
}
}
impl PropertyType for Atom
{
const TYPE_ATOM: Atom = XCB_ATOM_ATOM; const DATA_STRIDE: u32 = 32;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, XCB_ATOM_ATOM, 32, 1,
self as *const Atom as *const _);
}
}
}
impl<E: PropertyType> PropertyType for [E]
{
const TYPE_ATOM: Atom = E::TYPE_ATOM; const DATA_STRIDE: u32 = E::DATA_STRIDE;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, E::TYPE_ATOM, E::DATA_STRIDE as _,
self.len() as _, self.as_ptr() as _);
}
}
}
pub use self::xcb::ffi::XCB_ATOM_WM_NAME;
pub struct CheckedCookie<'s>(xcb_void_cookie_t, &'s Connection);
impl<'s> CheckedCookie<'s>
{
pub fn check(&self) -> Result<(), GenericError>
{
let r = unsafe { xcb_request_check(self.1 .0, self.0) };
if r.is_null() { Ok(()) } else { Err(unsafe { GenericError::from_ptr(r) }) }
}
}
pub struct AtomCookie<'s>(xcb_intern_atom_cookie_t, &'s Connection);
pub type Atom = xcb_atom_t;
impl<'s> AtomCookie<'s>
{
pub fn reply(self) -> Result<Atom, GenericError>
{
let mut _eptr = null_mut();
let r = unsafe { xcb_intern_atom_reply(self.1 .0, self.0, &mut _eptr) };
if r.is_null() { Err(unsafe { GenericError::from_ptr(_eptr) }) } else { Ok(MallocBox(r).atom) }
}
}
use std::mem::transmute;
pub struct GenericEvent(MallocBox<xcb_generic_event_t>);
impl Connection
{
pub fn wait_event(&self) -> Option<GenericEvent>
{
let p = unsafe { xcb_wait_for_event(self.0) };
if p.is_null() { None } else { Some(GenericEvent(MallocBox(p))) }
}
pub fn poll_event(&self) -> Option<GenericEvent>
{
let p = unsafe { xcb_poll_for_event(self.0) };
if p.is_null() { None } else { Some(GenericEvent(MallocBox(p))) }
}
}
impl GenericEvent
{
pub fn response_type(&self) -> u8 { self.0.response_type & !0x80 }
}
pub struct ClientMessageEvent(MallocBox<xcb_client_message_event_t>);
impl ClientMessageEvent
{
pub fn msg_type(&self) -> xcb_atom_t { self.0.type_ }
pub fn data_as_u32(&self) -> u32 { unsafe { *(self.0.data.data.as_ptr() as *const u32) } }
}
pub struct ExposeEvent(MallocBox<xcb_expose_event_t>);
pub struct GenericError(MallocBox<xcb_generic_error_t>);
impl GenericError
{
unsafe fn from_ptr(p: *mut xcb_generic_error_t) -> Self { GenericError(MallocBox(p)) }
}
impl Debug for GenericError
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { write!(fmt, "GenericError(code={})", (*self.0).error_code) }
}
impl Display for GenericError
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { <Self as Debug>::fmt(self, fmt) }
}
impl From<GenericError> for IOError
{
fn from(v: GenericError) -> IOError { IOError::new(ErrorKind::Other, Box::new(v)) }
}
impl ::std::error::Error for GenericError
{
fn description(&self) -> &str { "XCB Generic Error" }
fn cause(&self) -> Option<&::std::error::Error> { None }
}
unsafe impl Send for GenericError {}
unsafe impl Sync for GenericError {}
pub trait Event
{
const RESPONSE_ENUM: u8;
unsafe fn from_ref(g: &GenericEvent) -> &Self;
}
impl Event for ClientMessageEvent
{
const RESPONSE_ENUM: u8 = XCB_CLIENT_MESSAGE;
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
impl Event for ExposeEvent
{
const RESPONSE_ENUM: u8 = XCB_EXPOSE;
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
impl Event for GenericError
{
const RESPONSE_ENUM: u8 = 0; // unused
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
#[repr(C)] pub struct Depth(xcb_depth_t);
impl Depth
{
pub fn depth(&self) -> u8 { self.0.depth }
}
pub struct IterDepths<'c>(xcb_depth_iterator_t<'c>);
impl<'c> Iterator for IterDepths<'c>
{
type Item = &'c Depth;
fn next(&mut self) -> Option<&'c Depth>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_depth_next(&mut self.0); Some(&*p) } }
}
fn size_hint(&self) -> (usize, Option<usize>) { (self.0.rem as _, Some(self.0.rem as _)) }
}
impl Screen
{
pub fn iter_allowed_depths(&self) -> IterDepths { IterDepths(unsafe { xcb_screen_allowed_depths_iterator(&self.0) }) }
}
pub type VisualID = xcb_visualid_t;
#[repr(C)] pub struct VisualType(xcb_visualtype_t);
impl VisualType
{
pub fn id(&self) -> VisualID { self.0.visual_id }
pub fn is_truecolor(&self) -> bool { self.0.class == XCB_VISUAL_CLASS_TRUE_COLOR as _ }
}
pub struct IterVisualTypes<'c>(xcb_visualtype_iterator_t, PhantomData<&'c Connection>);
impl<'c> Iterator for IterVisualTypes<'c>
{
type Item = &'c VisualType;
fn next(&mut self) -> Option<&'c VisualType>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_visualtype_next(&mut self.0); Some(&*p) } }
}
}
impl Depth
{
pub fn iter_visuals(&self) -> IterVisualTypes
{
IterVisualTypes(unsafe { xcb_depth_visuals_iterator(&self.0) }, PhantomData)
}
}
#[allow(non_camel_case_types)]
pub type xcb_bool32_t = u32;
#[repr(C)] #[allow(non_camel_case_types)]
pub struct xcb_create_window_value_list_t
{
pub background_pixmap: xcb_pixmap_t, pub background_pixel: u32,
pub border_pixmap: xcb_pixmap_t, pub border_pixel: u32,
pub bit_gravity: u32, pub win_gravity: u32, pub backing_store: u32, pub backing_planes: u32, pub backing_pixel: u32,
pub override_redirect: xcb_bool32_t, pub save_under: xcb_bool32_t, pub event_mask: u32,
pub do_not_propagate_mask: u32, pub colormap: xcb_colormap_t, pub cursor: xcb_cursor_t
}
extern "C"
{
fn xcb_create_window_value_list_serialize(buffer: *mut *mut ::libc::c_void, value_mask: u32,
aux: *const xcb_create_window_value_list_t) -> ::libc::c_int;
}
#[repr(C)]
pub struct WindowValueList(u32, xcb_create_window_value_list_t);
impl WindowValueList
{
pub fn new() -> Self { WindowValueList(0, unsafe { ::std::mem::zeroed() }) }
pub fn border_pixel(&mut self, p: u32) -> &mut Self
{
self.0 |= XCB_CW_BORDER_PIXEL; self.1.border_pixel = p; self
}
pub fn back_pixel(&mut self, p: u32) -> &mut Self
{
self.0 |= XCB_CW_BACK_PIXEL; self.1.background_pixel = p; self
}
pub fn colormap(&mut self, c: &Colormap) -> &mut Self
{
self.0 |= XCB_CW_COLORMAP; self.1.colormap = c.id(); self
}
pub fn eventmask(&mut self, m: xcb_event_mask_t) -> &mut Self
{
self.0 |= XCB_CW_EVENT_MASK; self.1.event_mask = m; self
}
pub fn serialize(&self) -> MallocBox<::libc::c_void>
{
let mut p = null_mut();
unsafe { xcb_create_window_value_list_serialize(&mut p, self.0, &self.1) };
MallocBox(p)
}
}
pub struct Colormap(xcb_colormap_t);
impl Colormap
{
pub fn new(con: &Connection, visual: VisualID, window: xcb_window_t) -> Self
{
let id = con.new_id();
unsafe { xcb_create_colormap(con.0, XCB_COLORMAP_ALLOC_NONE as _, id, window, visual) }; Colormap(id)
}
pub fn id(&self) -> xcb_colormap_t { self.0 }
}
pub use self::xcb::ffi::{
XCB_EVENT_MASK_EXPOSURE
};
use std::ops::{Deref, DerefMut};
use std::fmt::{Debug, Display, Formatter, Result as FmtResult};
/// Owned malloc-ed pointer box
pub struct MallocBox<T: ?Sized>(pub *mut T);
impl<T: ?Sized> Deref for MallocBox<T> { type Target = T; fn deref(&self) -> &T { unsafe { &*self.0 } } }
impl<T: ?Sized> DerefMut for MallocBox<T> { fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.0 } } }
impl<T: ?Sized> Drop for MallocBox<T>
{
fn drop(&mut self) { unsafe { ::libc::free(self.0 as *mut _) } }
}
impl<T: ?Sized> Debug for MallocBox<T> where T: Debug
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { <T as Debug>::fmt(&self, fmt) }
}
| { let p = self.0.data as *mut _; unsafe { xcb_screen_next(&mut self.0); Some(&*p) } } | conditional_block |
rxcb.rs | //! Objective XCB Wrapper
#![allow(dead_code)]
extern crate univstring; use self::univstring::UnivString;
extern crate xcb;
use self::xcb::ffi::*;
use std::ptr::{null, null_mut};
use std::marker::PhantomData;
use std::io::{Error as IOError, ErrorKind};
#[repr(C)] pub enum WindowIOClass
{
InputOnly = XCB_WINDOW_CLASS_INPUT_ONLY as _,
InputOutput = XCB_WINDOW_CLASS_INPUT_OUTPUT as _,
FromParent = XCB_WINDOW_CLASS_COPY_FROM_PARENT as _
}
pub struct Connection(*mut xcb_connection_t);
impl Connection
{
pub fn new<S: UnivString + ?Sized>(display: Option<&S>) -> Option<Self>
{
let display_name = display.map(|s| s.to_cstr().unwrap());
let p = unsafe
{
xcb_connect(display_name.as_ref().map(|p| p.as_ptr()).unwrap_or(null()), null_mut())
};
if p.is_null() { None } else { Some(Connection(p)) }
}
#[cfg(feature = "with_ferrite")]
pub(crate) fn inner(&self) -> *mut xcb_connection_t { self.0 }
pub fn setup(&self) -> &Setup { unsafe { &*(xcb_get_setup(self.0) as *mut _) } }
pub fn new_id(&self) -> u32 { unsafe { xcb_generate_id(self.0) } }
pub fn new_window_id(&self) -> Window { Window(self.new_id()) }
/*pub fn try_intern(&self, name: &str) -> AtomCookie
{
AtomCookie(unsafe { xcb_intern_atom(self.0, 0, name.len() as _, name.as_ptr()) }, self)
}*/
pub fn intern(&self, name: &str) -> AtomCookie
{
AtomCookie(unsafe { xcb_intern_atom(self.0, 1, name.len() as _, name.as_ptr() as _) }, self)
}
pub fn flush(&self) { unsafe { xcb_flush(self.0); } }
pub fn create_window(&self, depth: Option<u8>, id: &Window, parent: Option<xcb_window_t>,
x: i16, y: i16, width: u16, height: u16, border_width: u16, class: WindowIOClass,
visual: Option<VisualID>, valuelist: &WindowValueList) -> Result<(), GenericError>
{
let serialized = valuelist.serialize();
unsafe
{
CheckedCookie(xcb_create_window_checked(self.0, depth.unwrap_or(XCB_COPY_FROM_PARENT as _), id.0,
parent.unwrap_or_else(|| self.setup().iter_roots().next().unwrap().root()),
x, y, width, height, border_width, class as _, visual.unwrap_or(XCB_COPY_FROM_PARENT as _),
valuelist.0, serialized.0 as *const _), self).check()
}
}
pub fn map_window(&self, w: &Window) -> Result<(), GenericError>
{
unsafe { CheckedCookie(xcb_map_window_checked(self.0, w.0), self).check() }
}
pub fn destroy_window(&self, w: &Window) -> Result<(), GenericError>
{
unsafe { CheckedCookie(xcb_destroy_window_checked(self.0, w.0), self).check() }
}
}
impl Drop for Connection { fn drop(&mut self) { unsafe { xcb_disconnect(self.0) } } }
pub struct Setup(xcb_setup_t);
impl Setup
{
pub fn iter_roots(&self) -> IterRootScreen { IterRootScreen(unsafe { xcb_setup_roots_iterator(&self.0) }) }
}
#[repr(C)] pub struct Screen(xcb_screen_t);
impl Screen
{
pub fn root(&self) -> xcb_window_t { self.0.root }
// pub fn default_colormap(&self) -> xcb_colormap_t { self.0.default_colormap }
}
pub struct IterRootScreen<'s>(xcb_screen_iterator_t<'s>);
impl<'s> Iterator for IterRootScreen<'s>
{
type Item = &'s Screen;
fn next(&mut self) -> Option<&'s Screen>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_screen_next(&mut self.0); Some(&*p) } }
}
}
pub type WindowID = xcb_window_t;
pub struct Window(WindowID);
impl Window
{
pub(crate) fn id(&self) -> WindowID { self.0 }
pub fn replace_property<T: PropertyType + ?Sized>(&self, con: &Connection, property: Atom, value: &T)
{
value.change_property_of(con, self, property, XCB_PROP_MODE_REPLACE)
}
}
pub trait PropertyType
{
const TYPE_ATOM: Atom; const DATA_STRIDE: u32;
fn change_property_of(&self, connection: &Connection, window: &Window, property: Atom, mode: u32);
}
impl PropertyType for str
{
const TYPE_ATOM: Atom = XCB_ATOM_STRING; const DATA_STRIDE: u32 = 8;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, XCB_ATOM_STRING, 8,
self.len() as _, self.as_ptr() as _);
}
}
}
impl PropertyType for Atom
{
const TYPE_ATOM: Atom = XCB_ATOM_ATOM; const DATA_STRIDE: u32 = 32;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32) | }
}
}
impl<E: PropertyType> PropertyType for [E]
{
const TYPE_ATOM: Atom = E::TYPE_ATOM; const DATA_STRIDE: u32 = E::DATA_STRIDE;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, E::TYPE_ATOM, E::DATA_STRIDE as _,
self.len() as _, self.as_ptr() as _);
}
}
}
pub use self::xcb::ffi::XCB_ATOM_WM_NAME;
pub struct CheckedCookie<'s>(xcb_void_cookie_t, &'s Connection);
impl<'s> CheckedCookie<'s>
{
pub fn check(&self) -> Result<(), GenericError>
{
let r = unsafe { xcb_request_check(self.1 .0, self.0) };
if r.is_null() { Ok(()) } else { Err(unsafe { GenericError::from_ptr(r) }) }
}
}
pub struct AtomCookie<'s>(xcb_intern_atom_cookie_t, &'s Connection);
pub type Atom = xcb_atom_t;
impl<'s> AtomCookie<'s>
{
pub fn reply(self) -> Result<Atom, GenericError>
{
let mut _eptr = null_mut();
let r = unsafe { xcb_intern_atom_reply(self.1 .0, self.0, &mut _eptr) };
if r.is_null() { Err(unsafe { GenericError::from_ptr(_eptr) }) } else { Ok(MallocBox(r).atom) }
}
}
use std::mem::transmute;
pub struct GenericEvent(MallocBox<xcb_generic_event_t>);
impl Connection
{
pub fn wait_event(&self) -> Option<GenericEvent>
{
let p = unsafe { xcb_wait_for_event(self.0) };
if p.is_null() { None } else { Some(GenericEvent(MallocBox(p))) }
}
pub fn poll_event(&self) -> Option<GenericEvent>
{
let p = unsafe { xcb_poll_for_event(self.0) };
if p.is_null() { None } else { Some(GenericEvent(MallocBox(p))) }
}
}
impl GenericEvent
{
pub fn response_type(&self) -> u8 { self.0.response_type & !0x80 }
}
pub struct ClientMessageEvent(MallocBox<xcb_client_message_event_t>);
impl ClientMessageEvent
{
pub fn msg_type(&self) -> xcb_atom_t { self.0.type_ }
pub fn data_as_u32(&self) -> u32 { unsafe { *(self.0.data.data.as_ptr() as *const u32) } }
}
pub struct ExposeEvent(MallocBox<xcb_expose_event_t>);
pub struct GenericError(MallocBox<xcb_generic_error_t>);
impl GenericError
{
unsafe fn from_ptr(p: *mut xcb_generic_error_t) -> Self { GenericError(MallocBox(p)) }
}
impl Debug for GenericError
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { write!(fmt, "GenericError(code={})", (*self.0).error_code) }
}
impl Display for GenericError
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { <Self as Debug>::fmt(self, fmt) }
}
impl From<GenericError> for IOError
{
fn from(v: GenericError) -> IOError { IOError::new(ErrorKind::Other, Box::new(v)) }
}
impl ::std::error::Error for GenericError
{
fn description(&self) -> &str { "XCB Generic Error" }
fn cause(&self) -> Option<&::std::error::Error> { None }
}
unsafe impl Send for GenericError {}
unsafe impl Sync for GenericError {}
pub trait Event
{
const RESPONSE_ENUM: u8;
unsafe fn from_ref(g: &GenericEvent) -> &Self;
}
impl Event for ClientMessageEvent
{
const RESPONSE_ENUM: u8 = XCB_CLIENT_MESSAGE;
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
impl Event for ExposeEvent
{
const RESPONSE_ENUM: u8 = XCB_EXPOSE;
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
impl Event for GenericError
{
const RESPONSE_ENUM: u8 = 0; // unused
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
#[repr(C)] pub struct Depth(xcb_depth_t);
impl Depth
{
pub fn depth(&self) -> u8 { self.0.depth }
}
pub struct IterDepths<'c>(xcb_depth_iterator_t<'c>);
impl<'c> Iterator for IterDepths<'c>
{
type Item = &'c Depth;
fn next(&mut self) -> Option<&'c Depth>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_depth_next(&mut self.0); Some(&*p) } }
}
fn size_hint(&self) -> (usize, Option<usize>) { (self.0.rem as _, Some(self.0.rem as _)) }
}
impl Screen
{
pub fn iter_allowed_depths(&self) -> IterDepths { IterDepths(unsafe { xcb_screen_allowed_depths_iterator(&self.0) }) }
}
pub type VisualID = xcb_visualid_t;
#[repr(C)] pub struct VisualType(xcb_visualtype_t);
impl VisualType
{
pub fn id(&self) -> VisualID { self.0.visual_id }
pub fn is_truecolor(&self) -> bool { self.0.class == XCB_VISUAL_CLASS_TRUE_COLOR as _ }
}
pub struct IterVisualTypes<'c>(xcb_visualtype_iterator_t, PhantomData<&'c Connection>);
impl<'c> Iterator for IterVisualTypes<'c>
{
type Item = &'c VisualType;
fn next(&mut self) -> Option<&'c VisualType>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_visualtype_next(&mut self.0); Some(&*p) } }
}
}
impl Depth
{
pub fn iter_visuals(&self) -> IterVisualTypes
{
IterVisualTypes(unsafe { xcb_depth_visuals_iterator(&self.0) }, PhantomData)
}
}
#[allow(non_camel_case_types)]
pub type xcb_bool32_t = u32;
#[repr(C)] #[allow(non_camel_case_types)]
pub struct xcb_create_window_value_list_t
{
pub background_pixmap: xcb_pixmap_t, pub background_pixel: u32,
pub border_pixmap: xcb_pixmap_t, pub border_pixel: u32,
pub bit_gravity: u32, pub win_gravity: u32, pub backing_store: u32, pub backing_planes: u32, pub backing_pixel: u32,
pub override_redirect: xcb_bool32_t, pub save_under: xcb_bool32_t, pub event_mask: u32,
pub do_not_propagate_mask: u32, pub colormap: xcb_colormap_t, pub cursor: xcb_cursor_t
}
extern "C"
{
fn xcb_create_window_value_list_serialize(buffer: *mut *mut ::libc::c_void, value_mask: u32,
aux: *const xcb_create_window_value_list_t) -> ::libc::c_int;
}
#[repr(C)]
pub struct WindowValueList(u32, xcb_create_window_value_list_t);
impl WindowValueList
{
pub fn new() -> Self { WindowValueList(0, unsafe { ::std::mem::zeroed() }) }
pub fn border_pixel(&mut self, p: u32) -> &mut Self
{
self.0 |= XCB_CW_BORDER_PIXEL; self.1.border_pixel = p; self
}
pub fn back_pixel(&mut self, p: u32) -> &mut Self
{
self.0 |= XCB_CW_BACK_PIXEL; self.1.background_pixel = p; self
}
pub fn colormap(&mut self, c: &Colormap) -> &mut Self
{
self.0 |= XCB_CW_COLORMAP; self.1.colormap = c.id(); self
}
pub fn eventmask(&mut self, m: xcb_event_mask_t) -> &mut Self
{
self.0 |= XCB_CW_EVENT_MASK; self.1.event_mask = m; self
}
pub fn serialize(&self) -> MallocBox<::libc::c_void>
{
let mut p = null_mut();
unsafe { xcb_create_window_value_list_serialize(&mut p, self.0, &self.1) };
MallocBox(p)
}
}
pub struct Colormap(xcb_colormap_t);
impl Colormap
{
pub fn new(con: &Connection, visual: VisualID, window: xcb_window_t) -> Self
{
let id = con.new_id();
unsafe { xcb_create_colormap(con.0, XCB_COLORMAP_ALLOC_NONE as _, id, window, visual) }; Colormap(id)
}
pub fn id(&self) -> xcb_colormap_t { self.0 }
}
pub use self::xcb::ffi::{
XCB_EVENT_MASK_EXPOSURE
};
use std::ops::{Deref, DerefMut};
use std::fmt::{Debug, Display, Formatter, Result as FmtResult};
/// Owned malloc-ed pointer box
pub struct MallocBox<T: ?Sized>(pub *mut T);
impl<T: ?Sized> Deref for MallocBox<T> { type Target = T; fn deref(&self) -> &T { unsafe { &*self.0 } } }
impl<T: ?Sized> DerefMut for MallocBox<T> { fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.0 } } }
impl<T: ?Sized> Drop for MallocBox<T>
{
fn drop(&mut self) { unsafe { ::libc::free(self.0 as *mut _) } }
}
impl<T: ?Sized> Debug for MallocBox<T> where T: Debug
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { <T as Debug>::fmt(&self, fmt) }
} | {
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, XCB_ATOM_ATOM, 32, 1,
self as *const Atom as *const _); | random_line_split |
rxcb.rs | //! Objective XCB Wrapper
#![allow(dead_code)]
extern crate univstring; use self::univstring::UnivString;
extern crate xcb;
use self::xcb::ffi::*;
use std::ptr::{null, null_mut};
use std::marker::PhantomData;
use std::io::{Error as IOError, ErrorKind};
#[repr(C)] pub enum WindowIOClass
{
InputOnly = XCB_WINDOW_CLASS_INPUT_ONLY as _,
InputOutput = XCB_WINDOW_CLASS_INPUT_OUTPUT as _,
FromParent = XCB_WINDOW_CLASS_COPY_FROM_PARENT as _
}
pub struct Connection(*mut xcb_connection_t);
impl Connection
{
pub fn new<S: UnivString + ?Sized>(display: Option<&S>) -> Option<Self>
{
let display_name = display.map(|s| s.to_cstr().unwrap());
let p = unsafe
{
xcb_connect(display_name.as_ref().map(|p| p.as_ptr()).unwrap_or(null()), null_mut())
};
if p.is_null() { None } else { Some(Connection(p)) }
}
#[cfg(feature = "with_ferrite")]
pub(crate) fn inner(&self) -> *mut xcb_connection_t { self.0 }
pub fn setup(&self) -> &Setup { unsafe { &*(xcb_get_setup(self.0) as *mut _) } }
pub fn new_id(&self) -> u32 { unsafe { xcb_generate_id(self.0) } }
pub fn new_window_id(&self) -> Window { Window(self.new_id()) }
/*pub fn try_intern(&self, name: &str) -> AtomCookie
{
AtomCookie(unsafe { xcb_intern_atom(self.0, 0, name.len() as _, name.as_ptr()) }, self)
}*/
pub fn intern(&self, name: &str) -> AtomCookie
{
AtomCookie(unsafe { xcb_intern_atom(self.0, 1, name.len() as _, name.as_ptr() as _) }, self)
}
pub fn flush(&self) { unsafe { xcb_flush(self.0); } }
pub fn create_window(&self, depth: Option<u8>, id: &Window, parent: Option<xcb_window_t>,
x: i16, y: i16, width: u16, height: u16, border_width: u16, class: WindowIOClass,
visual: Option<VisualID>, valuelist: &WindowValueList) -> Result<(), GenericError>
{
let serialized = valuelist.serialize();
unsafe
{
CheckedCookie(xcb_create_window_checked(self.0, depth.unwrap_or(XCB_COPY_FROM_PARENT as _), id.0,
parent.unwrap_or_else(|| self.setup().iter_roots().next().unwrap().root()),
x, y, width, height, border_width, class as _, visual.unwrap_or(XCB_COPY_FROM_PARENT as _),
valuelist.0, serialized.0 as *const _), self).check()
}
}
pub fn map_window(&self, w: &Window) -> Result<(), GenericError>
{
unsafe { CheckedCookie(xcb_map_window_checked(self.0, w.0), self).check() }
}
pub fn destroy_window(&self, w: &Window) -> Result<(), GenericError>
{
unsafe { CheckedCookie(xcb_destroy_window_checked(self.0, w.0), self).check() }
}
}
impl Drop for Connection { fn drop(&mut self) { unsafe { xcb_disconnect(self.0) } } }
pub struct Setup(xcb_setup_t);
impl Setup
{
pub fn iter_roots(&self) -> IterRootScreen { IterRootScreen(unsafe { xcb_setup_roots_iterator(&self.0) }) }
}
#[repr(C)] pub struct Screen(xcb_screen_t);
impl Screen
{
pub fn root(&self) -> xcb_window_t { self.0.root }
// pub fn default_colormap(&self) -> xcb_colormap_t { self.0.default_colormap }
}
pub struct IterRootScreen<'s>(xcb_screen_iterator_t<'s>);
impl<'s> Iterator for IterRootScreen<'s>
{
type Item = &'s Screen;
fn next(&mut self) -> Option<&'s Screen>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_screen_next(&mut self.0); Some(&*p) } }
}
}
pub type WindowID = xcb_window_t;
pub struct Window(WindowID);
impl Window
{
pub(crate) fn id(&self) -> WindowID { self.0 }
pub fn replace_property<T: PropertyType + ?Sized>(&self, con: &Connection, property: Atom, value: &T)
{
value.change_property_of(con, self, property, XCB_PROP_MODE_REPLACE)
}
}
pub trait PropertyType
{
const TYPE_ATOM: Atom; const DATA_STRIDE: u32;
fn change_property_of(&self, connection: &Connection, window: &Window, property: Atom, mode: u32);
}
impl PropertyType for str
{
const TYPE_ATOM: Atom = XCB_ATOM_STRING; const DATA_STRIDE: u32 = 8;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, XCB_ATOM_STRING, 8,
self.len() as _, self.as_ptr() as _);
}
}
}
impl PropertyType for Atom
{
const TYPE_ATOM: Atom = XCB_ATOM_ATOM; const DATA_STRIDE: u32 = 32;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, XCB_ATOM_ATOM, 32, 1,
self as *const Atom as *const _);
}
}
}
impl<E: PropertyType> PropertyType for [E]
{
const TYPE_ATOM: Atom = E::TYPE_ATOM; const DATA_STRIDE: u32 = E::DATA_STRIDE;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, E::TYPE_ATOM, E::DATA_STRIDE as _,
self.len() as _, self.as_ptr() as _);
}
}
}
pub use self::xcb::ffi::XCB_ATOM_WM_NAME;
pub struct CheckedCookie<'s>(xcb_void_cookie_t, &'s Connection);
impl<'s> CheckedCookie<'s>
{
pub fn check(&self) -> Result<(), GenericError>
{
let r = unsafe { xcb_request_check(self.1 .0, self.0) };
if r.is_null() { Ok(()) } else { Err(unsafe { GenericError::from_ptr(r) }) }
}
}
pub struct AtomCookie<'s>(xcb_intern_atom_cookie_t, &'s Connection);
pub type Atom = xcb_atom_t;
impl<'s> AtomCookie<'s>
{
pub fn reply(self) -> Result<Atom, GenericError>
{
let mut _eptr = null_mut();
let r = unsafe { xcb_intern_atom_reply(self.1 .0, self.0, &mut _eptr) };
if r.is_null() { Err(unsafe { GenericError::from_ptr(_eptr) }) } else { Ok(MallocBox(r).atom) }
}
}
use std::mem::transmute;
pub struct GenericEvent(MallocBox<xcb_generic_event_t>);
impl Connection
{
pub fn wait_event(&self) -> Option<GenericEvent>
{
let p = unsafe { xcb_wait_for_event(self.0) };
if p.is_null() { None } else { Some(GenericEvent(MallocBox(p))) }
}
pub fn poll_event(&self) -> Option<GenericEvent>
{
let p = unsafe { xcb_poll_for_event(self.0) };
if p.is_null() { None } else { Some(GenericEvent(MallocBox(p))) }
}
}
impl GenericEvent
{
pub fn response_type(&self) -> u8 { self.0.response_type & !0x80 }
}
pub struct ClientMessageEvent(MallocBox<xcb_client_message_event_t>);
impl ClientMessageEvent
{
pub fn msg_type(&self) -> xcb_atom_t { self.0.type_ }
pub fn data_as_u32(&self) -> u32 { unsafe { *(self.0.data.data.as_ptr() as *const u32) } }
}
pub struct ExposeEvent(MallocBox<xcb_expose_event_t>);
pub struct GenericError(MallocBox<xcb_generic_error_t>);
impl GenericError
{
unsafe fn from_ptr(p: *mut xcb_generic_error_t) -> Self { GenericError(MallocBox(p)) }
}
impl Debug for GenericError
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { write!(fmt, "GenericError(code={})", (*self.0).error_code) }
}
impl Display for GenericError
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { <Self as Debug>::fmt(self, fmt) }
}
impl From<GenericError> for IOError
{
fn from(v: GenericError) -> IOError { IOError::new(ErrorKind::Other, Box::new(v)) }
}
impl ::std::error::Error for GenericError
{
fn description(&self) -> &str { "XCB Generic Error" }
fn cause(&self) -> Option<&::std::error::Error> { None }
}
unsafe impl Send for GenericError {}
unsafe impl Sync for GenericError {}
pub trait Event
{
const RESPONSE_ENUM: u8;
unsafe fn from_ref(g: &GenericEvent) -> &Self;
}
impl Event for ClientMessageEvent
{
const RESPONSE_ENUM: u8 = XCB_CLIENT_MESSAGE;
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
impl Event for ExposeEvent
{
const RESPONSE_ENUM: u8 = XCB_EXPOSE;
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
impl Event for GenericError
{
const RESPONSE_ENUM: u8 = 0; // unused
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
#[repr(C)] pub struct Depth(xcb_depth_t);
impl Depth
{
pub fn depth(&self) -> u8 { self.0.depth }
}
pub struct IterDepths<'c>(xcb_depth_iterator_t<'c>);
impl<'c> Iterator for IterDepths<'c>
{
type Item = &'c Depth;
fn next(&mut self) -> Option<&'c Depth>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_depth_next(&mut self.0); Some(&*p) } }
}
fn size_hint(&self) -> (usize, Option<usize>) { (self.0.rem as _, Some(self.0.rem as _)) }
}
impl Screen
{
pub fn iter_allowed_depths(&self) -> IterDepths { IterDepths(unsafe { xcb_screen_allowed_depths_iterator(&self.0) }) }
}
pub type VisualID = xcb_visualid_t;
#[repr(C)] pub struct VisualType(xcb_visualtype_t);
impl VisualType
{
pub fn id(&self) -> VisualID { self.0.visual_id }
pub fn is_truecolor(&self) -> bool { self.0.class == XCB_VISUAL_CLASS_TRUE_COLOR as _ }
}
pub struct IterVisualTypes<'c>(xcb_visualtype_iterator_t, PhantomData<&'c Connection>);
impl<'c> Iterator for IterVisualTypes<'c>
{
type Item = &'c VisualType;
fn next(&mut self) -> Option<&'c VisualType>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_visualtype_next(&mut self.0); Some(&*p) } }
}
}
impl Depth
{
pub fn iter_visuals(&self) -> IterVisualTypes
{
IterVisualTypes(unsafe { xcb_depth_visuals_iterator(&self.0) }, PhantomData)
}
}
#[allow(non_camel_case_types)]
pub type xcb_bool32_t = u32;
#[repr(C)] #[allow(non_camel_case_types)]
pub struct xcb_create_window_value_list_t
{
pub background_pixmap: xcb_pixmap_t, pub background_pixel: u32,
pub border_pixmap: xcb_pixmap_t, pub border_pixel: u32,
pub bit_gravity: u32, pub win_gravity: u32, pub backing_store: u32, pub backing_planes: u32, pub backing_pixel: u32,
pub override_redirect: xcb_bool32_t, pub save_under: xcb_bool32_t, pub event_mask: u32,
pub do_not_propagate_mask: u32, pub colormap: xcb_colormap_t, pub cursor: xcb_cursor_t
}
extern "C"
{
fn xcb_create_window_value_list_serialize(buffer: *mut *mut ::libc::c_void, value_mask: u32,
aux: *const xcb_create_window_value_list_t) -> ::libc::c_int;
}
#[repr(C)]
pub struct WindowValueList(u32, xcb_create_window_value_list_t);
impl WindowValueList
{
pub fn new() -> Self { WindowValueList(0, unsafe { ::std::mem::zeroed() }) }
pub fn border_pixel(&mut self, p: u32) -> &mut Self
{
self.0 |= XCB_CW_BORDER_PIXEL; self.1.border_pixel = p; self
}
pub fn back_pixel(&mut self, p: u32) -> &mut Self
{
self.0 |= XCB_CW_BACK_PIXEL; self.1.background_pixel = p; self
}
pub fn colormap(&mut self, c: &Colormap) -> &mut Self
{
self.0 |= XCB_CW_COLORMAP; self.1.colormap = c.id(); self
}
pub fn eventmask(&mut self, m: xcb_event_mask_t) -> &mut Self
{
self.0 |= XCB_CW_EVENT_MASK; self.1.event_mask = m; self
}
pub fn serialize(&self) -> MallocBox<::libc::c_void>
{
let mut p = null_mut();
unsafe { xcb_create_window_value_list_serialize(&mut p, self.0, &self.1) };
MallocBox(p)
}
}
pub struct Colormap(xcb_colormap_t);
impl Colormap
{
pub fn new(con: &Connection, visual: VisualID, window: xcb_window_t) -> Self
{
let id = con.new_id();
unsafe { xcb_create_colormap(con.0, XCB_COLORMAP_ALLOC_NONE as _, id, window, visual) }; Colormap(id)
}
pub fn id(&self) -> xcb_colormap_t { self.0 }
}
pub use self::xcb::ffi::{
XCB_EVENT_MASK_EXPOSURE
};
use std::ops::{Deref, DerefMut};
use std::fmt::{Debug, Display, Formatter, Result as FmtResult};
/// Owned malloc-ed pointer box
pub struct MallocBox<T: ?Sized>(pub *mut T);
impl<T: ?Sized> Deref for MallocBox<T> { type Target = T; fn deref(&self) -> &T | }
impl<T: ?Sized> DerefMut for MallocBox<T> { fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.0 } } }
impl<T: ?Sized> Drop for MallocBox<T>
{
fn drop(&mut self) { unsafe { ::libc::free(self.0 as *mut _) } }
}
impl<T: ?Sized> Debug for MallocBox<T> where T: Debug
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { <T as Debug>::fmt(&self, fmt) }
}
| { unsafe { &*self.0 } } | identifier_body |
rxcb.rs | //! Objective XCB Wrapper
#![allow(dead_code)]
extern crate univstring; use self::univstring::UnivString;
extern crate xcb;
use self::xcb::ffi::*;
use std::ptr::{null, null_mut};
use std::marker::PhantomData;
use std::io::{Error as IOError, ErrorKind};
#[repr(C)] pub enum WindowIOClass
{
InputOnly = XCB_WINDOW_CLASS_INPUT_ONLY as _,
InputOutput = XCB_WINDOW_CLASS_INPUT_OUTPUT as _,
FromParent = XCB_WINDOW_CLASS_COPY_FROM_PARENT as _
}
pub struct Connection(*mut xcb_connection_t);
impl Connection
{
pub fn new<S: UnivString + ?Sized>(display: Option<&S>) -> Option<Self>
{
let display_name = display.map(|s| s.to_cstr().unwrap());
let p = unsafe
{
xcb_connect(display_name.as_ref().map(|p| p.as_ptr()).unwrap_or(null()), null_mut())
};
if p.is_null() { None } else { Some(Connection(p)) }
}
#[cfg(feature = "with_ferrite")]
pub(crate) fn inner(&self) -> *mut xcb_connection_t { self.0 }
pub fn setup(&self) -> &Setup { unsafe { &*(xcb_get_setup(self.0) as *mut _) } }
pub fn new_id(&self) -> u32 { unsafe { xcb_generate_id(self.0) } }
pub fn | (&self) -> Window { Window(self.new_id()) }
/*pub fn try_intern(&self, name: &str) -> AtomCookie
{
AtomCookie(unsafe { xcb_intern_atom(self.0, 0, name.len() as _, name.as_ptr()) }, self)
}*/
pub fn intern(&self, name: &str) -> AtomCookie
{
AtomCookie(unsafe { xcb_intern_atom(self.0, 1, name.len() as _, name.as_ptr() as _) }, self)
}
pub fn flush(&self) { unsafe { xcb_flush(self.0); } }
pub fn create_window(&self, depth: Option<u8>, id: &Window, parent: Option<xcb_window_t>,
x: i16, y: i16, width: u16, height: u16, border_width: u16, class: WindowIOClass,
visual: Option<VisualID>, valuelist: &WindowValueList) -> Result<(), GenericError>
{
let serialized = valuelist.serialize();
unsafe
{
CheckedCookie(xcb_create_window_checked(self.0, depth.unwrap_or(XCB_COPY_FROM_PARENT as _), id.0,
parent.unwrap_or_else(|| self.setup().iter_roots().next().unwrap().root()),
x, y, width, height, border_width, class as _, visual.unwrap_or(XCB_COPY_FROM_PARENT as _),
valuelist.0, serialized.0 as *const _), self).check()
}
}
pub fn map_window(&self, w: &Window) -> Result<(), GenericError>
{
unsafe { CheckedCookie(xcb_map_window_checked(self.0, w.0), self).check() }
}
pub fn destroy_window(&self, w: &Window) -> Result<(), GenericError>
{
unsafe { CheckedCookie(xcb_destroy_window_checked(self.0, w.0), self).check() }
}
}
impl Drop for Connection { fn drop(&mut self) { unsafe { xcb_disconnect(self.0) } } }
pub struct Setup(xcb_setup_t);
impl Setup
{
pub fn iter_roots(&self) -> IterRootScreen { IterRootScreen(unsafe { xcb_setup_roots_iterator(&self.0) }) }
}
#[repr(C)] pub struct Screen(xcb_screen_t);
impl Screen
{
pub fn root(&self) -> xcb_window_t { self.0.root }
// pub fn default_colormap(&self) -> xcb_colormap_t { self.0.default_colormap }
}
pub struct IterRootScreen<'s>(xcb_screen_iterator_t<'s>);
impl<'s> Iterator for IterRootScreen<'s>
{
type Item = &'s Screen;
fn next(&mut self) -> Option<&'s Screen>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_screen_next(&mut self.0); Some(&*p) } }
}
}
pub type WindowID = xcb_window_t;
pub struct Window(WindowID);
impl Window
{
pub(crate) fn id(&self) -> WindowID { self.0 }
pub fn replace_property<T: PropertyType + ?Sized>(&self, con: &Connection, property: Atom, value: &T)
{
value.change_property_of(con, self, property, XCB_PROP_MODE_REPLACE)
}
}
pub trait PropertyType
{
const TYPE_ATOM: Atom; const DATA_STRIDE: u32;
fn change_property_of(&self, connection: &Connection, window: &Window, property: Atom, mode: u32);
}
impl PropertyType for str
{
const TYPE_ATOM: Atom = XCB_ATOM_STRING; const DATA_STRIDE: u32 = 8;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, XCB_ATOM_STRING, 8,
self.len() as _, self.as_ptr() as _);
}
}
}
impl PropertyType for Atom
{
const TYPE_ATOM: Atom = XCB_ATOM_ATOM; const DATA_STRIDE: u32 = 32;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, XCB_ATOM_ATOM, 32, 1,
self as *const Atom as *const _);
}
}
}
impl<E: PropertyType> PropertyType for [E]
{
const TYPE_ATOM: Atom = E::TYPE_ATOM; const DATA_STRIDE: u32 = E::DATA_STRIDE;
fn change_property_of(&self, con: &Connection, window: &Window, props: Atom, mode: u32)
{
unsafe
{
xcb_change_property(con.0, mode as _, window.0, props, E::TYPE_ATOM, E::DATA_STRIDE as _,
self.len() as _, self.as_ptr() as _);
}
}
}
pub use self::xcb::ffi::XCB_ATOM_WM_NAME;
pub struct CheckedCookie<'s>(xcb_void_cookie_t, &'s Connection);
impl<'s> CheckedCookie<'s>
{
pub fn check(&self) -> Result<(), GenericError>
{
let r = unsafe { xcb_request_check(self.1 .0, self.0) };
if r.is_null() { Ok(()) } else { Err(unsafe { GenericError::from_ptr(r) }) }
}
}
pub struct AtomCookie<'s>(xcb_intern_atom_cookie_t, &'s Connection);
pub type Atom = xcb_atom_t;
impl<'s> AtomCookie<'s>
{
pub fn reply(self) -> Result<Atom, GenericError>
{
let mut _eptr = null_mut();
let r = unsafe { xcb_intern_atom_reply(self.1 .0, self.0, &mut _eptr) };
if r.is_null() { Err(unsafe { GenericError::from_ptr(_eptr) }) } else { Ok(MallocBox(r).atom) }
}
}
use std::mem::transmute;
pub struct GenericEvent(MallocBox<xcb_generic_event_t>);
impl Connection
{
pub fn wait_event(&self) -> Option<GenericEvent>
{
let p = unsafe { xcb_wait_for_event(self.0) };
if p.is_null() { None } else { Some(GenericEvent(MallocBox(p))) }
}
pub fn poll_event(&self) -> Option<GenericEvent>
{
let p = unsafe { xcb_poll_for_event(self.0) };
if p.is_null() { None } else { Some(GenericEvent(MallocBox(p))) }
}
}
impl GenericEvent
{
pub fn response_type(&self) -> u8 { self.0.response_type & !0x80 }
}
pub struct ClientMessageEvent(MallocBox<xcb_client_message_event_t>);
impl ClientMessageEvent
{
pub fn msg_type(&self) -> xcb_atom_t { self.0.type_ }
pub fn data_as_u32(&self) -> u32 { unsafe { *(self.0.data.data.as_ptr() as *const u32) } }
}
pub struct ExposeEvent(MallocBox<xcb_expose_event_t>);
pub struct GenericError(MallocBox<xcb_generic_error_t>);
impl GenericError
{
unsafe fn from_ptr(p: *mut xcb_generic_error_t) -> Self { GenericError(MallocBox(p)) }
}
impl Debug for GenericError
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { write!(fmt, "GenericError(code={})", (*self.0).error_code) }
}
impl Display for GenericError
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { <Self as Debug>::fmt(self, fmt) }
}
impl From<GenericError> for IOError
{
fn from(v: GenericError) -> IOError { IOError::new(ErrorKind::Other, Box::new(v)) }
}
impl ::std::error::Error for GenericError
{
fn description(&self) -> &str { "XCB Generic Error" }
fn cause(&self) -> Option<&::std::error::Error> { None }
}
unsafe impl Send for GenericError {}
unsafe impl Sync for GenericError {}
pub trait Event
{
const RESPONSE_ENUM: u8;
unsafe fn from_ref(g: &GenericEvent) -> &Self;
}
impl Event for ClientMessageEvent
{
const RESPONSE_ENUM: u8 = XCB_CLIENT_MESSAGE;
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
impl Event for ExposeEvent
{
const RESPONSE_ENUM: u8 = XCB_EXPOSE;
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
impl Event for GenericError
{
const RESPONSE_ENUM: u8 = 0; // unused
unsafe fn from_ref(g: &GenericEvent) -> &Self { transmute(g) }
}
#[repr(C)] pub struct Depth(xcb_depth_t);
impl Depth
{
pub fn depth(&self) -> u8 { self.0.depth }
}
pub struct IterDepths<'c>(xcb_depth_iterator_t<'c>);
impl<'c> Iterator for IterDepths<'c>
{
type Item = &'c Depth;
fn next(&mut self) -> Option<&'c Depth>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_depth_next(&mut self.0); Some(&*p) } }
}
fn size_hint(&self) -> (usize, Option<usize>) { (self.0.rem as _, Some(self.0.rem as _)) }
}
impl Screen
{
pub fn iter_allowed_depths(&self) -> IterDepths { IterDepths(unsafe { xcb_screen_allowed_depths_iterator(&self.0) }) }
}
pub type VisualID = xcb_visualid_t;
#[repr(C)] pub struct VisualType(xcb_visualtype_t);
impl VisualType
{
pub fn id(&self) -> VisualID { self.0.visual_id }
pub fn is_truecolor(&self) -> bool { self.0.class == XCB_VISUAL_CLASS_TRUE_COLOR as _ }
}
pub struct IterVisualTypes<'c>(xcb_visualtype_iterator_t, PhantomData<&'c Connection>);
impl<'c> Iterator for IterVisualTypes<'c>
{
type Item = &'c VisualType;
fn next(&mut self) -> Option<&'c VisualType>
{
if self.0.rem <= 0 { None }
else { let p = self.0.data as *mut _; unsafe { xcb_visualtype_next(&mut self.0); Some(&*p) } }
}
}
impl Depth
{
pub fn iter_visuals(&self) -> IterVisualTypes
{
IterVisualTypes(unsafe { xcb_depth_visuals_iterator(&self.0) }, PhantomData)
}
}
#[allow(non_camel_case_types)]
pub type xcb_bool32_t = u32;
#[repr(C)] #[allow(non_camel_case_types)]
pub struct xcb_create_window_value_list_t
{
pub background_pixmap: xcb_pixmap_t, pub background_pixel: u32,
pub border_pixmap: xcb_pixmap_t, pub border_pixel: u32,
pub bit_gravity: u32, pub win_gravity: u32, pub backing_store: u32, pub backing_planes: u32, pub backing_pixel: u32,
pub override_redirect: xcb_bool32_t, pub save_under: xcb_bool32_t, pub event_mask: u32,
pub do_not_propagate_mask: u32, pub colormap: xcb_colormap_t, pub cursor: xcb_cursor_t
}
extern "C"
{
fn xcb_create_window_value_list_serialize(buffer: *mut *mut ::libc::c_void, value_mask: u32,
aux: *const xcb_create_window_value_list_t) -> ::libc::c_int;
}
#[repr(C)]
pub struct WindowValueList(u32, xcb_create_window_value_list_t);
impl WindowValueList
{
pub fn new() -> Self { WindowValueList(0, unsafe { ::std::mem::zeroed() }) }
pub fn border_pixel(&mut self, p: u32) -> &mut Self
{
self.0 |= XCB_CW_BORDER_PIXEL; self.1.border_pixel = p; self
}
pub fn back_pixel(&mut self, p: u32) -> &mut Self
{
self.0 |= XCB_CW_BACK_PIXEL; self.1.background_pixel = p; self
}
pub fn colormap(&mut self, c: &Colormap) -> &mut Self
{
self.0 |= XCB_CW_COLORMAP; self.1.colormap = c.id(); self
}
pub fn eventmask(&mut self, m: xcb_event_mask_t) -> &mut Self
{
self.0 |= XCB_CW_EVENT_MASK; self.1.event_mask = m; self
}
pub fn serialize(&self) -> MallocBox<::libc::c_void>
{
let mut p = null_mut();
unsafe { xcb_create_window_value_list_serialize(&mut p, self.0, &self.1) };
MallocBox(p)
}
}
pub struct Colormap(xcb_colormap_t);
impl Colormap
{
pub fn new(con: &Connection, visual: VisualID, window: xcb_window_t) -> Self
{
let id = con.new_id();
unsafe { xcb_create_colormap(con.0, XCB_COLORMAP_ALLOC_NONE as _, id, window, visual) }; Colormap(id)
}
pub fn id(&self) -> xcb_colormap_t { self.0 }
}
pub use self::xcb::ffi::{
XCB_EVENT_MASK_EXPOSURE
};
use std::ops::{Deref, DerefMut};
use std::fmt::{Debug, Display, Formatter, Result as FmtResult};
/// Owned malloc-ed pointer box
pub struct MallocBox<T: ?Sized>(pub *mut T);
impl<T: ?Sized> Deref for MallocBox<T> { type Target = T; fn deref(&self) -> &T { unsafe { &*self.0 } } }
impl<T: ?Sized> DerefMut for MallocBox<T> { fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.0 } } }
impl<T: ?Sized> Drop for MallocBox<T>
{
fn drop(&mut self) { unsafe { ::libc::free(self.0 as *mut _) } }
}
impl<T: ?Sized> Debug for MallocBox<T> where T: Debug
{
fn fmt(&self, fmt: &mut Formatter) -> FmtResult { <T as Debug>::fmt(&self, fmt) }
}
| new_window_id | identifier_name |
workflow.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cadence
import (
"errors"
"fmt"
"time"
"go.uber.org/cadence/common"
"go.uber.org/zap"
)
var (
errActivityParamsBadRequest = errors.New("missing activity parameters through context, check ActivityOptions")
errWorkflowOptionBadRequest = errors.New("missing workflow options through context, check WorkflowOptions")
)
type (
// Channel must be used instead of native go channel by workflow code.
// Use Context.NewChannel method to create an instance.
Channel interface {
// Blocks until it gets a value. when it gets a value assigns to the provided pointer.
// Example:
// var v string
// c.Receive(ctx, &v)
Receive(ctx Context, valuePtr interface{}) (more bool) // more is false when channel is closed
ReceiveAsync(valuePtr interface{}) (ok bool) // ok is true when value was returned
ReceiveAsyncWithMoreFlag(valuePtr interface{}) (ok bool, more bool) // ok is true when value was returned, more is false when channel is closed
Send(ctx Context, v interface{})
SendAsync(v interface{}) (ok bool) // ok when value was sent
Close() // prohibit sends
}
// Selector must be used instead of native go select by workflow code
// Use Context.NewSelector method to create an instance.
Selector interface {
AddReceive(c Channel, f func(c Channel, more bool)) Selector
AddSend(c Channel, v interface{}, f func()) Selector
AddFuture(future Future, f func(f Future)) Selector
AddDefault(f func())
Select(ctx Context)
}
// Future represents the result of an asynchronous computation.
Future interface {
// Get blocks until the future is ready. When ready it either returns non nil error
// or assigns result value to the provided pointer.
// Example:
// var v string
// if err := f.Get(ctx, &v); err != nil {
// return err
// }
// fmt.Printf("Value=%v", v)
Get(ctx Context, valuePtr interface{}) error
// When true Get is guaranteed to not block
IsReady() bool
}
// Settable is used to set value or error on a future.
// See NewFuture function.
Settable interface {
Set(value interface{}, err error)
SetValue(value interface{})
SetError(err error)
Chain(future Future) // Value (or error) of the future become the same of the chained one.
}
// ChildWorkflowFuture represents the result of a child workflow execution
ChildWorkflowFuture interface {
Future
// GetChildWorkflowExecution returns a future that will be ready when child workflow execution started. You can
// get the WorkflowExecution of the child workflow from the future. Then you can use Workflow ID and RunID of
// child workflow to cancel or send signal to child workflow.
GetChildWorkflowExecution() Future
}
// WorkflowType identifies a workflow type.
WorkflowType struct {
Name string
}
// WorkflowExecution Details.
WorkflowExecution struct {
ID string
RunID string
}
// EncodedValue is type alias used to encapsulate/extract encoded result from workflow/activity.
EncodedValue []byte
// ChildWorkflowOptions stores all child workflow specific parameters that will be stored inside of a Context.
ChildWorkflowOptions struct {
// Domain of the child workflow.
// Optional: the current workflow (parent)'s domain will be used if this is not provided.
Domain string
// WorkflowID of the child workflow to be scheduled.
// Optional: an auto generated workflowID will be used if this is not provided.
WorkflowID string
// TaskList that the child workflow needs to be scheduled on.
// Optional: the parent workflow task list will be used if this is not provided.
TaskList string
// ExecutionStartToCloseTimeout - The end to end timeout for the child workflow execution.
// Mandatory: no default
ExecutionStartToCloseTimeout time.Duration
// TaskStartToCloseTimeout - The decision task timeout for the child workflow.
// Optional: default is 10s if this is not provided (or if 0 is provided).
TaskStartToCloseTimeout time.Duration
// ChildPolicy defines the behavior of child workflow when parent workflow is terminated.
// Optional: default to use ChildWorkflowPolicyTerminate if this is not provided
ChildPolicy ChildWorkflowPolicy
// WaitForCancellation - Whether to wait for cancelled child workflow to be ended (child workflow can be ended
// as: completed/failed/timedout/terminated/canceled)
// Optional: default false
WaitForCancellation bool
}
// ChildWorkflowPolicy defines child workflow behavior when parent workflow is terminated.
ChildWorkflowPolicy int32
)
const (
// ChildWorkflowPolicyTerminate is policy that will terminate all child workflows when parent workflow is terminated.
ChildWorkflowPolicyTerminate ChildWorkflowPolicy = 0
// ChildWorkflowPolicyRequestCancel is policy that will send cancel request to all open child workflows when parent
// workflow is terminated.
ChildWorkflowPolicyRequestCancel ChildWorkflowPolicy = 1
// ChildWorkflowPolicyAbandon is policy that will have no impact to child workflow execution when parent workflow is
// terminated.
ChildWorkflowPolicyAbandon ChildWorkflowPolicy = 2
)
// RegisterWorkflow - registers a workflow function with the framework.
// A workflow takes a cadence context and input and returns a (result, error) or just error.
// Examples:
// func sampleWorkflow(ctx cadence.Context, input []byte) (result []byte, err error)
// func sampleWorkflow(ctx cadence.Context, arg1 int, arg2 string) (result []byte, err error)
// func sampleWorkflow(ctx cadence.Context) (result []byte, err error)
// func sampleWorkflow(ctx cadence.Context, arg1 int) (result string, err error)
// Serialization of all primitive types, structures is supported ... except channels, functions, variadic, unsafe pointer.
// This method calls panic if workflowFunc doesn't comply with the expected format.
func RegisterWorkflow(workflowFunc interface{}) {
thImpl := getHostEnvironment()
err := thImpl.RegisterWorkflow(workflowFunc)
if err != nil {
panic(err)
}
}
// NewChannel create new Channel instance
func NewChannel(ctx Context) Channel {
state := getState(ctx)
state.dispatcher.channelSequence++
return NewNamedChannel(ctx, fmt.Sprintf("chan-%v", state.dispatcher.channelSequence))
}
// NewNamedChannel create new Channel instance with a given human readable name.
// Name appears in stack traces that are blocked on this channel.
func NewNamedChannel(ctx Context, name string) Channel {
return &channelImpl{name: name}
}
// NewBufferedChannel create new buffered Channel instance
func NewBufferedChannel(ctx Context, size int) Channel {
return &channelImpl{size: size}
}
// NewNamedBufferedChannel create new BufferedChannel instance with a given human readable name.
// Name appears in stack traces that are blocked on this Channel.
func NewNamedBufferedChannel(ctx Context, name string, size int) Channel {
return &channelImpl{name: name, size: size}
}
// NewSelector creates a new Selector instance.
func NewSelector(ctx Context) Selector {
state := getState(ctx)
state.dispatcher.selectorSequence++
return NewNamedSelector(ctx, fmt.Sprintf("selector-%v", state.dispatcher.selectorSequence))
}
// NewNamedSelector creates a new Selector instance with a given human readable name.
// Name appears in stack traces that are blocked on this Selector.
func NewNamedSelector(ctx Context, name string) Selector {
return &selectorImpl{name: name}
}
// Go creates a new coroutine. It has similar semantic to goroutine in a context of the workflow.
func Go(ctx Context, f func(ctx Context)) {
state := getState(ctx)
state.dispatcher.newCoroutine(ctx, f)
}
// GoNamed creates a new coroutine with a given human readable name.
// It has similar semantic to goroutine in a context of the workflow. | }
// NewFuture creates a new future as well as associated Settable that is used to set its value.
func NewFuture(ctx Context) (Future, Settable) {
impl := &futureImpl{channel: NewChannel(ctx).(*channelImpl)}
return impl, impl
}
// ExecuteActivity requests activity execution in the context of a workflow.
// - Context can be used to pass the settings for this activity.
// For example: task list that this need to be routed, timeouts that need to be configured.
// Use ActivityOptions to pass down the options.
// ao := ActivityOptions{
// TaskList: "exampleTaskList",
// ScheduleToStartTimeout: 10 * time.Second,
// StartToCloseTimeout: 5 * time.Second,
// ScheduleToCloseTimeout: 10 * time.Second,
// HeartbeatTimeout: 0,
// }
// ctx1 := WithActivityOptions(ctx, ao)
//
// or to override a single option
//
// ctx1 := WithTaskList(ctx, "exampleTaskList")
// - f - Either a activity name or a function that is getting scheduled.
// - args - The arguments that need to be passed to the function represented by 'f'.
// - If the activity failed to complete then the future get error would indicate the failure
// and it can be one of ErrorWithDetails, TimeoutError, CanceledError.
// - You can also cancel the pending activity using context(WithCancel(ctx)) and that will fail the activity with
// error CanceledError.
// - returns Future with activity result or failure
func ExecuteActivity(ctx Context, f interface{}, args ...interface{}) Future {
// Validate type and its arguments.
future, settable := newDecodeFuture(ctx, f)
activityType, input, err := getValidatedActivityFunction(f, args)
if err != nil {
settable.Set(nil, err)
return future
}
// Validate context options.
parameters := getActivityOptions(ctx)
parameters, err = getValidatedActivityOptions(ctx)
if err != nil {
settable.Set(nil, err)
return future
}
parameters.ActivityType = *activityType
parameters.Input = input
a := getWorkflowEnvironment(ctx).ExecuteActivity(*parameters, func(r []byte, e error) {
settable.Set(r, e)
})
Go(ctx, func(ctx Context) {
if ctx.Done() == nil {
return // not cancellable.
}
if ctx.Done().Receive(ctx, nil); ctx.Err() == ErrCanceled {
getWorkflowEnvironment(ctx).RequestCancelActivity(a.activityID)
}
})
return future
}
// ExecuteChildWorkflow requests child workflow execution in the context of a workflow.
// - Context can be used to pass the settings for the child workflow.
// For example: task list that this child workflow should be routed, timeouts that need to be configured.
// Use ChildWorkflowOptions to pass down the options.
// cwo := ChildWorkflowOptions{
// ExecutionStartToCloseTimeout: 10 * time.Minute,
// TaskStartToCloseTimeout: time.Minute,
// }
// ctx1 := WithChildWorkflowOptions(ctx, cwo)
// - f - Either a workflow name or a workflow function that is getting scheduled.
// - args - The arguments that need to be passed to the child workflow function represented by 'f'.
// - If the child workflow failed to complete then the future get error would indicate the failure
// and it can be one of ErrorWithDetails, TimeoutError, CanceledError.
// - You can also cancel the pending child workflow using context(WithCancel(ctx)) and that will fail the workflow with
// error CanceledError.
// - returns ChildWorkflowFuture
func ExecuteChildWorkflow(ctx Context, f interface{}, args ...interface{}) ChildWorkflowFuture {
mainFuture, mainSettable := newDecodeFuture(ctx, f)
executionFuture, executionSettable := NewFuture(ctx)
result := childWorkflowFutureImpl{
decodeFutureImpl: mainFuture.(*decodeFutureImpl),
executionFuture: executionFuture.(*futureImpl)}
wfType, input, err := getValidatedWorkerFunction(f, args)
if err != nil {
mainSettable.Set(nil, err)
return result
}
options, err := getValidatedWorkflowOptions(ctx)
if err != nil {
mainSettable.Set(nil, err)
return result
}
options.input = input
options.workflowType = wfType
var childWorkflowExecution *WorkflowExecution
getWorkflowEnvironment(ctx).ExecuteChildWorkflow(*options, func(r []byte, e error) {
mainSettable.Set(r, e)
}, func(r WorkflowExecution, e error) {
if e == nil {
childWorkflowExecution = &r
}
executionSettable.Set(r, e)
})
Go(ctx, func(ctx Context) {
if ctx.Done() == nil {
return // not cancellable.
}
if ctx.Done().Receive(ctx, nil); ctx.Err() == ErrCanceled {
if childWorkflowExecution != nil {
getWorkflowEnvironment(ctx).RequestCancelWorkflow(
*options.domain, childWorkflowExecution.ID, childWorkflowExecution.RunID)
}
}
})
return result
}
// WorkflowInfo information about currently executing workflow
type WorkflowInfo struct {
WorkflowExecution WorkflowExecution
WorkflowType WorkflowType
TaskListName string
ExecutionStartToCloseTimeoutSeconds int32
TaskStartToCloseTimeoutSeconds int32
Domain string
}
// GetWorkflowInfo extracts info of a current workflow from a context.
func GetWorkflowInfo(ctx Context) *WorkflowInfo {
return getWorkflowEnvironment(ctx).WorkflowInfo()
}
// GetLogger returns a logger to be used in workflow's context
func GetLogger(ctx Context) *zap.Logger {
return getWorkflowEnvironment(ctx).GetLogger()
}
// Now returns the current time when the decision is started or replayed.
// The workflow needs to use this Now() to get the wall clock time instead of the Go lang library one.
func Now(ctx Context) time.Time {
return getWorkflowEnvironment(ctx).Now()
}
// NewTimer returns immediately and the future becomes ready after the specified timeout.
// - The current timer resolution implementation is in seconds but is subjected to change.
// - The workflow needs to use this NewTimer() to get the timer instead of the Go lang library one(timer.NewTimer())
// - You can also cancel the pending timer using context(WithCancel(ctx)) and that will cancel the timer with
// error TimerCanceledError.
func NewTimer(ctx Context, d time.Duration) Future {
future, settable := NewFuture(ctx)
if d <= 0 {
settable.Set(true, nil)
return future
}
t := getWorkflowEnvironment(ctx).NewTimer(d, func(r []byte, e error) {
settable.Set(nil, e)
})
if t != nil {
Go(ctx, func(ctx Context) {
if ctx.Done() == nil {
return // not cancellable.
}
// We will cancel the timer either it is explicit cancellation
// (or) we are closed.
ctx.Done().Receive(ctx, nil)
getWorkflowEnvironment(ctx).RequestCancelTimer(t.timerID)
})
}
return future
}
// Sleep pauses the current goroutine for at least the duration d.
// A negative or zero duration causes Sleep to return immediately.
// - The current timer resolution implementation is in seconds but is subjected to change.
// - The workflow needs to use this Sleep() to sleep instead of the Go lang library one(timer.Sleep())
// - You can also cancel the pending sleep using context(WithCancel(ctx)) and that will cancel the sleep with
// error TimerCanceledError.
func Sleep(ctx Context, d time.Duration) (err error) {
t := NewTimer(ctx, d)
err = t.Get(ctx, nil)
return
}
// RequestCancelWorkflow can be used to request cancellation of an external workflow.
// - workflowID - name of the workflow ID.
// - runID - Optional - indicates the instance of a workflow.
// You can specify the domain of the workflow using the context like
// ctx := WithWorkflowDomain(ctx, "domain-name")
func RequestCancelWorkflow(ctx Context, workflowID, runID string) error {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
options := getWorkflowEnvOptions(ctx1)
if options.domain == nil {
return errors.New("need a valid domain")
}
return getWorkflowEnvironment(ctx).RequestCancelWorkflow(*options.domain, workflowID, runID)
}
// WithChildWorkflowOptions adds all workflow options to the context.
func WithChildWorkflowOptions(ctx Context, cwo ChildWorkflowOptions) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
wfOptions := getWorkflowEnvOptions(ctx1)
wfOptions.domain = common.StringPtr(cwo.Domain)
wfOptions.taskListName = common.StringPtr(cwo.TaskList)
wfOptions.workflowID = cwo.WorkflowID
wfOptions.executionStartToCloseTimeoutSeconds = common.Int32Ptr(int32(cwo.ExecutionStartToCloseTimeout.Seconds()))
wfOptions.taskStartToCloseTimeoutSeconds = common.Int32Ptr(int32(cwo.TaskStartToCloseTimeout.Seconds()))
wfOptions.childPolicy = cwo.ChildPolicy
wfOptions.waitForCancellation = cwo.WaitForCancellation
return ctx1
}
// WithWorkflowDomain adds a domain to the context.
func WithWorkflowDomain(ctx Context, name string) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).domain = common.StringPtr(name)
return ctx1
}
// WithWorkflowTaskList adds a task list to the context.
func WithWorkflowTaskList(ctx Context, name string) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).taskListName = common.StringPtr(name)
return ctx1
}
// WithWorkflowID adds a workflowID to the context.
func WithWorkflowID(ctx Context, workflowID string) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).workflowID = workflowID
return ctx1
}
// WithChildPolicy adds a ChildWorkflowPolicy to the context.
func WithChildPolicy(ctx Context, childPolicy ChildWorkflowPolicy) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).childPolicy = childPolicy
return ctx1
}
// WithExecutionStartToCloseTimeout adds a workflow execution timeout to the context.
func WithExecutionStartToCloseTimeout(ctx Context, d time.Duration) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).executionStartToCloseTimeoutSeconds = common.Int32Ptr(int32(d.Seconds()))
return ctx1
}
// WithWorkflowTaskStartToCloseTimeout adds a decision timeout to the context.
func WithWorkflowTaskStartToCloseTimeout(ctx Context, d time.Duration) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).taskStartToCloseTimeoutSeconds = common.Int32Ptr(int32(d.Seconds()))
return ctx1
}
// GetSignalChannel returns channel corresponding to the signal name.
func GetSignalChannel(ctx Context, signalName string) Channel {
return getWorkflowEnvOptions(ctx).getSignalChannel(ctx, signalName)
}
// Get extract data from encoded data to desired value type. valuePtr is pointer to the actual value type.
func (b EncodedValue) Get(valuePtr interface{}) error {
return getHostEnvironment().decodeArg(b, valuePtr)
}
// SideEffect executes provided function once, records its result into the workflow history and doesn't
// reexecute it on replay returning recorded result instead. It can be seen as an "inline" activity.
// Use it only for short nondeterministic code snippets like getting random value or generating UUID.
// The only way to fail SideEffect is to panic which causes decision task failure. The decision task after timeout is
// rescheduled and reexecuted giving SideEffect another chance to succeed.
// Be careful to not return any data from SideEffect function any other way than through its recorded return value.
// For example this code is BROKEN:
//
// var executed bool
// cadence.SideEffect(func(ctx cadence.Context) interface{} {
// executed = true
// return nil
// })
// if executed {
// ....
// } else {
// ....
// }
// On replay the function is not executed, the executed flag is not set to true
// and the workflow takes a different path breaking the determinism.
//
// Here is the correct way to use SideEffect:
//
// encodedRandom := SideEffect(func(ctx cadence.Context) interface{} {
// return rand.Intn(100)
// })
// var random int
// encodedRandom.Get(&random)
// if random < 50 {
// ....
// } else {
// ....
// }
func SideEffect(ctx Context, f func(ctx Context) interface{}) EncodedValue {
future, settable := NewFuture(ctx)
wrapperFunc := func() ([]byte, error) {
r := f(ctx)
return getHostEnvironment().encodeArg(r)
}
resultCallback := func(result []byte, err error) {
settable.Set(EncodedValue(result), err)
}
getWorkflowEnvironment(ctx).SideEffect(wrapperFunc, resultCallback)
var encoded EncodedValue
if err := future.Get(ctx, &encoded); err != nil {
panic(err)
}
return encoded
} | // Name appears in stack traces that are blocked on this Channel.
func GoNamed(ctx Context, name string, f func(ctx Context)) {
state := getState(ctx)
state.dispatcher.newNamedCoroutine(ctx, name, f) | random_line_split |
workflow.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cadence
import (
"errors"
"fmt"
"time"
"go.uber.org/cadence/common"
"go.uber.org/zap"
)
var (
errActivityParamsBadRequest = errors.New("missing activity parameters through context, check ActivityOptions")
errWorkflowOptionBadRequest = errors.New("missing workflow options through context, check WorkflowOptions")
)
type (
// Channel must be used instead of native go channel by workflow code.
// Use Context.NewChannel method to create an instance.
Channel interface {
// Blocks until it gets a value. when it gets a value assigns to the provided pointer.
// Example:
// var v string
// c.Receive(ctx, &v)
Receive(ctx Context, valuePtr interface{}) (more bool) // more is false when channel is closed
ReceiveAsync(valuePtr interface{}) (ok bool) // ok is true when value was returned
ReceiveAsyncWithMoreFlag(valuePtr interface{}) (ok bool, more bool) // ok is true when value was returned, more is false when channel is closed
Send(ctx Context, v interface{})
SendAsync(v interface{}) (ok bool) // ok when value was sent
Close() // prohibit sends
}
// Selector must be used instead of native go select by workflow code
// Use Context.NewSelector method to create an instance.
Selector interface {
AddReceive(c Channel, f func(c Channel, more bool)) Selector
AddSend(c Channel, v interface{}, f func()) Selector
AddFuture(future Future, f func(f Future)) Selector
AddDefault(f func())
Select(ctx Context)
}
// Future represents the result of an asynchronous computation.
Future interface {
// Get blocks until the future is ready. When ready it either returns non nil error
// or assigns result value to the provided pointer.
// Example:
// var v string
// if err := f.Get(ctx, &v); err != nil {
// return err
// }
// fmt.Printf("Value=%v", v)
Get(ctx Context, valuePtr interface{}) error
// When true Get is guaranteed to not block
IsReady() bool
}
// Settable is used to set value or error on a future.
// See NewFuture function.
Settable interface {
Set(value interface{}, err error)
SetValue(value interface{})
SetError(err error)
Chain(future Future) // Value (or error) of the future become the same of the chained one.
}
// ChildWorkflowFuture represents the result of a child workflow execution
ChildWorkflowFuture interface {
Future
// GetChildWorkflowExecution returns a future that will be ready when child workflow execution started. You can
// get the WorkflowExecution of the child workflow from the future. Then you can use Workflow ID and RunID of
// child workflow to cancel or send signal to child workflow.
GetChildWorkflowExecution() Future
}
// WorkflowType identifies a workflow type.
WorkflowType struct {
Name string
}
// WorkflowExecution Details.
WorkflowExecution struct {
ID string
RunID string
}
// EncodedValue is type alias used to encapsulate/extract encoded result from workflow/activity.
EncodedValue []byte
// ChildWorkflowOptions stores all child workflow specific parameters that will be stored inside of a Context.
ChildWorkflowOptions struct {
// Domain of the child workflow.
// Optional: the current workflow (parent)'s domain will be used if this is not provided.
Domain string
// WorkflowID of the child workflow to be scheduled.
// Optional: an auto generated workflowID will be used if this is not provided.
WorkflowID string
// TaskList that the child workflow needs to be scheduled on.
// Optional: the parent workflow task list will be used if this is not provided.
TaskList string
// ExecutionStartToCloseTimeout - The end to end timeout for the child workflow execution.
// Mandatory: no default
ExecutionStartToCloseTimeout time.Duration
// TaskStartToCloseTimeout - The decision task timeout for the child workflow.
// Optional: default is 10s if this is not provided (or if 0 is provided).
TaskStartToCloseTimeout time.Duration
// ChildPolicy defines the behavior of child workflow when parent workflow is terminated.
// Optional: default to use ChildWorkflowPolicyTerminate if this is not provided
ChildPolicy ChildWorkflowPolicy
// WaitForCancellation - Whether to wait for cancelled child workflow to be ended (child workflow can be ended
// as: completed/failed/timedout/terminated/canceled)
// Optional: default false
WaitForCancellation bool
}
// ChildWorkflowPolicy defines child workflow behavior when parent workflow is terminated.
ChildWorkflowPolicy int32
)
const (
// ChildWorkflowPolicyTerminate is policy that will terminate all child workflows when parent workflow is terminated.
ChildWorkflowPolicyTerminate ChildWorkflowPolicy = 0
// ChildWorkflowPolicyRequestCancel is policy that will send cancel request to all open child workflows when parent
// workflow is terminated.
ChildWorkflowPolicyRequestCancel ChildWorkflowPolicy = 1
// ChildWorkflowPolicyAbandon is policy that will have no impact to child workflow execution when parent workflow is
// terminated.
ChildWorkflowPolicyAbandon ChildWorkflowPolicy = 2
)
// RegisterWorkflow - registers a workflow function with the framework.
// A workflow takes a cadence context and input and returns a (result, error) or just error.
// Examples:
// func sampleWorkflow(ctx cadence.Context, input []byte) (result []byte, err error)
// func sampleWorkflow(ctx cadence.Context, arg1 int, arg2 string) (result []byte, err error)
// func sampleWorkflow(ctx cadence.Context) (result []byte, err error)
// func sampleWorkflow(ctx cadence.Context, arg1 int) (result string, err error)
// Serialization of all primitive types, structures is supported ... except channels, functions, variadic, unsafe pointer.
// This method calls panic if workflowFunc doesn't comply with the expected format.
func RegisterWorkflow(workflowFunc interface{}) {
thImpl := getHostEnvironment()
err := thImpl.RegisterWorkflow(workflowFunc)
if err != nil {
panic(err)
}
}
// NewChannel create new Channel instance
func NewChannel(ctx Context) Channel {
state := getState(ctx)
state.dispatcher.channelSequence++
return NewNamedChannel(ctx, fmt.Sprintf("chan-%v", state.dispatcher.channelSequence))
}
// NewNamedChannel create new Channel instance with a given human readable name.
// Name appears in stack traces that are blocked on this channel.
func | (ctx Context, name string) Channel {
return &channelImpl{name: name}
}
// NewBufferedChannel create new buffered Channel instance
func NewBufferedChannel(ctx Context, size int) Channel {
return &channelImpl{size: size}
}
// NewNamedBufferedChannel create new BufferedChannel instance with a given human readable name.
// Name appears in stack traces that are blocked on this Channel.
func NewNamedBufferedChannel(ctx Context, name string, size int) Channel {
return &channelImpl{name: name, size: size}
}
// NewSelector creates a new Selector instance.
func NewSelector(ctx Context) Selector {
state := getState(ctx)
state.dispatcher.selectorSequence++
return NewNamedSelector(ctx, fmt.Sprintf("selector-%v", state.dispatcher.selectorSequence))
}
// NewNamedSelector creates a new Selector instance with a given human readable name.
// Name appears in stack traces that are blocked on this Selector.
func NewNamedSelector(ctx Context, name string) Selector {
return &selectorImpl{name: name}
}
// Go creates a new coroutine. It has similar semantic to goroutine in a context of the workflow.
func Go(ctx Context, f func(ctx Context)) {
state := getState(ctx)
state.dispatcher.newCoroutine(ctx, f)
}
// GoNamed creates a new coroutine with a given human readable name.
// It has similar semantic to goroutine in a context of the workflow.
// Name appears in stack traces that are blocked on this Channel.
func GoNamed(ctx Context, name string, f func(ctx Context)) {
state := getState(ctx)
state.dispatcher.newNamedCoroutine(ctx, name, f)
}
// NewFuture creates a new future as well as associated Settable that is used to set its value.
func NewFuture(ctx Context) (Future, Settable) {
impl := &futureImpl{channel: NewChannel(ctx).(*channelImpl)}
return impl, impl
}
// ExecuteActivity requests activity execution in the context of a workflow.
// - Context can be used to pass the settings for this activity.
// For example: task list that this need to be routed, timeouts that need to be configured.
// Use ActivityOptions to pass down the options.
// ao := ActivityOptions{
// TaskList: "exampleTaskList",
// ScheduleToStartTimeout: 10 * time.Second,
// StartToCloseTimeout: 5 * time.Second,
// ScheduleToCloseTimeout: 10 * time.Second,
// HeartbeatTimeout: 0,
// }
// ctx1 := WithActivityOptions(ctx, ao)
//
// or to override a single option
//
// ctx1 := WithTaskList(ctx, "exampleTaskList")
// - f - Either a activity name or a function that is getting scheduled.
// - args - The arguments that need to be passed to the function represented by 'f'.
// - If the activity failed to complete then the future get error would indicate the failure
// and it can be one of ErrorWithDetails, TimeoutError, CanceledError.
// - You can also cancel the pending activity using context(WithCancel(ctx)) and that will fail the activity with
// error CanceledError.
// - returns Future with activity result or failure
func ExecuteActivity(ctx Context, f interface{}, args ...interface{}) Future {
// Validate type and its arguments.
future, settable := newDecodeFuture(ctx, f)
activityType, input, err := getValidatedActivityFunction(f, args)
if err != nil {
settable.Set(nil, err)
return future
}
// Validate context options.
parameters := getActivityOptions(ctx)
parameters, err = getValidatedActivityOptions(ctx)
if err != nil {
settable.Set(nil, err)
return future
}
parameters.ActivityType = *activityType
parameters.Input = input
a := getWorkflowEnvironment(ctx).ExecuteActivity(*parameters, func(r []byte, e error) {
settable.Set(r, e)
})
Go(ctx, func(ctx Context) {
if ctx.Done() == nil {
return // not cancellable.
}
if ctx.Done().Receive(ctx, nil); ctx.Err() == ErrCanceled {
getWorkflowEnvironment(ctx).RequestCancelActivity(a.activityID)
}
})
return future
}
// ExecuteChildWorkflow requests child workflow execution in the context of a workflow.
// - Context can be used to pass the settings for the child workflow.
// For example: task list that this child workflow should be routed, timeouts that need to be configured.
// Use ChildWorkflowOptions to pass down the options.
// cwo := ChildWorkflowOptions{
// ExecutionStartToCloseTimeout: 10 * time.Minute,
// TaskStartToCloseTimeout: time.Minute,
// }
// ctx1 := WithChildWorkflowOptions(ctx, cwo)
// - f - Either a workflow name or a workflow function that is getting scheduled.
// - args - The arguments that need to be passed to the child workflow function represented by 'f'.
// - If the child workflow failed to complete then the future get error would indicate the failure
// and it can be one of ErrorWithDetails, TimeoutError, CanceledError.
// - You can also cancel the pending child workflow using context(WithCancel(ctx)) and that will fail the workflow with
// error CanceledError.
// - returns ChildWorkflowFuture
func ExecuteChildWorkflow(ctx Context, f interface{}, args ...interface{}) ChildWorkflowFuture {
mainFuture, mainSettable := newDecodeFuture(ctx, f)
executionFuture, executionSettable := NewFuture(ctx)
result := childWorkflowFutureImpl{
decodeFutureImpl: mainFuture.(*decodeFutureImpl),
executionFuture: executionFuture.(*futureImpl)}
wfType, input, err := getValidatedWorkerFunction(f, args)
if err != nil {
mainSettable.Set(nil, err)
return result
}
options, err := getValidatedWorkflowOptions(ctx)
if err != nil {
mainSettable.Set(nil, err)
return result
}
options.input = input
options.workflowType = wfType
var childWorkflowExecution *WorkflowExecution
getWorkflowEnvironment(ctx).ExecuteChildWorkflow(*options, func(r []byte, e error) {
mainSettable.Set(r, e)
}, func(r WorkflowExecution, e error) {
if e == nil {
childWorkflowExecution = &r
}
executionSettable.Set(r, e)
})
Go(ctx, func(ctx Context) {
if ctx.Done() == nil {
return // not cancellable.
}
if ctx.Done().Receive(ctx, nil); ctx.Err() == ErrCanceled {
if childWorkflowExecution != nil {
getWorkflowEnvironment(ctx).RequestCancelWorkflow(
*options.domain, childWorkflowExecution.ID, childWorkflowExecution.RunID)
}
}
})
return result
}
// WorkflowInfo information about currently executing workflow
type WorkflowInfo struct {
WorkflowExecution WorkflowExecution
WorkflowType WorkflowType
TaskListName string
ExecutionStartToCloseTimeoutSeconds int32
TaskStartToCloseTimeoutSeconds int32
Domain string
}
// GetWorkflowInfo extracts info of a current workflow from a context.
func GetWorkflowInfo(ctx Context) *WorkflowInfo {
return getWorkflowEnvironment(ctx).WorkflowInfo()
}
// GetLogger returns a logger to be used in workflow's context
func GetLogger(ctx Context) *zap.Logger {
return getWorkflowEnvironment(ctx).GetLogger()
}
// Now returns the current time when the decision is started or replayed.
// The workflow needs to use this Now() to get the wall clock time instead of the Go lang library one.
func Now(ctx Context) time.Time {
return getWorkflowEnvironment(ctx).Now()
}
// NewTimer returns immediately and the future becomes ready after the specified timeout.
// - The current timer resolution implementation is in seconds but is subjected to change.
// - The workflow needs to use this NewTimer() to get the timer instead of the Go lang library one(timer.NewTimer())
// - You can also cancel the pending timer using context(WithCancel(ctx)) and that will cancel the timer with
// error TimerCanceledError.
func NewTimer(ctx Context, d time.Duration) Future {
future, settable := NewFuture(ctx)
if d <= 0 {
settable.Set(true, nil)
return future
}
t := getWorkflowEnvironment(ctx).NewTimer(d, func(r []byte, e error) {
settable.Set(nil, e)
})
if t != nil {
Go(ctx, func(ctx Context) {
if ctx.Done() == nil {
return // not cancellable.
}
// We will cancel the timer either it is explicit cancellation
// (or) we are closed.
ctx.Done().Receive(ctx, nil)
getWorkflowEnvironment(ctx).RequestCancelTimer(t.timerID)
})
}
return future
}
// Sleep pauses the current goroutine for at least the duration d.
// A negative or zero duration causes Sleep to return immediately.
// - The current timer resolution implementation is in seconds but is subjected to change.
// - The workflow needs to use this Sleep() to sleep instead of the Go lang library one(timer.Sleep())
// - You can also cancel the pending sleep using context(WithCancel(ctx)) and that will cancel the sleep with
// error TimerCanceledError.
func Sleep(ctx Context, d time.Duration) (err error) {
t := NewTimer(ctx, d)
err = t.Get(ctx, nil)
return
}
// RequestCancelWorkflow can be used to request cancellation of an external workflow.
// - workflowID - name of the workflow ID.
// - runID - Optional - indicates the instance of a workflow.
// You can specify the domain of the workflow using the context like
// ctx := WithWorkflowDomain(ctx, "domain-name")
func RequestCancelWorkflow(ctx Context, workflowID, runID string) error {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
options := getWorkflowEnvOptions(ctx1)
if options.domain == nil {
return errors.New("need a valid domain")
}
return getWorkflowEnvironment(ctx).RequestCancelWorkflow(*options.domain, workflowID, runID)
}
// WithChildWorkflowOptions adds all workflow options to the context.
func WithChildWorkflowOptions(ctx Context, cwo ChildWorkflowOptions) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
wfOptions := getWorkflowEnvOptions(ctx1)
wfOptions.domain = common.StringPtr(cwo.Domain)
wfOptions.taskListName = common.StringPtr(cwo.TaskList)
wfOptions.workflowID = cwo.WorkflowID
wfOptions.executionStartToCloseTimeoutSeconds = common.Int32Ptr(int32(cwo.ExecutionStartToCloseTimeout.Seconds()))
wfOptions.taskStartToCloseTimeoutSeconds = common.Int32Ptr(int32(cwo.TaskStartToCloseTimeout.Seconds()))
wfOptions.childPolicy = cwo.ChildPolicy
wfOptions.waitForCancellation = cwo.WaitForCancellation
return ctx1
}
// WithWorkflowDomain adds a domain to the context.
func WithWorkflowDomain(ctx Context, name string) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).domain = common.StringPtr(name)
return ctx1
}
// WithWorkflowTaskList adds a task list to the context.
func WithWorkflowTaskList(ctx Context, name string) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).taskListName = common.StringPtr(name)
return ctx1
}
// WithWorkflowID adds a workflowID to the context.
func WithWorkflowID(ctx Context, workflowID string) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).workflowID = workflowID
return ctx1
}
// WithChildPolicy adds a ChildWorkflowPolicy to the context.
func WithChildPolicy(ctx Context, childPolicy ChildWorkflowPolicy) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).childPolicy = childPolicy
return ctx1
}
// WithExecutionStartToCloseTimeout adds a workflow execution timeout to the context.
func WithExecutionStartToCloseTimeout(ctx Context, d time.Duration) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).executionStartToCloseTimeoutSeconds = common.Int32Ptr(int32(d.Seconds()))
return ctx1
}
// WithWorkflowTaskStartToCloseTimeout adds a decision timeout to the context.
func WithWorkflowTaskStartToCloseTimeout(ctx Context, d time.Duration) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).taskStartToCloseTimeoutSeconds = common.Int32Ptr(int32(d.Seconds()))
return ctx1
}
// GetSignalChannel returns channel corresponding to the signal name.
func GetSignalChannel(ctx Context, signalName string) Channel {
return getWorkflowEnvOptions(ctx).getSignalChannel(ctx, signalName)
}
// Get extract data from encoded data to desired value type. valuePtr is pointer to the actual value type.
func (b EncodedValue) Get(valuePtr interface{}) error {
return getHostEnvironment().decodeArg(b, valuePtr)
}
// SideEffect executes provided function once, records its result into the workflow history and doesn't
// reexecute it on replay returning recorded result instead. It can be seen as an "inline" activity.
// Use it only for short nondeterministic code snippets like getting random value or generating UUID.
// The only way to fail SideEffect is to panic which causes decision task failure. The decision task after timeout is
// rescheduled and reexecuted giving SideEffect another chance to succeed.
// Be careful to not return any data from SideEffect function any other way than through its recorded return value.
// For example this code is BROKEN:
//
// var executed bool
// cadence.SideEffect(func(ctx cadence.Context) interface{} {
// executed = true
// return nil
// })
// if executed {
// ....
// } else {
// ....
// }
// On replay the function is not executed, the executed flag is not set to true
// and the workflow takes a different path breaking the determinism.
//
// Here is the correct way to use SideEffect:
//
// encodedRandom := SideEffect(func(ctx cadence.Context) interface{} {
// return rand.Intn(100)
// })
// var random int
// encodedRandom.Get(&random)
// if random < 50 {
// ....
// } else {
// ....
// }
func SideEffect(ctx Context, f func(ctx Context) interface{}) EncodedValue {
future, settable := NewFuture(ctx)
wrapperFunc := func() ([]byte, error) {
r := f(ctx)
return getHostEnvironment().encodeArg(r)
}
resultCallback := func(result []byte, err error) {
settable.Set(EncodedValue(result), err)
}
getWorkflowEnvironment(ctx).SideEffect(wrapperFunc, resultCallback)
var encoded EncodedValue
if err := future.Get(ctx, &encoded); err != nil {
panic(err)
}
return encoded
}
| NewNamedChannel | identifier_name |
workflow.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cadence
import (
"errors"
"fmt"
"time"
"go.uber.org/cadence/common"
"go.uber.org/zap"
)
var (
errActivityParamsBadRequest = errors.New("missing activity parameters through context, check ActivityOptions")
errWorkflowOptionBadRequest = errors.New("missing workflow options through context, check WorkflowOptions")
)
type (
// Channel must be used instead of native go channel by workflow code.
// Use Context.NewChannel method to create an instance.
Channel interface {
// Blocks until it gets a value. when it gets a value assigns to the provided pointer.
// Example:
// var v string
// c.Receive(ctx, &v)
Receive(ctx Context, valuePtr interface{}) (more bool) // more is false when channel is closed
ReceiveAsync(valuePtr interface{}) (ok bool) // ok is true when value was returned
ReceiveAsyncWithMoreFlag(valuePtr interface{}) (ok bool, more bool) // ok is true when value was returned, more is false when channel is closed
Send(ctx Context, v interface{})
SendAsync(v interface{}) (ok bool) // ok when value was sent
Close() // prohibit sends
}
// Selector must be used instead of native go select by workflow code
// Use Context.NewSelector method to create an instance.
Selector interface {
AddReceive(c Channel, f func(c Channel, more bool)) Selector
AddSend(c Channel, v interface{}, f func()) Selector
AddFuture(future Future, f func(f Future)) Selector
AddDefault(f func())
Select(ctx Context)
}
// Future represents the result of an asynchronous computation.
Future interface {
// Get blocks until the future is ready. When ready it either returns non nil error
// or assigns result value to the provided pointer.
// Example:
// var v string
// if err := f.Get(ctx, &v); err != nil {
// return err
// }
// fmt.Printf("Value=%v", v)
Get(ctx Context, valuePtr interface{}) error
// When true Get is guaranteed to not block
IsReady() bool
}
// Settable is used to set value or error on a future.
// See NewFuture function.
Settable interface {
Set(value interface{}, err error)
SetValue(value interface{})
SetError(err error)
Chain(future Future) // Value (or error) of the future become the same of the chained one.
}
// ChildWorkflowFuture represents the result of a child workflow execution
ChildWorkflowFuture interface {
Future
// GetChildWorkflowExecution returns a future that will be ready when child workflow execution started. You can
// get the WorkflowExecution of the child workflow from the future. Then you can use Workflow ID and RunID of
// child workflow to cancel or send signal to child workflow.
GetChildWorkflowExecution() Future
}
// WorkflowType identifies a workflow type.
WorkflowType struct {
Name string
}
// WorkflowExecution Details.
WorkflowExecution struct {
ID string
RunID string
}
// EncodedValue is type alias used to encapsulate/extract encoded result from workflow/activity.
EncodedValue []byte
// ChildWorkflowOptions stores all child workflow specific parameters that will be stored inside of a Context.
ChildWorkflowOptions struct {
// Domain of the child workflow.
// Optional: the current workflow (parent)'s domain will be used if this is not provided.
Domain string
// WorkflowID of the child workflow to be scheduled.
// Optional: an auto generated workflowID will be used if this is not provided.
WorkflowID string
// TaskList that the child workflow needs to be scheduled on.
// Optional: the parent workflow task list will be used if this is not provided.
TaskList string
// ExecutionStartToCloseTimeout - The end to end timeout for the child workflow execution.
// Mandatory: no default
ExecutionStartToCloseTimeout time.Duration
// TaskStartToCloseTimeout - The decision task timeout for the child workflow.
// Optional: default is 10s if this is not provided (or if 0 is provided).
TaskStartToCloseTimeout time.Duration
// ChildPolicy defines the behavior of child workflow when parent workflow is terminated.
// Optional: default to use ChildWorkflowPolicyTerminate if this is not provided
ChildPolicy ChildWorkflowPolicy
// WaitForCancellation - Whether to wait for cancelled child workflow to be ended (child workflow can be ended
// as: completed/failed/timedout/terminated/canceled)
// Optional: default false
WaitForCancellation bool
}
// ChildWorkflowPolicy defines child workflow behavior when parent workflow is terminated.
ChildWorkflowPolicy int32
)
const (
// ChildWorkflowPolicyTerminate is policy that will terminate all child workflows when parent workflow is terminated.
ChildWorkflowPolicyTerminate ChildWorkflowPolicy = 0
// ChildWorkflowPolicyRequestCancel is policy that will send cancel request to all open child workflows when parent
// workflow is terminated.
ChildWorkflowPolicyRequestCancel ChildWorkflowPolicy = 1
// ChildWorkflowPolicyAbandon is policy that will have no impact to child workflow execution when parent workflow is
// terminated.
ChildWorkflowPolicyAbandon ChildWorkflowPolicy = 2
)
// RegisterWorkflow - registers a workflow function with the framework.
// A workflow takes a cadence context and input and returns a (result, error) or just error.
// Examples:
// func sampleWorkflow(ctx cadence.Context, input []byte) (result []byte, err error)
// func sampleWorkflow(ctx cadence.Context, arg1 int, arg2 string) (result []byte, err error)
// func sampleWorkflow(ctx cadence.Context) (result []byte, err error)
// func sampleWorkflow(ctx cadence.Context, arg1 int) (result string, err error)
// Serialization of all primitive types, structures is supported ... except channels, functions, variadic, unsafe pointer.
// This method calls panic if workflowFunc doesn't comply with the expected format.
func RegisterWorkflow(workflowFunc interface{}) {
thImpl := getHostEnvironment()
err := thImpl.RegisterWorkflow(workflowFunc)
if err != nil {
panic(err)
}
}
// NewChannel create new Channel instance
func NewChannel(ctx Context) Channel {
state := getState(ctx)
state.dispatcher.channelSequence++
return NewNamedChannel(ctx, fmt.Sprintf("chan-%v", state.dispatcher.channelSequence))
}
// NewNamedChannel create new Channel instance with a given human readable name.
// Name appears in stack traces that are blocked on this channel.
func NewNamedChannel(ctx Context, name string) Channel {
return &channelImpl{name: name}
}
// NewBufferedChannel create new buffered Channel instance
func NewBufferedChannel(ctx Context, size int) Channel {
return &channelImpl{size: size}
}
// NewNamedBufferedChannel create new BufferedChannel instance with a given human readable name.
// Name appears in stack traces that are blocked on this Channel.
func NewNamedBufferedChannel(ctx Context, name string, size int) Channel {
return &channelImpl{name: name, size: size}
}
// NewSelector creates a new Selector instance.
func NewSelector(ctx Context) Selector {
state := getState(ctx)
state.dispatcher.selectorSequence++
return NewNamedSelector(ctx, fmt.Sprintf("selector-%v", state.dispatcher.selectorSequence))
}
// NewNamedSelector creates a new Selector instance with a given human readable name.
// Name appears in stack traces that are blocked on this Selector.
func NewNamedSelector(ctx Context, name string) Selector {
return &selectorImpl{name: name}
}
// Go creates a new coroutine. It has similar semantic to goroutine in a context of the workflow.
func Go(ctx Context, f func(ctx Context)) {
state := getState(ctx)
state.dispatcher.newCoroutine(ctx, f)
}
// GoNamed creates a new coroutine with a given human readable name.
// It has similar semantic to goroutine in a context of the workflow.
// Name appears in stack traces that are blocked on this Channel.
func GoNamed(ctx Context, name string, f func(ctx Context)) {
state := getState(ctx)
state.dispatcher.newNamedCoroutine(ctx, name, f)
}
// NewFuture creates a new future as well as associated Settable that is used to set its value.
func NewFuture(ctx Context) (Future, Settable) {
impl := &futureImpl{channel: NewChannel(ctx).(*channelImpl)}
return impl, impl
}
// ExecuteActivity requests activity execution in the context of a workflow.
// - Context can be used to pass the settings for this activity.
// For example: task list that this need to be routed, timeouts that need to be configured.
// Use ActivityOptions to pass down the options.
// ao := ActivityOptions{
// TaskList: "exampleTaskList",
// ScheduleToStartTimeout: 10 * time.Second,
// StartToCloseTimeout: 5 * time.Second,
// ScheduleToCloseTimeout: 10 * time.Second,
// HeartbeatTimeout: 0,
// }
// ctx1 := WithActivityOptions(ctx, ao)
//
// or to override a single option
//
// ctx1 := WithTaskList(ctx, "exampleTaskList")
// - f - Either a activity name or a function that is getting scheduled.
// - args - The arguments that need to be passed to the function represented by 'f'.
// - If the activity failed to complete then the future get error would indicate the failure
// and it can be one of ErrorWithDetails, TimeoutError, CanceledError.
// - You can also cancel the pending activity using context(WithCancel(ctx)) and that will fail the activity with
// error CanceledError.
// - returns Future with activity result or failure
func ExecuteActivity(ctx Context, f interface{}, args ...interface{}) Future {
// Validate type and its arguments.
future, settable := newDecodeFuture(ctx, f)
activityType, input, err := getValidatedActivityFunction(f, args)
if err != nil {
settable.Set(nil, err)
return future
}
// Validate context options.
parameters := getActivityOptions(ctx)
parameters, err = getValidatedActivityOptions(ctx)
if err != nil {
settable.Set(nil, err)
return future
}
parameters.ActivityType = *activityType
parameters.Input = input
a := getWorkflowEnvironment(ctx).ExecuteActivity(*parameters, func(r []byte, e error) {
settable.Set(r, e)
})
Go(ctx, func(ctx Context) {
if ctx.Done() == nil {
return // not cancellable.
}
if ctx.Done().Receive(ctx, nil); ctx.Err() == ErrCanceled {
getWorkflowEnvironment(ctx).RequestCancelActivity(a.activityID)
}
})
return future
}
// ExecuteChildWorkflow requests child workflow execution in the context of a workflow.
// - Context can be used to pass the settings for the child workflow.
// For example: task list that this child workflow should be routed, timeouts that need to be configured.
// Use ChildWorkflowOptions to pass down the options.
// cwo := ChildWorkflowOptions{
// ExecutionStartToCloseTimeout: 10 * time.Minute,
// TaskStartToCloseTimeout: time.Minute,
// }
// ctx1 := WithChildWorkflowOptions(ctx, cwo)
// - f - Either a workflow name or a workflow function that is getting scheduled.
// - args - The arguments that need to be passed to the child workflow function represented by 'f'.
// - If the child workflow failed to complete then the future get error would indicate the failure
// and it can be one of ErrorWithDetails, TimeoutError, CanceledError.
// - You can also cancel the pending child workflow using context(WithCancel(ctx)) and that will fail the workflow with
// error CanceledError.
// - returns ChildWorkflowFuture
func ExecuteChildWorkflow(ctx Context, f interface{}, args ...interface{}) ChildWorkflowFuture {
mainFuture, mainSettable := newDecodeFuture(ctx, f)
executionFuture, executionSettable := NewFuture(ctx)
result := childWorkflowFutureImpl{
decodeFutureImpl: mainFuture.(*decodeFutureImpl),
executionFuture: executionFuture.(*futureImpl)}
wfType, input, err := getValidatedWorkerFunction(f, args)
if err != nil {
mainSettable.Set(nil, err)
return result
}
options, err := getValidatedWorkflowOptions(ctx)
if err != nil {
mainSettable.Set(nil, err)
return result
}
options.input = input
options.workflowType = wfType
var childWorkflowExecution *WorkflowExecution
getWorkflowEnvironment(ctx).ExecuteChildWorkflow(*options, func(r []byte, e error) {
mainSettable.Set(r, e)
}, func(r WorkflowExecution, e error) {
if e == nil {
childWorkflowExecution = &r
}
executionSettable.Set(r, e)
})
Go(ctx, func(ctx Context) {
if ctx.Done() == nil {
return // not cancellable.
}
if ctx.Done().Receive(ctx, nil); ctx.Err() == ErrCanceled |
})
return result
}
// WorkflowInfo information about currently executing workflow
type WorkflowInfo struct {
WorkflowExecution WorkflowExecution
WorkflowType WorkflowType
TaskListName string
ExecutionStartToCloseTimeoutSeconds int32
TaskStartToCloseTimeoutSeconds int32
Domain string
}
// GetWorkflowInfo extracts info of a current workflow from a context.
func GetWorkflowInfo(ctx Context) *WorkflowInfo {
return getWorkflowEnvironment(ctx).WorkflowInfo()
}
// GetLogger returns a logger to be used in workflow's context
func GetLogger(ctx Context) *zap.Logger {
return getWorkflowEnvironment(ctx).GetLogger()
}
// Now returns the current time when the decision is started or replayed.
// The workflow needs to use this Now() to get the wall clock time instead of the Go lang library one.
func Now(ctx Context) time.Time {
return getWorkflowEnvironment(ctx).Now()
}
// NewTimer returns immediately and the future becomes ready after the specified timeout.
// - The current timer resolution implementation is in seconds but is subjected to change.
// - The workflow needs to use this NewTimer() to get the timer instead of the Go lang library one(timer.NewTimer())
// - You can also cancel the pending timer using context(WithCancel(ctx)) and that will cancel the timer with
// error TimerCanceledError.
func NewTimer(ctx Context, d time.Duration) Future {
future, settable := NewFuture(ctx)
if d <= 0 {
settable.Set(true, nil)
return future
}
t := getWorkflowEnvironment(ctx).NewTimer(d, func(r []byte, e error) {
settable.Set(nil, e)
})
if t != nil {
Go(ctx, func(ctx Context) {
if ctx.Done() == nil {
return // not cancellable.
}
// We will cancel the timer either it is explicit cancellation
// (or) we are closed.
ctx.Done().Receive(ctx, nil)
getWorkflowEnvironment(ctx).RequestCancelTimer(t.timerID)
})
}
return future
}
// Sleep pauses the current goroutine for at least the duration d.
// A negative or zero duration causes Sleep to return immediately.
// - The current timer resolution implementation is in seconds but is subjected to change.
// - The workflow needs to use this Sleep() to sleep instead of the Go lang library one(timer.Sleep())
// - You can also cancel the pending sleep using context(WithCancel(ctx)) and that will cancel the sleep with
// error TimerCanceledError.
func Sleep(ctx Context, d time.Duration) (err error) {
t := NewTimer(ctx, d)
err = t.Get(ctx, nil)
return
}
// RequestCancelWorkflow can be used to request cancellation of an external workflow.
// - workflowID - name of the workflow ID.
// - runID - Optional - indicates the instance of a workflow.
// You can specify the domain of the workflow using the context like
// ctx := WithWorkflowDomain(ctx, "domain-name")
func RequestCancelWorkflow(ctx Context, workflowID, runID string) error {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
options := getWorkflowEnvOptions(ctx1)
if options.domain == nil {
return errors.New("need a valid domain")
}
return getWorkflowEnvironment(ctx).RequestCancelWorkflow(*options.domain, workflowID, runID)
}
// WithChildWorkflowOptions adds all workflow options to the context.
func WithChildWorkflowOptions(ctx Context, cwo ChildWorkflowOptions) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
wfOptions := getWorkflowEnvOptions(ctx1)
wfOptions.domain = common.StringPtr(cwo.Domain)
wfOptions.taskListName = common.StringPtr(cwo.TaskList)
wfOptions.workflowID = cwo.WorkflowID
wfOptions.executionStartToCloseTimeoutSeconds = common.Int32Ptr(int32(cwo.ExecutionStartToCloseTimeout.Seconds()))
wfOptions.taskStartToCloseTimeoutSeconds = common.Int32Ptr(int32(cwo.TaskStartToCloseTimeout.Seconds()))
wfOptions.childPolicy = cwo.ChildPolicy
wfOptions.waitForCancellation = cwo.WaitForCancellation
return ctx1
}
// WithWorkflowDomain adds a domain to the context.
func WithWorkflowDomain(ctx Context, name string) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).domain = common.StringPtr(name)
return ctx1
}
// WithWorkflowTaskList adds a task list to the context.
func WithWorkflowTaskList(ctx Context, name string) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).taskListName = common.StringPtr(name)
return ctx1
}
// WithWorkflowID adds a workflowID to the context.
func WithWorkflowID(ctx Context, workflowID string) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).workflowID = workflowID
return ctx1
}
// WithChildPolicy adds a ChildWorkflowPolicy to the context.
func WithChildPolicy(ctx Context, childPolicy ChildWorkflowPolicy) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).childPolicy = childPolicy
return ctx1
}
// WithExecutionStartToCloseTimeout adds a workflow execution timeout to the context.
func WithExecutionStartToCloseTimeout(ctx Context, d time.Duration) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).executionStartToCloseTimeoutSeconds = common.Int32Ptr(int32(d.Seconds()))
return ctx1
}
// WithWorkflowTaskStartToCloseTimeout adds a decision timeout to the context.
func WithWorkflowTaskStartToCloseTimeout(ctx Context, d time.Duration) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).taskStartToCloseTimeoutSeconds = common.Int32Ptr(int32(d.Seconds()))
return ctx1
}
// GetSignalChannel returns channel corresponding to the signal name.
func GetSignalChannel(ctx Context, signalName string) Channel {
return getWorkflowEnvOptions(ctx).getSignalChannel(ctx, signalName)
}
// Get extract data from encoded data to desired value type. valuePtr is pointer to the actual value type.
func (b EncodedValue) Get(valuePtr interface{}) error {
return getHostEnvironment().decodeArg(b, valuePtr)
}
// SideEffect executes provided function once, records its result into the workflow history and doesn't
// reexecute it on replay returning recorded result instead. It can be seen as an "inline" activity.
// Use it only for short nondeterministic code snippets like getting random value or generating UUID.
// The only way to fail SideEffect is to panic which causes decision task failure. The decision task after timeout is
// rescheduled and reexecuted giving SideEffect another chance to succeed.
// Be careful to not return any data from SideEffect function any other way than through its recorded return value.
// For example this code is BROKEN:
//
// var executed bool
// cadence.SideEffect(func(ctx cadence.Context) interface{} {
// executed = true
// return nil
// })
// if executed {
// ....
// } else {
// ....
// }
// On replay the function is not executed, the executed flag is not set to true
// and the workflow takes a different path breaking the determinism.
//
// Here is the correct way to use SideEffect:
//
// encodedRandom := SideEffect(func(ctx cadence.Context) interface{} {
// return rand.Intn(100)
// })
// var random int
// encodedRandom.Get(&random)
// if random < 50 {
// ....
// } else {
// ....
// }
func SideEffect(ctx Context, f func(ctx Context) interface{}) EncodedValue {
future, settable := NewFuture(ctx)
wrapperFunc := func() ([]byte, error) {
r := f(ctx)
return getHostEnvironment().encodeArg(r)
}
resultCallback := func(result []byte, err error) {
settable.Set(EncodedValue(result), err)
}
getWorkflowEnvironment(ctx).SideEffect(wrapperFunc, resultCallback)
var encoded EncodedValue
if err := future.Get(ctx, &encoded); err != nil {
panic(err)
}
return encoded
}
| {
if childWorkflowExecution != nil {
getWorkflowEnvironment(ctx).RequestCancelWorkflow(
*options.domain, childWorkflowExecution.ID, childWorkflowExecution.RunID)
}
} | conditional_block |
workflow.go | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cadence
import (
"errors"
"fmt"
"time"
"go.uber.org/cadence/common"
"go.uber.org/zap"
)
var (
errActivityParamsBadRequest = errors.New("missing activity parameters through context, check ActivityOptions")
errWorkflowOptionBadRequest = errors.New("missing workflow options through context, check WorkflowOptions")
)
type (
// Channel must be used instead of native go channel by workflow code.
// Use Context.NewChannel method to create an instance.
Channel interface {
// Blocks until it gets a value. when it gets a value assigns to the provided pointer.
// Example:
// var v string
// c.Receive(ctx, &v)
Receive(ctx Context, valuePtr interface{}) (more bool) // more is false when channel is closed
ReceiveAsync(valuePtr interface{}) (ok bool) // ok is true when value was returned
ReceiveAsyncWithMoreFlag(valuePtr interface{}) (ok bool, more bool) // ok is true when value was returned, more is false when channel is closed
Send(ctx Context, v interface{})
SendAsync(v interface{}) (ok bool) // ok when value was sent
Close() // prohibit sends
}
// Selector must be used instead of native go select by workflow code
// Use Context.NewSelector method to create an instance.
Selector interface {
AddReceive(c Channel, f func(c Channel, more bool)) Selector
AddSend(c Channel, v interface{}, f func()) Selector
AddFuture(future Future, f func(f Future)) Selector
AddDefault(f func())
Select(ctx Context)
}
// Future represents the result of an asynchronous computation.
Future interface {
// Get blocks until the future is ready. When ready it either returns non nil error
// or assigns result value to the provided pointer.
// Example:
// var v string
// if err := f.Get(ctx, &v); err != nil {
// return err
// }
// fmt.Printf("Value=%v", v)
Get(ctx Context, valuePtr interface{}) error
// When true Get is guaranteed to not block
IsReady() bool
}
// Settable is used to set value or error on a future.
// See NewFuture function.
Settable interface {
Set(value interface{}, err error)
SetValue(value interface{})
SetError(err error)
Chain(future Future) // Value (or error) of the future become the same of the chained one.
}
// ChildWorkflowFuture represents the result of a child workflow execution
ChildWorkflowFuture interface {
Future
// GetChildWorkflowExecution returns a future that will be ready when child workflow execution started. You can
// get the WorkflowExecution of the child workflow from the future. Then you can use Workflow ID and RunID of
// child workflow to cancel or send signal to child workflow.
GetChildWorkflowExecution() Future
}
// WorkflowType identifies a workflow type.
WorkflowType struct {
Name string
}
// WorkflowExecution Details.
WorkflowExecution struct {
ID string
RunID string
}
// EncodedValue is type alias used to encapsulate/extract encoded result from workflow/activity.
EncodedValue []byte
// ChildWorkflowOptions stores all child workflow specific parameters that will be stored inside of a Context.
ChildWorkflowOptions struct {
// Domain of the child workflow.
// Optional: the current workflow (parent)'s domain will be used if this is not provided.
Domain string
// WorkflowID of the child workflow to be scheduled.
// Optional: an auto generated workflowID will be used if this is not provided.
WorkflowID string
// TaskList that the child workflow needs to be scheduled on.
// Optional: the parent workflow task list will be used if this is not provided.
TaskList string
// ExecutionStartToCloseTimeout - The end to end timeout for the child workflow execution.
// Mandatory: no default
ExecutionStartToCloseTimeout time.Duration
// TaskStartToCloseTimeout - The decision task timeout for the child workflow.
// Optional: default is 10s if this is not provided (or if 0 is provided).
TaskStartToCloseTimeout time.Duration
// ChildPolicy defines the behavior of child workflow when parent workflow is terminated.
// Optional: default to use ChildWorkflowPolicyTerminate if this is not provided
ChildPolicy ChildWorkflowPolicy
// WaitForCancellation - Whether to wait for cancelled child workflow to be ended (child workflow can be ended
// as: completed/failed/timedout/terminated/canceled)
// Optional: default false
WaitForCancellation bool
}
// ChildWorkflowPolicy defines child workflow behavior when parent workflow is terminated.
ChildWorkflowPolicy int32
)
const (
// ChildWorkflowPolicyTerminate is policy that will terminate all child workflows when parent workflow is terminated.
ChildWorkflowPolicyTerminate ChildWorkflowPolicy = 0
// ChildWorkflowPolicyRequestCancel is policy that will send cancel request to all open child workflows when parent
// workflow is terminated.
ChildWorkflowPolicyRequestCancel ChildWorkflowPolicy = 1
// ChildWorkflowPolicyAbandon is policy that will have no impact to child workflow execution when parent workflow is
// terminated.
ChildWorkflowPolicyAbandon ChildWorkflowPolicy = 2
)
// RegisterWorkflow - registers a workflow function with the framework.
// A workflow takes a cadence context and input and returns a (result, error) or just error.
// Examples:
// func sampleWorkflow(ctx cadence.Context, input []byte) (result []byte, err error)
// func sampleWorkflow(ctx cadence.Context, arg1 int, arg2 string) (result []byte, err error)
// func sampleWorkflow(ctx cadence.Context) (result []byte, err error)
// func sampleWorkflow(ctx cadence.Context, arg1 int) (result string, err error)
// Serialization of all primitive types, structures is supported ... except channels, functions, variadic, unsafe pointer.
// This method calls panic if workflowFunc doesn't comply with the expected format.
func RegisterWorkflow(workflowFunc interface{}) |
// NewChannel create new Channel instance
func NewChannel(ctx Context) Channel {
state := getState(ctx)
state.dispatcher.channelSequence++
return NewNamedChannel(ctx, fmt.Sprintf("chan-%v", state.dispatcher.channelSequence))
}
// NewNamedChannel create new Channel instance with a given human readable name.
// Name appears in stack traces that are blocked on this channel.
func NewNamedChannel(ctx Context, name string) Channel {
return &channelImpl{name: name}
}
// NewBufferedChannel create new buffered Channel instance
func NewBufferedChannel(ctx Context, size int) Channel {
return &channelImpl{size: size}
}
// NewNamedBufferedChannel create new BufferedChannel instance with a given human readable name.
// Name appears in stack traces that are blocked on this Channel.
func NewNamedBufferedChannel(ctx Context, name string, size int) Channel {
return &channelImpl{name: name, size: size}
}
// NewSelector creates a new Selector instance.
func NewSelector(ctx Context) Selector {
state := getState(ctx)
state.dispatcher.selectorSequence++
return NewNamedSelector(ctx, fmt.Sprintf("selector-%v", state.dispatcher.selectorSequence))
}
// NewNamedSelector creates a new Selector instance with a given human readable name.
// Name appears in stack traces that are blocked on this Selector.
func NewNamedSelector(ctx Context, name string) Selector {
return &selectorImpl{name: name}
}
// Go creates a new coroutine. It has similar semantic to goroutine in a context of the workflow.
func Go(ctx Context, f func(ctx Context)) {
state := getState(ctx)
state.dispatcher.newCoroutine(ctx, f)
}
// GoNamed creates a new coroutine with a given human readable name.
// It has similar semantic to goroutine in a context of the workflow.
// Name appears in stack traces that are blocked on this Channel.
func GoNamed(ctx Context, name string, f func(ctx Context)) {
state := getState(ctx)
state.dispatcher.newNamedCoroutine(ctx, name, f)
}
// NewFuture creates a new future as well as associated Settable that is used to set its value.
func NewFuture(ctx Context) (Future, Settable) {
impl := &futureImpl{channel: NewChannel(ctx).(*channelImpl)}
return impl, impl
}
// ExecuteActivity requests activity execution in the context of a workflow.
// - Context can be used to pass the settings for this activity.
// For example: task list that this need to be routed, timeouts that need to be configured.
// Use ActivityOptions to pass down the options.
// ao := ActivityOptions{
// TaskList: "exampleTaskList",
// ScheduleToStartTimeout: 10 * time.Second,
// StartToCloseTimeout: 5 * time.Second,
// ScheduleToCloseTimeout: 10 * time.Second,
// HeartbeatTimeout: 0,
// }
// ctx1 := WithActivityOptions(ctx, ao)
//
// or to override a single option
//
// ctx1 := WithTaskList(ctx, "exampleTaskList")
// - f - Either a activity name or a function that is getting scheduled.
// - args - The arguments that need to be passed to the function represented by 'f'.
// - If the activity failed to complete then the future get error would indicate the failure
// and it can be one of ErrorWithDetails, TimeoutError, CanceledError.
// - You can also cancel the pending activity using context(WithCancel(ctx)) and that will fail the activity with
// error CanceledError.
// - returns Future with activity result or failure
func ExecuteActivity(ctx Context, f interface{}, args ...interface{}) Future {
// Validate type and its arguments.
future, settable := newDecodeFuture(ctx, f)
activityType, input, err := getValidatedActivityFunction(f, args)
if err != nil {
settable.Set(nil, err)
return future
}
// Validate context options.
parameters := getActivityOptions(ctx)
parameters, err = getValidatedActivityOptions(ctx)
if err != nil {
settable.Set(nil, err)
return future
}
parameters.ActivityType = *activityType
parameters.Input = input
a := getWorkflowEnvironment(ctx).ExecuteActivity(*parameters, func(r []byte, e error) {
settable.Set(r, e)
})
Go(ctx, func(ctx Context) {
if ctx.Done() == nil {
return // not cancellable.
}
if ctx.Done().Receive(ctx, nil); ctx.Err() == ErrCanceled {
getWorkflowEnvironment(ctx).RequestCancelActivity(a.activityID)
}
})
return future
}
// ExecuteChildWorkflow requests child workflow execution in the context of a workflow.
// - Context can be used to pass the settings for the child workflow.
// For example: task list that this child workflow should be routed, timeouts that need to be configured.
// Use ChildWorkflowOptions to pass down the options.
// cwo := ChildWorkflowOptions{
// ExecutionStartToCloseTimeout: 10 * time.Minute,
// TaskStartToCloseTimeout: time.Minute,
// }
// ctx1 := WithChildWorkflowOptions(ctx, cwo)
// - f - Either a workflow name or a workflow function that is getting scheduled.
// - args - The arguments that need to be passed to the child workflow function represented by 'f'.
// - If the child workflow failed to complete then the future get error would indicate the failure
// and it can be one of ErrorWithDetails, TimeoutError, CanceledError.
// - You can also cancel the pending child workflow using context(WithCancel(ctx)) and that will fail the workflow with
// error CanceledError.
// - returns ChildWorkflowFuture
func ExecuteChildWorkflow(ctx Context, f interface{}, args ...interface{}) ChildWorkflowFuture {
mainFuture, mainSettable := newDecodeFuture(ctx, f)
executionFuture, executionSettable := NewFuture(ctx)
result := childWorkflowFutureImpl{
decodeFutureImpl: mainFuture.(*decodeFutureImpl),
executionFuture: executionFuture.(*futureImpl)}
wfType, input, err := getValidatedWorkerFunction(f, args)
if err != nil {
mainSettable.Set(nil, err)
return result
}
options, err := getValidatedWorkflowOptions(ctx)
if err != nil {
mainSettable.Set(nil, err)
return result
}
options.input = input
options.workflowType = wfType
var childWorkflowExecution *WorkflowExecution
getWorkflowEnvironment(ctx).ExecuteChildWorkflow(*options, func(r []byte, e error) {
mainSettable.Set(r, e)
}, func(r WorkflowExecution, e error) {
if e == nil {
childWorkflowExecution = &r
}
executionSettable.Set(r, e)
})
Go(ctx, func(ctx Context) {
if ctx.Done() == nil {
return // not cancellable.
}
if ctx.Done().Receive(ctx, nil); ctx.Err() == ErrCanceled {
if childWorkflowExecution != nil {
getWorkflowEnvironment(ctx).RequestCancelWorkflow(
*options.domain, childWorkflowExecution.ID, childWorkflowExecution.RunID)
}
}
})
return result
}
// WorkflowInfo information about currently executing workflow
type WorkflowInfo struct {
WorkflowExecution WorkflowExecution
WorkflowType WorkflowType
TaskListName string
ExecutionStartToCloseTimeoutSeconds int32
TaskStartToCloseTimeoutSeconds int32
Domain string
}
// GetWorkflowInfo extracts info of a current workflow from a context.
func GetWorkflowInfo(ctx Context) *WorkflowInfo {
return getWorkflowEnvironment(ctx).WorkflowInfo()
}
// GetLogger returns a logger to be used in workflow's context
func GetLogger(ctx Context) *zap.Logger {
return getWorkflowEnvironment(ctx).GetLogger()
}
// Now returns the current time when the decision is started or replayed.
// The workflow needs to use this Now() to get the wall clock time instead of the Go lang library one.
func Now(ctx Context) time.Time {
return getWorkflowEnvironment(ctx).Now()
}
// NewTimer returns immediately and the future becomes ready after the specified timeout.
// - The current timer resolution implementation is in seconds but is subjected to change.
// - The workflow needs to use this NewTimer() to get the timer instead of the Go lang library one(timer.NewTimer())
// - You can also cancel the pending timer using context(WithCancel(ctx)) and that will cancel the timer with
// error TimerCanceledError.
func NewTimer(ctx Context, d time.Duration) Future {
future, settable := NewFuture(ctx)
if d <= 0 {
settable.Set(true, nil)
return future
}
t := getWorkflowEnvironment(ctx).NewTimer(d, func(r []byte, e error) {
settable.Set(nil, e)
})
if t != nil {
Go(ctx, func(ctx Context) {
if ctx.Done() == nil {
return // not cancellable.
}
// We will cancel the timer either it is explicit cancellation
// (or) we are closed.
ctx.Done().Receive(ctx, nil)
getWorkflowEnvironment(ctx).RequestCancelTimer(t.timerID)
})
}
return future
}
// Sleep pauses the current goroutine for at least the duration d.
// A negative or zero duration causes Sleep to return immediately.
// - The current timer resolution implementation is in seconds but is subjected to change.
// - The workflow needs to use this Sleep() to sleep instead of the Go lang library one(timer.Sleep())
// - You can also cancel the pending sleep using context(WithCancel(ctx)) and that will cancel the sleep with
// error TimerCanceledError.
func Sleep(ctx Context, d time.Duration) (err error) {
t := NewTimer(ctx, d)
err = t.Get(ctx, nil)
return
}
// RequestCancelWorkflow can be used to request cancellation of an external workflow.
// - workflowID - name of the workflow ID.
// - runID - Optional - indicates the instance of a workflow.
// You can specify the domain of the workflow using the context like
// ctx := WithWorkflowDomain(ctx, "domain-name")
func RequestCancelWorkflow(ctx Context, workflowID, runID string) error {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
options := getWorkflowEnvOptions(ctx1)
if options.domain == nil {
return errors.New("need a valid domain")
}
return getWorkflowEnvironment(ctx).RequestCancelWorkflow(*options.domain, workflowID, runID)
}
// WithChildWorkflowOptions adds all workflow options to the context.
func WithChildWorkflowOptions(ctx Context, cwo ChildWorkflowOptions) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
wfOptions := getWorkflowEnvOptions(ctx1)
wfOptions.domain = common.StringPtr(cwo.Domain)
wfOptions.taskListName = common.StringPtr(cwo.TaskList)
wfOptions.workflowID = cwo.WorkflowID
wfOptions.executionStartToCloseTimeoutSeconds = common.Int32Ptr(int32(cwo.ExecutionStartToCloseTimeout.Seconds()))
wfOptions.taskStartToCloseTimeoutSeconds = common.Int32Ptr(int32(cwo.TaskStartToCloseTimeout.Seconds()))
wfOptions.childPolicy = cwo.ChildPolicy
wfOptions.waitForCancellation = cwo.WaitForCancellation
return ctx1
}
// WithWorkflowDomain adds a domain to the context.
func WithWorkflowDomain(ctx Context, name string) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).domain = common.StringPtr(name)
return ctx1
}
// WithWorkflowTaskList adds a task list to the context.
func WithWorkflowTaskList(ctx Context, name string) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).taskListName = common.StringPtr(name)
return ctx1
}
// WithWorkflowID adds a workflowID to the context.
func WithWorkflowID(ctx Context, workflowID string) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).workflowID = workflowID
return ctx1
}
// WithChildPolicy adds a ChildWorkflowPolicy to the context.
func WithChildPolicy(ctx Context, childPolicy ChildWorkflowPolicy) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).childPolicy = childPolicy
return ctx1
}
// WithExecutionStartToCloseTimeout adds a workflow execution timeout to the context.
func WithExecutionStartToCloseTimeout(ctx Context, d time.Duration) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).executionStartToCloseTimeoutSeconds = common.Int32Ptr(int32(d.Seconds()))
return ctx1
}
// WithWorkflowTaskStartToCloseTimeout adds a decision timeout to the context.
func WithWorkflowTaskStartToCloseTimeout(ctx Context, d time.Duration) Context {
ctx1 := setWorkflowEnvOptionsIfNotExist(ctx)
getWorkflowEnvOptions(ctx1).taskStartToCloseTimeoutSeconds = common.Int32Ptr(int32(d.Seconds()))
return ctx1
}
// GetSignalChannel returns channel corresponding to the signal name.
func GetSignalChannel(ctx Context, signalName string) Channel {
return getWorkflowEnvOptions(ctx).getSignalChannel(ctx, signalName)
}
// Get extract data from encoded data to desired value type. valuePtr is pointer to the actual value type.
func (b EncodedValue) Get(valuePtr interface{}) error {
return getHostEnvironment().decodeArg(b, valuePtr)
}
// SideEffect executes provided function once, records its result into the workflow history and doesn't
// reexecute it on replay returning recorded result instead. It can be seen as an "inline" activity.
// Use it only for short nondeterministic code snippets like getting random value or generating UUID.
// The only way to fail SideEffect is to panic which causes decision task failure. The decision task after timeout is
// rescheduled and reexecuted giving SideEffect another chance to succeed.
// Be careful to not return any data from SideEffect function any other way than through its recorded return value.
// For example this code is BROKEN:
//
// var executed bool
// cadence.SideEffect(func(ctx cadence.Context) interface{} {
// executed = true
// return nil
// })
// if executed {
// ....
// } else {
// ....
// }
// On replay the function is not executed, the executed flag is not set to true
// and the workflow takes a different path breaking the determinism.
//
// Here is the correct way to use SideEffect:
//
// encodedRandom := SideEffect(func(ctx cadence.Context) interface{} {
// return rand.Intn(100)
// })
// var random int
// encodedRandom.Get(&random)
// if random < 50 {
// ....
// } else {
// ....
// }
func SideEffect(ctx Context, f func(ctx Context) interface{}) EncodedValue {
future, settable := NewFuture(ctx)
wrapperFunc := func() ([]byte, error) {
r := f(ctx)
return getHostEnvironment().encodeArg(r)
}
resultCallback := func(result []byte, err error) {
settable.Set(EncodedValue(result), err)
}
getWorkflowEnvironment(ctx).SideEffect(wrapperFunc, resultCallback)
var encoded EncodedValue
if err := future.Get(ctx, &encoded); err != nil {
panic(err)
}
return encoded
}
| {
thImpl := getHostEnvironment()
err := thImpl.RegisterWorkflow(workflowFunc)
if err != nil {
panic(err)
}
} | identifier_body |
d3viz.js | (function(window,undefined){
/***
* One web page <---> one d3viz
*/
var d3viz = function(container, hlcanvas) {
    // Library/protocol version string.
    this.version = "0.1";
    this.socket = undefined;    // websocket to the backend (assigned elsewhere)
    this.name = undefined;
    this.uuid = undefined;
    this.mapCanvas = undefined; // GeoVizMap
    this.mapDict = {};          // uuid:map
    this.nameDict = {};         // uuid:name
    this.dataDict = {};         // not used
    this.o = undefined;         // last raw input handed to AddMap()
    this.prj = undefined;
    this.map = undefined;
    this.mapType = undefined;   //shapefile or json
    this.uuids = [];            // layer uuids, indexed by canvas stacking order
    this.mapConfig = {};        // uuid -> rendering config
    this.mapMeta = {};          // uuid -> layer metadata
    this.mapTheme = {};         // uuid -> color theme data
    this.container = container;
    this.hlcanvas = hlcanvas;
    this.geoviz = new GeoVizMap(container, hlcanvas);
    // carto db
    this.userid = undefined;
    this.key = undefined;
    this.resizeTimer; // NOTE(review): no-op statement; the timer id is never assigned here
    // NOTE(review): assignment without var/let — this leaks a global and
    // shadows window.self. The prototype methods below depend on this global,
    // so only one d3viz instance per page is supported.
    self = this;
};
d3viz.prototype = {
    // Return the layer index of the top-most (current) map canvas. Canvas
    // elements are stacked in the container; the element id encodes the index.
    GetCurrentIdx : function() {
        var layerElm = this.container.find('canvas')[this.geoviz.numMaps-1];
        // NOTE(review): parseInt without an explicit radix — confirm canvas
        // ids are always plain decimal strings.
        return parseInt(layerElm.id);
    },
    GetUUID : function() {
        // return the uuid of current map layer
        var currentIdx = this.GetCurrentIdx();
        return this.uuids[currentIdx];
    },
    GetNumMaps : function() {
        return this.geoviz.numMaps;
    },
    GetMap : function(idx) {
        // if idx == undefined return current mapcanvas
        return this.geoviz.getMap(idx);
    },
    RemoveMap : function(idx) {
        idx = parseInt(idx);
        this.geoviz.removeMap(idx);
    },
    // Register a new layer's metadata once, keyed by its uuid, and stamp the
    // uuid onto the corresponding map canvas.
    SetupMapMeta: function(metaData) {
        var uuid = metaData.layer_uuid;
        if ( this.uuids.indexOf(uuid) == -1 ) {
            this.uuids.push(uuid);
            this.mapMeta[uuid] = metaData;
        }
        // set uuid to mapcanvas
        this.geoviz.getMap(this.uuids.length-1).uuid = uuid;
    },
    GetMapMeta: function() {
        return this.mapMeta[this.GetUUID()];
    },
    PanMaps : function(offsetX, offsetY) {
        this.geoviz.moveAllMaps(offsetX, offsetY);
    },
    UpdateMaps : function(params) {
        this.geoviz.updateAllMaps(params);
    },
    CleanMaps : function() {
        this.geoviz.cleanAllMaps();
    },
    UpdateLayerOrder : function(order) {
        this.geoviz.reorderMaps(order);
    },
    SetMapConfig : function(config) {
        var currentUUID = this.GetUUID();
        this.mapConfig[currentUUID] = config;
    },
    GetMapConfig : function() {
        var currentUUID = this.GetUUID();
        return this.mapConfig[currentUUID];
    },
    SetMapTheme : function(data) {
        this.mapTheme[data.layer_uuid] = data;
    },
    GetMapTheme : function() {
        var currentUUID = this.GetUUID();
        return this.mapTheme[currentUUID];
    },
    // NOTE(review): empty body — no implementation in this file.
    AddFrame : function(layer_uuid, frame_uuid, url) {
    },
    /**
     * AddMap() could be:
     * 1. Drag&Drop local ESRI Shape file
     * 2. Drag&Drop local GeoJson file
     * 3. Dropbox file url of ESRI Shape file
     * 4. Dropbox file url of GeoJson file
     */
    // Add a constructed map object to the GeoViz canvas stack and fire the
    // user callback. Uses the module-global `self` set in the constructor.
    _setupGeoVizMap : function(isMainMap, map, type, colorTheme, callback) {
        self.geoviz.addMap(map, {'color_theme':colorTheme});
        if (typeof callback === "function") {
            callback(map);
        }
    },
    // Dispatch on the kind of input `o` (url string, dropped File/Blob, or an
    // in-memory ShpReader/JSON object) and build a ShpMap or JsonMap from it.
    AddMap : function(name, o, type, isMainMap, precall, callback,L, lmap, prj, colorTheme) {
        if (typeof precall === "function") { precall();}
        var map;
        // NOTE(review): `options` is never used below.
        var options = {"hratio": 1, "vratio": 1, "alpha": 0.8, "noforeground": false};
        if ( typeof o == "string") {
            if (type == 'shapefile') {
                // file url
                var xhr = new XMLHttpRequest();
                xhr.open("GET", o, true);
                xhr.responseType = 'arraybuffer';
                xhr.onload = function(evt) {
                    map = new ShpMap(name, new ShpReader(xhr.response), L, lmap, prj);
                    self._setupGeoVizMap(isMainMap, map, type, colorTheme, callback);
                };
                xhr.send(null);
            } else if (type == 'geojson' || type == 'json') {
                var json_url = o;
                this.GetJSON( json_url, function(json) {
                    map = new JsonMap(name, json, L, lmap, prj);
                    self._setupGeoVizMap(isMainMap, map, type, colorTheme, callback);
                });
            }
        } else if (!!o.lastModifiedDate || o.constructor == Blob) {
            // drag& drop file
            var reader = new FileReader();
            reader.onload = function(e) {
                if (type == 'shapefile') {
                    map = new ShpMap(name, new ShpReader(reader.result), L, lmap, prj);
                } else if (type == 'geojson' || type == 'json') {
                    var json = JSON.parse(reader.result);
                    map = new JsonMap(name, json, L, lmap, prj);
                }
                self._setupGeoVizMap(isMainMap, map, type, colorTheme, callback);
            };
            if (type == 'shapefile') {
                reader.readAsArrayBuffer(o);
            } else if (type == 'geojson' || type == 'json') {
                reader.readAsText(o);
            }
        } else if (typeof o == 'object'){
            if (o instanceof ShpReader || o.constructor.name == 'ShpReader') {
                // ShpReader object
                map = new ShpMap(name, o, L, lmap, prj);
            } else {
                // JSON object
                map = new JsonMap(name, o, L, lmap, prj);
            }
            self._setupGeoVizMap(isMainMap, map, type, colorTheme, callback);
        }
        self.o = o;
    },
    // NOTE(review): empty body — no implementation in this file.
    Clean : function() {
    },
    // Fetch a JSON document; on HTTP 200 pass the parsed body to
    // successHandler, otherwise pass the status code to errorHandler.
    GetJSON : function(url, successHandler, errorHandler) {
        var xhr = new XMLHttpRequest();
        xhr.open('get', url, true);
        xhr.responseType = 'json';
        xhr.onload = function() {
            var status = xhr.status;
            if (status == 200) {
                var a = xhr.response;
                // Some browsers deliver a string despite responseType='json';
                // strip newlines and parse manually in that case.
                if (typeof(a) == 'string') {
                    a = a.replace(/\n/g,"");
                    a = JSON.parse(a);
                }
                successHandler && successHandler(a);
            } else {
                errorHandler && errorHandler(status);
            }
        };
        // NOTE(review): no xhr.onerror handler — network-level failures never
        // reach errorHandler.
        xhr.send();
    },
    /**
     * Setup Brushing/Linking for base map
     */
    // Listen for cross-window localStorage changes and re-apply the stored
    // highlight selection/extent to the map.
    SetupBrushLink : function() {
        // NOTE(review): missing 'var' — mapDict leaks as a global; it is also
        // unused below.
        mapDict = this.mapDict;
        window.addEventListener('storage', function(e) {
            var hl_ids = JSON.parse(localStorage.getItem('HL_IDS')),
                hl_ext = JSON.parse(localStorage.getItem('HL_MAP'));
            for ( var uuid in hl_ids ) {
                // NOTE(review): GetMap() ignores uuid and always returns the
                // current map — confirm this is intended for linked views.
                var map = self.GetMap();
                var ids = hl_ids[uuid];
                if ( hl_ext && uuid in hl_ext ) {
                    map.highlightExt(ids, hl_ext[uuid]);
                } else if ( hl_ids && uuid in hl_ids ) {
                    var context = undefined;
                    var nolinking = true;
                    map.highlight(hl_ids[uuid], context, nolinking);
                }
                //}
            }
        }, false);
    },
    /**
     * Setup and function for PopUp window
     */
    // Append a random token to the url to bust browser caching of popups.
    RandUrl : function(url) {
        var rnd = Math.random().toString(36).substring(7)
        if ( url.indexOf('?') === -1 ) {
            return url + "?" + rnd;
        }
        return url + "&" + rnd;
    },
    /**
     * Create a new thematic map
     */
    ShowThematicMap : function(map, colorTheme, callback) {
        // NOTE(review): self.canvas is not initialized in the constructor —
        // presumably set elsewhere before this is called; verify.
        self.mapCanvas = new GeoVizMap(map, self.canvas, {
            "color_theme" : colorTheme
        });
        if (typeof callback === "function") {
            callback();
        }
    },
    UpdateThematicMap : function(uuid, newColorTheme, callback) {
        // NOTE(review): `map` is never used.
        var map = self.mapDict[uuid];
        self.mapCanvas.updateColor(newColorTheme);
        if (typeof callback === "function") {
            callback();
        }
    },
    /**
     * Create a new Leaftlet map
     */
    PopupThematicMap : function() {
        var w = window.open(
            this.RandUrl('../../static/thematicmap.html'), // quantile, lisa,
            "_blank",
            "titlebar=no,toolbar=no,location=no,width=900, height=700, scrollbars=yes"
        );
    },
    /**
     * Create a new Leaftlet map
     */
    PopupScatterPlot : function() {
        var w = window.open(
            this.RandUrl('../../static/scatterplot.html'), // quantile, lisa,
            "_blank",
            "titlebar=no,toolbar=no,location=no,width=900, height=700, scrollbars=yes"
        );
    },
    /**
     * Create a new Moran Scatter Plot
     */
    PopupMoranScatterPlot : function() {
        var w = window.open(
            this.RandUrl('../../static/moran_scatterplot.html'), // quantile, lisa,
            "_blank",
            "titlebar=no,toolbar=no,location=no,width=900, height=700, scrollbars=yes"
        );
    },
    /**
     * Create a new Moran Scatter Plot
     */
    PopupHistogram : function() {
        var w = window.open(
            this.RandUrl('../../static/histogram.html'),
            "_blank",
            "titlebar=no,toolbar=no,location=no,width=900, height=700, scrollbars=yes"
        );
    },
    /**
     * Create a new Cartodb map
     */
    ShowCartodbMap: function(msg) {
        var w = window.open(
            this.RandUrl('cartodb_map.html'), // quantile, lisa,
            "_blank",
            "width=900, height=700, scrollbars=yes"
        );
    },
    /**
     * Close all PopUp windows
     */
    // NOTE(review): empty body — no implementation in this file.
    CloseAllPopUps : function() {
    },
    /**
     * Get selected ids from map
     */
    // Build a response {uuid, ids} where ids is a comma-terminated string of
    // the highlighted object ids persisted in localStorage under 'HL_IDS'.
    GetSelected : function(msg) {
        var uuid = msg["uuid"];
        var select_ids = "";
        if (localStorage.getItem('HL_IDS')) {
            var hl_ids = JSON.parse(localStorage.getItem('HL_IDS'));
            if ( uuid in hl_ids) {
                var ids = hl_ids[uuid];
                for (var i=0, n=ids.length; i<n; i++ ) {
                    select_ids += ids[i] + ",";
                }
            }
        }
        var rsp = {"uuid":uuid,"ids":select_ids};
        return rsp;
    },
    // Highlight the requested ids on the layer's map and persist the
    // selection to localStorage so linked windows can pick it up.
    SelectOnMap : function(msg) {
        var hl_ids = JSON.parse(localStorage.getItem('HL_IDS')),
            hl_ext = JSON.parse(localStorage.getItem('HL_MAP'));
        if (!hl_ids) hl_ids = {};
        if (!hl_ext) hl_ext = {};
        var uuid = msg.uuid;
        if (uuid in this.mapDict ) {
            var map = this.mapDict[uuid];
            var ids = msg.data;
            if ( hl_ext && uuid in hl_ext ) {
                map.highlightExt(ids, hl_ext[uuid]);
            } else {
                map.highlight(ids);
            }
            hl_ids[uuid] = ids;
        }
        localStorage['HL_IDS'] = JSON.stringify(hl_ids);
    },
    // The Carto* methods below send a command over the websocket when it is
    // open (readyState == 1) and stash the callback for the response handler;
    // otherwise they re-poll every 10 ms until the socket is ready.
    CartoGetAllTables : function(uid, key, successHandler) {
        var msg = {"command":"cartodb_get_all_tables"};
        if (uid) msg["uid"] = uid;
        if (key) msg["key"] = key;
        if (this.socket && this.socket.readyState == 1) {
            this.socket.send(JSON.stringify(msg));
            this.callback_GetAllTables = successHandler;
        } else {
            setTimeout(function(){self.CartoGetAllTables(uid, key, successHandler)}, 10);
        }
    },
    CartoDownloadTable : function(uid, key, table_name, successHandler) {
        var msg = {"command":"cartodb_download_table"};
        if (uid) msg["uid"] = uid;
        if (key) msg["key"] = key;
        if (table_name) msg["table_name"] = table_name;
        if (this.socket && this.socket.readyState == 1) {
            this.socket.send(JSON.stringify(msg));
            this.callback_DownloadTable = successHandler;
        } else {
            setTimeout(function(){self.CartoDownloadTable(uid, key, table_name,successHandler)}, 10);
        }
    },
    CartoUploadTable : function(uid, key, uuid, successHandler) {
        var msg = {"command":"cartodb_upload_table"};
        if (uid) msg["uid"] = uid;
        if (key) msg["key"] = key;
        if (uuid) msg["uuid"] = uuid;
        if (this.socket && this.socket.readyState == 1) {
            this.socket.send(JSON.stringify(msg));
            this.callback_UploadTable = successHandler;
        } else {
            setTimeout(function(){self.CartoUploadTable(uid, key, uuid, successHandler)}, 10);
        }
    },
    CartoSpatialCount : function(uid, key, first_layer, second_layer, count_col_name, successHandler) {
        var msg = {"command":"cartodb_spatial_count"};
        if (uid) msg["uid"] = uid;
        if (key) msg["key"] = key;
        msg["firstlayer"] = first_layer;
        msg["secondlayer"] = second_layer;
        msg["columnname"] = count_col_name;
        if (this.socket && this.socket.readyState == 1) {
            this.socket.send(JSON.stringify(msg));
            this.callback_SpatialCount = successHandler;
        } else {
            setTimeout(function(){self.CartoSpatialCount(uid, key, first_layer, second_layer, count_col_name, successHandler)}, 10);
        }
    },
};
// End and expose d3viz to 'window' |
})(self); | window["d3viz"] = d3viz; | random_line_split |
d3viz.js |
(function(window,undefined){
/***
* One web page <---> one d3viz
*/
var d3viz = function(container, hlcanvas) {
this.version = "0.1";
this.socket = undefined;
this.name = undefined;
this.uuid = undefined;
this.mapCanvas = undefined; // GeoVizMap
this.mapDict = {}; // uuid:map
this.nameDict = {}; // uuid:name
this.dataDict = {}; // not used
this.o = undefined;
this.prj = undefined;
this.map = undefined;
this.mapType = undefined; //shapefile or json
this.uuids = [];
this.mapConfig = {};
this.mapMeta = {};
this.mapTheme = {};
this.container = container;
this.hlcanvas = hlcanvas;
this.geoviz = new GeoVizMap(container, hlcanvas);
// carto db
this.userid = undefined;
this.key = undefined;
this.resizeTimer;
self = this;
};
d3viz.prototype = {
GetCurrentIdx : function() {
var layerElm = this.container.find('canvas')[this.geoviz.numMaps-1];
return parseInt(layerElm.id);
},
GetUUID : function() {
// return the uuid of current map layer
var currentIdx = this.GetCurrentIdx();
return this.uuids[currentIdx];
},
GetNumMaps : function() {
return this.geoviz.numMaps;
},
GetMap : function(idx) {
// if idx == undefined return current mapcanvas
return this.geoviz.getMap(idx);
},
RemoveMap : function(idx) {
idx = parseInt(idx);
this.geoviz.removeMap(idx);
},
SetupMapMeta: function(metaData) {
var uuid = metaData.layer_uuid;
if ( this.uuids.indexOf(uuid) == -1 ) {
this.uuids.push(uuid);
this.mapMeta[uuid] = metaData;
}
// set uuid to mapcanvas
this.geoviz.getMap(this.uuids.length-1).uuid = uuid;
},
GetMapMeta: function() {
return this.mapMeta[this.GetUUID()];
},
PanMaps : function(offsetX, offsetY) {
this.geoviz.moveAllMaps(offsetX, offsetY);
},
UpdateMaps : function(params) {
this.geoviz.updateAllMaps(params);
},
CleanMaps : function() {
this.geoviz.cleanAllMaps();
},
UpdateLayerOrder : function(order) {
this.geoviz.reorderMaps(order);
},
SetMapConfig : function(config) {
var currentUUID = this.GetUUID();
this.mapConfig[currentUUID] = config;
},
GetMapConfig : function() {
var currentUUID = this.GetUUID();
return this.mapConfig[currentUUID];
},
SetMapTheme : function(data) {
this.mapTheme[data.layer_uuid] = data;
},
GetMapTheme : function() {
var currentUUID = this.GetUUID();
return this.mapTheme[currentUUID];
},
AddFrame : function(layer_uuid, frame_uuid, url) {
},
/**
* AddMap() could be:
* 1. Drag&Drop local ESRI Shape file
* 2. Drag&Drop local GeoJson file
* 3. Dropbox file url of ESRI Shape file
* 4. Dropbox file url of GeoJson file
*/
_setupGeoVizMap : function(isMainMap, map, type, colorTheme, callback) {
self.geoviz.addMap(map, {'color_theme':colorTheme});
if (typeof callback === "function") {
callback(map);
}
},
AddMap : function(name, o, type, isMainMap, precall, callback,L, lmap, prj, colorTheme) {
if (typeof precall === "function") { precall();}
var map;
var options = {"hratio": 1, "vratio": 1, "alpha": 0.8, "noforeground": false};
if ( typeof o == "string") {
if (type == 'shapefile') {
// file url
var xhr = new XMLHttpRequest();
xhr.open("GET", o, true);
xhr.responseType = 'arraybuffer';
xhr.onload = function(evt) {
map = new ShpMap(name, new ShpReader(xhr.response), L, lmap, prj);
self._setupGeoVizMap(isMainMap, map, type, colorTheme, callback);
};
xhr.send(null);
} else if (type == 'geojson' || type == 'json') {
var json_url = o;
this.GetJSON( json_url, function(json) {
map = new JsonMap(name, json, L, lmap, prj);
self._setupGeoVizMap(isMainMap, map, type, colorTheme, callback);
});
}
} else if (!!o.lastModifiedDate || o.constructor == Blob) {
// drag& drop file
var reader = new FileReader();
reader.onload = function(e) {
if (type == 'shapefile') {
map = new ShpMap(name, new ShpReader(reader.result), L, lmap, prj);
} else if (type == 'geojson' || type == 'json') {
var json = JSON.parse(reader.result);
map = new JsonMap(name, json, L, lmap, prj);
}
self._setupGeoVizMap(isMainMap, map, type, colorTheme, callback);
};
if (type == 'shapefile') {
reader.readAsArrayBuffer(o);
} else if (type == 'geojson' || type == 'json') {
reader.readAsText(o);
}
} else if (typeof o == 'object'){
if (o instanceof ShpReader || o.constructor.name == 'ShpReader') {
// ShpReader object
map = new ShpMap(name, o, L, lmap, prj);
} else {
// JSON object
map = new JsonMap(name, o, L, lmap, prj);
}
self._setupGeoVizMap(isMainMap, map, type, colorTheme, callback);
}
self.o = o;
},
Clean : function() {
},
GetJSON : function(url, successHandler, errorHandler) {
var xhr = new XMLHttpRequest();
xhr.open('get', url, true);
xhr.responseType = 'json';
xhr.onload = function() {
var status = xhr.status;
if (status == 200) {
var a = xhr.response;
if (typeof(a) == 'string') {
a = a.replace(/\n/g,"");
a = JSON.parse(a);
}
successHandler && successHandler(a);
} else {
errorHandler && errorHandler(status);
}
};
xhr.send();
},
/**
* Setup Brushing/Linking for base map
*/
SetupBrushLink : function() {
mapDict = this.mapDict;
window.addEventListener('storage', function(e) {
var hl_ids = JSON.parse(localStorage.getItem('HL_IDS')),
hl_ext = JSON.parse(localStorage.getItem('HL_MAP'));
for ( var uuid in hl_ids ) {
var map = self.GetMap();
var ids = hl_ids[uuid];
if ( hl_ext && uuid in hl_ext ) {
map.highlightExt(ids, hl_ext[uuid]);
} else if ( hl_ids && uuid in hl_ids ) {
var context = undefined;
var nolinking = true;
map.highlight(hl_ids[uuid], context, nolinking);
}
//}
}
}, false);
},
/**
* Setup and function for PopUp window
*/
RandUrl : function(url) {
var rnd = Math.random().toString(36).substring(7)
if ( url.indexOf('?') === -1 ) {
return url + "?" + rnd;
}
return url + "&" + rnd;
},
/**
* Create a new thematic map
*/
ShowThematicMap : function(map, colorTheme, callback) {
self.mapCanvas = new GeoVizMap(map, self.canvas, {
"color_theme" : colorTheme
});
if (typeof callback === "function") {
callback();
}
},
UpdateThematicMap : function(uuid, newColorTheme, callback) {
var map = self.mapDict[uuid];
self.mapCanvas.updateColor(newColorTheme);
if (typeof callback === "function") {
callback();
}
},
/**
* Create a new Leaftlet map
*/
PopupThematicMap : function() {
var w = window.open(
this.RandUrl('../../static/thematicmap.html'), // quantile, lisa,
"_blank",
"titlebar=no,toolbar=no,location=no,width=900, height=700, scrollbars=yes"
);
},
/**
* Create a new Leaftlet map
*/
PopupScatterPlot : function() {
var w = window.open(
this.RandUrl('../../static/scatterplot.html'), // quantile, lisa,
"_blank",
"titlebar=no,toolbar=no,location=no,width=900, height=700, scrollbars=yes"
);
},
/**
* Create a new Moran Scatter Plot
*/
PopupMoranScatterPlot : function() {
var w = window.open(
this.RandUrl('../../static/moran_scatterplot.html'), // quantile, lisa,
"_blank",
"titlebar=no,toolbar=no,location=no,width=900, height=700, scrollbars=yes"
);
},
/**
* Create a new Moran Scatter Plot
*/
PopupHistogram : function() {
var w = window.open(
this.RandUrl('../../static/histogram.html'),
"_blank",
"titlebar=no,toolbar=no,location=no,width=900, height=700, scrollbars=yes"
);
},
/**
* Create a new Cartodb map
*/
ShowCartodbMap: function(msg) {
var w = window.open(
this.RandUrl('cartodb_map.html'), // quantile, lisa,
"_blank",
"width=900, height=700, scrollbars=yes"
);
},
/**
* Close all PopUp windows
*/
CloseAllPopUps : function() {
},
/**
* Get selected ids from map
*/
GetSelected : function(msg) {
var uuid = msg["uuid"];
var select_ids = "";
if (localStorage.getItem('HL_IDS')) {
var hl_ids = JSON.parse(localStorage.getItem('HL_IDS'));
if ( uuid in hl_ids) {
var ids = hl_ids[uuid];
for (var i=0, n=ids.length; i<n; i++ ) |
}
}
var rsp = {"uuid":uuid,"ids":select_ids};
return rsp;
},
SelectOnMap : function(msg) {
var hl_ids = JSON.parse(localStorage.getItem('HL_IDS')),
hl_ext = JSON.parse(localStorage.getItem('HL_MAP'));
if (!hl_ids) hl_ids = {};
if (!hl_ext) hl_ext = {};
var uuid = msg.uuid;
if (uuid in this.mapDict ) {
var map = this.mapDict[uuid];
var ids = msg.data;
if ( hl_ext && uuid in hl_ext ) {
map.highlightExt(ids, hl_ext[uuid]);
} else {
map.highlight(ids);
}
hl_ids[uuid] = ids;
}
localStorage['HL_IDS'] = JSON.stringify(hl_ids);
},
CartoGetAllTables : function(uid, key, successHandler) {
var msg = {"command":"cartodb_get_all_tables"};
if (uid) msg["uid"] = uid;
if (key) msg["key"] = key;
if (this.socket && this.socket.readyState == 1) {
this.socket.send(JSON.stringify(msg));
this.callback_GetAllTables = successHandler;
} else {
setTimeout(function(){self.CartoGetAllTables(uid, key, successHandler)}, 10);
}
},
CartoDownloadTable : function(uid, key, table_name, successHandler) {
var msg = {"command":"cartodb_download_table"};
if (uid) msg["uid"] = uid;
if (key) msg["key"] = key;
if (table_name) msg["table_name"] = table_name;
if (this.socket && this.socket.readyState == 1) {
this.socket.send(JSON.stringify(msg));
this.callback_DownloadTable = successHandler;
} else {
setTimeout(function(){self.CartoDownloadTable(uid, key, table_name,successHandler)}, 10);
}
},
CartoUploadTable : function(uid, key, uuid, successHandler) {
var msg = {"command":"cartodb_upload_table"};
if (uid) msg["uid"] = uid;
if (key) msg["key"] = key;
if (uuid) msg["uuid"] = uuid;
if (this.socket && this.socket.readyState == 1) {
this.socket.send(JSON.stringify(msg));
this.callback_UploadTable = successHandler;
} else {
setTimeout(function(){self.CartoUploadTable(uid, key, uuid, successHandler)}, 10);
}
},
CartoSpatialCount : function(uid, key, first_layer, second_layer, count_col_name, successHandler) {
var msg = {"command":"cartodb_spatial_count"};
if (uid) msg["uid"] = uid;
if (key) msg["key"] = key;
msg["firstlayer"] = first_layer;
msg["secondlayer"] = second_layer;
msg["columnname"] = count_col_name;
if (this.socket && this.socket.readyState == 1) {
this.socket.send(JSON.stringify(msg));
this.callback_SpatialCount = successHandler;
} else {
setTimeout(function(){self.CartoSpatialCount(uid, key, first_layer, second_layer, count_col_name, successHandler)}, 10);
}
},
};
// End and expose d3viz to 'window'
window["d3viz"] = d3viz;
})(self);
| {
select_ids += ids[i] + ",";
} | conditional_block |
input_layer.py | """
Input functions for training and inference.
Author: Philipp Jund, 2018
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from SpatialRelationCNN.model import utility
from SpatialRelationCNN.model.generator_factory import GeneratorFactory
import SpatialRelationCNN.model.augmentation as augment
import numpy as np
import tensorflow as tf
import tfquaternion as tfq
class InputLayer(object):
"""The input pipeline base class, from RelationDataset to projection."""
def __init__(self, dataset, more_augmentation=False):
    """The input pipeline, from RelationDataset to projection.

    Args:
        dataset: A `RelationDataset` object.
        more_augmentation: `bool`, forwarded to the `GeneratorFactory`.
    """
    self.dataset = dataset
    self.generator_factory = GeneratorFactory(self.dataset,
                                              more_augmentation)
    phases = ["train", "validation", "test"]
    # Per-phase scene-description generators; filled by _create_tf_datasets.
    self.generators = {n: None for n in phases}
    self.iterator = None
    # Per-phase tf.data iterator initializer ops, used by switch_input.
    self.iterator_init_ops = {n: None for n in phases}
    # One big constant of all cloud points plus the slice boundaries.
    self.clouds_tensor, self.cloud_slice_indices = \
        self.create_cloud_constants()
    # Placeholders for generalize_input_fn: a batch of object pairs.
    self.obj_ids_pl = tf.placeholder(tf.int32, shape=(None, 2),
                                     name="obj_ids")
    self.translations_pl = tf.placeholder(tf.float32, shape=(None, 2, 3),
                                          name="translations")
    self.rotations_pl = tf.placeholder(tf.float32, shape=(None, 2, 4),
                                       name="rotations")
    self.rotations = None  # stores the resulting rotations ...
    self.translations = None  # ... and translations when generalizing
    self.translation_vars = []
    self.rotation_vars = []
@utility.scope_wrapper
def create_cloud_constants(self):
    """Create two `tf.constant`s of the obj point clouds and their ranges.

    The point clouds have differing numbers of points. To efficiently
    process them, all object point clouds are concatenated into one
    constant. To retrieve them afterwards, we create a second constant with
    shape (N+1), containing the start index for each point cloud with the
    length as an additional index. With this we can use slicing, which
    should be more efficient than using tf.where

    Returns:
        A tuple `(clouds, slice_indices)`: the concatenated point constant
        and the cumulative start offsets (length N+1, float32).
    """
    np_clouds = [self.dataset.clouds[n] for n in self.dataset.cloud_names]
    # Create the slice indices as float32, as they'll only be used with
    # tf.gather which has no GPU kernel for integers.
    cloud_slice_indices = np.cumsum([0] + [len(c) for c in np_clouds],
                                    dtype=np.float32)
    tf_clouds = tf.constant(np.concatenate(np_clouds), dtype=tf.float32)
    return tf_clouds, cloud_slice_indices
def switch_input(self, phase, sess):
    """Switch the input pipeline between train/validation/test data.

    Args:
        phase: `str`, one of the keys of ``self.iterator_init_ops``
            ("train", "validation" or "test").
        sess: Session used to run the iterator initializer op.

    Raises:
        ValueError: If `phase` is not a known phase name.
    """
    if phase not in self.iterator_init_ops:
        # Report the offending value and the valid names instead of dumping
        # the whole dict of initializer ops (which printed op objects).
        raise ValueError("Invalid phase name {!r}, must be one of {}."
                         .format(phase, sorted(self.iterator_init_ops)))
    print("Switching input to {}.".format(phase))
    sess.run(self.iterator_init_ops[phase])
def _create_tf_datasets(self, split, batch_size):
    """Helper function that creates the train and test tf.data.Dataset.

    Args:
        split: `int`, index of the dataset split to load.
        batch_size: `int`, batch size for the training phase; validation
            and test datasets are batched with size 1.
    """
    out_types = (tf.int32, tf.float32, tf.float32, tf.int32, tf.bool)
    # out_shapes has an additional batch dim (None) and 3 or 1 scenes.
    out_shapes = ((None, None, 2), (None, None, 2, 3), (None, None, 2, 4),
                  (None, None), (None,))
    # A reinitializable iterator shared by all phases; switch via init ops.
    self.iterator = tf.data.Iterator.from_structure(out_types, out_shapes)
    for p in ["train", "validation", "test"]:
        # generator factory throws if there's no validation data
        try:
            self.generators[p] = self.generator_factory.scene_desc_generator(split, p)
        except ValueError:
            continue
        # Probe a fresh generator instance for one element to derive the
        # concrete output shapes for this phase.
        out_shapes = tuple([np.array(x).shape for x in next(self.generators[p]())])
        d = tf.data.Dataset.from_generator(self.generators[p], out_types,
                                           out_shapes)
        d = d.batch(batch_size if p == "train" else 1)
        # d = d.prefetch(3)
        self.iterator_init_ops[p] = self.iterator.make_initializer(d)
@staticmethod
def _repeat(a, repeats, batch_size, training_batch_size):
|
@utility.scope_wrapper
def _input_fn(self, obj_ids, translations, rotations, train_batch_size,
              num_objs, do_augmentation):
    """The input function 's part that is shared.

    This function creates the scene point clouds from scene descriptions.

    Args:
        obj_ids: int tensor of object indices; reshaped to a flat vector.
        translations: float tensor, reshaped to (-1, 3), one per object.
        rotations: float tensor of quaternions, reshaped to (-1, 4).
        train_batch_size: `int`, training batch size (used for augmentation).
        num_objs: `int`, number of objects per batch when batch_size > 1.
        do_augmentation: `bool`, whether to randomly rotate the scene.

    Returns: Two tf.Tensors, the first contains all points of the
    objects in the batch with shape (N, 3) and the second contains the
    corresponding segment ids, the shape is (N,).
    """
    batch_size = tf.shape(obj_ids)[0]
    # flatten all inputs
    obj_ids = tf.reshape(obj_ids, (-1,))
    translations = tf.reshape(translations, (-1, 3))
    rotations = tf.reshape(rotations, (-1, 4))
    # Per-cloud point counts, derived from the cumulative slice offsets.
    clouds_num_points = (self.cloud_slice_indices[1:] -
                         self.cloud_slice_indices[:-1])
    # vector with the number of points of each cloud
    num_points = tf.gather(clouds_num_points, obj_ids)
    # vector with a range where each number i is num_points[i] repeated
    segment_ids = self._repeat(tf.range(tf.shape(num_points)[0]),
                               tf.to_int32(num_points), batch_size,
                               num_objs)
    segment_ids = tf.to_int32(segment_ids)
    # repeat translations[i] and rotations[i] num_points[i] times
    translations = tf.gather(translations, segment_ids)
    rotations = tf.gather(rotations, segment_ids)
    rotations = tfq.Quaternion(rotations)
    obj_ids = tf.gather(tf.to_float(obj_ids), segment_ids)
    # indices of points consist of the start index plus range(num_points)
    start = tf.gather(self.cloud_slice_indices, tf.to_int32(obj_ids))
    # A test batch (batch_size == 1) has exactly 2 objects; training batches
    # have num_objs objects. Both branches must be statically unrollable.
    ranges = tf.cond(tf.equal(batch_size, 1),
                     lambda: tf.concat([tf.range(num_points[i])
                                        for i in range(2)], axis=0),
                     lambda: tf.concat([tf.range(num_points[i])
                                        for i in range(num_objs)], axis=0))
    point_ids = tf.to_int32(start + ranges)
    points = tf.gather(self.clouds_tensor, point_ids)
    # Rotate objects. Note that the quaternions are relative to the object
    # clouds' origins, so no centering using the mean is required.
    points = tfq.rotate_vector_by_quaternion(rotations, points, 2, 2)
    points = tf.squeeze(points) + translations
    # if we're training, randomly rotate around the z_axis
    if do_augmentation:
        points = augment.pointcloud(points, segment_ids, batch_size,
                                    train_batch_size)
    return points, tf.to_float(segment_ids)
def dataset_input_fn(self, train_batch_size, split):
    """The train input function using the tf.data.Dataset API.

    Builds the per-phase datasets, pulls the next scene descriptions from
    the shared iterator and turns them into point clouds (with
    augmentation enabled).

    Args:
        train_batch_size: `int`, the batch size. Test batch size is always
            one.
        split: `int` in the interval [1, 15], the index of the split.

    Returns:
        A tuple `(points, segment_ids, labels, is_augmented)`.
    """
    self._create_tf_datasets(split, train_batch_size)
    (scene_obj_ids, scene_translations, scene_rotations,
     labels, is_augmented) = self.iterator.get_next()
    # Each training batch element contributes 6 objects.
    objects_per_batch = train_batch_size * 6
    points, segment_ids = self._input_fn(
        scene_obj_ids, scene_translations, scene_rotations,
        train_batch_size, objects_per_batch, True)
    return points, segment_ids, labels, is_augmented
def generalize_input_fn(self, trainable, disable_rotation=None):
    """Create the input function to use when running the generalization.

    This input function creates translation and rotation variables for
    each object, if trainable[i] is true or a constant if trainable[i]
    is false.

    Args:
        trainable: A list of `bool`s with one entry for each object that
            will be passed via self.obj_ids_pl.
            If trainable[i] is true, the translation and rotation for
            the i-th object in the batch will be trainable.
        disable_rotation: A list of `bool`s with one entry for each object
            that will be passed via self.obj_ids_pl. If trainable is set to
            true for this object and disable_rotation is set to true, only
            the translation of this object will be optimized

    Raises:
        ValueError: If called more than once on the same instance.
    """
    # Guard against double invocation: the variables are created exactly once.
    if self.translation_vars:
        raise ValueError("generalize_input_fn can only be called once per "
                         "input layer instance")
    for i, (t, no_rot) in enumerate(zip(trainable, disable_rotation)):
        # tf.Variable if the object's pose is optimizable, else a constant.
        tensor_t = tf.Variable if t else tf.constant
        self.translation_vars += [tensor_t([(0, 0, 0)], dtype=tf.float32,
                                           name="translation" + str(i))]
        if no_rot:
            tensor_t = tf.constant
        self.rotation_vars += [tensor_t([(0, 0, 0)], dtype=tf.float32,
                                        name="rotation" + str(i))]
    # Poses are expressed as deltas on top of the placeholder values.
    translation_delta = tf.reshape(self.translation_vars, (-1, 2, 3))
    rotation_delta = tf.reshape(self.rotation_vars, (-1, 2, 3))
    self.translations = self.translations_pl + translation_delta
    # don't optimize w of quaternion to prevent numerical instability
    rotation_delta = tf.pad(rotation_delta, [[0, 0], [0, 0], [1, 0]])
    self.rotations = self.rotations_pl + rotation_delta
    return self._input_fn(self.obj_ids_pl, self.translations,
                          self.rotations, None, len(trainable), False)
def get_transform_vars(self):
    """Return all variables created to perform rotation and translation."""
    # Constants created for non-trainable objects are filtered out here.
    candidates = self.rotation_vars + self.translation_vars
    return list(filter(lambda v: isinstance(v, tf.Variable), candidates))
def reset_transform_vars(self, sess):
    """Reset translation and rotation to identity."""
    # Re-run each variable's initializer op to restore its initial value.
    init_ops = [var.initializer for var in self.get_transform_vars()]
    for op in init_ops:
        sess.run(op)
| """Repeat a[i] repeats[i] times."""
return tf.cond(tf.equal(batch_size, 1),
lambda: utility.repeat(a, repeats, num_repeats=2),
lambda: utility.repeat(a, repeats, training_batch_size)) | identifier_body |
input_layer.py | """
Input functions for training and inference.
Author: Philipp Jund, 2018
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from SpatialRelationCNN.model import utility
from SpatialRelationCNN.model.generator_factory import GeneratorFactory
import SpatialRelationCNN.model.augmentation as augment
import numpy as np
import tensorflow as tf
import tfquaternion as tfq
class InputLayer(object):
"""The input pipeline base class, from RelationDataset to projection."""
def __init__(self, dataset, more_augmentation=False):
"""The input pipeline, from RelationDataset to projection.
Args:
dataset: A `RelationDataset` object.
"""
self.dataset = dataset
self.generator_factory = GeneratorFactory(self.dataset,
more_augmentation)
phases = ["train", "validation", "test"]
self.generators = {n: None for n in phases}
self.iterator = None
self.iterator_init_ops = {n: None for n in phases}
self.clouds_tensor, self.cloud_slice_indices = \
self.create_cloud_constants()
self.obj_ids_pl = tf.placeholder(tf.int32, shape=(None, 2),
name="obj_ids")
self.translations_pl = tf.placeholder(tf.float32, shape=(None, 2, 3),
name="translations")
self.rotations_pl = tf.placeholder(tf.float32, shape=(None, 2, 4),
name="rotations")
self.rotations = None # stores the resulting rotations ...
self.translations = None # ... and translations when generalizing
self.translation_vars = []
self.rotation_vars = []
@utility.scope_wrapper
def create_cloud_constants(self):
"""Create two `tf.constant`s of the obj point clouds and their ranges.
The point clouds have differing numbers of points. To efficiently
process them, all object point clouds are concatenated into one
constant. To retrieve them afterwards, we create a second constant with
shape (N+1), containing the start index for each point cloud with the
length as an additional index. With this we can use slicing, which
should be more efficient than using tf.where
"""
np_clouds = [self.dataset.clouds[n] for n in self.dataset.cloud_names]
# Create the slice indices as float32, as they'll only be used with
# tf.gather which has no GPU kernel for integers.
cloud_slice_indices = np.cumsum([0] + [len(c) for c in np_clouds],
dtype=np.float32)
tf_clouds = tf.constant(np.concatenate(np_clouds), dtype=tf.float32)
return tf_clouds, cloud_slice_indices
def switch_input(self, phase, sess):
"""Switch between test and training data."""
if phase in self.iterator_init_ops:
print("Switching input to {}.".format(phase))
sess.run(self.iterator_init_ops[phase])
else:
raise Exception("Invalid phase name, must be one of {}."
"".format(self.iterator_init_ops))
def _create_tf_datasets(self, split, batch_size):
"""Helper function that creates the train and test tf.data.Dataset."""
out_types = (tf.int32, tf.float32, tf.float32, tf.int32, tf.bool)
# out_shapes has an additional batch dim (None) and 3 or 1 scenes.
out_shapes = ((None, None, 2), (None, None, 2, 3), (None, None, 2, 4),
(None, None), (None,))
self.iterator = tf.data.Iterator.from_structure(out_types, out_shapes)
for p in ["train", "validation", "test"]:
# generator factory throws if there's no validation data
try:
self.generators[p] = self.generator_factory.scene_desc_generator(split, p)
except ValueError:
continue
out_shapes = tuple([np.array(x).shape for x in next(self.generators[p]())])
d = tf.data.Dataset.from_generator(self.generators[p], out_types,
out_shapes)
d = d.batch(batch_size if p == "train" else 1)
# d = d.prefetch(3)
self.iterator_init_ops[p] = self.iterator.make_initializer(d)
@staticmethod
def _repeat(a, repeats, batch_size, training_batch_size):
    """Repeat a[i] repeats[i] times.

    A batch of size 1 (test scenes) always contains exactly two objects,
    so the repeat count is fixed to 2; otherwise the training batch size
    determines the number of repeats.
    """
    return tf.cond(tf.equal(batch_size, 1),
                   lambda: utility.repeat(a, repeats, num_repeats=2),
                   lambda: utility.repeat(a, repeats, training_batch_size))
@utility.scope_wrapper
def _input_fn(self, obj_ids, translations, rotations, train_batch_size,
num_objs, do_augmentation):
"""The input function 's part that is shared.
This function creates the scene point clouds from scene descriptions.
Returns: Two tf.Tensors, the first contains all points of the
objects in the batch with shape (N, 3) and the second contains the
corresponding segment ids, the shape is (N,).
"""
batch_size = tf.shape(obj_ids)[0]
# flatten all inputs
obj_ids = tf.reshape(obj_ids, (-1,))
translations = tf.reshape(translations, (-1, 3))
rotations = tf.reshape(rotations, (-1, 4))
clouds_num_points = (self.cloud_slice_indices[1:] -
self.cloud_slice_indices[:-1])
# vector with the number of points of each cloud
num_points = tf.gather(clouds_num_points, obj_ids)
# vector with a range where each number i is num_points[i] repeated
segment_ids = self._repeat(tf.range(tf.shape(num_points)[0]),
tf.to_int32(num_points), batch_size,
num_objs)
segment_ids = tf.to_int32(segment_ids)
# repeat translations[i] and rotations[i] num_points[i] times
translations = tf.gather(translations, segment_ids)
rotations = tf.gather(rotations, segment_ids)
rotations = tfq.Quaternion(rotations)
obj_ids = tf.gather(tf.to_float(obj_ids), segment_ids)
# indices of points consist of the start index plus range(num_points)
start = tf.gather(self.cloud_slice_indices, tf.to_int32(obj_ids))
ranges = tf.cond(tf.equal(batch_size, 1),
lambda: tf.concat([tf.range(num_points[i])
for i in range(2)], axis=0),
lambda: tf.concat([tf.range(num_points[i])
for i in range(num_objs)], axis=0))
point_ids = tf.to_int32(start + ranges)
points = tf.gather(self.clouds_tensor, point_ids)
# Rotate objects. Note that the quaternions are relative to the object
# clouds' origins, so no centering using the mean is required.
points = tfq.rotate_vector_by_quaternion(rotations, points, 2, 2)
points = tf.squeeze(points) + translations
# if we're training, randomly rotate around the z_axis
if do_augmentation:
points = augment.pointcloud(points, segment_ids, batch_size,
train_batch_size)
return points, tf.to_float(segment_ids)
def dataset_input_fn(self, train_batch_size, split):
"""The train input function using the tf.data.Dataset API.
Args:
train_batch_size: `int`, the batch size. Test batch size is always | one.
split: `int` in the interval [1, 15], the index of the split.
"""
self._create_tf_datasets(split, train_batch_size)
next_el = self.iterator.get_next()
obj_ids, translations, rotations, labels, is_augmented = next_el
points, segment_ids = self._input_fn(obj_ids, translations,
rotations, train_batch_size,
train_batch_size * 6, True)
return (points, segment_ids, labels, is_augmented)
def generalize_input_fn(self, trainable, disable_rotation=None):
"""Create the input function to use when running the generalization.
This input function creates translation and rotation variables for
each object, if trainable[i] is true or a constant if trainable[i]
is false.
Args:
trainable: A list of `bool`s with one entry for each object that
will be passed via self.obj_ids_pl.
If trainable[i] is true, the translation and rotation for
the i-th object in the batch will be trainable.
disable_rotation: A list of `bool`s with one entry for each object
that will be passed via self.obj_ids_pl. If trainable is set to
true for this object and disable_rotation is set to true, only
the translation of this object will be optimized
"""
if self.translation_vars:
raise ValueError("generalize_input_fn can only be called once per "
"input layer instance")
for i, (t, no_rot) in enumerate(zip(trainable, disable_rotation)):
tensor_t = tf.Variable if t else tf.constant
self.translation_vars += [tensor_t([(0, 0, 0)], dtype=tf.float32,
name="translation" + str(i))]
if no_rot:
tensor_t = tf.constant
self.rotation_vars += [tensor_t([(0, 0, 0)], dtype=tf.float32,
name="rotation" + str(i))]
translation_delta = tf.reshape(self.translation_vars, (-1, 2, 3))
rotation_delta = tf.reshape(self.rotation_vars, (-1, 2, 3))
self.translations = self.translations_pl + translation_delta
# don't optimize w of quaternion to prevent numerical instability
rotation_delta = tf.pad(rotation_delta, [[0, 0], [0, 0], [1, 0]])
self.rotations = self.rotations_pl + rotation_delta
return self._input_fn(self.obj_ids_pl, self.translations,
self.rotations, None, len(trainable), False)
def get_transform_vars(self):
"""Return all variables created to perform rotation and translation."""
return [v for v in (self.rotation_vars + self.translation_vars)
if isinstance(v, tf.Variable)]
def reset_transform_vars(self, sess):
"""Reset translation and rotation to identity."""
for v in self.get_transform_vars():
sess.run(v.initializer) | random_line_split | |
input_layer.py | """
Input functions for training and inference.
Author: Philipp Jund, 2018
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from SpatialRelationCNN.model import utility
from SpatialRelationCNN.model.generator_factory import GeneratorFactory
import SpatialRelationCNN.model.augmentation as augment
import numpy as np
import tensorflow as tf
import tfquaternion as tfq
class InputLayer(object):
"""The input pipeline base class, from RelationDataset to projection."""
def __init__(self, dataset, more_augmentation=False):
"""The input pipeline, from RelationDataset to projection.
Args:
dataset: A `RelationDataset` object.
"""
self.dataset = dataset
self.generator_factory = GeneratorFactory(self.dataset,
more_augmentation)
phases = ["train", "validation", "test"]
self.generators = {n: None for n in phases}
self.iterator = None
self.iterator_init_ops = {n: None for n in phases}
self.clouds_tensor, self.cloud_slice_indices = \
self.create_cloud_constants()
self.obj_ids_pl = tf.placeholder(tf.int32, shape=(None, 2),
name="obj_ids")
self.translations_pl = tf.placeholder(tf.float32, shape=(None, 2, 3),
name="translations")
self.rotations_pl = tf.placeholder(tf.float32, shape=(None, 2, 4),
name="rotations")
self.rotations = None # stores the resulting rotations ...
self.translations = None # ... and translations when generalizing
self.translation_vars = []
self.rotation_vars = []
@utility.scope_wrapper
def create_cloud_constants(self):
"""Create two `tf.constant`s of the obj point clouds and their ranges.
The point clouds have differing numbers of points. To efficiently
process them, all object point clouds are concatenated into one
constant. To retrieve them afterwards, we create a second constant with
shape (N+1), containing the start index for each point cloud with the
length as an additional index. With this we can use slicing, which
should be more efficient than using tf.where
"""
np_clouds = [self.dataset.clouds[n] for n in self.dataset.cloud_names]
# Create the slice indices as float32, as they'll only be used with
# tf.gather which has no GPU kernel for integers.
cloud_slice_indices = np.cumsum([0] + [len(c) for c in np_clouds],
dtype=np.float32)
tf_clouds = tf.constant(np.concatenate(np_clouds), dtype=tf.float32)
return tf_clouds, cloud_slice_indices
def switch_input(self, phase, sess):
"""Switch between test and training data."""
if phase in self.iterator_init_ops:
print("Switching input to {}.".format(phase))
sess.run(self.iterator_init_ops[phase])
else:
raise Exception("Invalid phase name, must be one of {}."
"".format(self.iterator_init_ops))
def _create_tf_datasets(self, split, batch_size):
"""Helper function that creates the train and test tf.data.Dataset."""
out_types = (tf.int32, tf.float32, tf.float32, tf.int32, tf.bool)
# out_shapes has an additional batch dim (None) and 3 or 1 scenes.
out_shapes = ((None, None, 2), (None, None, 2, 3), (None, None, 2, 4),
(None, None), (None,))
self.iterator = tf.data.Iterator.from_structure(out_types, out_shapes)
for p in ["train", "validation", "test"]:
# generator factory throws if there's no validation data
|
@staticmethod
def _repeat(a, repeats, batch_size, training_batch_size):
"""Repeat a[i] repeats[i] times."""
return tf.cond(tf.equal(batch_size, 1),
lambda: utility.repeat(a, repeats, num_repeats=2),
lambda: utility.repeat(a, repeats, training_batch_size))
@utility.scope_wrapper
def _input_fn(self, obj_ids, translations, rotations, train_batch_size,
num_objs, do_augmentation):
"""The input function 's part that is shared.
This function creates the scene point clouds from scene descriptions.
Returns: Two tf.Tensors, the first contains all points of the
objects in the batch with shape (N, 3) and the second contains the
corresponding segment ids, the shape is (N,).
"""
batch_size = tf.shape(obj_ids)[0]
# flatten all inputs
obj_ids = tf.reshape(obj_ids, (-1,))
translations = tf.reshape(translations, (-1, 3))
rotations = tf.reshape(rotations, (-1, 4))
clouds_num_points = (self.cloud_slice_indices[1:] -
self.cloud_slice_indices[:-1])
# vector with the number of points of each cloud
num_points = tf.gather(clouds_num_points, obj_ids)
# vector with a range where each number i is num_points[i] repeated
segment_ids = self._repeat(tf.range(tf.shape(num_points)[0]),
tf.to_int32(num_points), batch_size,
num_objs)
segment_ids = tf.to_int32(segment_ids)
# repeat translations[i] and rotations[i] num_points[i] times
translations = tf.gather(translations, segment_ids)
rotations = tf.gather(rotations, segment_ids)
rotations = tfq.Quaternion(rotations)
obj_ids = tf.gather(tf.to_float(obj_ids), segment_ids)
# indices of points consist of the start index plus range(num_points)
start = tf.gather(self.cloud_slice_indices, tf.to_int32(obj_ids))
ranges = tf.cond(tf.equal(batch_size, 1),
lambda: tf.concat([tf.range(num_points[i])
for i in range(2)], axis=0),
lambda: tf.concat([tf.range(num_points[i])
for i in range(num_objs)], axis=0))
point_ids = tf.to_int32(start + ranges)
points = tf.gather(self.clouds_tensor, point_ids)
# Rotate objects. Note that the quaternions are relative to the object
# clouds' origins, so no centering using the mean is required.
points = tfq.rotate_vector_by_quaternion(rotations, points, 2, 2)
points = tf.squeeze(points) + translations
# if we're training, randomly rotate around the z_axis
if do_augmentation:
points = augment.pointcloud(points, segment_ids, batch_size,
train_batch_size)
return points, tf.to_float(segment_ids)
def dataset_input_fn(self, train_batch_size, split):
"""The train input function using the tf.data.Dataset API.
Args:
train_batch_size: `int`, the batch size. Test batch size is always
one.
split: `int` in the interval [1, 15], the index of the split.
"""
self._create_tf_datasets(split, train_batch_size)
next_el = self.iterator.get_next()
obj_ids, translations, rotations, labels, is_augmented = next_el
points, segment_ids = self._input_fn(obj_ids, translations,
rotations, train_batch_size,
train_batch_size * 6, True)
return (points, segment_ids, labels, is_augmented)
def generalize_input_fn(self, trainable, disable_rotation=None):
"""Create the input function to use when running the generalization.
This input function creates translation and rotation variables for
each object, if trainable[i] is true or a constant if trainable[i]
is false.
Args:
trainable: A list of `bool`s with one entry for each object that
will be passed via self.obj_ids_pl.
If trainable[i] is true, the translation and rotation for
the i-th object in the batch will be trainable.
disable_rotation: A list of `bool`s with one entry for each object
that will be passed via self.obj_ids_pl. If trainable is set to
true for this object and disable_rotation is set to true, only
the translation of this object will be optimized
"""
if self.translation_vars:
raise ValueError("generalize_input_fn can only be called once per "
"input layer instance")
for i, (t, no_rot) in enumerate(zip(trainable, disable_rotation)):
tensor_t = tf.Variable if t else tf.constant
self.translation_vars += [tensor_t([(0, 0, 0)], dtype=tf.float32,
name="translation" + str(i))]
if no_rot:
tensor_t = tf.constant
self.rotation_vars += [tensor_t([(0, 0, 0)], dtype=tf.float32,
name="rotation" + str(i))]
translation_delta = tf.reshape(self.translation_vars, (-1, 2, 3))
rotation_delta = tf.reshape(self.rotation_vars, (-1, 2, 3))
self.translations = self.translations_pl + translation_delta
# don't optimize w of quaternion to prevent numerical instability
rotation_delta = tf.pad(rotation_delta, [[0, 0], [0, 0], [1, 0]])
self.rotations = self.rotations_pl + rotation_delta
return self._input_fn(self.obj_ids_pl, self.translations,
self.rotations, None, len(trainable), False)
def get_transform_vars(self):
"""Return all variables created to perform rotation and translation."""
return [v for v in (self.rotation_vars + self.translation_vars)
if isinstance(v, tf.Variable)]
def reset_transform_vars(self, sess):
"""Reset translation and rotation to identity."""
for v in self.get_transform_vars():
sess.run(v.initializer)
| try:
self.generators[p] = self.generator_factory.scene_desc_generator(split, p)
except ValueError:
continue
out_shapes = tuple([np.array(x).shape for x in next(self.generators[p]())])
d = tf.data.Dataset.from_generator(self.generators[p], out_types,
out_shapes)
d = d.batch(batch_size if p == "train" else 1)
# d = d.prefetch(3)
self.iterator_init_ops[p] = self.iterator.make_initializer(d) | conditional_block |
input_layer.py | """
Input functions for training and inference.
Author: Philipp Jund, 2018
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from SpatialRelationCNN.model import utility
from SpatialRelationCNN.model.generator_factory import GeneratorFactory
import SpatialRelationCNN.model.augmentation as augment
import numpy as np
import tensorflow as tf
import tfquaternion as tfq
class InputLayer(object):
"""The input pipeline base class, from RelationDataset to projection."""
def __init__(self, dataset, more_augmentation=False):
"""The input pipeline, from RelationDataset to projection.
Args:
dataset: A `RelationDataset` object.
"""
self.dataset = dataset
self.generator_factory = GeneratorFactory(self.dataset,
more_augmentation)
phases = ["train", "validation", "test"]
self.generators = {n: None for n in phases}
self.iterator = None
self.iterator_init_ops = {n: None for n in phases}
self.clouds_tensor, self.cloud_slice_indices = \
self.create_cloud_constants()
self.obj_ids_pl = tf.placeholder(tf.int32, shape=(None, 2),
name="obj_ids")
self.translations_pl = tf.placeholder(tf.float32, shape=(None, 2, 3),
name="translations")
self.rotations_pl = tf.placeholder(tf.float32, shape=(None, 2, 4),
name="rotations")
self.rotations = None # stores the resulting rotations ...
self.translations = None # ... and translations when generalizing
self.translation_vars = []
self.rotation_vars = []
@utility.scope_wrapper
def | (self):
"""Create two `tf.constant`s of the obj point clouds and their ranges.
The point clouds have differing numbers of points. To efficiently
process them, all object point clouds are concatenated into one
constant. To retrieve them afterwards, we create a second constant with
shape (N+1), containing the start index for each point cloud with the
length as an additional index. With this we can use slicing, which
should be more efficient than using tf.where
"""
np_clouds = [self.dataset.clouds[n] for n in self.dataset.cloud_names]
# Create the slice indices as float32, as they'll only be used with
# tf.gather which has no GPU kernel for integers.
cloud_slice_indices = np.cumsum([0] + [len(c) for c in np_clouds],
dtype=np.float32)
tf_clouds = tf.constant(np.concatenate(np_clouds), dtype=tf.float32)
return tf_clouds, cloud_slice_indices
def switch_input(self, phase, sess):
"""Switch between test and training data."""
if phase in self.iterator_init_ops:
print("Switching input to {}.".format(phase))
sess.run(self.iterator_init_ops[phase])
else:
raise Exception("Invalid phase name, must be one of {}."
"".format(self.iterator_init_ops))
def _create_tf_datasets(self, split, batch_size):
"""Helper function that creates the train and test tf.data.Dataset."""
out_types = (tf.int32, tf.float32, tf.float32, tf.int32, tf.bool)
# out_shapes has an additional batch dim (None) and 3 or 1 scenes.
out_shapes = ((None, None, 2), (None, None, 2, 3), (None, None, 2, 4),
(None, None), (None,))
self.iterator = tf.data.Iterator.from_structure(out_types, out_shapes)
for p in ["train", "validation", "test"]:
# generator factory throws if there's no validation data
try:
self.generators[p] = self.generator_factory.scene_desc_generator(split, p)
except ValueError:
continue
out_shapes = tuple([np.array(x).shape for x in next(self.generators[p]())])
d = tf.data.Dataset.from_generator(self.generators[p], out_types,
out_shapes)
d = d.batch(batch_size if p == "train" else 1)
# d = d.prefetch(3)
self.iterator_init_ops[p] = self.iterator.make_initializer(d)
@staticmethod
def _repeat(a, repeats, batch_size, training_batch_size):
"""Repeat a[i] repeats[i] times."""
return tf.cond(tf.equal(batch_size, 1),
lambda: utility.repeat(a, repeats, num_repeats=2),
lambda: utility.repeat(a, repeats, training_batch_size))
@utility.scope_wrapper
def _input_fn(self, obj_ids, translations, rotations, train_batch_size,
num_objs, do_augmentation):
"""The input function 's part that is shared.
This function creates the scene point clouds from scene descriptions.
Returns: Two tf.Tensors, the first contains all points of the
objects in the batch with shape (N, 3) and the second contains the
corresponding segment ids, the shape is (N,).
"""
batch_size = tf.shape(obj_ids)[0]
# flatten all inputs
obj_ids = tf.reshape(obj_ids, (-1,))
translations = tf.reshape(translations, (-1, 3))
rotations = tf.reshape(rotations, (-1, 4))
clouds_num_points = (self.cloud_slice_indices[1:] -
self.cloud_slice_indices[:-1])
# vector with the number of points of each cloud
num_points = tf.gather(clouds_num_points, obj_ids)
# vector with a range where each number i is num_points[i] repeated
segment_ids = self._repeat(tf.range(tf.shape(num_points)[0]),
tf.to_int32(num_points), batch_size,
num_objs)
segment_ids = tf.to_int32(segment_ids)
# repeat translations[i] and rotations[i] num_points[i] times
translations = tf.gather(translations, segment_ids)
rotations = tf.gather(rotations, segment_ids)
rotations = tfq.Quaternion(rotations)
obj_ids = tf.gather(tf.to_float(obj_ids), segment_ids)
# indices of points consist of the start index plus range(num_points)
start = tf.gather(self.cloud_slice_indices, tf.to_int32(obj_ids))
ranges = tf.cond(tf.equal(batch_size, 1),
lambda: tf.concat([tf.range(num_points[i])
for i in range(2)], axis=0),
lambda: tf.concat([tf.range(num_points[i])
for i in range(num_objs)], axis=0))
point_ids = tf.to_int32(start + ranges)
points = tf.gather(self.clouds_tensor, point_ids)
# Rotate objects. Note that the quaternions are relative to the object
# clouds' origins, so no centering using the mean is required.
points = tfq.rotate_vector_by_quaternion(rotations, points, 2, 2)
points = tf.squeeze(points) + translations
# if we're training, randomly rotate around the z_axis
if do_augmentation:
points = augment.pointcloud(points, segment_ids, batch_size,
train_batch_size)
return points, tf.to_float(segment_ids)
def dataset_input_fn(self, train_batch_size, split):
"""The train input function using the tf.data.Dataset API.
Args:
train_batch_size: `int`, the batch size. Test batch size is always
one.
split: `int` in the interval [1, 15], the index of the split.
"""
self._create_tf_datasets(split, train_batch_size)
next_el = self.iterator.get_next()
obj_ids, translations, rotations, labels, is_augmented = next_el
points, segment_ids = self._input_fn(obj_ids, translations,
rotations, train_batch_size,
train_batch_size * 6, True)
return (points, segment_ids, labels, is_augmented)
def generalize_input_fn(self, trainable, disable_rotation=None):
"""Create the input function to use when running the generalization.
This input function creates translation and rotation variables for
each object, if trainable[i] is true or a constant if trainable[i]
is false.
Args:
trainable: A list of `bool`s with one entry for each object that
will be passed via self.obj_ids_pl.
If trainable[i] is true, the translation and rotation for
the i-th object in the batch will be trainable.
disable_rotation: A list of `bool`s with one entry for each object
that will be passed via self.obj_ids_pl. If trainable is set to
true for this object and disable_rotation is set to true, only
the translation of this object will be optimized
"""
if self.translation_vars:
raise ValueError("generalize_input_fn can only be called once per "
"input layer instance")
for i, (t, no_rot) in enumerate(zip(trainable, disable_rotation)):
tensor_t = tf.Variable if t else tf.constant
self.translation_vars += [tensor_t([(0, 0, 0)], dtype=tf.float32,
name="translation" + str(i))]
if no_rot:
tensor_t = tf.constant
self.rotation_vars += [tensor_t([(0, 0, 0)], dtype=tf.float32,
name="rotation" + str(i))]
translation_delta = tf.reshape(self.translation_vars, (-1, 2, 3))
rotation_delta = tf.reshape(self.rotation_vars, (-1, 2, 3))
self.translations = self.translations_pl + translation_delta
# don't optimize w of quaternion to prevent numerical instability
rotation_delta = tf.pad(rotation_delta, [[0, 0], [0, 0], [1, 0]])
self.rotations = self.rotations_pl + rotation_delta
return self._input_fn(self.obj_ids_pl, self.translations,
self.rotations, None, len(trainable), False)
def get_transform_vars(self):
"""Return all variables created to perform rotation and translation."""
return [v for v in (self.rotation_vars + self.translation_vars)
if isinstance(v, tf.Variable)]
def reset_transform_vars(self, sess):
"""Reset translation and rotation to identity."""
for v in self.get_transform_vars():
sess.run(v.initializer)
| create_cloud_constants | identifier_name |
world.py | #!/usr/bin/env python3
#####################################################
# #
# ______ _______..___ ___. ______ #
# / __ \ / || \/ | / __ \ #
# | | | | | (----`| \ / | | | | | #
# | | | | \ \ | |\/| | | | | | #
# | `--' | .----) | | | | | | `--' | #
# \______/ |_______/ |__| |__| \______/ #
# #
# #
#####################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import random
import math
import traceback
import logging
from copy import deepcopy
from time import perf_counter as pf
from consts import Consts
from cell import Cell
class World():
| def __init__(self, player0, player1, names = None):
# Variables and setup
self.cells_count = 0
# Init
self.new_game()
self.player0 = player0
self.player1 = player1
self.names = names
# Methods
def new_game(self):
"""Create a new game.
Args:
Returns:
"""
self.cells = [] # Array of cells
self.frame_count = 0
self.database = []
self.timer = [Consts["MAX_TIME"], Consts["MAX_TIME"]]
self.result = None
# Define the players first
self.cells.append(Cell(0, [Consts["WORLD_X"] / 4, Consts["WORLD_Y"] / 2], [0, 0], Consts["DEFAULT_RADIUS"]))
self.cells.append(Cell(1, [Consts["WORLD_X"] / 4 * 3, Consts["WORLD_Y"] / 2], [0, 0], Consts["DEFAULT_RADIUS"]))
# Generate a bunch of random cells
for i in range(Consts["CELLS_COUNT"]):
if i < 4:
rad = 1.5 + (random.random() * 1.5) # Small cells
elif i < 10:
rad = 10 + (random.random() * 4) # Big cells
else:
rad = 2 + (random.random() * 9) # Everything else
x = Consts["WORLD_X"] * random.random()
y = Consts["WORLD_Y"] * random.random()
cell = Cell(i + 2, [x, y], [(random.random() - 0.5) * 2, (random.random() - 0.5) * 2], rad)
safe_dist = Consts["SAFE_DIST"] + rad
while min(map(cell.distance_from, self.cells[:2])) < safe_dist:
cell.pos = [
Consts["WORLD_X"] * random.random(),
Consts["WORLD_Y"] * random.random()
]
self.cells.append(cell)
def check_point(self, flag0, flag1, cause):
"""Checkpoint to determine if the game is over.
Args:
flag0: mark the status of player0.
flag1: mark the status of player1.
cause: reason for the end of the game.
Returns:
whether it's endgame.
"""
if not flag0 and flag1:
self.game_over(0, cause, (flag0, flag1))
elif flag0 and not flag1:
self.game_over(1, cause, (flag0, flag1))
elif flag0 and flag1:
self.game_over(-1, cause, (flag0, flag1))
return bool(flag0 or flag1)
def game_over(self, winner, cause, detail = None):
"""Game over.
Args:
winner: id of the winner.
cause: reason for the end of the game.
Returns:
"""
self.result = {
"players": self.names,
"winner": winner,
"cause": cause,
"detail": detail,
"data": self.database,
"saved": False
}
print("Winner Winner Chicken Dinner!")
if winner != -1:
print("Winner: Player {}.".format(winner))
else:
print("Game ends in a draw.")
print(cause)
def eject(self, player, theta):
"""Create a new cell after the ejection process.
Args:
player: the player.
theta: angle.
Returns:
"""
if player.dead or theta == None:
return
# Reduce force in proportion to area
fx = math.sin(theta)
fy = math.cos(theta)
new_veloc_x = player.veloc[0] + Consts["DELTA_VELOC"] * fx * (1 - Consts["EJECT_MASS_RATIO"])
new_veloc_y = player.veloc[1] + Consts["DELTA_VELOC"] * fy * (1 - Consts["EJECT_MASS_RATIO"])
# Push player
player.veloc[0] -= Consts["DELTA_VELOC"] * fx * Consts["EJECT_MASS_RATIO"]
player.veloc[1] -= Consts["DELTA_VELOC"] * fy * Consts["EJECT_MASS_RATIO"]
# Shoot off the expended mass in opposite direction
newrad = player.radius * Consts["EJECT_MASS_RATIO"] ** 0.5
# Lose some mass (shall we say, Consts["EJECT_MASS_RATIO"]?)
player.radius *= (1 - Consts["EJECT_MASS_RATIO"]) ** 0.5
# Create new cell
new_pos_x = player.pos[0] + fx * (player.radius + newrad)
new_pos_y = player.pos[1] + fy * (player.radius + newrad)
new_cell = Cell(len(self.cells), [new_pos_x, new_pos_y], [new_veloc_x, new_veloc_y], newrad)
new_cell.stay_in_bounds()
new_cell.limit_speed()
self.cells.append(new_cell)
def absorb(self, collision):
"""Performing the absorption process.
Args:
collision: all the cells that collided.
Returns:
"""
# Calculate total momentum and mass
mass = sum(self.cells[ele].area() for ele in collision)
px = sum(self.cells[ele].area() * self.cells[ele].veloc[0] for ele in collision)
py = sum(self.cells[ele].area() * self.cells[ele].veloc[1] for ele in collision)
# Determine the biggest cell
collision.sort(key = lambda ele: self.cells[ele].radius)
biggest = collision.pop()
self.cells[biggest].radius = (mass / math.pi) ** 0.5
self.cells[biggest].veloc[0] = px / mass
self.cells[biggest].veloc[1] = py / mass
for ele in collision:
self.cells[ele].dead = True
def update(self, frame_delta):
"""Create new frames.
Args:
frame_delta: Time interval between two frames.
Returns:
"""
# Save
self.database.append(deepcopy(self.cells))
# New frame
self.frame_count += 1
if self.frame_count == Consts["MAX_FRAME"]: # Time's up
self.check_point(self.cells[0].radius <= self.cells[1].radius, self.cells[0].radius >= self.cells[1].radius, "MAX_FRAME")
return
for cell in self.cells:
if not cell.dead:
cell.move(frame_delta)
# Detect collisions
collisions = []
for i in range(len(self.cells)):
if self.cells[i].dead:
continue
for j in range(i + 1, len(self.cells)):
if not self.cells[j].dead and self.cells[i].collide(self.cells[j]):
if self.cells[i].collide_group == None == self.cells[j].collide_group:
self.cells[i].collide_group = self.cells[j].collide_group = len(collisions)
collisions.append([i, j])
elif self.cells[i].collide_group != None == self.cells[j].collide_group:
collisions[self.cells[i].collide_group].append(j)
self.cells[j].collide_group = self.cells[i].collide_group
elif self.cells[i].collide_group == None != self.cells[j].collide_group:
collisions[self.cells[j].collide_group].append(i)
self.cells[i].collide_group = self.cells[j].collide_group
elif self.cells[i].collide_group != self.cells[j].collide_group:
collisions[self.cells[i].collide_group] += collisions[self.cells[j].collide_group]
for ele in collisions[self.cells[j].collide_group]:
self.cells[ele].collide_group = self.cells[i].collide_group
collisions[self.cells[j].collide_group] = []
# Run absorbs
for collision in collisions:
if collision != []:
self.absorb(collision)
# If we just killed the player, Game over
if self.check_point(self.cells[0].dead, self.cells[1].dead, "PLAYER_DEAD"):
return
# Eject!
allcells = [cell for cell in self.cells if not cell.dead]
self.cells_count = len(allcells)
theta0 = theta1 = None
flag0 = flag1 = False
if self.timer[0] > 0:
try:
ti = pf()
theta0 = self.player0.strategy(deepcopy(allcells))
tf = pf()
self.timer[0] -= tf - ti
except Exception as e:
logging.error(traceback.format_exc())
flag0 = e
if self.timer[1] > 0:
try:
ti = pf()
theta1 = self.player1.strategy(deepcopy(allcells))
tf = pf()
self.timer[1] -= tf - ti
except Exception as e:
logging.error(traceback.format_exc())
flag1 = e
if isinstance(theta0, (int, float, type(None))):
if self.timer[0] >= 0:
self.eject(self.cells[0], theta0)
else:
flag0 = True
if isinstance(theta1, (int, float, type(None))):
if self.timer[1] >= 0:
self.eject(self.cells[1], theta1)
else:
flag1 = True
self.check_point(flag0, flag1, "RUNTIME_ERROR") | identifier_body | |
world.py | #!/usr/bin/env python3
#####################################################
# #
# ______ _______..___ ___. ______ #
# / __ \ / || \/ | / __ \ #
# | | | | | (----`| \ / | | | | | #
# | | | | \ \ | |\/| | | | | | #
# | `--' | .----) | | | | | | `--' | #
# \______/ |_______/ |__| |__| \______/ #
# #
# #
#####################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import random
import math
import traceback
import logging
from copy import deepcopy
from time import perf_counter as pf
from consts import Consts
from cell import Cell
class World():
def __init__(self, player0, player1, names = None):
# Variables and setup
self.cells_count = 0
# Init
self.new_game()
self.player0 = player0
self.player1 = player1
self.names = names
# Methods
def new_game(self):
"""Create a new game.
Args:
Returns:
"""
self.cells = [] # Array of cells
self.frame_count = 0
self.database = []
self.timer = [Consts["MAX_TIME"], Consts["MAX_TIME"]]
self.result = None
# Define the players first
self.cells.append(Cell(0, [Consts["WORLD_X"] / 4, Consts["WORLD_Y"] / 2], [0, 0], Consts["DEFAULT_RADIUS"]))
self.cells.append(Cell(1, [Consts["WORLD_X"] / 4 * 3, Consts["WORLD_Y"] / 2], [0, 0], Consts["DEFAULT_RADIUS"]))
# Generate a bunch of random cells
for i in range(Consts["CELLS_COUNT"]):
if i < 4:
rad = 1.5 + (random.random() * 1.5) # Small cells
elif i < 10:
rad = 10 + (random.random() * 4) # Big cells
else:
rad = 2 + (random.random() * 9) # Everything else
x = Consts["WORLD_X"] * random.random()
y = Consts["WORLD_Y"] * random.random()
cell = Cell(i + 2, [x, y], [(random.random() - 0.5) * 2, (random.random() - 0.5) * 2], rad)
safe_dist = Consts["SAFE_DIST"] + rad
while min(map(cell.distance_from, self.cells[:2])) < safe_dist:
cell.pos = [
Consts["WORLD_X"] * random.random(),
Consts["WORLD_Y"] * random.random()
]
self.cells.append(cell)
def check_point(self, flag0, flag1, cause):
"""Checkpoint to determine if the game is over.
Args:
flag0: mark the status of player0.
flag1: mark the status of player1.
cause: reason for the end of the game.
Returns:
whether it's endgame.
"""
if not flag0 and flag1:
self.game_over(0, cause, (flag0, flag1))
elif flag0 and not flag1:
self.game_over(1, cause, (flag0, flag1))
elif flag0 and flag1:
self.game_over(-1, cause, (flag0, flag1))
return bool(flag0 or flag1)
def game_over(self, winner, cause, detail = None):
"""Game over.
Args:
winner: id of the winner.
cause: reason for the end of the game.
Returns:
"""
self.result = {
"players": self.names,
"winner": winner,
"cause": cause,
"detail": detail,
"data": self.database,
"saved": False
}
print("Winner Winner Chicken Dinner!")
if winner != -1:
print("Winner: Player {}.".format(winner))
else:
|
print(cause)
def eject(self, player, theta):
"""Create a new cell after the ejection process.
Args:
player: the player.
theta: angle.
Returns:
"""
if player.dead or theta == None:
return
# Reduce force in proportion to area
fx = math.sin(theta)
fy = math.cos(theta)
new_veloc_x = player.veloc[0] + Consts["DELTA_VELOC"] * fx * (1 - Consts["EJECT_MASS_RATIO"])
new_veloc_y = player.veloc[1] + Consts["DELTA_VELOC"] * fy * (1 - Consts["EJECT_MASS_RATIO"])
# Push player
player.veloc[0] -= Consts["DELTA_VELOC"] * fx * Consts["EJECT_MASS_RATIO"]
player.veloc[1] -= Consts["DELTA_VELOC"] * fy * Consts["EJECT_MASS_RATIO"]
# Shoot off the expended mass in opposite direction
newrad = player.radius * Consts["EJECT_MASS_RATIO"] ** 0.5
# Lose some mass (shall we say, Consts["EJECT_MASS_RATIO"]?)
player.radius *= (1 - Consts["EJECT_MASS_RATIO"]) ** 0.5
# Create new cell
new_pos_x = player.pos[0] + fx * (player.radius + newrad)
new_pos_y = player.pos[1] + fy * (player.radius + newrad)
new_cell = Cell(len(self.cells), [new_pos_x, new_pos_y], [new_veloc_x, new_veloc_y], newrad)
new_cell.stay_in_bounds()
new_cell.limit_speed()
self.cells.append(new_cell)
def absorb(self, collision):
"""Performing the absorption process.
Args:
collision: all the cells that collided.
Returns:
"""
# Calculate total momentum and mass
mass = sum(self.cells[ele].area() for ele in collision)
px = sum(self.cells[ele].area() * self.cells[ele].veloc[0] for ele in collision)
py = sum(self.cells[ele].area() * self.cells[ele].veloc[1] for ele in collision)
# Determine the biggest cell
collision.sort(key = lambda ele: self.cells[ele].radius)
biggest = collision.pop()
self.cells[biggest].radius = (mass / math.pi) ** 0.5
self.cells[biggest].veloc[0] = px / mass
self.cells[biggest].veloc[1] = py / mass
for ele in collision:
self.cells[ele].dead = True
def update(self, frame_delta):
"""Create new frames.
Args:
frame_delta: Time interval between two frames.
Returns:
"""
# Save
self.database.append(deepcopy(self.cells))
# New frame
self.frame_count += 1
if self.frame_count == Consts["MAX_FRAME"]: # Time's up
self.check_point(self.cells[0].radius <= self.cells[1].radius, self.cells[0].radius >= self.cells[1].radius, "MAX_FRAME")
return
for cell in self.cells:
if not cell.dead:
cell.move(frame_delta)
# Detect collisions
collisions = []
for i in range(len(self.cells)):
if self.cells[i].dead:
continue
for j in range(i + 1, len(self.cells)):
if not self.cells[j].dead and self.cells[i].collide(self.cells[j]):
if self.cells[i].collide_group == None == self.cells[j].collide_group:
self.cells[i].collide_group = self.cells[j].collide_group = len(collisions)
collisions.append([i, j])
elif self.cells[i].collide_group != None == self.cells[j].collide_group:
collisions[self.cells[i].collide_group].append(j)
self.cells[j].collide_group = self.cells[i].collide_group
elif self.cells[i].collide_group == None != self.cells[j].collide_group:
collisions[self.cells[j].collide_group].append(i)
self.cells[i].collide_group = self.cells[j].collide_group
elif self.cells[i].collide_group != self.cells[j].collide_group:
collisions[self.cells[i].collide_group] += collisions[self.cells[j].collide_group]
for ele in collisions[self.cells[j].collide_group]:
self.cells[ele].collide_group = self.cells[i].collide_group
collisions[self.cells[j].collide_group] = []
# Run absorbs
for collision in collisions:
if collision != []:
self.absorb(collision)
# If we just killed the player, Game over
if self.check_point(self.cells[0].dead, self.cells[1].dead, "PLAYER_DEAD"):
return
# Eject!
allcells = [cell for cell in self.cells if not cell.dead]
self.cells_count = len(allcells)
theta0 = theta1 = None
flag0 = flag1 = False
if self.timer[0] > 0:
try:
ti = pf()
theta0 = self.player0.strategy(deepcopy(allcells))
tf = pf()
self.timer[0] -= tf - ti
except Exception as e:
logging.error(traceback.format_exc())
flag0 = e
if self.timer[1] > 0:
try:
ti = pf()
theta1 = self.player1.strategy(deepcopy(allcells))
tf = pf()
self.timer[1] -= tf - ti
except Exception as e:
logging.error(traceback.format_exc())
flag1 = e
if isinstance(theta0, (int, float, type(None))):
if self.timer[0] >= 0:
self.eject(self.cells[0], theta0)
else:
flag0 = True
if isinstance(theta1, (int, float, type(None))):
if self.timer[1] >= 0:
self.eject(self.cells[1], theta1)
else:
flag1 = True
self.check_point(flag0, flag1, "RUNTIME_ERROR")
| print("Game ends in a draw.") | conditional_block |
world.py | #!/usr/bin/env python3
#####################################################
# #
# ______ _______..___ ___. ______ #
# / __ \ / || \/ | / __ \ #
# | | | | | (----`| \ / | | | | | #
# | | | | \ \ | |\/| | | | | | #
# | `--' | .----) | | | | | | `--' | #
# \______/ |_______/ |__| |__| \______/ #
# #
# #
#####################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import random
import math
import traceback
import logging
from copy import deepcopy
from time import perf_counter as pf
from consts import Consts
from cell import Cell
class World():
def __init__(self, player0, player1, names = None):
# Variables and setup
self.cells_count = 0
# Init
self.new_game()
self.player0 = player0
self.player1 = player1
self.names = names
# Methods
def new_game(self):
"""Create a new game.
Args:
Returns:
"""
self.cells = [] # Array of cells
self.frame_count = 0
self.database = []
self.timer = [Consts["MAX_TIME"], Consts["MAX_TIME"]]
self.result = None
# Define the players first
self.cells.append(Cell(0, [Consts["WORLD_X"] / 4, Consts["WORLD_Y"] / 2], [0, 0], Consts["DEFAULT_RADIUS"]))
self.cells.append(Cell(1, [Consts["WORLD_X"] / 4 * 3, Consts["WORLD_Y"] / 2], [0, 0], Consts["DEFAULT_RADIUS"]))
# Generate a bunch of random cells
for i in range(Consts["CELLS_COUNT"]):
if i < 4:
rad = 1.5 + (random.random() * 1.5) # Small cells
elif i < 10:
rad = 10 + (random.random() * 4) # Big cells
else:
rad = 2 + (random.random() * 9) # Everything else
x = Consts["WORLD_X"] * random.random()
y = Consts["WORLD_Y"] * random.random()
cell = Cell(i + 2, [x, y], [(random.random() - 0.5) * 2, (random.random() - 0.5) * 2], rad)
safe_dist = Consts["SAFE_DIST"] + rad
while min(map(cell.distance_from, self.cells[:2])) < safe_dist:
cell.pos = [
Consts["WORLD_X"] * random.random(),
Consts["WORLD_Y"] * random.random()
]
self.cells.append(cell)
def check_point(self, flag0, flag1, cause):
"""Checkpoint to determine if the game is over.
Args:
flag0: mark the status of player0.
flag1: mark the status of player1.
cause: reason for the end of the game.
Returns:
whether it's endgame.
"""
if not flag0 and flag1:
self.game_over(0, cause, (flag0, flag1))
elif flag0 and not flag1:
self.game_over(1, cause, (flag0, flag1))
elif flag0 and flag1:
self.game_over(-1, cause, (flag0, flag1))
return bool(flag0 or flag1)
def | (self, winner, cause, detail = None):
"""Game over.
Args:
winner: id of the winner.
cause: reason for the end of the game.
Returns:
"""
self.result = {
"players": self.names,
"winner": winner,
"cause": cause,
"detail": detail,
"data": self.database,
"saved": False
}
print("Winner Winner Chicken Dinner!")
if winner != -1:
print("Winner: Player {}.".format(winner))
else:
print("Game ends in a draw.")
print(cause)
def eject(self, player, theta):
"""Create a new cell after the ejection process.
Args:
player: the player.
theta: angle.
Returns:
"""
if player.dead or theta == None:
return
# Reduce force in proportion to area
fx = math.sin(theta)
fy = math.cos(theta)
new_veloc_x = player.veloc[0] + Consts["DELTA_VELOC"] * fx * (1 - Consts["EJECT_MASS_RATIO"])
new_veloc_y = player.veloc[1] + Consts["DELTA_VELOC"] * fy * (1 - Consts["EJECT_MASS_RATIO"])
# Push player
player.veloc[0] -= Consts["DELTA_VELOC"] * fx * Consts["EJECT_MASS_RATIO"]
player.veloc[1] -= Consts["DELTA_VELOC"] * fy * Consts["EJECT_MASS_RATIO"]
# Shoot off the expended mass in opposite direction
newrad = player.radius * Consts["EJECT_MASS_RATIO"] ** 0.5
# Lose some mass (shall we say, Consts["EJECT_MASS_RATIO"]?)
player.radius *= (1 - Consts["EJECT_MASS_RATIO"]) ** 0.5
# Create new cell
new_pos_x = player.pos[0] + fx * (player.radius + newrad)
new_pos_y = player.pos[1] + fy * (player.radius + newrad)
new_cell = Cell(len(self.cells), [new_pos_x, new_pos_y], [new_veloc_x, new_veloc_y], newrad)
new_cell.stay_in_bounds()
new_cell.limit_speed()
self.cells.append(new_cell)
def absorb(self, collision):
"""Performing the absorption process.
Args:
collision: all the cells that collided.
Returns:
"""
# Calculate total momentum and mass
mass = sum(self.cells[ele].area() for ele in collision)
px = sum(self.cells[ele].area() * self.cells[ele].veloc[0] for ele in collision)
py = sum(self.cells[ele].area() * self.cells[ele].veloc[1] for ele in collision)
# Determine the biggest cell
collision.sort(key = lambda ele: self.cells[ele].radius)
biggest = collision.pop()
self.cells[biggest].radius = (mass / math.pi) ** 0.5
self.cells[biggest].veloc[0] = px / mass
self.cells[biggest].veloc[1] = py / mass
for ele in collision:
self.cells[ele].dead = True
def update(self, frame_delta):
"""Create new frames.
Args:
frame_delta: Time interval between two frames.
Returns:
"""
# Save
self.database.append(deepcopy(self.cells))
# New frame
self.frame_count += 1
if self.frame_count == Consts["MAX_FRAME"]: # Time's up
self.check_point(self.cells[0].radius <= self.cells[1].radius, self.cells[0].radius >= self.cells[1].radius, "MAX_FRAME")
return
for cell in self.cells:
if not cell.dead:
cell.move(frame_delta)
# Detect collisions
collisions = []
for i in range(len(self.cells)):
if self.cells[i].dead:
continue
for j in range(i + 1, len(self.cells)):
if not self.cells[j].dead and self.cells[i].collide(self.cells[j]):
if self.cells[i].collide_group == None == self.cells[j].collide_group:
self.cells[i].collide_group = self.cells[j].collide_group = len(collisions)
collisions.append([i, j])
elif self.cells[i].collide_group != None == self.cells[j].collide_group:
collisions[self.cells[i].collide_group].append(j)
self.cells[j].collide_group = self.cells[i].collide_group
elif self.cells[i].collide_group == None != self.cells[j].collide_group:
collisions[self.cells[j].collide_group].append(i)
self.cells[i].collide_group = self.cells[j].collide_group
elif self.cells[i].collide_group != self.cells[j].collide_group:
collisions[self.cells[i].collide_group] += collisions[self.cells[j].collide_group]
for ele in collisions[self.cells[j].collide_group]:
self.cells[ele].collide_group = self.cells[i].collide_group
collisions[self.cells[j].collide_group] = []
# Run absorbs
for collision in collisions:
if collision != []:
self.absorb(collision)
# If we just killed the player, Game over
if self.check_point(self.cells[0].dead, self.cells[1].dead, "PLAYER_DEAD"):
return
# Eject!
allcells = [cell for cell in self.cells if not cell.dead]
self.cells_count = len(allcells)
theta0 = theta1 = None
flag0 = flag1 = False
if self.timer[0] > 0:
try:
ti = pf()
theta0 = self.player0.strategy(deepcopy(allcells))
tf = pf()
self.timer[0] -= tf - ti
except Exception as e:
logging.error(traceback.format_exc())
flag0 = e
if self.timer[1] > 0:
try:
ti = pf()
theta1 = self.player1.strategy(deepcopy(allcells))
tf = pf()
self.timer[1] -= tf - ti
except Exception as e:
logging.error(traceback.format_exc())
flag1 = e
if isinstance(theta0, (int, float, type(None))):
if self.timer[0] >= 0:
self.eject(self.cells[0], theta0)
else:
flag0 = True
if isinstance(theta1, (int, float, type(None))):
if self.timer[1] >= 0:
self.eject(self.cells[1], theta1)
else:
flag1 = True
self.check_point(flag0, flag1, "RUNTIME_ERROR")
| game_over | identifier_name |
world.py | #!/usr/bin/env python3
#####################################################
# #
# ______ _______..___ ___. ______ #
# / __ \ / || \/ | / __ \ #
# | | | | | (----`| \ / | | | | | #
# | | | | \ \ | |\/| | | | | | #
# | `--' | .----) | | | | | | `--' | #
# \______/ |_______/ |__| |__| \______/ #
# #
# #
#####################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import random
import math
import traceback
import logging
from copy import deepcopy
from time import perf_counter as pf
from consts import Consts
from cell import Cell
class World():
def __init__(self, player0, player1, names = None):
# Variables and setup
self.cells_count = 0
# Init
self.new_game()
self.player0 = player0
self.player1 = player1
self.names = names
# Methods
def new_game(self):
"""Create a new game.
Args:
Returns:
"""
self.cells = [] # Array of cells
self.frame_count = 0
self.database = []
self.timer = [Consts["MAX_TIME"], Consts["MAX_TIME"]]
self.result = None
# Define the players first
self.cells.append(Cell(0, [Consts["WORLD_X"] / 4, Consts["WORLD_Y"] / 2], [0, 0], Consts["DEFAULT_RADIUS"]))
self.cells.append(Cell(1, [Consts["WORLD_X"] / 4 * 3, Consts["WORLD_Y"] / 2], [0, 0], Consts["DEFAULT_RADIUS"]))
# Generate a bunch of random cells
for i in range(Consts["CELLS_COUNT"]):
if i < 4:
rad = 1.5 + (random.random() * 1.5) # Small cells
elif i < 10:
rad = 10 + (random.random() * 4) # Big cells
else:
rad = 2 + (random.random() * 9) # Everything else
x = Consts["WORLD_X"] * random.random()
y = Consts["WORLD_Y"] * random.random()
cell = Cell(i + 2, [x, y], [(random.random() - 0.5) * 2, (random.random() - 0.5) * 2], rad)
safe_dist = Consts["SAFE_DIST"] + rad
while min(map(cell.distance_from, self.cells[:2])) < safe_dist:
cell.pos = [
Consts["WORLD_X"] * random.random(),
Consts["WORLD_Y"] * random.random()
]
self.cells.append(cell)
def check_point(self, flag0, flag1, cause):
"""Checkpoint to determine if the game is over.
Args:
flag0: mark the status of player0.
flag1: mark the status of player1.
cause: reason for the end of the game.
Returns:
whether it's endgame.
"""
if not flag0 and flag1:
self.game_over(0, cause, (flag0, flag1))
elif flag0 and not flag1:
self.game_over(1, cause, (flag0, flag1))
elif flag0 and flag1:
self.game_over(-1, cause, (flag0, flag1))
return bool(flag0 or flag1)
def game_over(self, winner, cause, detail = None):
"""Game over.
Args:
winner: id of the winner.
cause: reason for the end of the game.
Returns:
"""
self.result = {
"players": self.names,
"winner": winner,
"cause": cause,
"detail": detail,
"data": self.database,
"saved": False
}
print("Winner Winner Chicken Dinner!")
if winner != -1:
print("Winner: Player {}.".format(winner))
else:
print("Game ends in a draw.")
print(cause)
def eject(self, player, theta):
"""Create a new cell after the ejection process.
Args:
player: the player.
theta: angle.
Returns:
"""
if player.dead or theta == None:
return
# Reduce force in proportion to area
fx = math.sin(theta)
fy = math.cos(theta)
new_veloc_x = player.veloc[0] + Consts["DELTA_VELOC"] * fx * (1 - Consts["EJECT_MASS_RATIO"])
new_veloc_y = player.veloc[1] + Consts["DELTA_VELOC"] * fy * (1 - Consts["EJECT_MASS_RATIO"])
# Push player
player.veloc[0] -= Consts["DELTA_VELOC"] * fx * Consts["EJECT_MASS_RATIO"]
player.veloc[1] -= Consts["DELTA_VELOC"] * fy * Consts["EJECT_MASS_RATIO"]
# Shoot off the expended mass in opposite direction
newrad = player.radius * Consts["EJECT_MASS_RATIO"] ** 0.5
# Lose some mass (shall we say, Consts["EJECT_MASS_RATIO"]?)
player.radius *= (1 - Consts["EJECT_MASS_RATIO"]) ** 0.5
# Create new cell
new_pos_x = player.pos[0] + fx * (player.radius + newrad)
new_pos_y = player.pos[1] + fy * (player.radius + newrad)
new_cell = Cell(len(self.cells), [new_pos_x, new_pos_y], [new_veloc_x, new_veloc_y], newrad)
new_cell.stay_in_bounds()
new_cell.limit_speed()
self.cells.append(new_cell)
def absorb(self, collision):
"""Performing the absorption process.
Args:
collision: all the cells that collided.
Returns:
"""
# Calculate total momentum and mass
mass = sum(self.cells[ele].area() for ele in collision)
px = sum(self.cells[ele].area() * self.cells[ele].veloc[0] for ele in collision)
py = sum(self.cells[ele].area() * self.cells[ele].veloc[1] for ele in collision)
# Determine the biggest cell
collision.sort(key = lambda ele: self.cells[ele].radius)
biggest = collision.pop()
self.cells[biggest].radius = (mass / math.pi) ** 0.5
self.cells[biggest].veloc[0] = px / mass
self.cells[biggest].veloc[1] = py / mass
for ele in collision:
self.cells[ele].dead = True
def update(self, frame_delta):
"""Create new frames.
Args:
frame_delta: Time interval between two frames.
Returns:
"""
# Save
self.database.append(deepcopy(self.cells))
# New frame
self.frame_count += 1
if self.frame_count == Consts["MAX_FRAME"]: # Time's up
self.check_point(self.cells[0].radius <= self.cells[1].radius, self.cells[0].radius >= self.cells[1].radius, "MAX_FRAME")
return
for cell in self.cells:
if not cell.dead:
cell.move(frame_delta)
# Detect collisions
collisions = []
for i in range(len(self.cells)):
if self.cells[i].dead:
continue
for j in range(i + 1, len(self.cells)):
if not self.cells[j].dead and self.cells[i].collide(self.cells[j]):
if self.cells[i].collide_group == None == self.cells[j].collide_group:
self.cells[i].collide_group = self.cells[j].collide_group = len(collisions)
collisions.append([i, j])
elif self.cells[i].collide_group != None == self.cells[j].collide_group:
collisions[self.cells[i].collide_group].append(j)
self.cells[j].collide_group = self.cells[i].collide_group
elif self.cells[i].collide_group == None != self.cells[j].collide_group:
collisions[self.cells[j].collide_group].append(i)
self.cells[i].collide_group = self.cells[j].collide_group
elif self.cells[i].collide_group != self.cells[j].collide_group:
collisions[self.cells[i].collide_group] += collisions[self.cells[j].collide_group]
for ele in collisions[self.cells[j].collide_group]:
self.cells[ele].collide_group = self.cells[i].collide_group
collisions[self.cells[j].collide_group] = []
# Run absorbs
for collision in collisions:
if collision != []:
self.absorb(collision)
# If we just killed the player, Game over
if self.check_point(self.cells[0].dead, self.cells[1].dead, "PLAYER_DEAD"):
return
# Eject!
allcells = [cell for cell in self.cells if not cell.dead]
self.cells_count = len(allcells)
theta0 = theta1 = None
flag0 = flag1 = False
if self.timer[0] > 0:
try:
ti = pf()
theta0 = self.player0.strategy(deepcopy(allcells))
tf = pf()
self.timer[0] -= tf - ti
except Exception as e:
logging.error(traceback.format_exc())
flag0 = e
if self.timer[1] > 0:
try:
ti = pf()
theta1 = self.player1.strategy(deepcopy(allcells)) | flag1 = e
if isinstance(theta0, (int, float, type(None))):
if self.timer[0] >= 0:
self.eject(self.cells[0], theta0)
else:
flag0 = True
if isinstance(theta1, (int, float, type(None))):
if self.timer[1] >= 0:
self.eject(self.cells[1], theta1)
else:
flag1 = True
self.check_point(flag0, flag1, "RUNTIME_ERROR") | tf = pf()
self.timer[1] -= tf - ti
except Exception as e:
logging.error(traceback.format_exc()) | random_line_split |
stress.go | package main
import (
"bytes"
"distributed-system/util"
"encoding/csv"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"os"
"runtime"
"sort"
"strconv"
"time"
)
//----------------------------------
// Stress Settings
//----------------------------------
const (
MAX_COCURRENCY = 1024 * 1024
TIMEOUT_SEC = 10
)
//----------------------------------
// Stress Abstracts
//----------------------------------
type Worker struct {
r *Reporter
ctx struct {
addrs []string
}
}
type Context struct {
c *http.Client
w *Worker
user User
orderId string
cartId string
}
type TimeInterval struct {
start int64
end int64
interval int64
}
type Reporter struct {
payMade chan bool
payIntervals chan TimeInterval
requestSent chan bool
userCurr chan User
numOrders int
cocurrency int
nOrderOk int
nOrderErr int
nOrderTotal int
nOrderPerSec []int
payCosts []time.Duration
nRequestOk int
nRequestErr int
nRequestTotal int
nRequestPerSec []int
timeStampPerSec []int
startAt time.Time
elapsed time.Duration
}
//----------------------------------
// Entity Abstracts
//----------------------------------
type User struct {
Id int
Username string
Password string
AccessToken string
}
type Item struct {
Id int `json:"id"`
Price int `json:"price"`
Stock int `json:"stock"`
}
//----------------------------------
// Request JSON Bindings
//----------------------------------
type RequestLogin struct {
Username string `json:"username"`
Password string `json:"password"`
}
type RequestCartAddItem struct {
ItemId int `json:"item_id"`
Count int `json:"count"`
}
type RequestMakeOrder struct {
CartId string `json:"cart_id"`
}
type RequestPayOrder struct {
OrderId string `json:"order_id"`
}
//----------------------------------
// Response JSON Bindings
//----------------------------------
type ResponseLogin struct {
UserId int `json:"user_id"`
Username string `json:"username"`
AccessToken string `json:"access_token"`
}
type ResponseGetItems []Item
type ResponseCreateCart struct {
CartId string `json:"cart_id"`
}
type ResponseMakeOrder struct {
OrderId string `json:"order_id"`
}
type ResponsePayOrder struct {
OrderId string `json:"order_id"`
}
type ResponseQueryOrder struct {
Id string `json:"id"`
Item []struct {
ItemId int `json:"item_id"`
Count int `json:"count"`
} `json:"items"`
Total int `json:"total"`
}
//----------------------------------
// Global Variables
//----------------------------------
var (
users = make([]User, 0) // users
items = make(map[int]Item) // map[item.Id]item
isDebugMode = false
isReportToRedis = false
)
//----------------------------------
// Data Initialization
//----------------------------------
// Load all data.
func LoadData(userCsv, itemCsv string) {
fmt.Printf("Load users from user file..")
LoadUsers(userCsv)
fmt.Printf("OK\n")
fmt.Printf("Load items from item file..")
LoadItems(itemCsv)
fmt.Printf("OK\n")
}
// Load users from user csv file
func LoadUsers(userCsv string) {
// read users
if file, err := os.Open(userCsv); err == nil {
reader := csv.NewReader(file)
defer file.Close()
for strs, err := reader.Read(); err == nil; strs, err = reader.Read() {
userId, _ := strconv.Atoi(strs[0])
if userId != 0 {
userName := strs[1]
password := strs[2]
users = append(users, User{Id: userId, Username: userName, Password: password})
}
}
} else {
panic(err.Error())
}
// root user
// ss.rootToken = userId2Token(ss.UserMap["root"].Id)
}
// Load items from item csv file
func LoadItems(itemCsv string) {
// read items
if file, err := os.Open(itemCsv); err == nil {
reader := csv.NewReader(file)
defer file.Close()
for strs, err := reader.Read(); err == nil; strs, err = reader.Read() {
itemId, _ := strconv.Atoi(strs[0])
price, _ := strconv.Atoi(strs[1])
stock, _ := strconv.Atoi(strs[2])
items[itemId] = Item{Id: itemId, Price: price, Stock: stock}
}
} else {
panic(err.Error())
}
}
//----------------------------------
// Request Utils
//----------------------------------
// Build url with path and parameters.
func (w *Worker) Url(path string, params url.Values) string {
// random choice one host for load balance
i := rand.Intn(len(w.ctx.addrs))
addr := w.ctx.addrs[i]
s := fmt.Sprintf("http://%s%s", addr, path)
if params == nil {
return s
}
p := params.Encode()
return fmt.Sprintf("%s?%s", s, p)
}
// Get json from uri.
func (w *Worker) Get(c *http.Client, url string, bind interface{}) (int, error) {
r, err := c.Get(url)
if err != nil {
if r != nil {
ioutil.ReadAll(r.Body)
r.Body.Close()
}
return 0, err
}
defer r.Body.Close()
err = json.NewDecoder(r.Body).Decode(bind)
if bind == nil {
return r.StatusCode, nil
}
return r.StatusCode, err
}
// Post json to uri and get json response.
func (w *Worker) Post(c *http.Client, url string, data interface{}, bind interface{}) (int, error) {
var body io.Reader
if data != nil {
bs, err := json.Marshal(data)
if err != nil {
return 0, err
}
body = bytes.NewReader(bs)
}
r, err := c.Post(url, "application/json", body)
if err != nil {
if r != nil {
ioutil.ReadAll(r.Body)
r.Body.Close()
}
return 0, err
}
defer r.Body.Close()
err = json.NewDecoder(r.Body).Decode(bind)
if bind == nil {
return r.StatusCode, nil
}
return r.StatusCode, err
}
// Patch url with json.
func (w *Worker) Patch(c *http.Client, url string, data interface{}, bind interface{}) (int, error) {
bs, err := json.Marshal(data)
if err != nil {
return 0, err
}
req, err := http.NewRequest("PATCH", url, bytes.NewReader(bs))
if err != nil {
return 0, err
}
req.Header.Set("Content-Type", "application/json")
res, err := c.Do(req)
if err != nil {
if res != nil {
ioutil.ReadAll(res.Body)
res.Body.Close()
}
return 0, err
}
defer res.Body.Close()
err = json.NewDecoder(res.Body).Decode(bind)
if res.StatusCode == http.StatusNoContent || bind == nil {
return res.StatusCode, nil
}
return res.StatusCode, err
}
//----------------------------------
// Order Handle Utils
//----------------------------------
// Random choice a item. Dont TUCAO this function,
// it works and best O(1).
func GetRandItem() Item {
for {
idx := rand.Intn(len(items))
item, ok := items[idx+1]
if ok {
return item
}
}
}
//----------------------------------
// Work Job Context
//----------------------------------
func (ctx *Context) UrlWithToken(path string) string {
user := ctx.user
params := url.Values{}
params.Add("access_token", user.AccessToken)
return ctx.w.Url(path, params)
}
func (ctx *Context) Login() bool {
user := ctx.user
data := &RequestLogin{user.Username, user.Password}
body := &ResponseLogin{}
url := ctx.w.Url("/login", nil)
statusCode, err := ctx.w.Post(ctx.c, url, data, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request login error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.user.AccessToken = body.AccessToken
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) GetItems() bool {
// body := &ResponseGetItems{}
url := ctx.UrlWithToken("/items")
statusCode, err := ctx.w.Get(ctx.c, url, nil)
if err != nil {
if isDebugMode {
fmt.Printf("Request get items error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) CreateCart() bool {
body := &ResponseCreateCart{}
url := ctx.UrlWithToken("/carts")
statusCode, err := ctx.w.Post(ctx.c, url, nil, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request create carts error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.cartId = body.CartId
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) CartAddItem() bool {
path := fmt.Sprintf("/carts/%s", ctx.cartId)
url := ctx.UrlWithToken(path)
item := GetRandItem()
data := &RequestCartAddItem{item.Id, 1}
statusCode, err := ctx.w.Patch(ctx.c, url, data, nil)
if err != nil {
if isDebugMode {
fmt.Printf("Request error cart add item error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusNoContent {
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) MakeOrder() bool {
if !ctx.Login() || !ctx.GetItems() || !ctx.CreateCart() {
return false
}
// count := rand.Intn(3) + 1
count := 2
for i := 0; i < count; i++ {
if !ctx.CartAddItem() {
return false
}
}
data := &RequestMakeOrder{ctx.cartId}
body := &ResponseMakeOrder{}
url := ctx.UrlWithToken("/orders")
statusCode, err := ctx.w.Post(ctx.c, url, data, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request make order error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.orderId = body.OrderId
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) PayOrder() bool {
if !ctx.MakeOrder() {
return false
}
url := ctx.UrlWithToken("/pay")
data := &RequestPayOrder{ctx.orderId}
body := &ResponsePayOrder{}
statusCode, err := ctx.w.Post(ctx.c, url, data, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request pay order error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
//----------------------------------
// Worker
//----------------------------------
func NewWorker(addrs []string, r *Reporter) *Worker {
w := &Worker{}
w.r = r
w.ctx.addrs = addrs
return w
}
func (w *Worker) Work() {
ctx := &Context{}
ctx.w = w
t := &http.Transport{}
ctx.c = &http.Client{
Timeout: TIMEOUT_SEC * time.Second,
Transport: t,
}
for {
// t.CloseIdleConnections()
startAt := time.Now()
ctx.user = <-w.r.userCurr
w.r.payMade <- ctx.PayOrder()
endAt := time.Now()
w.r.payIntervals <- TimeInterval{start: startAt.UnixNano(), end: endAt.UnixNano(), interval: endAt.Sub(startAt).Nanoseconds()}
}
}
//----------------------------------
// Statstics Reporter
//----------------------------------
// Create reporter
func NewReporter(numOrders int, cocurrency int) *Reporter {
return &Reporter{
make(chan bool, cocurrency),
make(chan TimeInterval, cocurrency),
make(chan bool, cocurrency),
make(chan User, cocurrency),
numOrders,
cocurrency,
0,
0,
0,
make([]int, 0),
make([]time.Duration, 0),
0,
0,
0,
make([]int, 0),
make([]int, 0),
time.Now(),
0,
}
}
// Start reporter
func (r *Reporter) Start() {
r.startAt = time.Now()
go func() {
t := time.NewTicker(1 * time.Second)
for {
nOrderOk := r.nOrderOk
nRequestOk := r.nRequestOk
<-t.C
nOrderPerSec := r.nOrderOk - nOrderOk
r.nOrderPerSec = append(r.nOrderPerSec, nOrderPerSec)
nRequestPerSec := r.nRequestOk - nRequestOk
r.nRequestPerSec = append(r.nRequestPerSec, nRequestPerSec)
r.timeStampPerSec = append(r.timeStampPerSec, time.Now().Second())
fmt.Printf("Finished orders: %d\n", nOrderPerSec)
}
}()
go func() {
for {
payMade := <-r.payMade
payInterval := <-r.payIntervals
if payMade {
r.nOrderOk = r.nOrderOk + 1
r.payCosts = append(r.payCosts, time.Duration(payInterval.interval))
} else {
r.nOrderErr = r.nOrderErr + 1
}
r.nOrderTotal = r.nOrderTotal + 1
if r.nOrderTotal >= r.numOrders {
r.Stop()
}
}
}()
go func() {
for {
requestSent := <-r.requestSent
if requestSent {
r.nRequestOk = r.nRequestOk + 1
} else {
r.nRequestErr = r.nRequestErr + 1
}
r.nRequestTotal = r.nRequestTotal + 1
}
}()
for i := 0; i < len(users); i++ {
r.userCurr <- users[i]
}
timeout := time.After(TIMEOUT_SEC * time.Second)
for r.nOrderTotal < r.numOrders {
select {
case <-timeout:
r.Stop()
}
}
r.Stop()
}
// Stop the reporter and exit full process.
func (r *Reporter) Stop() {
r.elapsed = time.Since(r.startAt)
r.Report()
os.Exit(0)
}
// Report stats to console and redis.
func (r *Reporter) Report() {
//---------------------------------------------------
// Report to console
//---------------------------------------------------
sort.Ints(r.nOrderPerSec)
sort.Ints(r.nRequestPerSec)
nOrderPerSecMax := MeanOfMaxFive(r.nOrderPerSec)
nOrderPerSecMin := MeanOfMinFive(r.nOrderPerSec)
nOrderPerSecMean := Mean(r.nOrderPerSec)
nRequestPerSecMax := MeanOfMaxFive(r.nRequestPerSec)
nRequestPerSecMin := MeanOfMinFive(r.nRequestPerSec)
nRequestPerSecMean := Mean(r.nRequestPerSec)
sort.Ints(r.nRequestPerSec)
payCostNanoseconds := []float64{}
for i := 0; i < len(r.payCosts); i++ {
payCostNanoseconds = append(payCostNanoseconds, float64(r.payCosts[i].Nanoseconds()))
}
sort.Float64s(payCostNanoseconds)
msTakenTotal := int(r.elapsed.Nanoseconds() / 1000000.0)
msPerOrder := MeanFloat64(payCostNanoseconds) / 1000000.0
msPerRequest := SumFloat64(payCostNanoseconds) / 1000000.0 / float64(r.nRequestOk)
//---------------------------------------------------
// Report to console
//---------------------------------------------------
fmt.Print("\nStats\n")
fmt.Printf("Concurrency level: %d\n", r.cocurrency) | fmt.Printf("Failed requests: %d\n", r.nRequestErr)
fmt.Printf("Complete orders: %d\n", r.nOrderOk)
fmt.Printf("Failed orders: %d\n", r.nOrderErr)
fmt.Printf("Time per request: %.2fms\n", msPerRequest)
fmt.Printf("Time per order: %.2fms\n", msPerOrder)
fmt.Printf("Request per second: %d (max) %d (min) %d(mean)\n", nRequestPerSecMax, nRequestPerSecMin, nRequestPerSecMean)
fmt.Printf("Order per second: %d (max) %d (min) %d (mean)\n\n", nOrderPerSecMax, nOrderPerSecMin, nOrderPerSecMean)
fmt.Printf("Percentage of orders made within a certain time (ms)\n")
if len(payCostNanoseconds) == 0 {
return
}
percentages := []float64{10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 95.5, 96, 96.5, 97, 97.5, 98, 98.5, 99, 99.9, 99.99, 100}
for _, percentage := range percentages {
idx := int(percentage * float64(len(payCostNanoseconds)) / float64(100.0))
if idx > 0 {
idx = idx - 1
} else {
idx = 0
}
payCostNanosecond := payCostNanoseconds[idx]
fmt.Printf("%.2f%%\t%d ms\n", percentage, int(payCostNanosecond/1000000.0))
}
}
//----------------------------------
// Math util functions
//----------------------------------
func MeanOfMaxFive(sortedArr []int) int {
if len(sortedArr) == 0 {
return 0
}
if len(sortedArr) == 1 {
return sortedArr[0]
}
if len(sortedArr) == 2 {
return sortedArr[1]
}
sortedArr = sortedArr[1 : len(sortedArr)-1]
if len(sortedArr) > 5 {
return Mean(sortedArr[len(sortedArr)-5:])
}
return sortedArr[len(sortedArr)-1]
}
func MeanOfMinFive(sortedArr []int) int {
if len(sortedArr) == 0 {
return 0
}
if len(sortedArr) == 1 {
return sortedArr[0]
}
if len(sortedArr) == 2 {
return sortedArr[0]
}
sortedArr = sortedArr[1 : len(sortedArr)-1]
if len(sortedArr) > 5 {
return Mean(sortedArr[0:5])
}
return sortedArr[0]
}
func Mean(arr []int) int {
if len(arr) == 0 {
return 0
}
sum := 0
for i := 0; i < len(arr); i++ {
sum = sum + arr[i]
}
return int(float64(sum) / float64(len(arr)))
}
func MeanFloat64(arr []float64) float64 {
return SumFloat64(arr) / float64(len(arr))
}
func SumFloat64(arr []float64) float64 {
if len(arr) == 0 {
return 0
}
sum := 0.0
for i := 0; i < len(arr); i++ {
sum = sum + arr[i]
}
return sum
}
//----------------------------------
// Main
//----------------------------------
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
//----------------------------------
// Arguments parsing and validation
//----------------------------------
config := flag.String("f", "cfg.json", "config file")
cocurrency := flag.Int("c", 1000, "request cocurrency")
numOrders := flag.Int("n", 1000, "number of orders to perform")
debug := flag.Bool("d", false, "debug mode")
// reportRedis := flag.Bool("r", true, "report to local redis")
flag.Parse()
cfg := util.ParseCfg(*config)
// if flag.NFlag() == 0 {
// flag.PrintDefaults()
// os.Exit(1)
// }
if *debug {
isDebugMode = true
}
// if *reportRedis {
// isReportToRedis = true
// }
//----------------------------------
// Validate cocurrency
//----------------------------------
if *cocurrency > MAX_COCURRENCY {
fmt.Printf("Exceed max cocurrency (is %d)", MAX_COCURRENCY)
os.Exit(1)
}
//----------------------------------
// Load users/items and work
//----------------------------------
LoadData(cfg.UserCSV, cfg.ItemCSV)
reporter := NewReporter(*numOrders, *cocurrency)
for i := 0; i < *cocurrency; i++ {
go func() {
w := NewWorker(cfg.APPAddrs, reporter)
w.Work()
}()
}
// start reporter
reporter.Start()
} | fmt.Printf("Time taken for tests: %dms\n", msTakenTotal)
fmt.Printf("Complete requests: %d\n", r.nRequestOk) | random_line_split |
stress.go | package main
import (
"bytes"
"distributed-system/util"
"encoding/csv"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"os"
"runtime"
"sort"
"strconv"
"time"
)
//----------------------------------
// Stress Settings
//----------------------------------
const (
MAX_COCURRENCY = 1024 * 1024
TIMEOUT_SEC = 10
)
//----------------------------------
// Stress Abstracts
//----------------------------------
type Worker struct {
r *Reporter
ctx struct {
addrs []string
}
}
type Context struct {
c *http.Client
w *Worker
user User
orderId string
cartId string
}
type TimeInterval struct {
start int64
end int64
interval int64
}
type Reporter struct {
payMade chan bool
payIntervals chan TimeInterval
requestSent chan bool
userCurr chan User
numOrders int
cocurrency int
nOrderOk int
nOrderErr int
nOrderTotal int
nOrderPerSec []int
payCosts []time.Duration
nRequestOk int
nRequestErr int
nRequestTotal int
nRequestPerSec []int
timeStampPerSec []int
startAt time.Time
elapsed time.Duration
}
//----------------------------------
// Entity Abstracts
//----------------------------------
type User struct {
Id int
Username string
Password string
AccessToken string
}
type Item struct {
Id int `json:"id"`
Price int `json:"price"`
Stock int `json:"stock"`
}
//----------------------------------
// Request JSON Bindings
//----------------------------------
type RequestLogin struct {
Username string `json:"username"`
Password string `json:"password"`
}
type RequestCartAddItem struct {
ItemId int `json:"item_id"`
Count int `json:"count"`
}
type RequestMakeOrder struct {
CartId string `json:"cart_id"`
}
type RequestPayOrder struct {
OrderId string `json:"order_id"`
}
//----------------------------------
// Response JSON Bindings
//----------------------------------
type ResponseLogin struct {
UserId int `json:"user_id"`
Username string `json:"username"`
AccessToken string `json:"access_token"`
}
type ResponseGetItems []Item
type ResponseCreateCart struct {
CartId string `json:"cart_id"`
}
type ResponseMakeOrder struct {
OrderId string `json:"order_id"`
}
type ResponsePayOrder struct {
OrderId string `json:"order_id"`
}
type ResponseQueryOrder struct {
Id string `json:"id"`
Item []struct {
ItemId int `json:"item_id"`
Count int `json:"count"`
} `json:"items"`
Total int `json:"total"`
}
//----------------------------------
// Global Variables
//----------------------------------
var (
users = make([]User, 0) // users
items = make(map[int]Item) // map[item.Id]item
isDebugMode = false
isReportToRedis = false
)
//----------------------------------
// Data Initialization
//----------------------------------
// Load all data.
func LoadData(userCsv, itemCsv string) {
fmt.Printf("Load users from user file..")
LoadUsers(userCsv)
fmt.Printf("OK\n")
fmt.Printf("Load items from item file..")
LoadItems(itemCsv)
fmt.Printf("OK\n")
}
// Load users from user csv file
func LoadUsers(userCsv string) {
// read users
if file, err := os.Open(userCsv); err == nil {
reader := csv.NewReader(file)
defer file.Close()
for strs, err := reader.Read(); err == nil; strs, err = reader.Read() {
userId, _ := strconv.Atoi(strs[0])
if userId != 0 {
userName := strs[1]
password := strs[2]
users = append(users, User{Id: userId, Username: userName, Password: password})
}
}
} else {
panic(err.Error())
}
// root user
// ss.rootToken = userId2Token(ss.UserMap["root"].Id)
}
// Load items from item csv file
func LoadItems(itemCsv string) {
// read items
if file, err := os.Open(itemCsv); err == nil {
reader := csv.NewReader(file)
defer file.Close()
for strs, err := reader.Read(); err == nil; strs, err = reader.Read() {
itemId, _ := strconv.Atoi(strs[0])
price, _ := strconv.Atoi(strs[1])
stock, _ := strconv.Atoi(strs[2])
items[itemId] = Item{Id: itemId, Price: price, Stock: stock}
}
} else {
panic(err.Error())
}
}
//----------------------------------
// Request Utils
//----------------------------------
// Build url with path and parameters.
func (w *Worker) Url(path string, params url.Values) string {
// random choice one host for load balance
i := rand.Intn(len(w.ctx.addrs))
addr := w.ctx.addrs[i]
s := fmt.Sprintf("http://%s%s", addr, path)
if params == nil {
return s
}
p := params.Encode()
return fmt.Sprintf("%s?%s", s, p)
}
// Get json from uri.
func (w *Worker) Get(c *http.Client, url string, bind interface{}) (int, error) {
r, err := c.Get(url)
if err != nil {
if r != nil {
ioutil.ReadAll(r.Body)
r.Body.Close()
}
return 0, err
}
defer r.Body.Close()
err = json.NewDecoder(r.Body).Decode(bind)
if bind == nil {
return r.StatusCode, nil
}
return r.StatusCode, err
}
// Post json to uri and get json response.
func (w *Worker) Post(c *http.Client, url string, data interface{}, bind interface{}) (int, error) {
var body io.Reader
if data != nil {
bs, err := json.Marshal(data)
if err != nil {
return 0, err
}
body = bytes.NewReader(bs)
}
r, err := c.Post(url, "application/json", body)
if err != nil {
if r != nil {
ioutil.ReadAll(r.Body)
r.Body.Close()
}
return 0, err
}
defer r.Body.Close()
err = json.NewDecoder(r.Body).Decode(bind)
if bind == nil {
return r.StatusCode, nil
}
return r.StatusCode, err
}
// Patch url with json.
func (w *Worker) Patch(c *http.Client, url string, data interface{}, bind interface{}) (int, error) {
bs, err := json.Marshal(data)
if err != nil {
return 0, err
}
req, err := http.NewRequest("PATCH", url, bytes.NewReader(bs))
if err != nil {
return 0, err
}
req.Header.Set("Content-Type", "application/json")
res, err := c.Do(req)
if err != nil {
if res != nil {
ioutil.ReadAll(res.Body)
res.Body.Close()
}
return 0, err
}
defer res.Body.Close()
err = json.NewDecoder(res.Body).Decode(bind)
if res.StatusCode == http.StatusNoContent || bind == nil {
return res.StatusCode, nil
}
return res.StatusCode, err
}
//----------------------------------
// Order Handle Utils
//----------------------------------
// Random choice a item. Dont TUCAO this function,
// it works and best O(1).
func GetRandItem() Item {
for {
idx := rand.Intn(len(items))
item, ok := items[idx+1]
if ok {
return item
}
}
}
//----------------------------------
// Work Job Context
//----------------------------------
func (ctx *Context) UrlWithToken(path string) string {
user := ctx.user
params := url.Values{}
params.Add("access_token", user.AccessToken)
return ctx.w.Url(path, params)
}
func (ctx *Context) Login() bool {
user := ctx.user
data := &RequestLogin{user.Username, user.Password}
body := &ResponseLogin{}
url := ctx.w.Url("/login", nil)
statusCode, err := ctx.w.Post(ctx.c, url, data, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request login error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.user.AccessToken = body.AccessToken
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) GetItems() bool {
// body := &ResponseGetItems{}
url := ctx.UrlWithToken("/items")
statusCode, err := ctx.w.Get(ctx.c, url, nil)
if err != nil {
if isDebugMode {
fmt.Printf("Request get items error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) CreateCart() bool {
body := &ResponseCreateCart{}
url := ctx.UrlWithToken("/carts")
statusCode, err := ctx.w.Post(ctx.c, url, nil, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request create carts error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.cartId = body.CartId
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) CartAddItem() bool {
path := fmt.Sprintf("/carts/%s", ctx.cartId)
url := ctx.UrlWithToken(path)
item := GetRandItem()
data := &RequestCartAddItem{item.Id, 1}
statusCode, err := ctx.w.Patch(ctx.c, url, data, nil)
if err != nil {
if isDebugMode {
fmt.Printf("Request error cart add item error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusNoContent {
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) | () bool {
if !ctx.Login() || !ctx.GetItems() || !ctx.CreateCart() {
return false
}
// count := rand.Intn(3) + 1
count := 2
for i := 0; i < count; i++ {
if !ctx.CartAddItem() {
return false
}
}
data := &RequestMakeOrder{ctx.cartId}
body := &ResponseMakeOrder{}
url := ctx.UrlWithToken("/orders")
statusCode, err := ctx.w.Post(ctx.c, url, data, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request make order error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.orderId = body.OrderId
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) PayOrder() bool {
if !ctx.MakeOrder() {
return false
}
url := ctx.UrlWithToken("/pay")
data := &RequestPayOrder{ctx.orderId}
body := &ResponsePayOrder{}
statusCode, err := ctx.w.Post(ctx.c, url, data, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request pay order error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
//----------------------------------
// Worker
//----------------------------------
func NewWorker(addrs []string, r *Reporter) *Worker {
w := &Worker{}
w.r = r
w.ctx.addrs = addrs
return w
}
func (w *Worker) Work() {
ctx := &Context{}
ctx.w = w
t := &http.Transport{}
ctx.c = &http.Client{
Timeout: TIMEOUT_SEC * time.Second,
Transport: t,
}
for {
// t.CloseIdleConnections()
startAt := time.Now()
ctx.user = <-w.r.userCurr
w.r.payMade <- ctx.PayOrder()
endAt := time.Now()
w.r.payIntervals <- TimeInterval{start: startAt.UnixNano(), end: endAt.UnixNano(), interval: endAt.Sub(startAt).Nanoseconds()}
}
}
//----------------------------------
// Statstics Reporter
//----------------------------------
// Create reporter
func NewReporter(numOrders int, cocurrency int) *Reporter {
return &Reporter{
make(chan bool, cocurrency),
make(chan TimeInterval, cocurrency),
make(chan bool, cocurrency),
make(chan User, cocurrency),
numOrders,
cocurrency,
0,
0,
0,
make([]int, 0),
make([]time.Duration, 0),
0,
0,
0,
make([]int, 0),
make([]int, 0),
time.Now(),
0,
}
}
// Start reporter
func (r *Reporter) Start() {
r.startAt = time.Now()
go func() {
t := time.NewTicker(1 * time.Second)
for {
nOrderOk := r.nOrderOk
nRequestOk := r.nRequestOk
<-t.C
nOrderPerSec := r.nOrderOk - nOrderOk
r.nOrderPerSec = append(r.nOrderPerSec, nOrderPerSec)
nRequestPerSec := r.nRequestOk - nRequestOk
r.nRequestPerSec = append(r.nRequestPerSec, nRequestPerSec)
r.timeStampPerSec = append(r.timeStampPerSec, time.Now().Second())
fmt.Printf("Finished orders: %d\n", nOrderPerSec)
}
}()
go func() {
for {
payMade := <-r.payMade
payInterval := <-r.payIntervals
if payMade {
r.nOrderOk = r.nOrderOk + 1
r.payCosts = append(r.payCosts, time.Duration(payInterval.interval))
} else {
r.nOrderErr = r.nOrderErr + 1
}
r.nOrderTotal = r.nOrderTotal + 1
if r.nOrderTotal >= r.numOrders {
r.Stop()
}
}
}()
go func() {
for {
requestSent := <-r.requestSent
if requestSent {
r.nRequestOk = r.nRequestOk + 1
} else {
r.nRequestErr = r.nRequestErr + 1
}
r.nRequestTotal = r.nRequestTotal + 1
}
}()
for i := 0; i < len(users); i++ {
r.userCurr <- users[i]
}
timeout := time.After(TIMEOUT_SEC * time.Second)
for r.nOrderTotal < r.numOrders {
select {
case <-timeout:
r.Stop()
}
}
r.Stop()
}
// Stop the reporter and exit full process.
func (r *Reporter) Stop() {
r.elapsed = time.Since(r.startAt)
r.Report()
os.Exit(0)
}
// Report stats to console and redis.
func (r *Reporter) Report() {
//---------------------------------------------------
// Report to console
//---------------------------------------------------
sort.Ints(r.nOrderPerSec)
sort.Ints(r.nRequestPerSec)
nOrderPerSecMax := MeanOfMaxFive(r.nOrderPerSec)
nOrderPerSecMin := MeanOfMinFive(r.nOrderPerSec)
nOrderPerSecMean := Mean(r.nOrderPerSec)
nRequestPerSecMax := MeanOfMaxFive(r.nRequestPerSec)
nRequestPerSecMin := MeanOfMinFive(r.nRequestPerSec)
nRequestPerSecMean := Mean(r.nRequestPerSec)
sort.Ints(r.nRequestPerSec)
payCostNanoseconds := []float64{}
for i := 0; i < len(r.payCosts); i++ {
payCostNanoseconds = append(payCostNanoseconds, float64(r.payCosts[i].Nanoseconds()))
}
sort.Float64s(payCostNanoseconds)
msTakenTotal := int(r.elapsed.Nanoseconds() / 1000000.0)
msPerOrder := MeanFloat64(payCostNanoseconds) / 1000000.0
msPerRequest := SumFloat64(payCostNanoseconds) / 1000000.0 / float64(r.nRequestOk)
//---------------------------------------------------
// Report to console
//---------------------------------------------------
fmt.Print("\nStats\n")
fmt.Printf("Concurrency level: %d\n", r.cocurrency)
fmt.Printf("Time taken for tests: %dms\n", msTakenTotal)
fmt.Printf("Complete requests: %d\n", r.nRequestOk)
fmt.Printf("Failed requests: %d\n", r.nRequestErr)
fmt.Printf("Complete orders: %d\n", r.nOrderOk)
fmt.Printf("Failed orders: %d\n", r.nOrderErr)
fmt.Printf("Time per request: %.2fms\n", msPerRequest)
fmt.Printf("Time per order: %.2fms\n", msPerOrder)
fmt.Printf("Request per second: %d (max) %d (min) %d(mean)\n", nRequestPerSecMax, nRequestPerSecMin, nRequestPerSecMean)
fmt.Printf("Order per second: %d (max) %d (min) %d (mean)\n\n", nOrderPerSecMax, nOrderPerSecMin, nOrderPerSecMean)
fmt.Printf("Percentage of orders made within a certain time (ms)\n")
if len(payCostNanoseconds) == 0 {
return
}
percentages := []float64{10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 95.5, 96, 96.5, 97, 97.5, 98, 98.5, 99, 99.9, 99.99, 100}
for _, percentage := range percentages {
idx := int(percentage * float64(len(payCostNanoseconds)) / float64(100.0))
if idx > 0 {
idx = idx - 1
} else {
idx = 0
}
payCostNanosecond := payCostNanoseconds[idx]
fmt.Printf("%.2f%%\t%d ms\n", percentage, int(payCostNanosecond/1000000.0))
}
}
//----------------------------------
// Math util functions
//----------------------------------
func MeanOfMaxFive(sortedArr []int) int {
if len(sortedArr) == 0 {
return 0
}
if len(sortedArr) == 1 {
return sortedArr[0]
}
if len(sortedArr) == 2 {
return sortedArr[1]
}
sortedArr = sortedArr[1 : len(sortedArr)-1]
if len(sortedArr) > 5 {
return Mean(sortedArr[len(sortedArr)-5:])
}
return sortedArr[len(sortedArr)-1]
}
func MeanOfMinFive(sortedArr []int) int {
if len(sortedArr) == 0 {
return 0
}
if len(sortedArr) == 1 {
return sortedArr[0]
}
if len(sortedArr) == 2 {
return sortedArr[0]
}
sortedArr = sortedArr[1 : len(sortedArr)-1]
if len(sortedArr) > 5 {
return Mean(sortedArr[0:5])
}
return sortedArr[0]
}
func Mean(arr []int) int {
if len(arr) == 0 {
return 0
}
sum := 0
for i := 0; i < len(arr); i++ {
sum = sum + arr[i]
}
return int(float64(sum) / float64(len(arr)))
}
func MeanFloat64(arr []float64) float64 {
return SumFloat64(arr) / float64(len(arr))
}
func SumFloat64(arr []float64) float64 {
if len(arr) == 0 {
return 0
}
sum := 0.0
for i := 0; i < len(arr); i++ {
sum = sum + arr[i]
}
return sum
}
//----------------------------------
// Main
//----------------------------------
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
//----------------------------------
// Arguments parsing and validation
//----------------------------------
config := flag.String("f", "cfg.json", "config file")
cocurrency := flag.Int("c", 1000, "request cocurrency")
numOrders := flag.Int("n", 1000, "number of orders to perform")
debug := flag.Bool("d", false, "debug mode")
// reportRedis := flag.Bool("r", true, "report to local redis")
flag.Parse()
cfg := util.ParseCfg(*config)
// if flag.NFlag() == 0 {
// flag.PrintDefaults()
// os.Exit(1)
// }
if *debug {
isDebugMode = true
}
// if *reportRedis {
// isReportToRedis = true
// }
//----------------------------------
// Validate cocurrency
//----------------------------------
if *cocurrency > MAX_COCURRENCY {
fmt.Printf("Exceed max cocurrency (is %d)", MAX_COCURRENCY)
os.Exit(1)
}
//----------------------------------
// Load users/items and work
//----------------------------------
LoadData(cfg.UserCSV, cfg.ItemCSV)
reporter := NewReporter(*numOrders, *cocurrency)
for i := 0; i < *cocurrency; i++ {
go func() {
w := NewWorker(cfg.APPAddrs, reporter)
w.Work()
}()
}
// start reporter
reporter.Start()
}
| MakeOrder | identifier_name |
stress.go | package main
import (
"bytes"
"distributed-system/util"
"encoding/csv"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"os"
"runtime"
"sort"
"strconv"
"time"
)
//----------------------------------
// Stress Settings
//----------------------------------
const (
MAX_COCURRENCY = 1024 * 1024
TIMEOUT_SEC = 10
)
//----------------------------------
// Stress Abstracts
//----------------------------------
type Worker struct {
r *Reporter
ctx struct {
addrs []string
}
}
type Context struct {
c *http.Client
w *Worker
user User
orderId string
cartId string
}
type TimeInterval struct {
start int64
end int64
interval int64
}
type Reporter struct {
payMade chan bool
payIntervals chan TimeInterval
requestSent chan bool
userCurr chan User
numOrders int
cocurrency int
nOrderOk int
nOrderErr int
nOrderTotal int
nOrderPerSec []int
payCosts []time.Duration
nRequestOk int
nRequestErr int
nRequestTotal int
nRequestPerSec []int
timeStampPerSec []int
startAt time.Time
elapsed time.Duration
}
//----------------------------------
// Entity Abstracts
//----------------------------------
type User struct {
Id int
Username string
Password string
AccessToken string
}
type Item struct {
Id int `json:"id"`
Price int `json:"price"`
Stock int `json:"stock"`
}
//----------------------------------
// Request JSON Bindings
//----------------------------------
type RequestLogin struct {
Username string `json:"username"`
Password string `json:"password"`
}
type RequestCartAddItem struct {
ItemId int `json:"item_id"`
Count int `json:"count"`
}
type RequestMakeOrder struct {
CartId string `json:"cart_id"`
}
type RequestPayOrder struct {
OrderId string `json:"order_id"`
}
//----------------------------------
// Response JSON Bindings
//----------------------------------
type ResponseLogin struct {
UserId int `json:"user_id"`
Username string `json:"username"`
AccessToken string `json:"access_token"`
}
type ResponseGetItems []Item
type ResponseCreateCart struct {
CartId string `json:"cart_id"`
}
type ResponseMakeOrder struct {
OrderId string `json:"order_id"`
}
type ResponsePayOrder struct {
OrderId string `json:"order_id"`
}
type ResponseQueryOrder struct {
Id string `json:"id"`
Item []struct {
ItemId int `json:"item_id"`
Count int `json:"count"`
} `json:"items"`
Total int `json:"total"`
}
//----------------------------------
// Global Variables
//----------------------------------
var (
users = make([]User, 0) // users
items = make(map[int]Item) // map[item.Id]item
isDebugMode = false
isReportToRedis = false
)
//----------------------------------
// Data Initialization
//----------------------------------
// Load all data.
func LoadData(userCsv, itemCsv string) {
fmt.Printf("Load users from user file..")
LoadUsers(userCsv)
fmt.Printf("OK\n")
fmt.Printf("Load items from item file..")
LoadItems(itemCsv)
fmt.Printf("OK\n")
}
// Load users from user csv file
func LoadUsers(userCsv string) {
// read users
if file, err := os.Open(userCsv); err == nil {
reader := csv.NewReader(file)
defer file.Close()
for strs, err := reader.Read(); err == nil; strs, err = reader.Read() {
userId, _ := strconv.Atoi(strs[0])
if userId != 0 {
userName := strs[1]
password := strs[2]
users = append(users, User{Id: userId, Username: userName, Password: password})
}
}
} else {
panic(err.Error())
}
// root user
// ss.rootToken = userId2Token(ss.UserMap["root"].Id)
}
// Load items from item csv file
func LoadItems(itemCsv string) {
// read items
if file, err := os.Open(itemCsv); err == nil {
reader := csv.NewReader(file)
defer file.Close()
for strs, err := reader.Read(); err == nil; strs, err = reader.Read() {
itemId, _ := strconv.Atoi(strs[0])
price, _ := strconv.Atoi(strs[1])
stock, _ := strconv.Atoi(strs[2])
items[itemId] = Item{Id: itemId, Price: price, Stock: stock}
}
} else {
panic(err.Error())
}
}
//----------------------------------
// Request Utils
//----------------------------------
// Build url with path and parameters.
func (w *Worker) Url(path string, params url.Values) string |
// Get json from uri.
func (w *Worker) Get(c *http.Client, url string, bind interface{}) (int, error) {
r, err := c.Get(url)
if err != nil {
if r != nil {
ioutil.ReadAll(r.Body)
r.Body.Close()
}
return 0, err
}
defer r.Body.Close()
err = json.NewDecoder(r.Body).Decode(bind)
if bind == nil {
return r.StatusCode, nil
}
return r.StatusCode, err
}
// Post json to uri and get json response.
func (w *Worker) Post(c *http.Client, url string, data interface{}, bind interface{}) (int, error) {
var body io.Reader
if data != nil {
bs, err := json.Marshal(data)
if err != nil {
return 0, err
}
body = bytes.NewReader(bs)
}
r, err := c.Post(url, "application/json", body)
if err != nil {
if r != nil {
ioutil.ReadAll(r.Body)
r.Body.Close()
}
return 0, err
}
defer r.Body.Close()
err = json.NewDecoder(r.Body).Decode(bind)
if bind == nil {
return r.StatusCode, nil
}
return r.StatusCode, err
}
// Patch url with json.
func (w *Worker) Patch(c *http.Client, url string, data interface{}, bind interface{}) (int, error) {
bs, err := json.Marshal(data)
if err != nil {
return 0, err
}
req, err := http.NewRequest("PATCH", url, bytes.NewReader(bs))
if err != nil {
return 0, err
}
req.Header.Set("Content-Type", "application/json")
res, err := c.Do(req)
if err != nil {
if res != nil {
ioutil.ReadAll(res.Body)
res.Body.Close()
}
return 0, err
}
defer res.Body.Close()
err = json.NewDecoder(res.Body).Decode(bind)
if res.StatusCode == http.StatusNoContent || bind == nil {
return res.StatusCode, nil
}
return res.StatusCode, err
}
//----------------------------------
// Order Handle Utils
//----------------------------------
// Random choice a item. Dont TUCAO this function,
// it works and best O(1).
func GetRandItem() Item {
for {
idx := rand.Intn(len(items))
item, ok := items[idx+1]
if ok {
return item
}
}
}
//----------------------------------
// Work Job Context
//----------------------------------
func (ctx *Context) UrlWithToken(path string) string {
user := ctx.user
params := url.Values{}
params.Add("access_token", user.AccessToken)
return ctx.w.Url(path, params)
}
func (ctx *Context) Login() bool {
user := ctx.user
data := &RequestLogin{user.Username, user.Password}
body := &ResponseLogin{}
url := ctx.w.Url("/login", nil)
statusCode, err := ctx.w.Post(ctx.c, url, data, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request login error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.user.AccessToken = body.AccessToken
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) GetItems() bool {
// body := &ResponseGetItems{}
url := ctx.UrlWithToken("/items")
statusCode, err := ctx.w.Get(ctx.c, url, nil)
if err != nil {
if isDebugMode {
fmt.Printf("Request get items error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) CreateCart() bool {
body := &ResponseCreateCart{}
url := ctx.UrlWithToken("/carts")
statusCode, err := ctx.w.Post(ctx.c, url, nil, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request create carts error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.cartId = body.CartId
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) CartAddItem() bool {
path := fmt.Sprintf("/carts/%s", ctx.cartId)
url := ctx.UrlWithToken(path)
item := GetRandItem()
data := &RequestCartAddItem{item.Id, 1}
statusCode, err := ctx.w.Patch(ctx.c, url, data, nil)
if err != nil {
if isDebugMode {
fmt.Printf("Request error cart add item error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusNoContent {
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) MakeOrder() bool {
if !ctx.Login() || !ctx.GetItems() || !ctx.CreateCart() {
return false
}
// count := rand.Intn(3) + 1
count := 2
for i := 0; i < count; i++ {
if !ctx.CartAddItem() {
return false
}
}
data := &RequestMakeOrder{ctx.cartId}
body := &ResponseMakeOrder{}
url := ctx.UrlWithToken("/orders")
statusCode, err := ctx.w.Post(ctx.c, url, data, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request make order error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.orderId = body.OrderId
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) PayOrder() bool {
if !ctx.MakeOrder() {
return false
}
url := ctx.UrlWithToken("/pay")
data := &RequestPayOrder{ctx.orderId}
body := &ResponsePayOrder{}
statusCode, err := ctx.w.Post(ctx.c, url, data, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request pay order error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
//----------------------------------
// Worker
//----------------------------------
func NewWorker(addrs []string, r *Reporter) *Worker {
w := &Worker{}
w.r = r
w.ctx.addrs = addrs
return w
}
func (w *Worker) Work() {
ctx := &Context{}
ctx.w = w
t := &http.Transport{}
ctx.c = &http.Client{
Timeout: TIMEOUT_SEC * time.Second,
Transport: t,
}
for {
// t.CloseIdleConnections()
startAt := time.Now()
ctx.user = <-w.r.userCurr
w.r.payMade <- ctx.PayOrder()
endAt := time.Now()
w.r.payIntervals <- TimeInterval{start: startAt.UnixNano(), end: endAt.UnixNano(), interval: endAt.Sub(startAt).Nanoseconds()}
}
}
//----------------------------------
// Statstics Reporter
//----------------------------------
// Create reporter
func NewReporter(numOrders int, cocurrency int) *Reporter {
return &Reporter{
make(chan bool, cocurrency),
make(chan TimeInterval, cocurrency),
make(chan bool, cocurrency),
make(chan User, cocurrency),
numOrders,
cocurrency,
0,
0,
0,
make([]int, 0),
make([]time.Duration, 0),
0,
0,
0,
make([]int, 0),
make([]int, 0),
time.Now(),
0,
}
}
// Start reporter
func (r *Reporter) Start() {
r.startAt = time.Now()
go func() {
t := time.NewTicker(1 * time.Second)
for {
nOrderOk := r.nOrderOk
nRequestOk := r.nRequestOk
<-t.C
nOrderPerSec := r.nOrderOk - nOrderOk
r.nOrderPerSec = append(r.nOrderPerSec, nOrderPerSec)
nRequestPerSec := r.nRequestOk - nRequestOk
r.nRequestPerSec = append(r.nRequestPerSec, nRequestPerSec)
r.timeStampPerSec = append(r.timeStampPerSec, time.Now().Second())
fmt.Printf("Finished orders: %d\n", nOrderPerSec)
}
}()
go func() {
for {
payMade := <-r.payMade
payInterval := <-r.payIntervals
if payMade {
r.nOrderOk = r.nOrderOk + 1
r.payCosts = append(r.payCosts, time.Duration(payInterval.interval))
} else {
r.nOrderErr = r.nOrderErr + 1
}
r.nOrderTotal = r.nOrderTotal + 1
if r.nOrderTotal >= r.numOrders {
r.Stop()
}
}
}()
go func() {
for {
requestSent := <-r.requestSent
if requestSent {
r.nRequestOk = r.nRequestOk + 1
} else {
r.nRequestErr = r.nRequestErr + 1
}
r.nRequestTotal = r.nRequestTotal + 1
}
}()
for i := 0; i < len(users); i++ {
r.userCurr <- users[i]
}
timeout := time.After(TIMEOUT_SEC * time.Second)
for r.nOrderTotal < r.numOrders {
select {
case <-timeout:
r.Stop()
}
}
r.Stop()
}
// Stop the reporter and exit full process.
func (r *Reporter) Stop() {
r.elapsed = time.Since(r.startAt)
r.Report()
os.Exit(0)
}
// Report stats to console and redis.
func (r *Reporter) Report() {
//---------------------------------------------------
// Report to console
//---------------------------------------------------
sort.Ints(r.nOrderPerSec)
sort.Ints(r.nRequestPerSec)
nOrderPerSecMax := MeanOfMaxFive(r.nOrderPerSec)
nOrderPerSecMin := MeanOfMinFive(r.nOrderPerSec)
nOrderPerSecMean := Mean(r.nOrderPerSec)
nRequestPerSecMax := MeanOfMaxFive(r.nRequestPerSec)
nRequestPerSecMin := MeanOfMinFive(r.nRequestPerSec)
nRequestPerSecMean := Mean(r.nRequestPerSec)
sort.Ints(r.nRequestPerSec)
payCostNanoseconds := []float64{}
for i := 0; i < len(r.payCosts); i++ {
payCostNanoseconds = append(payCostNanoseconds, float64(r.payCosts[i].Nanoseconds()))
}
sort.Float64s(payCostNanoseconds)
msTakenTotal := int(r.elapsed.Nanoseconds() / 1000000.0)
msPerOrder := MeanFloat64(payCostNanoseconds) / 1000000.0
msPerRequest := SumFloat64(payCostNanoseconds) / 1000000.0 / float64(r.nRequestOk)
//---------------------------------------------------
// Report to console
//---------------------------------------------------
fmt.Print("\nStats\n")
fmt.Printf("Concurrency level: %d\n", r.cocurrency)
fmt.Printf("Time taken for tests: %dms\n", msTakenTotal)
fmt.Printf("Complete requests: %d\n", r.nRequestOk)
fmt.Printf("Failed requests: %d\n", r.nRequestErr)
fmt.Printf("Complete orders: %d\n", r.nOrderOk)
fmt.Printf("Failed orders: %d\n", r.nOrderErr)
fmt.Printf("Time per request: %.2fms\n", msPerRequest)
fmt.Printf("Time per order: %.2fms\n", msPerOrder)
fmt.Printf("Request per second: %d (max) %d (min) %d(mean)\n", nRequestPerSecMax, nRequestPerSecMin, nRequestPerSecMean)
fmt.Printf("Order per second: %d (max) %d (min) %d (mean)\n\n", nOrderPerSecMax, nOrderPerSecMin, nOrderPerSecMean)
fmt.Printf("Percentage of orders made within a certain time (ms)\n")
if len(payCostNanoseconds) == 0 {
return
}
percentages := []float64{10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 95.5, 96, 96.5, 97, 97.5, 98, 98.5, 99, 99.9, 99.99, 100}
for _, percentage := range percentages {
idx := int(percentage * float64(len(payCostNanoseconds)) / float64(100.0))
if idx > 0 {
idx = idx - 1
} else {
idx = 0
}
payCostNanosecond := payCostNanoseconds[idx]
fmt.Printf("%.2f%%\t%d ms\n", percentage, int(payCostNanosecond/1000000.0))
}
}
//----------------------------------
// Math util functions
//----------------------------------
func MeanOfMaxFive(sortedArr []int) int {
if len(sortedArr) == 0 {
return 0
}
if len(sortedArr) == 1 {
return sortedArr[0]
}
if len(sortedArr) == 2 {
return sortedArr[1]
}
sortedArr = sortedArr[1 : len(sortedArr)-1]
if len(sortedArr) > 5 {
return Mean(sortedArr[len(sortedArr)-5:])
}
return sortedArr[len(sortedArr)-1]
}
func MeanOfMinFive(sortedArr []int) int {
if len(sortedArr) == 0 {
return 0
}
if len(sortedArr) == 1 {
return sortedArr[0]
}
if len(sortedArr) == 2 {
return sortedArr[0]
}
sortedArr = sortedArr[1 : len(sortedArr)-1]
if len(sortedArr) > 5 {
return Mean(sortedArr[0:5])
}
return sortedArr[0]
}
func Mean(arr []int) int {
if len(arr) == 0 {
return 0
}
sum := 0
for i := 0; i < len(arr); i++ {
sum = sum + arr[i]
}
return int(float64(sum) / float64(len(arr)))
}
func MeanFloat64(arr []float64) float64 {
return SumFloat64(arr) / float64(len(arr))
}
func SumFloat64(arr []float64) float64 {
if len(arr) == 0 {
return 0
}
sum := 0.0
for i := 0; i < len(arr); i++ {
sum = sum + arr[i]
}
return sum
}
//----------------------------------
// Main
//----------------------------------
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
//----------------------------------
// Arguments parsing and validation
//----------------------------------
config := flag.String("f", "cfg.json", "config file")
cocurrency := flag.Int("c", 1000, "request cocurrency")
numOrders := flag.Int("n", 1000, "number of orders to perform")
debug := flag.Bool("d", false, "debug mode")
// reportRedis := flag.Bool("r", true, "report to local redis")
flag.Parse()
cfg := util.ParseCfg(*config)
// if flag.NFlag() == 0 {
// flag.PrintDefaults()
// os.Exit(1)
// }
if *debug {
isDebugMode = true
}
// if *reportRedis {
// isReportToRedis = true
// }
//----------------------------------
// Validate cocurrency
//----------------------------------
if *cocurrency > MAX_COCURRENCY {
fmt.Printf("Exceed max cocurrency (is %d)", MAX_COCURRENCY)
os.Exit(1)
}
//----------------------------------
// Load users/items and work
//----------------------------------
LoadData(cfg.UserCSV, cfg.ItemCSV)
reporter := NewReporter(*numOrders, *cocurrency)
for i := 0; i < *cocurrency; i++ {
go func() {
w := NewWorker(cfg.APPAddrs, reporter)
w.Work()
}()
}
// start reporter
reporter.Start()
}
| {
// random choice one host for load balance
i := rand.Intn(len(w.ctx.addrs))
addr := w.ctx.addrs[i]
s := fmt.Sprintf("http://%s%s", addr, path)
if params == nil {
return s
}
p := params.Encode()
return fmt.Sprintf("%s?%s", s, p)
} | identifier_body |
stress.go | package main
import (
"bytes"
"distributed-system/util"
"encoding/csv"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"os"
"runtime"
"sort"
"strconv"
"time"
)
//----------------------------------
// Stress Settings
//----------------------------------
const (
MAX_COCURRENCY = 1024 * 1024
TIMEOUT_SEC = 10
)
//----------------------------------
// Stress Abstracts
//----------------------------------
type Worker struct {
r *Reporter
ctx struct {
addrs []string
}
}
type Context struct {
c *http.Client
w *Worker
user User
orderId string
cartId string
}
type TimeInterval struct {
start int64
end int64
interval int64
}
type Reporter struct {
payMade chan bool
payIntervals chan TimeInterval
requestSent chan bool
userCurr chan User
numOrders int
cocurrency int
nOrderOk int
nOrderErr int
nOrderTotal int
nOrderPerSec []int
payCosts []time.Duration
nRequestOk int
nRequestErr int
nRequestTotal int
nRequestPerSec []int
timeStampPerSec []int
startAt time.Time
elapsed time.Duration
}
//----------------------------------
// Entity Abstracts
//----------------------------------
type User struct {
Id int
Username string
Password string
AccessToken string
}
type Item struct {
Id int `json:"id"`
Price int `json:"price"`
Stock int `json:"stock"`
}
//----------------------------------
// Request JSON Bindings
//----------------------------------
type RequestLogin struct {
Username string `json:"username"`
Password string `json:"password"`
}
type RequestCartAddItem struct {
ItemId int `json:"item_id"`
Count int `json:"count"`
}
type RequestMakeOrder struct {
CartId string `json:"cart_id"`
}
type RequestPayOrder struct {
OrderId string `json:"order_id"`
}
//----------------------------------
// Response JSON Bindings
//----------------------------------
type ResponseLogin struct {
UserId int `json:"user_id"`
Username string `json:"username"`
AccessToken string `json:"access_token"`
}
type ResponseGetItems []Item
type ResponseCreateCart struct {
CartId string `json:"cart_id"`
}
type ResponseMakeOrder struct {
OrderId string `json:"order_id"`
}
type ResponsePayOrder struct {
OrderId string `json:"order_id"`
}
type ResponseQueryOrder struct {
Id string `json:"id"`
Item []struct {
ItemId int `json:"item_id"`
Count int `json:"count"`
} `json:"items"`
Total int `json:"total"`
}
//----------------------------------
// Global Variables
//----------------------------------
var (
users = make([]User, 0) // users
items = make(map[int]Item) // map[item.Id]item
isDebugMode = false
isReportToRedis = false
)
//----------------------------------
// Data Initialization
//----------------------------------
// Load all data.
func LoadData(userCsv, itemCsv string) {
fmt.Printf("Load users from user file..")
LoadUsers(userCsv)
fmt.Printf("OK\n")
fmt.Printf("Load items from item file..")
LoadItems(itemCsv)
fmt.Printf("OK\n")
}
// Load users from user csv file
func LoadUsers(userCsv string) {
// read users
if file, err := os.Open(userCsv); err == nil {
reader := csv.NewReader(file)
defer file.Close()
for strs, err := reader.Read(); err == nil; strs, err = reader.Read() {
userId, _ := strconv.Atoi(strs[0])
if userId != 0 {
userName := strs[1]
password := strs[2]
users = append(users, User{Id: userId, Username: userName, Password: password})
}
}
} else {
panic(err.Error())
}
// root user
// ss.rootToken = userId2Token(ss.UserMap["root"].Id)
}
// Load items from item csv file
func LoadItems(itemCsv string) {
// read items
if file, err := os.Open(itemCsv); err == nil {
reader := csv.NewReader(file)
defer file.Close()
for strs, err := reader.Read(); err == nil; strs, err = reader.Read() {
itemId, _ := strconv.Atoi(strs[0])
price, _ := strconv.Atoi(strs[1])
stock, _ := strconv.Atoi(strs[2])
items[itemId] = Item{Id: itemId, Price: price, Stock: stock}
}
} else {
panic(err.Error())
}
}
//----------------------------------
// Request Utils
//----------------------------------
// Build url with path and parameters.
func (w *Worker) Url(path string, params url.Values) string {
// random choice one host for load balance
i := rand.Intn(len(w.ctx.addrs))
addr := w.ctx.addrs[i]
s := fmt.Sprintf("http://%s%s", addr, path)
if params == nil {
return s
}
p := params.Encode()
return fmt.Sprintf("%s?%s", s, p)
}
// Get json from uri.
func (w *Worker) Get(c *http.Client, url string, bind interface{}) (int, error) {
r, err := c.Get(url)
if err != nil {
if r != nil {
ioutil.ReadAll(r.Body)
r.Body.Close()
}
return 0, err
}
defer r.Body.Close()
err = json.NewDecoder(r.Body).Decode(bind)
if bind == nil {
return r.StatusCode, nil
}
return r.StatusCode, err
}
// Post json to uri and get json response.
func (w *Worker) Post(c *http.Client, url string, data interface{}, bind interface{}) (int, error) {
var body io.Reader
if data != nil {
bs, err := json.Marshal(data)
if err != nil {
return 0, err
}
body = bytes.NewReader(bs)
}
r, err := c.Post(url, "application/json", body)
if err != nil {
if r != nil {
ioutil.ReadAll(r.Body)
r.Body.Close()
}
return 0, err
}
defer r.Body.Close()
err = json.NewDecoder(r.Body).Decode(bind)
if bind == nil {
return r.StatusCode, nil
}
return r.StatusCode, err
}
// Patch url with json.
func (w *Worker) Patch(c *http.Client, url string, data interface{}, bind interface{}) (int, error) {
bs, err := json.Marshal(data)
if err != nil {
return 0, err
}
req, err := http.NewRequest("PATCH", url, bytes.NewReader(bs))
if err != nil |
req.Header.Set("Content-Type", "application/json")
res, err := c.Do(req)
if err != nil {
if res != nil {
ioutil.ReadAll(res.Body)
res.Body.Close()
}
return 0, err
}
defer res.Body.Close()
err = json.NewDecoder(res.Body).Decode(bind)
if res.StatusCode == http.StatusNoContent || bind == nil {
return res.StatusCode, nil
}
return res.StatusCode, err
}
//----------------------------------
// Order Handle Utils
//----------------------------------
// Random choice a item. Dont TUCAO this function,
// it works and best O(1).
func GetRandItem() Item {
for {
idx := rand.Intn(len(items))
item, ok := items[idx+1]
if ok {
return item
}
}
}
//----------------------------------
// Work Job Context
//----------------------------------
func (ctx *Context) UrlWithToken(path string) string {
user := ctx.user
params := url.Values{}
params.Add("access_token", user.AccessToken)
return ctx.w.Url(path, params)
}
func (ctx *Context) Login() bool {
user := ctx.user
data := &RequestLogin{user.Username, user.Password}
body := &ResponseLogin{}
url := ctx.w.Url("/login", nil)
statusCode, err := ctx.w.Post(ctx.c, url, data, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request login error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.user.AccessToken = body.AccessToken
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) GetItems() bool {
// body := &ResponseGetItems{}
url := ctx.UrlWithToken("/items")
statusCode, err := ctx.w.Get(ctx.c, url, nil)
if err != nil {
if isDebugMode {
fmt.Printf("Request get items error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) CreateCart() bool {
body := &ResponseCreateCart{}
url := ctx.UrlWithToken("/carts")
statusCode, err := ctx.w.Post(ctx.c, url, nil, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request create carts error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.cartId = body.CartId
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) CartAddItem() bool {
path := fmt.Sprintf("/carts/%s", ctx.cartId)
url := ctx.UrlWithToken(path)
item := GetRandItem()
data := &RequestCartAddItem{item.Id, 1}
statusCode, err := ctx.w.Patch(ctx.c, url, data, nil)
if err != nil {
if isDebugMode {
fmt.Printf("Request error cart add item error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusNoContent {
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) MakeOrder() bool {
if !ctx.Login() || !ctx.GetItems() || !ctx.CreateCart() {
return false
}
// count := rand.Intn(3) + 1
count := 2
for i := 0; i < count; i++ {
if !ctx.CartAddItem() {
return false
}
}
data := &RequestMakeOrder{ctx.cartId}
body := &ResponseMakeOrder{}
url := ctx.UrlWithToken("/orders")
statusCode, err := ctx.w.Post(ctx.c, url, data, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request make order error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.orderId = body.OrderId
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
func (ctx *Context) PayOrder() bool {
if !ctx.MakeOrder() {
return false
}
url := ctx.UrlWithToken("/pay")
data := &RequestPayOrder{ctx.orderId}
body := &ResponsePayOrder{}
statusCode, err := ctx.w.Post(ctx.c, url, data, body)
if err != nil {
if isDebugMode {
fmt.Printf("Request pay order error: %v\n", err)
}
ctx.w.r.requestSent <- false
return false
}
if statusCode == http.StatusOK {
ctx.w.r.requestSent <- true
return true
}
ctx.w.r.requestSent <- false
return false
}
//----------------------------------
// Worker
//----------------------------------
func NewWorker(addrs []string, r *Reporter) *Worker {
w := &Worker{}
w.r = r
w.ctx.addrs = addrs
return w
}
func (w *Worker) Work() {
ctx := &Context{}
ctx.w = w
t := &http.Transport{}
ctx.c = &http.Client{
Timeout: TIMEOUT_SEC * time.Second,
Transport: t,
}
for {
// t.CloseIdleConnections()
startAt := time.Now()
ctx.user = <-w.r.userCurr
w.r.payMade <- ctx.PayOrder()
endAt := time.Now()
w.r.payIntervals <- TimeInterval{start: startAt.UnixNano(), end: endAt.UnixNano(), interval: endAt.Sub(startAt).Nanoseconds()}
}
}
//----------------------------------
// Statstics Reporter
//----------------------------------
// Create reporter
func NewReporter(numOrders int, cocurrency int) *Reporter {
return &Reporter{
make(chan bool, cocurrency),
make(chan TimeInterval, cocurrency),
make(chan bool, cocurrency),
make(chan User, cocurrency),
numOrders,
cocurrency,
0,
0,
0,
make([]int, 0),
make([]time.Duration, 0),
0,
0,
0,
make([]int, 0),
make([]int, 0),
time.Now(),
0,
}
}
// Start reporter
func (r *Reporter) Start() {
r.startAt = time.Now()
go func() {
t := time.NewTicker(1 * time.Second)
for {
nOrderOk := r.nOrderOk
nRequestOk := r.nRequestOk
<-t.C
nOrderPerSec := r.nOrderOk - nOrderOk
r.nOrderPerSec = append(r.nOrderPerSec, nOrderPerSec)
nRequestPerSec := r.nRequestOk - nRequestOk
r.nRequestPerSec = append(r.nRequestPerSec, nRequestPerSec)
r.timeStampPerSec = append(r.timeStampPerSec, time.Now().Second())
fmt.Printf("Finished orders: %d\n", nOrderPerSec)
}
}()
go func() {
for {
payMade := <-r.payMade
payInterval := <-r.payIntervals
if payMade {
r.nOrderOk = r.nOrderOk + 1
r.payCosts = append(r.payCosts, time.Duration(payInterval.interval))
} else {
r.nOrderErr = r.nOrderErr + 1
}
r.nOrderTotal = r.nOrderTotal + 1
if r.nOrderTotal >= r.numOrders {
r.Stop()
}
}
}()
go func() {
for {
requestSent := <-r.requestSent
if requestSent {
r.nRequestOk = r.nRequestOk + 1
} else {
r.nRequestErr = r.nRequestErr + 1
}
r.nRequestTotal = r.nRequestTotal + 1
}
}()
for i := 0; i < len(users); i++ {
r.userCurr <- users[i]
}
timeout := time.After(TIMEOUT_SEC * time.Second)
for r.nOrderTotal < r.numOrders {
select {
case <-timeout:
r.Stop()
}
}
r.Stop()
}
// Stop the reporter and exit full process.
func (r *Reporter) Stop() {
r.elapsed = time.Since(r.startAt)
r.Report()
os.Exit(0)
}
// Report stats to console and redis.
func (r *Reporter) Report() {
//---------------------------------------------------
// Report to console
//---------------------------------------------------
sort.Ints(r.nOrderPerSec)
sort.Ints(r.nRequestPerSec)
nOrderPerSecMax := MeanOfMaxFive(r.nOrderPerSec)
nOrderPerSecMin := MeanOfMinFive(r.nOrderPerSec)
nOrderPerSecMean := Mean(r.nOrderPerSec)
nRequestPerSecMax := MeanOfMaxFive(r.nRequestPerSec)
nRequestPerSecMin := MeanOfMinFive(r.nRequestPerSec)
nRequestPerSecMean := Mean(r.nRequestPerSec)
sort.Ints(r.nRequestPerSec)
payCostNanoseconds := []float64{}
for i := 0; i < len(r.payCosts); i++ {
payCostNanoseconds = append(payCostNanoseconds, float64(r.payCosts[i].Nanoseconds()))
}
sort.Float64s(payCostNanoseconds)
msTakenTotal := int(r.elapsed.Nanoseconds() / 1000000.0)
msPerOrder := MeanFloat64(payCostNanoseconds) / 1000000.0
msPerRequest := SumFloat64(payCostNanoseconds) / 1000000.0 / float64(r.nRequestOk)
//---------------------------------------------------
// Report to console
//---------------------------------------------------
fmt.Print("\nStats\n")
fmt.Printf("Concurrency level: %d\n", r.cocurrency)
fmt.Printf("Time taken for tests: %dms\n", msTakenTotal)
fmt.Printf("Complete requests: %d\n", r.nRequestOk)
fmt.Printf("Failed requests: %d\n", r.nRequestErr)
fmt.Printf("Complete orders: %d\n", r.nOrderOk)
fmt.Printf("Failed orders: %d\n", r.nOrderErr)
fmt.Printf("Time per request: %.2fms\n", msPerRequest)
fmt.Printf("Time per order: %.2fms\n", msPerOrder)
fmt.Printf("Request per second: %d (max) %d (min) %d(mean)\n", nRequestPerSecMax, nRequestPerSecMin, nRequestPerSecMean)
fmt.Printf("Order per second: %d (max) %d (min) %d (mean)\n\n", nOrderPerSecMax, nOrderPerSecMin, nOrderPerSecMean)
fmt.Printf("Percentage of orders made within a certain time (ms)\n")
if len(payCostNanoseconds) == 0 {
return
}
percentages := []float64{10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 95.5, 96, 96.5, 97, 97.5, 98, 98.5, 99, 99.9, 99.99, 100}
for _, percentage := range percentages {
idx := int(percentage * float64(len(payCostNanoseconds)) / float64(100.0))
if idx > 0 {
idx = idx - 1
} else {
idx = 0
}
payCostNanosecond := payCostNanoseconds[idx]
fmt.Printf("%.2f%%\t%d ms\n", percentage, int(payCostNanosecond/1000000.0))
}
}
//----------------------------------
// Math util functions
//----------------------------------
func MeanOfMaxFive(sortedArr []int) int {
if len(sortedArr) == 0 {
return 0
}
if len(sortedArr) == 1 {
return sortedArr[0]
}
if len(sortedArr) == 2 {
return sortedArr[1]
}
sortedArr = sortedArr[1 : len(sortedArr)-1]
if len(sortedArr) > 5 {
return Mean(sortedArr[len(sortedArr)-5:])
}
return sortedArr[len(sortedArr)-1]
}
func MeanOfMinFive(sortedArr []int) int {
if len(sortedArr) == 0 {
return 0
}
if len(sortedArr) == 1 {
return sortedArr[0]
}
if len(sortedArr) == 2 {
return sortedArr[0]
}
sortedArr = sortedArr[1 : len(sortedArr)-1]
if len(sortedArr) > 5 {
return Mean(sortedArr[0:5])
}
return sortedArr[0]
}
func Mean(arr []int) int {
if len(arr) == 0 {
return 0
}
sum := 0
for i := 0; i < len(arr); i++ {
sum = sum + arr[i]
}
return int(float64(sum) / float64(len(arr)))
}
func MeanFloat64(arr []float64) float64 {
return SumFloat64(arr) / float64(len(arr))
}
func SumFloat64(arr []float64) float64 {
if len(arr) == 0 {
return 0
}
sum := 0.0
for i := 0; i < len(arr); i++ {
sum = sum + arr[i]
}
return sum
}
//----------------------------------
// Main
//----------------------------------
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
//----------------------------------
// Arguments parsing and validation
//----------------------------------
config := flag.String("f", "cfg.json", "config file")
cocurrency := flag.Int("c", 1000, "request cocurrency")
numOrders := flag.Int("n", 1000, "number of orders to perform")
debug := flag.Bool("d", false, "debug mode")
// reportRedis := flag.Bool("r", true, "report to local redis")
flag.Parse()
cfg := util.ParseCfg(*config)
// if flag.NFlag() == 0 {
// flag.PrintDefaults()
// os.Exit(1)
// }
if *debug {
isDebugMode = true
}
// if *reportRedis {
// isReportToRedis = true
// }
//----------------------------------
// Validate cocurrency
//----------------------------------
if *cocurrency > MAX_COCURRENCY {
fmt.Printf("Exceed max cocurrency (is %d)", MAX_COCURRENCY)
os.Exit(1)
}
//----------------------------------
// Load users/items and work
//----------------------------------
LoadData(cfg.UserCSV, cfg.ItemCSV)
reporter := NewReporter(*numOrders, *cocurrency)
for i := 0; i < *cocurrency; i++ {
go func() {
w := NewWorker(cfg.APPAddrs, reporter)
w.Work()
}()
}
// start reporter
reporter.Start()
}
| {
return 0, err
} | conditional_block |
geohash.go | package geoutils
import (
"errors"
"fmt"
structure "github.com/eleme/clair/matrix/structure"
"math"
"strconv"
)
const (
base32 = "0123456789bcdefghjkmnpqrstuvwxyz"
)
var (
encodeParamMap = map[int]encodePrecison{
1: encodePrecison{precision: 1, latBinBits: 2, lngBinBits: 3, vagueRange: 2500000.0},
2: encodePrecison{precision: 2, latBinBits: 5, lngBinBits: 5, vagueRange: 630000.0},
3: encodePrecison{precision: 3, latBinBits: 7, lngBinBits: 8, vagueRange: 78000.0},
4: encodePrecison{precision: 4, latBinBits: 10, lngBinBits: 10, vagueRange: 20000.0},
5: encodePrecison{precision: 5, latBinBits: 12, lngBinBits: 13, vagueRange: 2400.0},
6: encodePrecison{precision: 6, latBinBits: 15, lngBinBits: 15, vagueRange: 610.0},
7: encodePrecison{precision: 7, latBinBits: 17, lngBinBits: 18, vagueRange: 76.0},
8: encodePrecison{precision: 8, latBinBits: 20, lngBinBits: 20, vagueRange: 19.11},
9: encodePrecison{precision: 9, latBinBits: 22, lngBinBits: 23, vagueRange: 4.78},
10: encodePrecison{precision: 10, latBinBits: 25, lngBinBits: 25, vagueRange: 0.59},
11: encodePrecison{precision: 11, latBinBits: 27, lngBinBits: 28, vagueRange: 0.15},
12: encodePrecison{precision: 12, latBinBits: 30, lngBinBits: 30, vagueRange: 0.01},
}
globalLatitudeRange = Range{minVal: -90, maxVal: 90}
globalLongitudeRange = Range{minVal: -180, maxVal: 180}
base32Dict = func() []int {
baseDictResult := make([]int, 128)
for i, v := range base32 {
baseDictResult[v] = i
}
return baseDictResult
}()
)
func checkValidBase32(base32Bytes []byte) bool {
for _, base32Byte := range base32Bytes {
if base32Byte != '0' && base32Dict[base32Byte] == 0 {
return false
}
}
return true
}
func binaryToBase32(binaryBytes []byte) ([]byte, error) {
base32Len := (len(binaryBytes) + 4) / 5
resultBase32Bytes := make([]byte, base32Len)
tmpIntVal, err := strconv.ParseInt(string(binaryBytes), 2, 64)
if err != nil {
return nil, err
}
for i := 0; i < base32Len; i++ {
resultBase32Bytes[base32Len-i-1] = base32[tmpIntVal%32]
tmpIntVal /= 32
}
return resultBase32Bytes, nil
}
func base32ToBinary(base32Bytes []byte) ([]byte, error) {
if !checkValidBase32(base32Bytes) {
return []byte{}, errors.New("wrong format base32 bytes")
}
base32Len := len(base32Bytes)
var tmpIntVal int64
for i, v := range base32Bytes {
tmpIntVal += int64(base32Dict[v]) * int64(math.Pow(32, float64(base32Len-i-1)))
}
binaryBytes := []byte(fmt.Sprintf("%b", tmpIntVal))
autoPadding := make([]byte, base32Len*5-len(binaryBytes))
for i := range autoPadding {
autoPadding[i] = '0'
}
result := append(autoPadding, binaryBytes...)
return result, nil
}
//Encode transform the latitude and longtitude to a geohash string
func Encode(latitude, longitude float64, precison int) (string, error) {
findBinaryCode := func(r Range, val float64) (byte, Range) {
midVal := r.GetMidVal()
var binaryCode byte
var nextRange Range
if r.minVal <= val && val <= midVal {
binaryCode = '0'
nextRange = Range{minVal: r.minVal, maxVal: midVal}
} else {
binaryCode = '1'
nextRange = Range{minVal: midVal, maxVal: r.maxVal}
}
return binaryCode, nextRange
}
if !globalLatitudeRange.CheckInRange(latitude) || !globalLongitudeRange.CheckInRange(longitude) {
return "", errors.New("wrong params for latitude and longtitude")
}
precisonParam, ok := encodeParamMap[precison]
if !ok {
return "", errors.New("error precision param")
}
// encode latitude
latitudeBinaryEncode := make([]byte, precisonParam.latBinBits)
for i, latRange := 0, globalLatitudeRange; i < precisonParam.latBinBits; i++ {
latitudeCode, nextLatRange := findBinaryCode(latRange, latitude)
latRange = nextLatRange
latitudeBinaryEncode[i] = latitudeCode
}
// encode longtitude
longitudeBinaryEncode := make([]byte, precisonParam.lngBinBits)
for i, lngRange := 0, globalLongitudeRange; i < precisonParam.lngBinBits; i++ |
binaryEncode := make([]byte, precisonParam.latBinBits+precisonParam.lngBinBits)
// merge lat encode and lng encode
for i := 0; i < precisonParam.latBinBits+precisonParam.lngBinBits; i++ {
if i%2 == 0 {
binaryEncode[i] = longitudeBinaryEncode[int(i/2)]
} else {
binaryEncode[i] = latitudeBinaryEncode[int(i/2)]
}
}
base32Bytes, err := binaryToBase32(binaryEncode)
if err != nil {
return "", err
}
return string(base32Bytes), nil
}
//Decode transform the geohash string to a latitude && longitude location
func Decode(geohashStr string) (float64, float64, error) {
latitudeRange, longtitudeRange, err := decodeToRange(geohashStr)
if err != nil {
return 0, 0, err
}
return latitudeRange.GetMidVal(), longtitudeRange.GetMidVal(), nil
}
func decodeToRange(geohashStr string) (*Range, *Range, error) {
getRange := func(r Range, binaryCodes []byte) *Range {
for _, binaryByte := range binaryCodes {
midVal := r.GetMidVal()
if binaryByte == '0' {
r.maxVal = midVal
} else {
r.minVal = midVal
}
}
return &Range{minVal: r.minVal, maxVal: r.maxVal}
}
binaryEncodes, err := base32ToBinary([]byte(geohashStr))
if err != nil {
return nil, nil, err
}
// longtitude encode index: even; latitude encode index: odd
var latitudeBinaryEncode []byte
var longitudeBinaryEncode []byte
for i, val := range binaryEncodes {
if i%2 == 0 {
longitudeBinaryEncode = append(longitudeBinaryEncode, val)
} else {
latitudeBinaryEncode = append(latitudeBinaryEncode, val)
}
}
return getRange(globalLatitudeRange, latitudeBinaryEncode), getRange(globalLongitudeRange, longitudeBinaryEncode), nil
}
//Neighbours find the neighbours geohash strings based on a center geohash.
//Specifically, the neighbourPos list indicates the precise selected postions.
//The input param geohashStr is the center postion.
//The neighbourhood postions is stipultaed as follows:
// --------------------------------------------
// | | | |
// | northwest | north | northeast |
// | | | |
// |--------------|--------------|------------|
// | | | |
// | west | center | east |
// | | | |
// |--------------|--------------|------------|
// | | | |
// | southwest | south | southeast |
// | | | |
// |--------------|--------------|------------|
func Neighbours(geohashStr string, neighbourPos []string) (map[string]string, error) {
findNeighbourShiftIndex := func(pos string) (int16, int16) {
switch pos {
case "northwest":
return 1, -1
case "north":
return 1, 0
case "northeast":
return 1, 1
case "west":
return 0, -1
case "center":
return 0, 0
case "east":
return 0, 1
case "southwest":
return -1, -1
case "south":
return -1, 0
case "southeast":
return -1, 1
default:
return 0, 0
}
}
latitudeRange, longtitudeRange, err := decodeToRange(geohashStr)
if err != nil {
return make(map[string]string), err
}
lattitudeRangeDiff, longtitudeRangeDiff := latitudeRange.GetRangeDiff(), longtitudeRange.GetRangeDiff()
neighbours := make(map[string]string)
precision := len(geohashStr)
for _, pos := range neighbourPos {
latitudeShiftIndex, longitudeShiftIndex := findNeighbourShiftIndex(pos)
tmpEncode, tmpErr := Encode(
latitudeRange.GetMidVal()+float64(latitudeShiftIndex)*lattitudeRangeDiff, longtitudeRange.GetMidVal()+float64(longitudeShiftIndex)*longtitudeRangeDiff, precision)
if tmpErr == nil {
neighbours[pos] = tmpEncode
}
}
return neighbours, nil
}
// Nearby returns the nearby geohash string list within the certain area
func Nearby(geohashStr string, radius int32) ([]string, error) {
if len(geohashStr) > 7 {
return []string{}, errors.New("geohash length cannot larger than 7")
}
precisonParam, ok := encodeParamMap[len(geohashStr)]
if !ok {
return []string{}, errors.New("error precision param")
}
extendLayer := int32(math.Ceil(float64(radius) / precisonParam.vagueRange))
var curLayer int32
s := newSquare(geohashStr)
for {
if curLayer >= extendLayer {
break
}
s = s.extendSqaure()
curLayer++
}
var result []string
allElementSet := s.listAllElements()
for _, v := range allElementSet.All() {
nearbyElement := v.(string)
if dis, err := geohashDistance(nearbyElement, geohashStr); err == nil && dis <= float64(radius) {
result = append(result, nearbyElement)
}
}
return result, nil
}
func geohashDistance(fromGeohash, toGeohash string) (float64, error) {
if fromLat, fromLng, err := Decode(fromGeohash); err == nil {
if toLat, toLng, err := Decode(toGeohash); err == nil {
fromPoint := NewLocation(fromLat, fromLng)
toPoint := NewLocation(toLat, toLng)
return fromPoint.EuclideanDistance(toPoint), nil
}
}
return -1, errors.New("Invalid geohash")
}
type square struct {
innerSqaure *square
eastSide, southSide, westSide, northSide *structure.Set
northeastElement, northwestElement, southeastElement, southwestElement string
}
func (s *square) extendSqaure() *square {
extendDirectionSide := func(extendSet *structure.Set, sqaureSideSet *structure.Set, direction string) {
sideSet := sqaureSideSet.All()
for _, side := range sideSet {
neighbour, tmpErr := Neighbours(side.(string), []string{direction})
if tmpErr == nil {
tmpNeighbour, tmpOK := neighbour[direction]
if tmpOK {
extendSet.Add(tmpNeighbour)
}
}
}
}
extendEastSide := structure.NewSet()
extendSouthSide := structure.NewSet()
extendWestSide := structure.NewSet()
extendNorthSide := structure.NewSet()
var northeastElement, northwestElement, southeastElement, southwestElement string
if s.eastSide != nil {
extendDirectionSide(extendEastSide, s.eastSide, "east")
}
if s.southSide != nil {
extendDirectionSide(extendSouthSide, s.southSide, "south")
}
if s.westSide != nil {
extendDirectionSide(extendWestSide, s.westSide, "west")
}
if s.northSide != nil {
extendDirectionSide(extendNorthSide, s.northSide, "north")
}
northeastNeighbours, northeastNeighboursErr := Neighbours(s.northeastElement, []string{"northeast", "north", "east"})
if northeastNeighboursErr == nil {
if northeastNeighbour, northeastNeighbourOK := northeastNeighbours["northeast"]; northeastNeighbourOK {
northeastElement = northeastNeighbour
}
if northNeighbour, northNeighbourOK := northeastNeighbours["north"]; northNeighbourOK {
extendNorthSide.Add(northNeighbour)
}
if eastNeighbour, eastNeighbourOK := northeastNeighbours["east"]; eastNeighbourOK {
extendEastSide.Add(eastNeighbour)
}
}
northwestNeighbours, northwestNeighboursErr := Neighbours(s.northwestElement, []string{"northwest", "north", "west"})
if northwestNeighboursErr == nil {
if northwestNeighbour, northwestNeighbourOK := northwestNeighbours["northwest"]; northwestNeighbourOK {
northwestElement = northwestNeighbour
}
if northNeighbour, northNeighbourOK := northwestNeighbours["north"]; northNeighbourOK {
extendNorthSide.Add(northNeighbour)
}
if westNeighbour, westNeighbourOK := northwestNeighbours["west"]; westNeighbourOK {
extendWestSide.Add(westNeighbour)
}
}
southeastNeighbours, southeastNeighboursErr := Neighbours(s.southeastElement, []string{"southeast", "south", "east"})
if southeastNeighboursErr == nil {
if southeastNeighbour, southeastNeighbourOK := southeastNeighbours["southeast"]; southeastNeighbourOK {
southeastElement = southeastNeighbour
}
if southNeighbour, southNeighbourOK := southeastNeighbours["south"]; southNeighbourOK {
extendSouthSide.Add(southNeighbour)
}
if eastNeighbour, eastNeighbourOK := southeastNeighbours["east"]; eastNeighbourOK {
extendEastSide.Add(eastNeighbour)
}
}
southwestNeighbours, southwestNeighboursErr := Neighbours(s.southwestElement, []string{"southwest", "south", "west"})
if southwestNeighboursErr == nil {
if southwestNeighbour, southwestNeighbourOK := southwestNeighbours["southwest"]; southwestNeighbourOK {
southwestElement = southwestNeighbour
}
if southNeighbour, southNeighbourOK := southwestNeighbours["south"]; southNeighbourOK {
extendSouthSide.Add(southNeighbour)
}
if westNeighbour, westNeighbourOK := southwestNeighbours["west"]; westNeighbourOK {
extendWestSide.Add(westNeighbour)
}
}
return &square{
innerSqaure: s,
eastSide: extendEastSide,
southSide: extendSouthSide,
westSide: extendWestSide,
northSide: extendNorthSide,
northeastElement: northeastElement,
northwestElement: northwestElement,
southeastElement: southeastElement,
southwestElement: southwestElement,
}
}
func (s *square) listAllElements() *structure.Set {
set := structure.NewSet()
set.Add(s.northeastElement)
set.Add(s.northwestElement)
set.Add(s.southeastElement)
set.Add(s.southwestElement)
set = structure.Union(set, s.southSide, s.northSide, s.westSide, s.eastSide)
if s.innerSqaure == nil {
return set
}
return structure.Union(set, s.innerSqaure.listAllElements())
}
func newSquare(element string) *square {
return &square{
innerSqaure: nil,
eastSide: nil,
southSide: nil,
westSide: nil,
northSide: nil,
northeastElement: element,
northwestElement: element,
southeastElement: element,
southwestElement: element,
}
}
// Range indicates a float64 range marked by min val and max val
type Range struct {
minVal, maxVal float64
}
// GetMidVal return the average val of min and max
func (r *Range) GetMidVal() float64 {
return 0.5 * (r.minVal + r.maxVal)
}
// GetRangeDiff return the substract val of max and min
func (r *Range) GetRangeDiff() float64 {
return r.maxVal - r.minVal
}
// CheckInRange check the input param is in the range of min and max
func (r *Range) CheckInRange(val float64) bool {
if val >= r.minVal && val <= r.maxVal {
return true
}
return false
}
//encodePrecison indicates the encode and decode's precison degree.
// precision: defined as base32 encode string length,
// normnally precison increases in accordance with the encode string length
// latBinBits: defined as the binary encoding bits for latitude value
// lngBinBits: defined as the binary encoding bits for longtitude value
type encodePrecison struct {
precision, latBinBits, lngBinBits int
vagueRange float64
}
| {
longitudeCode, nextLngRange := findBinaryCode(lngRange, longitude)
lngRange = nextLngRange
longitudeBinaryEncode[i] = longitudeCode
} | conditional_block |
geohash.go | package geoutils
import (
"errors"
"fmt"
structure "github.com/eleme/clair/matrix/structure"
"math"
"strconv"
)
const (
base32 = "0123456789bcdefghjkmnpqrstuvwxyz"
)
var (
encodeParamMap = map[int]encodePrecison{
1: encodePrecison{precision: 1, latBinBits: 2, lngBinBits: 3, vagueRange: 2500000.0},
2: encodePrecison{precision: 2, latBinBits: 5, lngBinBits: 5, vagueRange: 630000.0},
3: encodePrecison{precision: 3, latBinBits: 7, lngBinBits: 8, vagueRange: 78000.0},
4: encodePrecison{precision: 4, latBinBits: 10, lngBinBits: 10, vagueRange: 20000.0},
5: encodePrecison{precision: 5, latBinBits: 12, lngBinBits: 13, vagueRange: 2400.0},
6: encodePrecison{precision: 6, latBinBits: 15, lngBinBits: 15, vagueRange: 610.0},
7: encodePrecison{precision: 7, latBinBits: 17, lngBinBits: 18, vagueRange: 76.0},
8: encodePrecison{precision: 8, latBinBits: 20, lngBinBits: 20, vagueRange: 19.11},
9: encodePrecison{precision: 9, latBinBits: 22, lngBinBits: 23, vagueRange: 4.78},
10: encodePrecison{precision: 10, latBinBits: 25, lngBinBits: 25, vagueRange: 0.59},
11: encodePrecison{precision: 11, latBinBits: 27, lngBinBits: 28, vagueRange: 0.15},
12: encodePrecison{precision: 12, latBinBits: 30, lngBinBits: 30, vagueRange: 0.01},
}
globalLatitudeRange = Range{minVal: -90, maxVal: 90}
globalLongitudeRange = Range{minVal: -180, maxVal: 180}
base32Dict = func() []int {
baseDictResult := make([]int, 128)
for i, v := range base32 {
baseDictResult[v] = i
}
return baseDictResult
}()
)
func checkValidBase32(base32Bytes []byte) bool {
for _, base32Byte := range base32Bytes {
if base32Byte != '0' && base32Dict[base32Byte] == 0 {
return false
}
}
return true
}
func binaryToBase32(binaryBytes []byte) ([]byte, error) {
base32Len := (len(binaryBytes) + 4) / 5
resultBase32Bytes := make([]byte, base32Len)
tmpIntVal, err := strconv.ParseInt(string(binaryBytes), 2, 64)
if err != nil {
return nil, err
}
for i := 0; i < base32Len; i++ {
resultBase32Bytes[base32Len-i-1] = base32[tmpIntVal%32]
tmpIntVal /= 32
}
return resultBase32Bytes, nil
}
func | (base32Bytes []byte) ([]byte, error) {
if !checkValidBase32(base32Bytes) {
return []byte{}, errors.New("wrong format base32 bytes")
}
base32Len := len(base32Bytes)
var tmpIntVal int64
for i, v := range base32Bytes {
tmpIntVal += int64(base32Dict[v]) * int64(math.Pow(32, float64(base32Len-i-1)))
}
binaryBytes := []byte(fmt.Sprintf("%b", tmpIntVal))
autoPadding := make([]byte, base32Len*5-len(binaryBytes))
for i := range autoPadding {
autoPadding[i] = '0'
}
result := append(autoPadding, binaryBytes...)
return result, nil
}
//Encode transform the latitude and longtitude to a geohash string
func Encode(latitude, longitude float64, precison int) (string, error) {
findBinaryCode := func(r Range, val float64) (byte, Range) {
midVal := r.GetMidVal()
var binaryCode byte
var nextRange Range
if r.minVal <= val && val <= midVal {
binaryCode = '0'
nextRange = Range{minVal: r.minVal, maxVal: midVal}
} else {
binaryCode = '1'
nextRange = Range{minVal: midVal, maxVal: r.maxVal}
}
return binaryCode, nextRange
}
if !globalLatitudeRange.CheckInRange(latitude) || !globalLongitudeRange.CheckInRange(longitude) {
return "", errors.New("wrong params for latitude and longtitude")
}
precisonParam, ok := encodeParamMap[precison]
if !ok {
return "", errors.New("error precision param")
}
// encode latitude
latitudeBinaryEncode := make([]byte, precisonParam.latBinBits)
for i, latRange := 0, globalLatitudeRange; i < precisonParam.latBinBits; i++ {
latitudeCode, nextLatRange := findBinaryCode(latRange, latitude)
latRange = nextLatRange
latitudeBinaryEncode[i] = latitudeCode
}
// encode longtitude
longitudeBinaryEncode := make([]byte, precisonParam.lngBinBits)
for i, lngRange := 0, globalLongitudeRange; i < precisonParam.lngBinBits; i++ {
longitudeCode, nextLngRange := findBinaryCode(lngRange, longitude)
lngRange = nextLngRange
longitudeBinaryEncode[i] = longitudeCode
}
binaryEncode := make([]byte, precisonParam.latBinBits+precisonParam.lngBinBits)
// merge lat encode and lng encode
for i := 0; i < precisonParam.latBinBits+precisonParam.lngBinBits; i++ {
if i%2 == 0 {
binaryEncode[i] = longitudeBinaryEncode[int(i/2)]
} else {
binaryEncode[i] = latitudeBinaryEncode[int(i/2)]
}
}
base32Bytes, err := binaryToBase32(binaryEncode)
if err != nil {
return "", err
}
return string(base32Bytes), nil
}
//Decode transform the geohash string to a latitude && longitude location
func Decode(geohashStr string) (float64, float64, error) {
latitudeRange, longtitudeRange, err := decodeToRange(geohashStr)
if err != nil {
return 0, 0, err
}
return latitudeRange.GetMidVal(), longtitudeRange.GetMidVal(), nil
}
func decodeToRange(geohashStr string) (*Range, *Range, error) {
getRange := func(r Range, binaryCodes []byte) *Range {
for _, binaryByte := range binaryCodes {
midVal := r.GetMidVal()
if binaryByte == '0' {
r.maxVal = midVal
} else {
r.minVal = midVal
}
}
return &Range{minVal: r.minVal, maxVal: r.maxVal}
}
binaryEncodes, err := base32ToBinary([]byte(geohashStr))
if err != nil {
return nil, nil, err
}
// longtitude encode index: even; latitude encode index: odd
var latitudeBinaryEncode []byte
var longitudeBinaryEncode []byte
for i, val := range binaryEncodes {
if i%2 == 0 {
longitudeBinaryEncode = append(longitudeBinaryEncode, val)
} else {
latitudeBinaryEncode = append(latitudeBinaryEncode, val)
}
}
return getRange(globalLatitudeRange, latitudeBinaryEncode), getRange(globalLongitudeRange, longitudeBinaryEncode), nil
}
//Neighbours find the neighbours geohash strings based on a center geohash.
//Specifically, the neighbourPos list indicates the precise selected postions.
//The input param geohashStr is the center postion.
//The neighbourhood postions is stipultaed as follows:
// --------------------------------------------
// | | | |
// | northwest | north | northeast |
// | | | |
// |--------------|--------------|------------|
// | | | |
// | west | center | east |
// | | | |
// |--------------|--------------|------------|
// | | | |
// | southwest | south | southeast |
// | | | |
// |--------------|--------------|------------|
func Neighbours(geohashStr string, neighbourPos []string) (map[string]string, error) {
findNeighbourShiftIndex := func(pos string) (int16, int16) {
switch pos {
case "northwest":
return 1, -1
case "north":
return 1, 0
case "northeast":
return 1, 1
case "west":
return 0, -1
case "center":
return 0, 0
case "east":
return 0, 1
case "southwest":
return -1, -1
case "south":
return -1, 0
case "southeast":
return -1, 1
default:
return 0, 0
}
}
latitudeRange, longtitudeRange, err := decodeToRange(geohashStr)
if err != nil {
return make(map[string]string), err
}
lattitudeRangeDiff, longtitudeRangeDiff := latitudeRange.GetRangeDiff(), longtitudeRange.GetRangeDiff()
neighbours := make(map[string]string)
precision := len(geohashStr)
for _, pos := range neighbourPos {
latitudeShiftIndex, longitudeShiftIndex := findNeighbourShiftIndex(pos)
tmpEncode, tmpErr := Encode(
latitudeRange.GetMidVal()+float64(latitudeShiftIndex)*lattitudeRangeDiff, longtitudeRange.GetMidVal()+float64(longitudeShiftIndex)*longtitudeRangeDiff, precision)
if tmpErr == nil {
neighbours[pos] = tmpEncode
}
}
return neighbours, nil
}
// Nearby returns the nearby geohash string list within the certain area
func Nearby(geohashStr string, radius int32) ([]string, error) {
if len(geohashStr) > 7 {
return []string{}, errors.New("geohash length cannot larger than 7")
}
precisonParam, ok := encodeParamMap[len(geohashStr)]
if !ok {
return []string{}, errors.New("error precision param")
}
extendLayer := int32(math.Ceil(float64(radius) / precisonParam.vagueRange))
var curLayer int32
s := newSquare(geohashStr)
for {
if curLayer >= extendLayer {
break
}
s = s.extendSqaure()
curLayer++
}
var result []string
allElementSet := s.listAllElements()
for _, v := range allElementSet.All() {
nearbyElement := v.(string)
if dis, err := geohashDistance(nearbyElement, geohashStr); err == nil && dis <= float64(radius) {
result = append(result, nearbyElement)
}
}
return result, nil
}
func geohashDistance(fromGeohash, toGeohash string) (float64, error) {
if fromLat, fromLng, err := Decode(fromGeohash); err == nil {
if toLat, toLng, err := Decode(toGeohash); err == nil {
fromPoint := NewLocation(fromLat, fromLng)
toPoint := NewLocation(toLat, toLng)
return fromPoint.EuclideanDistance(toPoint), nil
}
}
return -1, errors.New("Invalid geohash")
}
type square struct {
innerSqaure *square
eastSide, southSide, westSide, northSide *structure.Set
northeastElement, northwestElement, southeastElement, southwestElement string
}
func (s *square) extendSqaure() *square {
extendDirectionSide := func(extendSet *structure.Set, sqaureSideSet *structure.Set, direction string) {
sideSet := sqaureSideSet.All()
for _, side := range sideSet {
neighbour, tmpErr := Neighbours(side.(string), []string{direction})
if tmpErr == nil {
tmpNeighbour, tmpOK := neighbour[direction]
if tmpOK {
extendSet.Add(tmpNeighbour)
}
}
}
}
extendEastSide := structure.NewSet()
extendSouthSide := structure.NewSet()
extendWestSide := structure.NewSet()
extendNorthSide := structure.NewSet()
var northeastElement, northwestElement, southeastElement, southwestElement string
if s.eastSide != nil {
extendDirectionSide(extendEastSide, s.eastSide, "east")
}
if s.southSide != nil {
extendDirectionSide(extendSouthSide, s.southSide, "south")
}
if s.westSide != nil {
extendDirectionSide(extendWestSide, s.westSide, "west")
}
if s.northSide != nil {
extendDirectionSide(extendNorthSide, s.northSide, "north")
}
northeastNeighbours, northeastNeighboursErr := Neighbours(s.northeastElement, []string{"northeast", "north", "east"})
if northeastNeighboursErr == nil {
if northeastNeighbour, northeastNeighbourOK := northeastNeighbours["northeast"]; northeastNeighbourOK {
northeastElement = northeastNeighbour
}
if northNeighbour, northNeighbourOK := northeastNeighbours["north"]; northNeighbourOK {
extendNorthSide.Add(northNeighbour)
}
if eastNeighbour, eastNeighbourOK := northeastNeighbours["east"]; eastNeighbourOK {
extendEastSide.Add(eastNeighbour)
}
}
northwestNeighbours, northwestNeighboursErr := Neighbours(s.northwestElement, []string{"northwest", "north", "west"})
if northwestNeighboursErr == nil {
if northwestNeighbour, northwestNeighbourOK := northwestNeighbours["northwest"]; northwestNeighbourOK {
northwestElement = northwestNeighbour
}
if northNeighbour, northNeighbourOK := northwestNeighbours["north"]; northNeighbourOK {
extendNorthSide.Add(northNeighbour)
}
if westNeighbour, westNeighbourOK := northwestNeighbours["west"]; westNeighbourOK {
extendWestSide.Add(westNeighbour)
}
}
southeastNeighbours, southeastNeighboursErr := Neighbours(s.southeastElement, []string{"southeast", "south", "east"})
if southeastNeighboursErr == nil {
if southeastNeighbour, southeastNeighbourOK := southeastNeighbours["southeast"]; southeastNeighbourOK {
southeastElement = southeastNeighbour
}
if southNeighbour, southNeighbourOK := southeastNeighbours["south"]; southNeighbourOK {
extendSouthSide.Add(southNeighbour)
}
if eastNeighbour, eastNeighbourOK := southeastNeighbours["east"]; eastNeighbourOK {
extendEastSide.Add(eastNeighbour)
}
}
southwestNeighbours, southwestNeighboursErr := Neighbours(s.southwestElement, []string{"southwest", "south", "west"})
if southwestNeighboursErr == nil {
if southwestNeighbour, southwestNeighbourOK := southwestNeighbours["southwest"]; southwestNeighbourOK {
southwestElement = southwestNeighbour
}
if southNeighbour, southNeighbourOK := southwestNeighbours["south"]; southNeighbourOK {
extendSouthSide.Add(southNeighbour)
}
if westNeighbour, westNeighbourOK := southwestNeighbours["west"]; westNeighbourOK {
extendWestSide.Add(westNeighbour)
}
}
return &square{
innerSqaure: s,
eastSide: extendEastSide,
southSide: extendSouthSide,
westSide: extendWestSide,
northSide: extendNorthSide,
northeastElement: northeastElement,
northwestElement: northwestElement,
southeastElement: southeastElement,
southwestElement: southwestElement,
}
}
func (s *square) listAllElements() *structure.Set {
set := structure.NewSet()
set.Add(s.northeastElement)
set.Add(s.northwestElement)
set.Add(s.southeastElement)
set.Add(s.southwestElement)
set = structure.Union(set, s.southSide, s.northSide, s.westSide, s.eastSide)
if s.innerSqaure == nil {
return set
}
return structure.Union(set, s.innerSqaure.listAllElements())
}
func newSquare(element string) *square {
return &square{
innerSqaure: nil,
eastSide: nil,
southSide: nil,
westSide: nil,
northSide: nil,
northeastElement: element,
northwestElement: element,
southeastElement: element,
southwestElement: element,
}
}
// Range indicates a float64 range marked by min val and max val
type Range struct {
minVal, maxVal float64
}
// GetMidVal return the average val of min and max
func (r *Range) GetMidVal() float64 {
return 0.5 * (r.minVal + r.maxVal)
}
// GetRangeDiff return the substract val of max and min
func (r *Range) GetRangeDiff() float64 {
return r.maxVal - r.minVal
}
// CheckInRange check the input param is in the range of min and max
func (r *Range) CheckInRange(val float64) bool {
if val >= r.minVal && val <= r.maxVal {
return true
}
return false
}
//encodePrecison indicates the encode and decode's precison degree.
// precision: defined as base32 encode string length,
// normnally precison increases in accordance with the encode string length
// latBinBits: defined as the binary encoding bits for latitude value
// lngBinBits: defined as the binary encoding bits for longtitude value
type encodePrecison struct {
precision, latBinBits, lngBinBits int
vagueRange float64
}
| base32ToBinary | identifier_name |
geohash.go | package geoutils
import (
"errors"
"fmt"
structure "github.com/eleme/clair/matrix/structure"
"math"
"strconv"
)
const (
base32 = "0123456789bcdefghjkmnpqrstuvwxyz"
)
var (
encodeParamMap = map[int]encodePrecison{
1: encodePrecison{precision: 1, latBinBits: 2, lngBinBits: 3, vagueRange: 2500000.0},
2: encodePrecison{precision: 2, latBinBits: 5, lngBinBits: 5, vagueRange: 630000.0},
3: encodePrecison{precision: 3, latBinBits: 7, lngBinBits: 8, vagueRange: 78000.0},
4: encodePrecison{precision: 4, latBinBits: 10, lngBinBits: 10, vagueRange: 20000.0},
5: encodePrecison{precision: 5, latBinBits: 12, lngBinBits: 13, vagueRange: 2400.0},
6: encodePrecison{precision: 6, latBinBits: 15, lngBinBits: 15, vagueRange: 610.0},
7: encodePrecison{precision: 7, latBinBits: 17, lngBinBits: 18, vagueRange: 76.0},
8: encodePrecison{precision: 8, latBinBits: 20, lngBinBits: 20, vagueRange: 19.11}, | }
globalLatitudeRange = Range{minVal: -90, maxVal: 90}
globalLongitudeRange = Range{minVal: -180, maxVal: 180}
base32Dict = func() []int {
baseDictResult := make([]int, 128)
for i, v := range base32 {
baseDictResult[v] = i
}
return baseDictResult
}()
)
func checkValidBase32(base32Bytes []byte) bool {
for _, base32Byte := range base32Bytes {
if base32Byte != '0' && base32Dict[base32Byte] == 0 {
return false
}
}
return true
}
func binaryToBase32(binaryBytes []byte) ([]byte, error) {
base32Len := (len(binaryBytes) + 4) / 5
resultBase32Bytes := make([]byte, base32Len)
tmpIntVal, err := strconv.ParseInt(string(binaryBytes), 2, 64)
if err != nil {
return nil, err
}
for i := 0; i < base32Len; i++ {
resultBase32Bytes[base32Len-i-1] = base32[tmpIntVal%32]
tmpIntVal /= 32
}
return resultBase32Bytes, nil
}
func base32ToBinary(base32Bytes []byte) ([]byte, error) {
if !checkValidBase32(base32Bytes) {
return []byte{}, errors.New("wrong format base32 bytes")
}
base32Len := len(base32Bytes)
var tmpIntVal int64
for i, v := range base32Bytes {
tmpIntVal += int64(base32Dict[v]) * int64(math.Pow(32, float64(base32Len-i-1)))
}
binaryBytes := []byte(fmt.Sprintf("%b", tmpIntVal))
autoPadding := make([]byte, base32Len*5-len(binaryBytes))
for i := range autoPadding {
autoPadding[i] = '0'
}
result := append(autoPadding, binaryBytes...)
return result, nil
}
//Encode transform the latitude and longtitude to a geohash string
func Encode(latitude, longitude float64, precison int) (string, error) {
findBinaryCode := func(r Range, val float64) (byte, Range) {
midVal := r.GetMidVal()
var binaryCode byte
var nextRange Range
if r.minVal <= val && val <= midVal {
binaryCode = '0'
nextRange = Range{minVal: r.minVal, maxVal: midVal}
} else {
binaryCode = '1'
nextRange = Range{minVal: midVal, maxVal: r.maxVal}
}
return binaryCode, nextRange
}
if !globalLatitudeRange.CheckInRange(latitude) || !globalLongitudeRange.CheckInRange(longitude) {
return "", errors.New("wrong params for latitude and longtitude")
}
precisonParam, ok := encodeParamMap[precison]
if !ok {
return "", errors.New("error precision param")
}
// encode latitude
latitudeBinaryEncode := make([]byte, precisonParam.latBinBits)
for i, latRange := 0, globalLatitudeRange; i < precisonParam.latBinBits; i++ {
latitudeCode, nextLatRange := findBinaryCode(latRange, latitude)
latRange = nextLatRange
latitudeBinaryEncode[i] = latitudeCode
}
// encode longtitude
longitudeBinaryEncode := make([]byte, precisonParam.lngBinBits)
for i, lngRange := 0, globalLongitudeRange; i < precisonParam.lngBinBits; i++ {
longitudeCode, nextLngRange := findBinaryCode(lngRange, longitude)
lngRange = nextLngRange
longitudeBinaryEncode[i] = longitudeCode
}
binaryEncode := make([]byte, precisonParam.latBinBits+precisonParam.lngBinBits)
// merge lat encode and lng encode
for i := 0; i < precisonParam.latBinBits+precisonParam.lngBinBits; i++ {
if i%2 == 0 {
binaryEncode[i] = longitudeBinaryEncode[int(i/2)]
} else {
binaryEncode[i] = latitudeBinaryEncode[int(i/2)]
}
}
base32Bytes, err := binaryToBase32(binaryEncode)
if err != nil {
return "", err
}
return string(base32Bytes), nil
}
//Decode transform the geohash string to a latitude && longitude location
func Decode(geohashStr string) (float64, float64, error) {
latitudeRange, longtitudeRange, err := decodeToRange(geohashStr)
if err != nil {
return 0, 0, err
}
return latitudeRange.GetMidVal(), longtitudeRange.GetMidVal(), nil
}
func decodeToRange(geohashStr string) (*Range, *Range, error) {
getRange := func(r Range, binaryCodes []byte) *Range {
for _, binaryByte := range binaryCodes {
midVal := r.GetMidVal()
if binaryByte == '0' {
r.maxVal = midVal
} else {
r.minVal = midVal
}
}
return &Range{minVal: r.minVal, maxVal: r.maxVal}
}
binaryEncodes, err := base32ToBinary([]byte(geohashStr))
if err != nil {
return nil, nil, err
}
// longtitude encode index: even; latitude encode index: odd
var latitudeBinaryEncode []byte
var longitudeBinaryEncode []byte
for i, val := range binaryEncodes {
if i%2 == 0 {
longitudeBinaryEncode = append(longitudeBinaryEncode, val)
} else {
latitudeBinaryEncode = append(latitudeBinaryEncode, val)
}
}
return getRange(globalLatitudeRange, latitudeBinaryEncode), getRange(globalLongitudeRange, longitudeBinaryEncode), nil
}
//Neighbours find the neighbours geohash strings based on a center geohash.
//Specifically, the neighbourPos list indicates the precise selected postions.
//The input param geohashStr is the center postion.
//The neighbourhood postions is stipultaed as follows:
// --------------------------------------------
// | | | |
// | northwest | north | northeast |
// | | | |
// |--------------|--------------|------------|
// | | | |
// | west | center | east |
// | | | |
// |--------------|--------------|------------|
// | | | |
// | southwest | south | southeast |
// | | | |
// |--------------|--------------|------------|
func Neighbours(geohashStr string, neighbourPos []string) (map[string]string, error) {
findNeighbourShiftIndex := func(pos string) (int16, int16) {
switch pos {
case "northwest":
return 1, -1
case "north":
return 1, 0
case "northeast":
return 1, 1
case "west":
return 0, -1
case "center":
return 0, 0
case "east":
return 0, 1
case "southwest":
return -1, -1
case "south":
return -1, 0
case "southeast":
return -1, 1
default:
return 0, 0
}
}
latitudeRange, longtitudeRange, err := decodeToRange(geohashStr)
if err != nil {
return make(map[string]string), err
}
lattitudeRangeDiff, longtitudeRangeDiff := latitudeRange.GetRangeDiff(), longtitudeRange.GetRangeDiff()
neighbours := make(map[string]string)
precision := len(geohashStr)
for _, pos := range neighbourPos {
latitudeShiftIndex, longitudeShiftIndex := findNeighbourShiftIndex(pos)
tmpEncode, tmpErr := Encode(
latitudeRange.GetMidVal()+float64(latitudeShiftIndex)*lattitudeRangeDiff, longtitudeRange.GetMidVal()+float64(longitudeShiftIndex)*longtitudeRangeDiff, precision)
if tmpErr == nil {
neighbours[pos] = tmpEncode
}
}
return neighbours, nil
}
// Nearby returns the nearby geohash string list within the certain area
func Nearby(geohashStr string, radius int32) ([]string, error) {
if len(geohashStr) > 7 {
return []string{}, errors.New("geohash length cannot larger than 7")
}
precisonParam, ok := encodeParamMap[len(geohashStr)]
if !ok {
return []string{}, errors.New("error precision param")
}
extendLayer := int32(math.Ceil(float64(radius) / precisonParam.vagueRange))
var curLayer int32
s := newSquare(geohashStr)
for {
if curLayer >= extendLayer {
break
}
s = s.extendSqaure()
curLayer++
}
var result []string
allElementSet := s.listAllElements()
for _, v := range allElementSet.All() {
nearbyElement := v.(string)
if dis, err := geohashDistance(nearbyElement, geohashStr); err == nil && dis <= float64(radius) {
result = append(result, nearbyElement)
}
}
return result, nil
}
func geohashDistance(fromGeohash, toGeohash string) (float64, error) {
if fromLat, fromLng, err := Decode(fromGeohash); err == nil {
if toLat, toLng, err := Decode(toGeohash); err == nil {
fromPoint := NewLocation(fromLat, fromLng)
toPoint := NewLocation(toLat, toLng)
return fromPoint.EuclideanDistance(toPoint), nil
}
}
return -1, errors.New("Invalid geohash")
}
type square struct {
innerSqaure *square
eastSide, southSide, westSide, northSide *structure.Set
northeastElement, northwestElement, southeastElement, southwestElement string
}
func (s *square) extendSqaure() *square {
extendDirectionSide := func(extendSet *structure.Set, sqaureSideSet *structure.Set, direction string) {
sideSet := sqaureSideSet.All()
for _, side := range sideSet {
neighbour, tmpErr := Neighbours(side.(string), []string{direction})
if tmpErr == nil {
tmpNeighbour, tmpOK := neighbour[direction]
if tmpOK {
extendSet.Add(tmpNeighbour)
}
}
}
}
extendEastSide := structure.NewSet()
extendSouthSide := structure.NewSet()
extendWestSide := structure.NewSet()
extendNorthSide := structure.NewSet()
var northeastElement, northwestElement, southeastElement, southwestElement string
if s.eastSide != nil {
extendDirectionSide(extendEastSide, s.eastSide, "east")
}
if s.southSide != nil {
extendDirectionSide(extendSouthSide, s.southSide, "south")
}
if s.westSide != nil {
extendDirectionSide(extendWestSide, s.westSide, "west")
}
if s.northSide != nil {
extendDirectionSide(extendNorthSide, s.northSide, "north")
}
northeastNeighbours, northeastNeighboursErr := Neighbours(s.northeastElement, []string{"northeast", "north", "east"})
if northeastNeighboursErr == nil {
if northeastNeighbour, northeastNeighbourOK := northeastNeighbours["northeast"]; northeastNeighbourOK {
northeastElement = northeastNeighbour
}
if northNeighbour, northNeighbourOK := northeastNeighbours["north"]; northNeighbourOK {
extendNorthSide.Add(northNeighbour)
}
if eastNeighbour, eastNeighbourOK := northeastNeighbours["east"]; eastNeighbourOK {
extendEastSide.Add(eastNeighbour)
}
}
northwestNeighbours, northwestNeighboursErr := Neighbours(s.northwestElement, []string{"northwest", "north", "west"})
if northwestNeighboursErr == nil {
if northwestNeighbour, northwestNeighbourOK := northwestNeighbours["northwest"]; northwestNeighbourOK {
northwestElement = northwestNeighbour
}
if northNeighbour, northNeighbourOK := northwestNeighbours["north"]; northNeighbourOK {
extendNorthSide.Add(northNeighbour)
}
if westNeighbour, westNeighbourOK := northwestNeighbours["west"]; westNeighbourOK {
extendWestSide.Add(westNeighbour)
}
}
southeastNeighbours, southeastNeighboursErr := Neighbours(s.southeastElement, []string{"southeast", "south", "east"})
if southeastNeighboursErr == nil {
if southeastNeighbour, southeastNeighbourOK := southeastNeighbours["southeast"]; southeastNeighbourOK {
southeastElement = southeastNeighbour
}
if southNeighbour, southNeighbourOK := southeastNeighbours["south"]; southNeighbourOK {
extendSouthSide.Add(southNeighbour)
}
if eastNeighbour, eastNeighbourOK := southeastNeighbours["east"]; eastNeighbourOK {
extendEastSide.Add(eastNeighbour)
}
}
southwestNeighbours, southwestNeighboursErr := Neighbours(s.southwestElement, []string{"southwest", "south", "west"})
if southwestNeighboursErr == nil {
if southwestNeighbour, southwestNeighbourOK := southwestNeighbours["southwest"]; southwestNeighbourOK {
southwestElement = southwestNeighbour
}
if southNeighbour, southNeighbourOK := southwestNeighbours["south"]; southNeighbourOK {
extendSouthSide.Add(southNeighbour)
}
if westNeighbour, westNeighbourOK := southwestNeighbours["west"]; westNeighbourOK {
extendWestSide.Add(westNeighbour)
}
}
return &square{
innerSqaure: s,
eastSide: extendEastSide,
southSide: extendSouthSide,
westSide: extendWestSide,
northSide: extendNorthSide,
northeastElement: northeastElement,
northwestElement: northwestElement,
southeastElement: southeastElement,
southwestElement: southwestElement,
}
}
func (s *square) listAllElements() *structure.Set {
set := structure.NewSet()
set.Add(s.northeastElement)
set.Add(s.northwestElement)
set.Add(s.southeastElement)
set.Add(s.southwestElement)
set = structure.Union(set, s.southSide, s.northSide, s.westSide, s.eastSide)
if s.innerSqaure == nil {
return set
}
return structure.Union(set, s.innerSqaure.listAllElements())
}
func newSquare(element string) *square {
return &square{
innerSqaure: nil,
eastSide: nil,
southSide: nil,
westSide: nil,
northSide: nil,
northeastElement: element,
northwestElement: element,
southeastElement: element,
southwestElement: element,
}
}
// Range indicates a float64 range marked by min val and max val
type Range struct {
minVal, maxVal float64
}
// GetMidVal return the average val of min and max
func (r *Range) GetMidVal() float64 {
return 0.5 * (r.minVal + r.maxVal)
}
// GetRangeDiff return the substract val of max and min
func (r *Range) GetRangeDiff() float64 {
return r.maxVal - r.minVal
}
// CheckInRange check the input param is in the range of min and max
func (r *Range) CheckInRange(val float64) bool {
if val >= r.minVal && val <= r.maxVal {
return true
}
return false
}
//encodePrecison indicates the encode and decode's precison degree.
// precision: defined as base32 encode string length,
// normnally precison increases in accordance with the encode string length
// latBinBits: defined as the binary encoding bits for latitude value
// lngBinBits: defined as the binary encoding bits for longtitude value
type encodePrecison struct {
precision, latBinBits, lngBinBits int
vagueRange float64
} | 9: encodePrecison{precision: 9, latBinBits: 22, lngBinBits: 23, vagueRange: 4.78},
10: encodePrecison{precision: 10, latBinBits: 25, lngBinBits: 25, vagueRange: 0.59},
11: encodePrecison{precision: 11, latBinBits: 27, lngBinBits: 28, vagueRange: 0.15},
12: encodePrecison{precision: 12, latBinBits: 30, lngBinBits: 30, vagueRange: 0.01}, | random_line_split |
geohash.go | package geoutils
import (
"errors"
"fmt"
structure "github.com/eleme/clair/matrix/structure"
"math"
"strconv"
)
const (
base32 = "0123456789bcdefghjkmnpqrstuvwxyz"
)
var (
encodeParamMap = map[int]encodePrecison{
1: encodePrecison{precision: 1, latBinBits: 2, lngBinBits: 3, vagueRange: 2500000.0},
2: encodePrecison{precision: 2, latBinBits: 5, lngBinBits: 5, vagueRange: 630000.0},
3: encodePrecison{precision: 3, latBinBits: 7, lngBinBits: 8, vagueRange: 78000.0},
4: encodePrecison{precision: 4, latBinBits: 10, lngBinBits: 10, vagueRange: 20000.0},
5: encodePrecison{precision: 5, latBinBits: 12, lngBinBits: 13, vagueRange: 2400.0},
6: encodePrecison{precision: 6, latBinBits: 15, lngBinBits: 15, vagueRange: 610.0},
7: encodePrecison{precision: 7, latBinBits: 17, lngBinBits: 18, vagueRange: 76.0},
8: encodePrecison{precision: 8, latBinBits: 20, lngBinBits: 20, vagueRange: 19.11},
9: encodePrecison{precision: 9, latBinBits: 22, lngBinBits: 23, vagueRange: 4.78},
10: encodePrecison{precision: 10, latBinBits: 25, lngBinBits: 25, vagueRange: 0.59},
11: encodePrecison{precision: 11, latBinBits: 27, lngBinBits: 28, vagueRange: 0.15},
12: encodePrecison{precision: 12, latBinBits: 30, lngBinBits: 30, vagueRange: 0.01},
}
globalLatitudeRange = Range{minVal: -90, maxVal: 90}
globalLongitudeRange = Range{minVal: -180, maxVal: 180}
base32Dict = func() []int {
baseDictResult := make([]int, 128)
for i, v := range base32 {
baseDictResult[v] = i
}
return baseDictResult
}()
)
func checkValidBase32(base32Bytes []byte) bool {
for _, base32Byte := range base32Bytes {
if base32Byte != '0' && base32Dict[base32Byte] == 0 {
return false
}
}
return true
}
func binaryToBase32(binaryBytes []byte) ([]byte, error) {
base32Len := (len(binaryBytes) + 4) / 5
resultBase32Bytes := make([]byte, base32Len)
tmpIntVal, err := strconv.ParseInt(string(binaryBytes), 2, 64)
if err != nil {
return nil, err
}
for i := 0; i < base32Len; i++ {
resultBase32Bytes[base32Len-i-1] = base32[tmpIntVal%32]
tmpIntVal /= 32
}
return resultBase32Bytes, nil
}
func base32ToBinary(base32Bytes []byte) ([]byte, error) {
if !checkValidBase32(base32Bytes) {
return []byte{}, errors.New("wrong format base32 bytes")
}
base32Len := len(base32Bytes)
var tmpIntVal int64
for i, v := range base32Bytes {
tmpIntVal += int64(base32Dict[v]) * int64(math.Pow(32, float64(base32Len-i-1)))
}
binaryBytes := []byte(fmt.Sprintf("%b", tmpIntVal))
autoPadding := make([]byte, base32Len*5-len(binaryBytes))
for i := range autoPadding {
autoPadding[i] = '0'
}
result := append(autoPadding, binaryBytes...)
return result, nil
}
//Encode transform the latitude and longtitude to a geohash string
func Encode(latitude, longitude float64, precison int) (string, error) {
findBinaryCode := func(r Range, val float64) (byte, Range) {
midVal := r.GetMidVal()
var binaryCode byte
var nextRange Range
if r.minVal <= val && val <= midVal {
binaryCode = '0'
nextRange = Range{minVal: r.minVal, maxVal: midVal}
} else {
binaryCode = '1'
nextRange = Range{minVal: midVal, maxVal: r.maxVal}
}
return binaryCode, nextRange
}
if !globalLatitudeRange.CheckInRange(latitude) || !globalLongitudeRange.CheckInRange(longitude) {
return "", errors.New("wrong params for latitude and longtitude")
}
precisonParam, ok := encodeParamMap[precison]
if !ok {
return "", errors.New("error precision param")
}
// encode latitude
latitudeBinaryEncode := make([]byte, precisonParam.latBinBits)
for i, latRange := 0, globalLatitudeRange; i < precisonParam.latBinBits; i++ {
latitudeCode, nextLatRange := findBinaryCode(latRange, latitude)
latRange = nextLatRange
latitudeBinaryEncode[i] = latitudeCode
}
// encode longtitude
longitudeBinaryEncode := make([]byte, precisonParam.lngBinBits)
for i, lngRange := 0, globalLongitudeRange; i < precisonParam.lngBinBits; i++ {
longitudeCode, nextLngRange := findBinaryCode(lngRange, longitude)
lngRange = nextLngRange
longitudeBinaryEncode[i] = longitudeCode
}
binaryEncode := make([]byte, precisonParam.latBinBits+precisonParam.lngBinBits)
// merge lat encode and lng encode
for i := 0; i < precisonParam.latBinBits+precisonParam.lngBinBits; i++ {
if i%2 == 0 {
binaryEncode[i] = longitudeBinaryEncode[int(i/2)]
} else {
binaryEncode[i] = latitudeBinaryEncode[int(i/2)]
}
}
base32Bytes, err := binaryToBase32(binaryEncode)
if err != nil {
return "", err
}
return string(base32Bytes), nil
}
//Decode transform the geohash string to a latitude && longitude location
func Decode(geohashStr string) (float64, float64, error) {
latitudeRange, longtitudeRange, err := decodeToRange(geohashStr)
if err != nil {
return 0, 0, err
}
return latitudeRange.GetMidVal(), longtitudeRange.GetMidVal(), nil
}
func decodeToRange(geohashStr string) (*Range, *Range, error) {
getRange := func(r Range, binaryCodes []byte) *Range {
for _, binaryByte := range binaryCodes {
midVal := r.GetMidVal()
if binaryByte == '0' {
r.maxVal = midVal
} else {
r.minVal = midVal
}
}
return &Range{minVal: r.minVal, maxVal: r.maxVal}
}
binaryEncodes, err := base32ToBinary([]byte(geohashStr))
if err != nil {
return nil, nil, err
}
// longtitude encode index: even; latitude encode index: odd
var latitudeBinaryEncode []byte
var longitudeBinaryEncode []byte
for i, val := range binaryEncodes {
if i%2 == 0 {
longitudeBinaryEncode = append(longitudeBinaryEncode, val)
} else {
latitudeBinaryEncode = append(latitudeBinaryEncode, val)
}
}
return getRange(globalLatitudeRange, latitudeBinaryEncode), getRange(globalLongitudeRange, longitudeBinaryEncode), nil
}
//Neighbours find the neighbours geohash strings based on a center geohash.
//Specifically, the neighbourPos list indicates the precise selected postions.
//The input param geohashStr is the center postion.
//The neighbourhood postions is stipultaed as follows:
// --------------------------------------------
// | | | |
// | northwest | north | northeast |
// | | | |
// |--------------|--------------|------------|
// | | | |
// | west | center | east |
// | | | |
// |--------------|--------------|------------|
// | | | |
// | southwest | south | southeast |
// | | | |
// |--------------|--------------|------------|
func Neighbours(geohashStr string, neighbourPos []string) (map[string]string, error) {
findNeighbourShiftIndex := func(pos string) (int16, int16) {
switch pos {
case "northwest":
return 1, -1
case "north":
return 1, 0
case "northeast":
return 1, 1
case "west":
return 0, -1
case "center":
return 0, 0
case "east":
return 0, 1
case "southwest":
return -1, -1
case "south":
return -1, 0
case "southeast":
return -1, 1
default:
return 0, 0
}
}
latitudeRange, longtitudeRange, err := decodeToRange(geohashStr)
if err != nil {
return make(map[string]string), err
}
lattitudeRangeDiff, longtitudeRangeDiff := latitudeRange.GetRangeDiff(), longtitudeRange.GetRangeDiff()
neighbours := make(map[string]string)
precision := len(geohashStr)
for _, pos := range neighbourPos {
latitudeShiftIndex, longitudeShiftIndex := findNeighbourShiftIndex(pos)
tmpEncode, tmpErr := Encode(
latitudeRange.GetMidVal()+float64(latitudeShiftIndex)*lattitudeRangeDiff, longtitudeRange.GetMidVal()+float64(longitudeShiftIndex)*longtitudeRangeDiff, precision)
if tmpErr == nil {
neighbours[pos] = tmpEncode
}
}
return neighbours, nil
}
// Nearby returns the nearby geohash string list within the certain area
func Nearby(geohashStr string, radius int32) ([]string, error) {
if len(geohashStr) > 7 {
return []string{}, errors.New("geohash length cannot larger than 7")
}
precisonParam, ok := encodeParamMap[len(geohashStr)]
if !ok {
return []string{}, errors.New("error precision param")
}
extendLayer := int32(math.Ceil(float64(radius) / precisonParam.vagueRange))
var curLayer int32
s := newSquare(geohashStr)
for {
if curLayer >= extendLayer {
break
}
s = s.extendSqaure()
curLayer++
}
var result []string
allElementSet := s.listAllElements()
for _, v := range allElementSet.All() {
nearbyElement := v.(string)
if dis, err := geohashDistance(nearbyElement, geohashStr); err == nil && dis <= float64(radius) {
result = append(result, nearbyElement)
}
}
return result, nil
}
func geohashDistance(fromGeohash, toGeohash string) (float64, error) |
type square struct {
innerSqaure *square
eastSide, southSide, westSide, northSide *structure.Set
northeastElement, northwestElement, southeastElement, southwestElement string
}
func (s *square) extendSqaure() *square {
extendDirectionSide := func(extendSet *structure.Set, sqaureSideSet *structure.Set, direction string) {
sideSet := sqaureSideSet.All()
for _, side := range sideSet {
neighbour, tmpErr := Neighbours(side.(string), []string{direction})
if tmpErr == nil {
tmpNeighbour, tmpOK := neighbour[direction]
if tmpOK {
extendSet.Add(tmpNeighbour)
}
}
}
}
extendEastSide := structure.NewSet()
extendSouthSide := structure.NewSet()
extendWestSide := structure.NewSet()
extendNorthSide := structure.NewSet()
var northeastElement, northwestElement, southeastElement, southwestElement string
if s.eastSide != nil {
extendDirectionSide(extendEastSide, s.eastSide, "east")
}
if s.southSide != nil {
extendDirectionSide(extendSouthSide, s.southSide, "south")
}
if s.westSide != nil {
extendDirectionSide(extendWestSide, s.westSide, "west")
}
if s.northSide != nil {
extendDirectionSide(extendNorthSide, s.northSide, "north")
}
northeastNeighbours, northeastNeighboursErr := Neighbours(s.northeastElement, []string{"northeast", "north", "east"})
if northeastNeighboursErr == nil {
if northeastNeighbour, northeastNeighbourOK := northeastNeighbours["northeast"]; northeastNeighbourOK {
northeastElement = northeastNeighbour
}
if northNeighbour, northNeighbourOK := northeastNeighbours["north"]; northNeighbourOK {
extendNorthSide.Add(northNeighbour)
}
if eastNeighbour, eastNeighbourOK := northeastNeighbours["east"]; eastNeighbourOK {
extendEastSide.Add(eastNeighbour)
}
}
northwestNeighbours, northwestNeighboursErr := Neighbours(s.northwestElement, []string{"northwest", "north", "west"})
if northwestNeighboursErr == nil {
if northwestNeighbour, northwestNeighbourOK := northwestNeighbours["northwest"]; northwestNeighbourOK {
northwestElement = northwestNeighbour
}
if northNeighbour, northNeighbourOK := northwestNeighbours["north"]; northNeighbourOK {
extendNorthSide.Add(northNeighbour)
}
if westNeighbour, westNeighbourOK := northwestNeighbours["west"]; westNeighbourOK {
extendWestSide.Add(westNeighbour)
}
}
southeastNeighbours, southeastNeighboursErr := Neighbours(s.southeastElement, []string{"southeast", "south", "east"})
if southeastNeighboursErr == nil {
if southeastNeighbour, southeastNeighbourOK := southeastNeighbours["southeast"]; southeastNeighbourOK {
southeastElement = southeastNeighbour
}
if southNeighbour, southNeighbourOK := southeastNeighbours["south"]; southNeighbourOK {
extendSouthSide.Add(southNeighbour)
}
if eastNeighbour, eastNeighbourOK := southeastNeighbours["east"]; eastNeighbourOK {
extendEastSide.Add(eastNeighbour)
}
}
southwestNeighbours, southwestNeighboursErr := Neighbours(s.southwestElement, []string{"southwest", "south", "west"})
if southwestNeighboursErr == nil {
if southwestNeighbour, southwestNeighbourOK := southwestNeighbours["southwest"]; southwestNeighbourOK {
southwestElement = southwestNeighbour
}
if southNeighbour, southNeighbourOK := southwestNeighbours["south"]; southNeighbourOK {
extendSouthSide.Add(southNeighbour)
}
if westNeighbour, westNeighbourOK := southwestNeighbours["west"]; westNeighbourOK {
extendWestSide.Add(westNeighbour)
}
}
return &square{
innerSqaure: s,
eastSide: extendEastSide,
southSide: extendSouthSide,
westSide: extendWestSide,
northSide: extendNorthSide,
northeastElement: northeastElement,
northwestElement: northwestElement,
southeastElement: southeastElement,
southwestElement: southwestElement,
}
}
func (s *square) listAllElements() *structure.Set {
set := structure.NewSet()
set.Add(s.northeastElement)
set.Add(s.northwestElement)
set.Add(s.southeastElement)
set.Add(s.southwestElement)
set = structure.Union(set, s.southSide, s.northSide, s.westSide, s.eastSide)
if s.innerSqaure == nil {
return set
}
return structure.Union(set, s.innerSqaure.listAllElements())
}
func newSquare(element string) *square {
return &square{
innerSqaure: nil,
eastSide: nil,
southSide: nil,
westSide: nil,
northSide: nil,
northeastElement: element,
northwestElement: element,
southeastElement: element,
southwestElement: element,
}
}
// Range indicates a float64 range marked by min val and max val
type Range struct {
minVal, maxVal float64
}
// GetMidVal return the average val of min and max
func (r *Range) GetMidVal() float64 {
return 0.5 * (r.minVal + r.maxVal)
}
// GetRangeDiff return the substract val of max and min
func (r *Range) GetRangeDiff() float64 {
return r.maxVal - r.minVal
}
// CheckInRange check the input param is in the range of min and max
func (r *Range) CheckInRange(val float64) bool {
if val >= r.minVal && val <= r.maxVal {
return true
}
return false
}
//encodePrecison indicates the encode and decode's precison degree.
// precision: defined as base32 encode string length,
// normnally precison increases in accordance with the encode string length
// latBinBits: defined as the binary encoding bits for latitude value
// lngBinBits: defined as the binary encoding bits for longtitude value
type encodePrecison struct {
precision, latBinBits, lngBinBits int
vagueRange float64
}
| {
if fromLat, fromLng, err := Decode(fromGeohash); err == nil {
if toLat, toLng, err := Decode(toGeohash); err == nil {
fromPoint := NewLocation(fromLat, fromLng)
toPoint := NewLocation(toLat, toLng)
return fromPoint.EuclideanDistance(toPoint), nil
}
}
return -1, errors.New("Invalid geohash")
} | identifier_body |
thanos.py | ##t STOCK DEVATION TRACKER
## Tracks the tails in the devation of price from the 200dayMA
## Charts the 50day and 200day views of price versus the 200dayMA
## Sends an email when the deviation has moved beyond the 15% or 85% percentile (on z-score)
## 06/23/2020
## modified the script to take a universe file on a string of stocks at the commmand line
## it also creates a composite file that gives all the current metrics fro the universe being analyzed.
## Usage: python3 thanos.py <universe_file OR 'sym1,sym2,sym3'> <tag to save the file>
## i.e. python3 thanos.py new_universe.txt ./composites/new_univ ## composite saved to ./composites/new_univ_{date}.csv
## python3 thanos.py 'IVV,SPY,IAU' ## dataframes just dumped to stdout
## python3 thanos.py 'IVV,SPY,AAPL' ./composites/mylist ## composites saved to ./composites/mylist_{date}.csv
import mail_client
import math, datetime
import pandas
import pathlib
import numpy as np
import seaborn as sns
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import scipy.stats as st
import os, sys, time
import alpha_vantage_data as avd
sns.set()
## universe file for all symbols monitored
## one symbol per line
HOME = '/home/jcarter/sandbox/trading/thanos'
UNIVERSE_FILE = f'{HOME}/universe.txt'
THANOS_DATA = f'{HOME}/thanos_data/'
THANOS_CHARTS = f'{HOME}/thanos_charts/'
THANOS_LOG = f'{HOME}/thanos_data/thanos_log.csv'
## table formating
HEADER_STYLE = r'<th style="padding-top: 10px; padding-bottom: 8px; color: #D8D8D8;" align="center" bgcolor="#2F4F4F">'
ROW_STYLE = r'<td style="padding: 8px 8px 6px; border: 1px solid #4f6228; font-size: 12">'
EMAIL_HEADER = """
This is a simple tracker of the current price deviation to the ma200
Where:
Deviation = log( price/ma200 )
Zscore = [Current Deviation - 100dayAverage(Deviation)] / 100daySTDEV(Deviation)
Downside Tails ( prob < 0.15 ): Signal good long term entry points
Upside Tails ( prob > 0.85 ): Signal good points to secure profits (take off % of current longs OR seek options as hedge)
"""
def get_data(symbol):
## sleep between data calls - only allowed 5 calls per minute and 500 calls per day.
time.sleep(13)
data = None
try:
hist_file = THANOS_DATA + f'thanos_{symbol}.csv'
data = avd.update_data(symbol,hist_file)
except:
pass
return data
def cvt_date(date_obj):
if isinstance(date_obj, basestring):
return dt.datetime.strptime(date_obj,"%Y-%m-%d").date()
else:
return date_obj.date()
def plot(symbol, dts, a, b):
return None
current_date = dts.iloc[-1]
for i,N in enumerate([50, 200],1):
dates = dts.tail(N)
s1 = a.tail(N)
s2 = b.tail(N)
plt.subplot(1,2,i)
plt.title("%s %s: %d days" % (symbol,current_date,N))
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m_%d'))
#u = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in dates.tolist()]
u = [cvt_date(d) for d in dates.tolist()]
plt.plot(u, s1)
plt.plot(u, s2)
plt.gcf().autofmt_xdate()
return plt
def send_alert(symbol,df,low_bound,high_bound):
## check for a significant deviation within the last 5 days
## if so - send an email!!
snapshot = df.tail(5)
current_stats = df.tail(1).iloc[0].apply(str).tolist()
# initilaize log file if one does not exist
if not os.path.isfile(THANOS_LOG):
with open(THANOS_LOG,'w') as f:
header = df.columns.tolist() + ['symbol','ext']
f.write(",".join(header)+"\n")
vals = snapshot['prob'].tolist()
hi_val, low_val = max(vals), min(vals)
if low_val < low_bound or hi_val > high_bound:
hi_idx = low_idx = -1
if hi_val > high_bound: hi_idx = vals.index(hi_val)
if low_val < low_bound: low_idx = vals.index(low_val)
side = 'BOTTOM' if low_idx > hi_idx else 'TOP'
ffmt = "{:,.4f}".format
table = snapshot.to_html(float_format=ffmt)
table = table.replace('<th>',HEADER_STYLE)
table = table.replace('<td>',ROW_STYLE)
subj = f'{symbol} {side} Deviation!!!'
chart_file = THANOS_CHARTS + f'thanos_{symbol}.png'
mail_client.mail('xjcarter@gmail.com',subj,text=EMAIL_HEADER,html=table,attach=chart_file)
#mail_client.mail('xjcarter@gmail.com',subj,text=EMAIL_HEADER,html=table)
todays_prob = float(current_stats[-1])
if todays_prob < low_bound or todays_prob > high_bound:
current_stats.append(symbol)
# flag extension type
side = 'BOTTOM'
if todays_prob > high_bound:
side = 'TOP'
current_stats.append(side)
outs = ",".join(current_stats)
with open(THANOS_LOG,'a') as f:
f.write(outs + '\n')
def send_heartbeat(uni):
def modification_date(filename):
t = os.path.getmtime(filename)
return dt.datetime.fromtimestamp(t)
uni_list = "\nUniverse:\n" + "\n".join(uni)
now = dt.datetime.now()
try:
most_recent = modification_date(THANOS_LOG)
delta = (now - most_recent).days
if delta > 0 and delta % 5 == 0:
message = f'Most recent update = {most_recent} ({delta} days)\n' + uni_list | mail_client.mail('xjcarter@gmail.com','THANOS Heartbeat FAILURE! Check Process!',text=uni_list)
def get_universe(universe_fn):
univ = [line.strip() for line in open(universe_fn)]
return univ
def thanosize(symbol,df,show_charts=False):
CLOSE = 'close'
#CLOSE = 'adjusted_close'
## calc ma200
df['ma200'] = df[CLOSE].rolling(200).mean()
## current deviation from moving average
df['dev'] = (df[CLOSE]/df['ma200']).map(math.log)
## calc Zscore
df['Zscore'] = (df['dev'] - df['dev'].rolling(100).mean())/df['dev'].rolling(100).std()
df['prob'] = df['Zscore'].map(st.norm.cdf)
## 20 day low
df['low20'] = df['low'].rolling(20).min()
## new 200day high
df['hi200'] = df['high'].rolling(200).max()
df['NewH'] = df.apply(lambda x: 'NH' if x['high'] >= x['hi200'] else '',axis=1)
## daily price volatility stats
df['prev'] = df['close'].shift(1)
df['tr'] = df.apply(lambda x: max(x['high'] - x['low'],x['high']-x['prev'],x['prev']-x['low']),axis=1)
df['atr20'] = df['tr'].rolling(20).mean()
df['ATRsToLow'] = (df['close'] - df['low20'])/df['atr20']
df['threeATRs'] = 3 * df['atr20']
## 20day price change stats
df['mo20'] = df[CLOSE]-df[CLOSE].shift(20)
## standard dev of 20day price change
df['stdMove'] = df['mo20'].rolling(100).std()
zz = df[['date',CLOSE,'high','ma200','dev','Zscore','prob','NewH','low20','ATRsToLow','atr20','threeATRs','stdMove']]
zfile = THANOS_DATA + f'thanos_{symbol}_ztest.csv'
zz.to_csv(zfile,index=False)
## save the chart and clear it.
chart = None
if show_charts:
chart_file = THANOS_CHARTS + f'thanos_{symbol}.png'
chart = plot(symbol,zz.date,zz[CLOSE],zz.ma200)
if chart is not None: chart.savefig(chart_file)
return zz,chart
def evaluate(symbol):
tagged = chart = None
#try:
df = get_data(symbol)
if df is None:
print(f'ERROR: {symbol} data fetch error.')
return tagged, chart
zz, chart = thanosize(symbol,df)
## tag each dataframe with the symbol evaluated
tagged = zz.copy()
tagged['symbol'] = symbol
#except:
# print(f'ERROR: Failed to analyze {symbol}.')
return tagged, chart
## created to do passive / email based monitoring
def monitor_universe(universe_fn):
uni = get_universe(universe_fn)
for symbol in uni:
df = get_data(symbol)
if df is None:
print(f'ERROR: {symbol} data fetch error.')
else:
zz, chart = thanosize(symbol,df)
if chart is not None: chart.clf()
# send_alert(symbol,zz,0.15,0.85)
# send heartbeat every 5 days since last update
# send_heartbeat(uni)
if __name__ == "__main__":
## usage: python thanos.py <optional symbol>
## if no symbol given - does monitoring on all the names the universe file thanos.csv
## with a symbol given - does all the analytics, dumps the output and chart the data
symbol_list = None
composite = list()
header = None
if len(sys.argv) > 1:
## use a universe file
if pathlib.Path(sys.argv[1]).is_file():
symbol_list = get_universe(sys.argv[1])
else:
## or use commna separated string
symbol_list = sys.argv[1].split(',')
for symbol in symbol_list:
print(f'running thanos on: {symbol}')
metrics, chart = evaluate(symbol)
if metrics is not None:
if header is None: header = metrics.columns
print(metrics.tail(10))
if len(composite) < 500:
## alpha_vantage FREE service limits you to 500 calls a day.
composite.append(metrics.iloc[-1].tolist())
else:
print('cannot append {symbol} - 500 symbol data retrieval limit exceeded')
#if chart is not None: chart.show()
comp_df = pandas.DataFrame(columns=header,data=composite)
comp_df['Composite'] = True
comp_df = comp_df.sort_values(['Zscore'])
print("\nComposite Table:")
print(comp_df)
if comp_df is not None:
## save composite file to the desired directory
destination= './'
if len(sys.argv) > 2: destination = sys.argv[2] + "/"
curr_date = datetime.datetime.now().date().strftime("%Y%m%d")
if not pathlib.Path(destination).exists():
import os
os.makedirs(destination)
current_comp_file = f'{destination}{curr_date}.csv'
comp_df.to_csv(current_comp_file,index=False) | mail_client.mail('xjcarter@gmail.com','THANOS Heartbeat!',text=message)
except: | random_line_split |
thanos.py |
##t STOCK DEVATION TRACKER
## Tracks the tails in the devation of price from the 200dayMA
## Charts the 50day and 200day views of price versus the 200dayMA
## Sends an email when the deviation has moved beyond the 15% or 85% percentile (on z-score)
## 06/23/2020
## modified the script to take a universe file on a string of stocks at the commmand line
## it also creates a composite file that gives all the current metrics fro the universe being analyzed.
## Usage: python3 thanos.py <universe_file OR 'sym1,sym2,sym3'> <tag to save the file>
## i.e. python3 thanos.py new_universe.txt ./composites/new_univ ## composite saved to ./composites/new_univ_{date}.csv
## python3 thanos.py 'IVV,SPY,IAU' ## dataframes just dumped to stdout
## python3 thanos.py 'IVV,SPY,AAPL' ./composites/mylist ## composites saved to ./composites/mylist_{date}.csv
import mail_client
import math, datetime
import pandas
import pathlib
import numpy as np
import seaborn as sns
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import scipy.stats as st
import os, sys, time
import alpha_vantage_data as avd
sns.set()
## universe file for all symbols monitored
## one symbol per line
HOME = '/home/jcarter/sandbox/trading/thanos'
UNIVERSE_FILE = f'{HOME}/universe.txt'
THANOS_DATA = f'{HOME}/thanos_data/'
THANOS_CHARTS = f'{HOME}/thanos_charts/'
THANOS_LOG = f'{HOME}/thanos_data/thanos_log.csv'
## table formating
HEADER_STYLE = r'<th style="padding-top: 10px; padding-bottom: 8px; color: #D8D8D8;" align="center" bgcolor="#2F4F4F">'
ROW_STYLE = r'<td style="padding: 8px 8px 6px; border: 1px solid #4f6228; font-size: 12">'
EMAIL_HEADER = """
This is a simple tracker of the current price deviation to the ma200
Where:
Deviation = log( price/ma200 )
Zscore = [Current Deviation - 100dayAverage(Deviation)] / 100daySTDEV(Deviation)
Downside Tails ( prob < 0.15 ): Signal good long term entry points
Upside Tails ( prob > 0.85 ): Signal good points to secure profits (take off % of current longs OR seek options as hedge)
"""
def get_data(symbol):
## sleep between data calls - only allowed 5 calls per minute and 500 calls per day.
time.sleep(13)
data = None
try:
hist_file = THANOS_DATA + f'thanos_{symbol}.csv'
data = avd.update_data(symbol,hist_file)
except:
pass
return data
def cvt_date(date_obj):
if isinstance(date_obj, basestring):
return dt.datetime.strptime(date_obj,"%Y-%m-%d").date()
else:
return date_obj.date()
def plot(symbol, dts, a, b):
return None
current_date = dts.iloc[-1]
for i,N in enumerate([50, 200],1):
dates = dts.tail(N)
s1 = a.tail(N)
s2 = b.tail(N)
plt.subplot(1,2,i)
plt.title("%s %s: %d days" % (symbol,current_date,N))
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m_%d'))
#u = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in dates.tolist()]
u = [cvt_date(d) for d in dates.tolist()]
plt.plot(u, s1)
plt.plot(u, s2)
plt.gcf().autofmt_xdate()
return plt
def | (symbol,df,low_bound,high_bound):
## check for a significant deviation within the last 5 days
## if so - send an email!!
snapshot = df.tail(5)
current_stats = df.tail(1).iloc[0].apply(str).tolist()
# initilaize log file if one does not exist
if not os.path.isfile(THANOS_LOG):
with open(THANOS_LOG,'w') as f:
header = df.columns.tolist() + ['symbol','ext']
f.write(",".join(header)+"\n")
vals = snapshot['prob'].tolist()
hi_val, low_val = max(vals), min(vals)
if low_val < low_bound or hi_val > high_bound:
hi_idx = low_idx = -1
if hi_val > high_bound: hi_idx = vals.index(hi_val)
if low_val < low_bound: low_idx = vals.index(low_val)
side = 'BOTTOM' if low_idx > hi_idx else 'TOP'
ffmt = "{:,.4f}".format
table = snapshot.to_html(float_format=ffmt)
table = table.replace('<th>',HEADER_STYLE)
table = table.replace('<td>',ROW_STYLE)
subj = f'{symbol} {side} Deviation!!!'
chart_file = THANOS_CHARTS + f'thanos_{symbol}.png'
mail_client.mail('xjcarter@gmail.com',subj,text=EMAIL_HEADER,html=table,attach=chart_file)
#mail_client.mail('xjcarter@gmail.com',subj,text=EMAIL_HEADER,html=table)
todays_prob = float(current_stats[-1])
if todays_prob < low_bound or todays_prob > high_bound:
current_stats.append(symbol)
# flag extension type
side = 'BOTTOM'
if todays_prob > high_bound:
side = 'TOP'
current_stats.append(side)
outs = ",".join(current_stats)
with open(THANOS_LOG,'a') as f:
f.write(outs + '\n')
def send_heartbeat(uni):
def modification_date(filename):
t = os.path.getmtime(filename)
return dt.datetime.fromtimestamp(t)
uni_list = "\nUniverse:\n" + "\n".join(uni)
now = dt.datetime.now()
try:
most_recent = modification_date(THANOS_LOG)
delta = (now - most_recent).days
if delta > 0 and delta % 5 == 0:
message = f'Most recent update = {most_recent} ({delta} days)\n' + uni_list
mail_client.mail('xjcarter@gmail.com','THANOS Heartbeat!',text=message)
except:
mail_client.mail('xjcarter@gmail.com','THANOS Heartbeat FAILURE! Check Process!',text=uni_list)
def get_universe(universe_fn):
univ = [line.strip() for line in open(universe_fn)]
return univ
def thanosize(symbol,df,show_charts=False):
CLOSE = 'close'
#CLOSE = 'adjusted_close'
## calc ma200
df['ma200'] = df[CLOSE].rolling(200).mean()
## current deviation from moving average
df['dev'] = (df[CLOSE]/df['ma200']).map(math.log)
## calc Zscore
df['Zscore'] = (df['dev'] - df['dev'].rolling(100).mean())/df['dev'].rolling(100).std()
df['prob'] = df['Zscore'].map(st.norm.cdf)
## 20 day low
df['low20'] = df['low'].rolling(20).min()
## new 200day high
df['hi200'] = df['high'].rolling(200).max()
df['NewH'] = df.apply(lambda x: 'NH' if x['high'] >= x['hi200'] else '',axis=1)
## daily price volatility stats
df['prev'] = df['close'].shift(1)
df['tr'] = df.apply(lambda x: max(x['high'] - x['low'],x['high']-x['prev'],x['prev']-x['low']),axis=1)
df['atr20'] = df['tr'].rolling(20).mean()
df['ATRsToLow'] = (df['close'] - df['low20'])/df['atr20']
df['threeATRs'] = 3 * df['atr20']
## 20day price change stats
df['mo20'] = df[CLOSE]-df[CLOSE].shift(20)
## standard dev of 20day price change
df['stdMove'] = df['mo20'].rolling(100).std()
zz = df[['date',CLOSE,'high','ma200','dev','Zscore','prob','NewH','low20','ATRsToLow','atr20','threeATRs','stdMove']]
zfile = THANOS_DATA + f'thanos_{symbol}_ztest.csv'
zz.to_csv(zfile,index=False)
## save the chart and clear it.
chart = None
if show_charts:
chart_file = THANOS_CHARTS + f'thanos_{symbol}.png'
chart = plot(symbol,zz.date,zz[CLOSE],zz.ma200)
if chart is not None: chart.savefig(chart_file)
return zz,chart
def evaluate(symbol):
tagged = chart = None
#try:
df = get_data(symbol)
if df is None:
print(f'ERROR: {symbol} data fetch error.')
return tagged, chart
zz, chart = thanosize(symbol,df)
## tag each dataframe with the symbol evaluated
tagged = zz.copy()
tagged['symbol'] = symbol
#except:
# print(f'ERROR: Failed to analyze {symbol}.')
return tagged, chart
## created to do passive / email based monitoring
def monitor_universe(universe_fn):
uni = get_universe(universe_fn)
for symbol in uni:
df = get_data(symbol)
if df is None:
print(f'ERROR: {symbol} data fetch error.')
else:
zz, chart = thanosize(symbol,df)
if chart is not None: chart.clf()
# send_alert(symbol,zz,0.15,0.85)
# send heartbeat every 5 days since last update
# send_heartbeat(uni)
if __name__ == "__main__":
## usage: python thanos.py <optional symbol>
## if no symbol given - does monitoring on all the names the universe file thanos.csv
## with a symbol given - does all the analytics, dumps the output and chart the data
symbol_list = None
composite = list()
header = None
if len(sys.argv) > 1:
## use a universe file
if pathlib.Path(sys.argv[1]).is_file():
symbol_list = get_universe(sys.argv[1])
else:
## or use commna separated string
symbol_list = sys.argv[1].split(',')
for symbol in symbol_list:
print(f'running thanos on: {symbol}')
metrics, chart = evaluate(symbol)
if metrics is not None:
if header is None: header = metrics.columns
print(metrics.tail(10))
if len(composite) < 500:
## alpha_vantage FREE service limits you to 500 calls a day.
composite.append(metrics.iloc[-1].tolist())
else:
print('cannot append {symbol} - 500 symbol data retrieval limit exceeded')
#if chart is not None: chart.show()
comp_df = pandas.DataFrame(columns=header,data=composite)
comp_df['Composite'] = True
comp_df = comp_df.sort_values(['Zscore'])
print("\nComposite Table:")
print(comp_df)
if comp_df is not None:
## save composite file to the desired directory
destination= './'
if len(sys.argv) > 2: destination = sys.argv[2] + "/"
curr_date = datetime.datetime.now().date().strftime("%Y%m%d")
if not pathlib.Path(destination).exists():
import os
os.makedirs(destination)
current_comp_file = f'{destination}{curr_date}.csv'
comp_df.to_csv(current_comp_file,index=False)
| send_alert | identifier_name |
thanos.py |
##t STOCK DEVATION TRACKER
## Tracks the tails in the devation of price from the 200dayMA
## Charts the 50day and 200day views of price versus the 200dayMA
## Sends an email when the deviation has moved beyond the 15% or 85% percentile (on z-score)
## 06/23/2020
## modified the script to take a universe file on a string of stocks at the commmand line
## it also creates a composite file that gives all the current metrics fro the universe being analyzed.
## Usage: python3 thanos.py <universe_file OR 'sym1,sym2,sym3'> <tag to save the file>
## i.e. python3 thanos.py new_universe.txt ./composites/new_univ ## composite saved to ./composites/new_univ_{date}.csv
## python3 thanos.py 'IVV,SPY,IAU' ## dataframes just dumped to stdout
## python3 thanos.py 'IVV,SPY,AAPL' ./composites/mylist ## composites saved to ./composites/mylist_{date}.csv
import mail_client
import math, datetime
import pandas
import pathlib
import numpy as np
import seaborn as sns
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import scipy.stats as st
import os, sys, time
import alpha_vantage_data as avd
sns.set()
## universe file for all symbols monitored
## one symbol per line
HOME = '/home/jcarter/sandbox/trading/thanos'
UNIVERSE_FILE = f'{HOME}/universe.txt'
THANOS_DATA = f'{HOME}/thanos_data/'
THANOS_CHARTS = f'{HOME}/thanos_charts/'
THANOS_LOG = f'{HOME}/thanos_data/thanos_log.csv'
## table formating
HEADER_STYLE = r'<th style="padding-top: 10px; padding-bottom: 8px; color: #D8D8D8;" align="center" bgcolor="#2F4F4F">'
ROW_STYLE = r'<td style="padding: 8px 8px 6px; border: 1px solid #4f6228; font-size: 12">'
EMAIL_HEADER = """
This is a simple tracker of the current price deviation to the ma200
Where:
Deviation = log( price/ma200 )
Zscore = [Current Deviation - 100dayAverage(Deviation)] / 100daySTDEV(Deviation)
Downside Tails ( prob < 0.15 ): Signal good long term entry points
Upside Tails ( prob > 0.85 ): Signal good points to secure profits (take off % of current longs OR seek options as hedge)
"""
def get_data(symbol):
## sleep between data calls - only allowed 5 calls per minute and 500 calls per day.
time.sleep(13)
data = None
try:
hist_file = THANOS_DATA + f'thanos_{symbol}.csv'
data = avd.update_data(symbol,hist_file)
except:
pass
return data
def cvt_date(date_obj):
if isinstance(date_obj, basestring):
return dt.datetime.strptime(date_obj,"%Y-%m-%d").date()
else:
return date_obj.date()
def plot(symbol, dts, a, b):
return None
current_date = dts.iloc[-1]
for i,N in enumerate([50, 200],1):
dates = dts.tail(N)
s1 = a.tail(N)
s2 = b.tail(N)
plt.subplot(1,2,i)
plt.title("%s %s: %d days" % (symbol,current_date,N))
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m_%d'))
#u = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in dates.tolist()]
u = [cvt_date(d) for d in dates.tolist()]
plt.plot(u, s1)
plt.plot(u, s2)
plt.gcf().autofmt_xdate()
return plt
def send_alert(symbol,df,low_bound,high_bound):
## check for a significant deviation within the last 5 days
## if so - send an email!!
snapshot = df.tail(5)
current_stats = df.tail(1).iloc[0].apply(str).tolist()
# initilaize log file if one does not exist
if not os.path.isfile(THANOS_LOG):
with open(THANOS_LOG,'w') as f:
header = df.columns.tolist() + ['symbol','ext']
f.write(",".join(header)+"\n")
vals = snapshot['prob'].tolist()
hi_val, low_val = max(vals), min(vals)
if low_val < low_bound or hi_val > high_bound:
hi_idx = low_idx = -1
if hi_val > high_bound: hi_idx = vals.index(hi_val)
if low_val < low_bound: low_idx = vals.index(low_val)
side = 'BOTTOM' if low_idx > hi_idx else 'TOP'
ffmt = "{:,.4f}".format
table = snapshot.to_html(float_format=ffmt)
table = table.replace('<th>',HEADER_STYLE)
table = table.replace('<td>',ROW_STYLE)
subj = f'{symbol} {side} Deviation!!!'
chart_file = THANOS_CHARTS + f'thanos_{symbol}.png'
mail_client.mail('xjcarter@gmail.com',subj,text=EMAIL_HEADER,html=table,attach=chart_file)
#mail_client.mail('xjcarter@gmail.com',subj,text=EMAIL_HEADER,html=table)
todays_prob = float(current_stats[-1])
if todays_prob < low_bound or todays_prob > high_bound:
current_stats.append(symbol)
# flag extension type
side = 'BOTTOM'
if todays_prob > high_bound:
side = 'TOP'
current_stats.append(side)
outs = ",".join(current_stats)
with open(THANOS_LOG,'a') as f:
f.write(outs + '\n')
def send_heartbeat(uni):
def modification_date(filename):
t = os.path.getmtime(filename)
return dt.datetime.fromtimestamp(t)
uni_list = "\nUniverse:\n" + "\n".join(uni)
now = dt.datetime.now()
try:
most_recent = modification_date(THANOS_LOG)
delta = (now - most_recent).days
if delta > 0 and delta % 5 == 0:
message = f'Most recent update = {most_recent} ({delta} days)\n' + uni_list
mail_client.mail('xjcarter@gmail.com','THANOS Heartbeat!',text=message)
except:
mail_client.mail('xjcarter@gmail.com','THANOS Heartbeat FAILURE! Check Process!',text=uni_list)
def get_universe(universe_fn):
univ = [line.strip() for line in open(universe_fn)]
return univ
def thanosize(symbol,df,show_charts=False):
CLOSE = 'close'
#CLOSE = 'adjusted_close'
## calc ma200
df['ma200'] = df[CLOSE].rolling(200).mean()
## current deviation from moving average
df['dev'] = (df[CLOSE]/df['ma200']).map(math.log)
## calc Zscore
df['Zscore'] = (df['dev'] - df['dev'].rolling(100).mean())/df['dev'].rolling(100).std()
df['prob'] = df['Zscore'].map(st.norm.cdf)
## 20 day low
df['low20'] = df['low'].rolling(20).min()
## new 200day high
df['hi200'] = df['high'].rolling(200).max()
df['NewH'] = df.apply(lambda x: 'NH' if x['high'] >= x['hi200'] else '',axis=1)
## daily price volatility stats
df['prev'] = df['close'].shift(1)
df['tr'] = df.apply(lambda x: max(x['high'] - x['low'],x['high']-x['prev'],x['prev']-x['low']),axis=1)
df['atr20'] = df['tr'].rolling(20).mean()
df['ATRsToLow'] = (df['close'] - df['low20'])/df['atr20']
df['threeATRs'] = 3 * df['atr20']
## 20day price change stats
df['mo20'] = df[CLOSE]-df[CLOSE].shift(20)
## standard dev of 20day price change
df['stdMove'] = df['mo20'].rolling(100).std()
zz = df[['date',CLOSE,'high','ma200','dev','Zscore','prob','NewH','low20','ATRsToLow','atr20','threeATRs','stdMove']]
zfile = THANOS_DATA + f'thanos_{symbol}_ztest.csv'
zz.to_csv(zfile,index=False)
## save the chart and clear it.
chart = None
if show_charts:
chart_file = THANOS_CHARTS + f'thanos_{symbol}.png'
chart = plot(symbol,zz.date,zz[CLOSE],zz.ma200)
if chart is not None: chart.savefig(chart_file)
return zz,chart
def evaluate(symbol):
tagged = chart = None
#try:
df = get_data(symbol)
if df is None:
print(f'ERROR: {symbol} data fetch error.')
return tagged, chart
zz, chart = thanosize(symbol,df)
## tag each dataframe with the symbol evaluated
tagged = zz.copy()
tagged['symbol'] = symbol
#except:
# print(f'ERROR: Failed to analyze {symbol}.')
return tagged, chart
## created to do passive / email based monitoring
def monitor_universe(universe_fn):
|
if __name__ == "__main__":
## usage: python thanos.py <optional symbol>
## if no symbol given - does monitoring on all the names the universe file thanos.csv
## with a symbol given - does all the analytics, dumps the output and chart the data
symbol_list = None
composite = list()
header = None
if len(sys.argv) > 1:
## use a universe file
if pathlib.Path(sys.argv[1]).is_file():
symbol_list = get_universe(sys.argv[1])
else:
## or use commna separated string
symbol_list = sys.argv[1].split(',')
for symbol in symbol_list:
print(f'running thanos on: {symbol}')
metrics, chart = evaluate(symbol)
if metrics is not None:
if header is None: header = metrics.columns
print(metrics.tail(10))
if len(composite) < 500:
## alpha_vantage FREE service limits you to 500 calls a day.
composite.append(metrics.iloc[-1].tolist())
else:
print('cannot append {symbol} - 500 symbol data retrieval limit exceeded')
#if chart is not None: chart.show()
comp_df = pandas.DataFrame(columns=header,data=composite)
comp_df['Composite'] = True
comp_df = comp_df.sort_values(['Zscore'])
print("\nComposite Table:")
print(comp_df)
if comp_df is not None:
## save composite file to the desired directory
destination= './'
if len(sys.argv) > 2: destination = sys.argv[2] + "/"
curr_date = datetime.datetime.now().date().strftime("%Y%m%d")
if not pathlib.Path(destination).exists():
import os
os.makedirs(destination)
current_comp_file = f'{destination}{curr_date}.csv'
comp_df.to_csv(current_comp_file,index=False)
| uni = get_universe(universe_fn)
for symbol in uni:
df = get_data(symbol)
if df is None:
print(f'ERROR: {symbol} data fetch error.')
else:
zz, chart = thanosize(symbol,df)
if chart is not None: chart.clf()
# send_alert(symbol,zz,0.15,0.85)
# send heartbeat every 5 days since last update
# send_heartbeat(uni) | identifier_body |
thanos.py |
##t STOCK DEVATION TRACKER
## Tracks the tails in the devation of price from the 200dayMA
## Charts the 50day and 200day views of price versus the 200dayMA
## Sends an email when the deviation has moved beyond the 15% or 85% percentile (on z-score)
## 06/23/2020
## modified the script to take a universe file on a string of stocks at the commmand line
## it also creates a composite file that gives all the current metrics fro the universe being analyzed.
## Usage: python3 thanos.py <universe_file OR 'sym1,sym2,sym3'> <tag to save the file>
## i.e. python3 thanos.py new_universe.txt ./composites/new_univ ## composite saved to ./composites/new_univ_{date}.csv
## python3 thanos.py 'IVV,SPY,IAU' ## dataframes just dumped to stdout
## python3 thanos.py 'IVV,SPY,AAPL' ./composites/mylist ## composites saved to ./composites/mylist_{date}.csv
import mail_client
import math, datetime
import pandas
import pathlib
import numpy as np
import seaborn as sns
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import scipy.stats as st
import os, sys, time
import alpha_vantage_data as avd
sns.set()
## universe file for all symbols monitored
## one symbol per line
HOME = '/home/jcarter/sandbox/trading/thanos'
UNIVERSE_FILE = f'{HOME}/universe.txt'
THANOS_DATA = f'{HOME}/thanos_data/'
THANOS_CHARTS = f'{HOME}/thanos_charts/'
THANOS_LOG = f'{HOME}/thanos_data/thanos_log.csv'
## table formating
HEADER_STYLE = r'<th style="padding-top: 10px; padding-bottom: 8px; color: #D8D8D8;" align="center" bgcolor="#2F4F4F">'
ROW_STYLE = r'<td style="padding: 8px 8px 6px; border: 1px solid #4f6228; font-size: 12">'
EMAIL_HEADER = """
This is a simple tracker of the current price deviation to the ma200
Where:
Deviation = log( price/ma200 )
Zscore = [Current Deviation - 100dayAverage(Deviation)] / 100daySTDEV(Deviation)
Downside Tails ( prob < 0.15 ): Signal good long term entry points
Upside Tails ( prob > 0.85 ): Signal good points to secure profits (take off % of current longs OR seek options as hedge)
"""
def get_data(symbol):
## sleep between data calls - only allowed 5 calls per minute and 500 calls per day.
time.sleep(13)
data = None
try:
hist_file = THANOS_DATA + f'thanos_{symbol}.csv'
data = avd.update_data(symbol,hist_file)
except:
pass
return data
def cvt_date(date_obj):
if isinstance(date_obj, basestring):
return dt.datetime.strptime(date_obj,"%Y-%m-%d").date()
else:
return date_obj.date()
def plot(symbol, dts, a, b):
return None
current_date = dts.iloc[-1]
for i,N in enumerate([50, 200],1):
|
return plt
def send_alert(symbol,df,low_bound,high_bound):
## check for a significant deviation within the last 5 days
## if so - send an email!!
snapshot = df.tail(5)
current_stats = df.tail(1).iloc[0].apply(str).tolist()
# initilaize log file if one does not exist
if not os.path.isfile(THANOS_LOG):
with open(THANOS_LOG,'w') as f:
header = df.columns.tolist() + ['symbol','ext']
f.write(",".join(header)+"\n")
vals = snapshot['prob'].tolist()
hi_val, low_val = max(vals), min(vals)
if low_val < low_bound or hi_val > high_bound:
hi_idx = low_idx = -1
if hi_val > high_bound: hi_idx = vals.index(hi_val)
if low_val < low_bound: low_idx = vals.index(low_val)
side = 'BOTTOM' if low_idx > hi_idx else 'TOP'
ffmt = "{:,.4f}".format
table = snapshot.to_html(float_format=ffmt)
table = table.replace('<th>',HEADER_STYLE)
table = table.replace('<td>',ROW_STYLE)
subj = f'{symbol} {side} Deviation!!!'
chart_file = THANOS_CHARTS + f'thanos_{symbol}.png'
mail_client.mail('xjcarter@gmail.com',subj,text=EMAIL_HEADER,html=table,attach=chart_file)
#mail_client.mail('xjcarter@gmail.com',subj,text=EMAIL_HEADER,html=table)
todays_prob = float(current_stats[-1])
if todays_prob < low_bound or todays_prob > high_bound:
current_stats.append(symbol)
# flag extension type
side = 'BOTTOM'
if todays_prob > high_bound:
side = 'TOP'
current_stats.append(side)
outs = ",".join(current_stats)
with open(THANOS_LOG,'a') as f:
f.write(outs + '\n')
def send_heartbeat(uni):
def modification_date(filename):
t = os.path.getmtime(filename)
return dt.datetime.fromtimestamp(t)
uni_list = "\nUniverse:\n" + "\n".join(uni)
now = dt.datetime.now()
try:
most_recent = modification_date(THANOS_LOG)
delta = (now - most_recent).days
if delta > 0 and delta % 5 == 0:
message = f'Most recent update = {most_recent} ({delta} days)\n' + uni_list
mail_client.mail('xjcarter@gmail.com','THANOS Heartbeat!',text=message)
except:
mail_client.mail('xjcarter@gmail.com','THANOS Heartbeat FAILURE! Check Process!',text=uni_list)
def get_universe(universe_fn):
univ = [line.strip() for line in open(universe_fn)]
return univ
def thanosize(symbol,df,show_charts=False):
CLOSE = 'close'
#CLOSE = 'adjusted_close'
## calc ma200
df['ma200'] = df[CLOSE].rolling(200).mean()
## current deviation from moving average
df['dev'] = (df[CLOSE]/df['ma200']).map(math.log)
## calc Zscore
df['Zscore'] = (df['dev'] - df['dev'].rolling(100).mean())/df['dev'].rolling(100).std()
df['prob'] = df['Zscore'].map(st.norm.cdf)
## 20 day low
df['low20'] = df['low'].rolling(20).min()
## new 200day high
df['hi200'] = df['high'].rolling(200).max()
df['NewH'] = df.apply(lambda x: 'NH' if x['high'] >= x['hi200'] else '',axis=1)
## daily price volatility stats
df['prev'] = df['close'].shift(1)
df['tr'] = df.apply(lambda x: max(x['high'] - x['low'],x['high']-x['prev'],x['prev']-x['low']),axis=1)
df['atr20'] = df['tr'].rolling(20).mean()
df['ATRsToLow'] = (df['close'] - df['low20'])/df['atr20']
df['threeATRs'] = 3 * df['atr20']
## 20day price change stats
df['mo20'] = df[CLOSE]-df[CLOSE].shift(20)
## standard dev of 20day price change
df['stdMove'] = df['mo20'].rolling(100).std()
zz = df[['date',CLOSE,'high','ma200','dev','Zscore','prob','NewH','low20','ATRsToLow','atr20','threeATRs','stdMove']]
zfile = THANOS_DATA + f'thanos_{symbol}_ztest.csv'
zz.to_csv(zfile,index=False)
## save the chart and clear it.
chart = None
if show_charts:
chart_file = THANOS_CHARTS + f'thanos_{symbol}.png'
chart = plot(symbol,zz.date,zz[CLOSE],zz.ma200)
if chart is not None: chart.savefig(chart_file)
return zz,chart
def evaluate(symbol):
tagged = chart = None
#try:
df = get_data(symbol)
if df is None:
print(f'ERROR: {symbol} data fetch error.')
return tagged, chart
zz, chart = thanosize(symbol,df)
## tag each dataframe with the symbol evaluated
tagged = zz.copy()
tagged['symbol'] = symbol
#except:
# print(f'ERROR: Failed to analyze {symbol}.')
return tagged, chart
## created to do passive / email based monitoring
def monitor_universe(universe_fn):
uni = get_universe(universe_fn)
for symbol in uni:
df = get_data(symbol)
if df is None:
print(f'ERROR: {symbol} data fetch error.')
else:
zz, chart = thanosize(symbol,df)
if chart is not None: chart.clf()
# send_alert(symbol,zz,0.15,0.85)
# send heartbeat every 5 days since last update
# send_heartbeat(uni)
if __name__ == "__main__":
## usage: python thanos.py <optional symbol>
## if no symbol given - does monitoring on all the names the universe file thanos.csv
## with a symbol given - does all the analytics, dumps the output and chart the data
symbol_list = None
composite = list()
header = None
if len(sys.argv) > 1:
## use a universe file
if pathlib.Path(sys.argv[1]).is_file():
symbol_list = get_universe(sys.argv[1])
else:
## or use commna separated string
symbol_list = sys.argv[1].split(',')
for symbol in symbol_list:
print(f'running thanos on: {symbol}')
metrics, chart = evaluate(symbol)
if metrics is not None:
if header is None: header = metrics.columns
print(metrics.tail(10))
if len(composite) < 500:
## alpha_vantage FREE service limits you to 500 calls a day.
composite.append(metrics.iloc[-1].tolist())
else:
print('cannot append {symbol} - 500 symbol data retrieval limit exceeded')
#if chart is not None: chart.show()
comp_df = pandas.DataFrame(columns=header,data=composite)
comp_df['Composite'] = True
comp_df = comp_df.sort_values(['Zscore'])
print("\nComposite Table:")
print(comp_df)
if comp_df is not None:
## save composite file to the desired directory
destination= './'
if len(sys.argv) > 2: destination = sys.argv[2] + "/"
curr_date = datetime.datetime.now().date().strftime("%Y%m%d")
if not pathlib.Path(destination).exists():
import os
os.makedirs(destination)
current_comp_file = f'{destination}{curr_date}.csv'
comp_df.to_csv(current_comp_file,index=False)
| dates = dts.tail(N)
s1 = a.tail(N)
s2 = b.tail(N)
plt.subplot(1,2,i)
plt.title("%s %s: %d days" % (symbol,current_date,N))
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m_%d'))
#u = [dt.datetime.strptime(d,'%Y-%m-%d').date() for d in dates.tolist()]
u = [cvt_date(d) for d in dates.tolist()]
plt.plot(u, s1)
plt.plot(u, s2)
plt.gcf().autofmt_xdate() | conditional_block |
keras_cnn_pretrain-embedding_classification.py | #-*-coding:utf-8-*-
from __future__ import print_function,division
'''
Created on 2016年11月20日
ref:
https://github.com/fchollet/keras/blob/master/examples/pretrained_word_embeddings.py
notes can be found here http://keras-cn.readthedocs.io/en/latest/blog/word_embedding/
https://kiseliu.github.io/2016/08/03/using-pre-trained-word-embeddings-in-a-keras-model/
https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html
chines document http://keras-cn.readthedocs.io/en/latest/blog/word_embedding/
all of these can be found via Youdao noets
@author: RenaiC
'''
import os,sys,json,pickle,time
import numpy as np
import ReadData
np.random.seed(1337)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
N = 500
MAX_SEQUENCE_LENGTH = 10000
MAX_NB_WORDS = 250
EMBEDDING_DIM = 50
VALIDATION_SPLIT = 0.2
batch_size = 10
epoch_num = 2
TEXT_DATA_DIR = r'H:\network_diagnosis_data\cut-1000'
word2vec_results = r"H:\EclipseWorkspace\NetFault_Analysis\Pre-processing\data\vector_list_cut1000-all-50D-w2v.txt"
# first, build index mapping words in the embeddings set
# to their embedding vector
def cnn_raw():
'在训练的过程中训练embedding向量'
TEXT_DATA_DIR =r'H:\network_diagnosis_data\cut-500'
TEXT_DATA_DIR = r'H:\corpus_trained_model\imdb-large'
print('Indexing word vectors.')
embeddings_index = {} # embedding后的词典
f = open(word2vec_results,'r')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')
# texts,labels,word_dict = ReadData.ReadRaw2HierData(TEXT_DATA_DIR,50)
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path): # 是否是 目录
label_id = len(labels_index)
labels_index[name] = label_id
j = 0
for fname in sorted(os.listdir(path)):
if j < N:
fpath = os.path.join(path, fname)
f = open(fpath, 'r')
texts.append(f.read())
f.close()
labels.append(label_id)
j = j + 1
j = 0
print('Load %s texts.' % len(texts))
# finally, vectorize the text samples into a 2D integer tensor
# ref http://keras-cn.readthedocs.io/en/latest/preprocessing/text/
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS) # 选择前 MAX_NB_WORDS个高频词
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index # dict like hello:23
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)#保留的每个文档的最大单词数量,返回 nparray
labels = to_categorical(np.asarray(labels))# 转变成 0 1 序列
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set 先打乱 后拆分(不需要这么麻烦,已在其他地方实现)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_test = data[-nb_validation_samples:]
y_test = labels[-nb_validation_samples:]
print('Preparing embedding matrix.')
# prepare embedding matrix
nb_words = min(MAX_NB_WORDS, len(word_index)) # 每段多少词,总词unique个数中的小者
embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))# embedding 矩阵 ,初始化 为0。故没有的单词词向量是0
for word, i in word_index.items():
if i > MAX_NB_WORDS:
'词频外的不操作'
continue
embedding_vector = embeddings_index.get(word) # 该词对应的词向量
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector # 该行表示该词的词向量。注意行数i对应特定的词
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
# ref http://keras-cn.readthedocs.io/en/latest/layers/embedding_layer/
embedding_layer = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
input_length=MAX_SEQUENCE_LENGTH)
# embedding_layer = Embedding(nb_words + 1,
# EMBEDDING_DIM,
# weights=[embedding_matrix],
# input_length=MAX_SEQUENCE_LENGTH,
# trainable=False)# 注意weight 的输入形式
print('Training model.')
# train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
class_num = len(np.unique(labels))
preds = Dense(len(labels_index), activation='softmax')(x)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
print('Fitting')
model.fit(x_train, y_train, validation_split=0.1,
nb_epoch=epoch_num, batch_size=batch_size, verbose=1)
score, acc = model.evaluate(x_test, y_test,
batch_size=batch_size, verbose=1)
print('Test score:', score)
print('Test accuracy:', acc)
print('Saving model')
from keras.utils.visualize_util import plot
data_today=time.strftime('%Y-%m-%d',time.localtime(time.time()))
plot(model, to_file=r'.\data\cnn-embedding-model'+data_today+'.png')
json_string = model.to_json() #等价于 json_string = model.get_config()
open('.\data\cnn-embedding-model'+data_today+'.json','w+').write(json_string)
model.save_weights('.\data\keras-cnn-embedding'+data_today+'.h5', overwrite=True)
# 可以跑通,本地耗时太长
#我们也可以测试下如果不使用预先训练好的词向量,而是从头开始初始化Embedding层,
#在训练的过程中学习它的值,准确率会如何?我们只需要用下列的代码替换Embedding层:
def cnn_w2v():
'使用固定的embedding weights'
N = 3000
MAX_SEQUENCE_LENGTH = 5000
MAX_NB_WORDS = 300
EMBEDDING_DIM = 50
VALIDATION_SPLIT = 0.2
batch_size = 10
epoch_num = 2
TEXT_DATA_DIR = r'H:\corpus_trained_model\movie_reviews'
TEXT_DATA_DIR = r'H:\corpus_trained_model\imdb-large'
TEXT_DATA_DIR = r'H:\network_diagnosis_data\cut-500'
# TEXT_DATA_DIR = r'H:\network_diagnosis_data\cut-500'
print('Indexing word vectors.')
embeddings_in | el
End of this programme.
''' | dex = {} # embedding后的词典
word2vec_results=r"H:\corpus_trained_model\glove.6B\glove.6B.50d.txt"
word2vec_results = r"H:\EclipseWorkspace\NetFault_Analysis\Pre-processing\data\vector_list_cut1000-all-50D-w2v.txt"
f = open(word2vec_results,'r')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')
# texts,labels,word_dict = ReadData.ReadRaw2HierData(TEXT_DATA_DIR,50)
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path): # 是否是 目录
label_id = len(labels_index)
labels_index[name] = label_id
j = 0
for fname in sorted(os.listdir(path)):
if j < N:
fpath = os.path.join(path, fname)
f = open(fpath, 'r')
texts.append(f.read())
f.close()
labels.append(label_id)
j = j + 1
j = 0
print('Load %s texts.' % len(texts))
# finally, vectorize the text samples into a 2D integer tensor
# ref http://keras-cn.readthedocs.io/en/latest/preprocessing/text/
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS) # 选择前 MAX_NB_WORDS个高频词
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index # dict like hello:23
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)#保留的每个文档的最大单词数量,返回 nparray
labels = to_categorical(np.asarray(labels))# 转变成 0 1 序列
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set 先打乱 后拆分(不需要这么麻烦,已在其他地方实现)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_test = data[-nb_validation_samples:]
y_test = labels[-nb_validation_samples:]
print('Preparing embedding matrix.')
# prepare embedding matrix
nb_words = min(MAX_NB_WORDS, len(word_index)) # 每段多少词,总词unique个数中的小者
embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))# embedding 矩阵 ,初始化 为0。故没有的单词词向量是0
for word, i in word_index.items():
if i > MAX_NB_WORDS:
'词频外的不操作'
continue
embedding_vector = embeddings_index.get(word) # 该词对应的词向量
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector # 该行表示该词的词向量。注意行数i对应特定的词
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
# ref http://keras-cn.readthedocs.io/en/latest/layers/embedding_layer/
embedding_layer = Embedding(nb_words + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)# 注意weight 的输入形式
print('Training model.')
# train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
class_num = len(np.unique(labels))
preds = Dense(len(labels_index), activation='softmax')(x)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
print('Fitting')
model.fit(x_train, y_train, validation_split=0.1,
nb_epoch=epoch_num, batch_size=batch_size, verbose=1)
score, acc = model.evaluate(x_test, y_test,
batch_size=batch_size, verbose=1)
print('Test score:', score)
print('Test accuracy:', acc)
print('Saving model')
from keras.utils.visualize_util import plot
data_today=time.strftime('%Y-%m-%d',time.localtime(time.time()))
plot(model, to_file=r'.\data\cnn-embedding-model'+data_today+'.png')
json_string = model.to_json() #等价于 json_string = model.get_config()
open('.\data\cnn-embedding-model'+data_today+'.json','w+').write(json_string)
model.save_weights('.\data\keras-cnn-embedding'+data_today+'.h5', overwrite=True)
print('End of this programme.')
if __name__ == '__main__':
# cnn_raw()
cnn_w2v()
'''
结果:
Using Theano backend.
Indexing word vectors.
Found 400000 word vectors.
Processing text dataset
Load 10000 texts.
Found 56822 unique tokens.
Shape of data tensor: (10000L, 2000L)
Shape of label tensor: (10000L, 2L)
Preparing embedding matrix.
Training model.
Fitting
Train on 7200 samples, validate on 800 samples
Epoch 1/2
7200/7200 [==============================] - 6536s - loss: 0.6683 - acc: 0.5682 - val_loss: 0.6311 - val_acc: 0.6450
Epoch 2/2
7200/7200 [==============================] - 6398s - loss: 0.6117 - acc: 0.6401 - val_loss: 0.6251 - val_acc: 0.6275
2000/2000 [==============================] - 790s
Test score: 0.616591759771
Test accuracy: 0.628000003546
Saving mod | identifier_body |
keras_cnn_pretrain-embedding_classification.py | #-*-coding:utf-8-*-
from __future__ import print_function,division
'''
Created on 2016年11月20日
ref:
https://github.com/fchollet/keras/blob/master/examples/pretrained_word_embeddings.py
notes can be found here http://keras-cn.readthedocs.io/en/latest/blog/word_embedding/
https://kiseliu.github.io/2016/08/03/using-pre-trained-word-embeddings-in-a-keras-model/
https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html
chines document http://keras-cn.readthedocs.io/en/latest/blog/word_embedding/
all of these can be found via Youdao noets
@author: RenaiC
'''
import os,sys,json,pickle,time
import numpy as np
import ReadData
np.random.seed(1337)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
N = 500
MAX_SEQUENCE_LENGTH = 10000
MAX_NB_WORDS = 250
EMBEDDING_DIM = 50
VALIDATION_SPLIT = 0.2
batch_size = 10
epoch_num = 2
TEXT_DATA_DIR = r'H:\network_diagnosis_data\cut-1000'
word2vec_results = r"H:\EclipseWorkspace\NetFault_Analysis\Pre-processing\data\vector_list_cut1000-all-50D-w2v.txt"
# first, build index mapping words in the embeddings set
# to their embedding vector
def cnn_ra | '在训练的过程中训练embedding向量'
TEXT_DATA_DIR =r'H:\network_diagnosis_data\cut-500'
TEXT_DATA_DIR = r'H:\corpus_trained_model\imdb-large'
print('Indexing word vectors.')
embeddings_index = {} # embedding后的词典
f = open(word2vec_results,'r')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')
# texts,labels,word_dict = ReadData.ReadRaw2HierData(TEXT_DATA_DIR,50)
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path): # 是否是 目录
label_id = len(labels_index)
labels_index[name] = label_id
j = 0
for fname in sorted(os.listdir(path)):
if j < N:
fpath = os.path.join(path, fname)
f = open(fpath, 'r')
texts.append(f.read())
f.close()
labels.append(label_id)
j = j + 1
j = 0
print('Load %s texts.' % len(texts))
# finally, vectorize the text samples into a 2D integer tensor
# ref http://keras-cn.readthedocs.io/en/latest/preprocessing/text/
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS) # 选择前 MAX_NB_WORDS个高频词
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index # dict like hello:23
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)#保留的每个文档的最大单词数量,返回 nparray
labels = to_categorical(np.asarray(labels))# 转变成 0 1 序列
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set 先打乱 后拆分(不需要这么麻烦,已在其他地方实现)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_test = data[-nb_validation_samples:]
y_test = labels[-nb_validation_samples:]
print('Preparing embedding matrix.')
# prepare embedding matrix
nb_words = min(MAX_NB_WORDS, len(word_index)) # 每段多少词,总词unique个数中的小者
embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))# embedding 矩阵 ,初始化 为0。故没有的单词词向量是0
for word, i in word_index.items():
if i > MAX_NB_WORDS:
'词频外的不操作'
continue
embedding_vector = embeddings_index.get(word) # 该词对应的词向量
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector # 该行表示该词的词向量。注意行数i对应特定的词
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
# ref http://keras-cn.readthedocs.io/en/latest/layers/embedding_layer/
embedding_layer = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
input_length=MAX_SEQUENCE_LENGTH)
# embedding_layer = Embedding(nb_words + 1,
# EMBEDDING_DIM,
# weights=[embedding_matrix],
# input_length=MAX_SEQUENCE_LENGTH,
# trainable=False)# 注意weight 的输入形式
print('Training model.')
# train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
class_num = len(np.unique(labels))
preds = Dense(len(labels_index), activation='softmax')(x)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
print('Fitting')
model.fit(x_train, y_train, validation_split=0.1,
nb_epoch=epoch_num, batch_size=batch_size, verbose=1)
score, acc = model.evaluate(x_test, y_test,
batch_size=batch_size, verbose=1)
print('Test score:', score)
print('Test accuracy:', acc)
print('Saving model')
from keras.utils.visualize_util import plot
data_today=time.strftime('%Y-%m-%d',time.localtime(time.time()))
plot(model, to_file=r'.\data\cnn-embedding-model'+data_today+'.png')
json_string = model.to_json() #等价于 json_string = model.get_config()
open('.\data\cnn-embedding-model'+data_today+'.json','w+').write(json_string)
model.save_weights('.\data\keras-cnn-embedding'+data_today+'.h5', overwrite=True)
# 可以跑通,本地耗时太长
#我们也可以测试下如果不使用预先训练好的词向量,而是从头开始初始化Embedding层,
#在训练的过程中学习它的值,准确率会如何?我们只需要用下列的代码替换Embedding层:
def cnn_w2v():
'使用固定的embedding weights'
N = 3000
MAX_SEQUENCE_LENGTH = 5000
MAX_NB_WORDS = 300
EMBEDDING_DIM = 50
VALIDATION_SPLIT = 0.2
batch_size = 10
epoch_num = 2
TEXT_DATA_DIR = r'H:\corpus_trained_model\movie_reviews'
TEXT_DATA_DIR = r'H:\corpus_trained_model\imdb-large'
TEXT_DATA_DIR = r'H:\network_diagnosis_data\cut-500'
# TEXT_DATA_DIR = r'H:\network_diagnosis_data\cut-500'
print('Indexing word vectors.')
embeddings_index = {} # embedding后的词典
word2vec_results=r"H:\corpus_trained_model\glove.6B\glove.6B.50d.txt"
word2vec_results = r"H:\EclipseWorkspace\NetFault_Analysis\Pre-processing\data\vector_list_cut1000-all-50D-w2v.txt"
f = open(word2vec_results,'r')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')
# texts,labels,word_dict = ReadData.ReadRaw2HierData(TEXT_DATA_DIR,50)
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path): # 是否是 目录
label_id = len(labels_index)
labels_index[name] = label_id
j = 0
for fname in sorted(os.listdir(path)):
if j < N:
fpath = os.path.join(path, fname)
f = open(fpath, 'r')
texts.append(f.read())
f.close()
labels.append(label_id)
j = j + 1
j = 0
print('Load %s texts.' % len(texts))
# finally, vectorize the text samples into a 2D integer tensor
# ref http://keras-cn.readthedocs.io/en/latest/preprocessing/text/
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS) # 选择前 MAX_NB_WORDS个高频词
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index # dict like hello:23
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)#保留的每个文档的最大单词数量,返回 nparray
labels = to_categorical(np.asarray(labels))# 转变成 0 1 序列
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set 先打乱 后拆分(不需要这么麻烦,已在其他地方实现)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_test = data[-nb_validation_samples:]
y_test = labels[-nb_validation_samples:]
print('Preparing embedding matrix.')
# prepare embedding matrix
nb_words = min(MAX_NB_WORDS, len(word_index)) # 每段多少词,总词unique个数中的小者
embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))# embedding 矩阵 ,初始化 为0。故没有的单词词向量是0
for word, i in word_index.items():
if i > MAX_NB_WORDS:
'词频外的不操作'
continue
embedding_vector = embeddings_index.get(word) # 该词对应的词向量
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector # 该行表示该词的词向量。注意行数i对应特定的词
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
# ref http://keras-cn.readthedocs.io/en/latest/layers/embedding_layer/
embedding_layer = Embedding(nb_words + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)# 注意weight 的输入形式
print('Training model.')
# train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
class_num = len(np.unique(labels))
preds = Dense(len(labels_index), activation='softmax')(x)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
print('Fitting')
model.fit(x_train, y_train, validation_split=0.1,
nb_epoch=epoch_num, batch_size=batch_size, verbose=1)
score, acc = model.evaluate(x_test, y_test,
batch_size=batch_size, verbose=1)
print('Test score:', score)
print('Test accuracy:', acc)
print('Saving model')
from keras.utils.visualize_util import plot
data_today=time.strftime('%Y-%m-%d',time.localtime(time.time()))
plot(model, to_file=r'.\data\cnn-embedding-model'+data_today+'.png')
json_string = model.to_json() #等价于 json_string = model.get_config()
open('.\data\cnn-embedding-model'+data_today+'.json','w+').write(json_string)
model.save_weights('.\data\keras-cnn-embedding'+data_today+'.h5', overwrite=True)
print('End of this programme.')
if __name__ == '__main__':
# cnn_raw()
cnn_w2v()
'''
结果:
Using Theano backend.
Indexing word vectors.
Found 400000 word vectors.
Processing text dataset
Load 10000 texts.
Found 56822 unique tokens.
Shape of data tensor: (10000L, 2000L)
Shape of label tensor: (10000L, 2L)
Preparing embedding matrix.
Training model.
Fitting
Train on 7200 samples, validate on 800 samples
Epoch 1/2
7200/7200 [==============================] - 6536s - loss: 0.6683 - acc: 0.5682 - val_loss: 0.6311 - val_acc: 0.6450
Epoch 2/2
7200/7200 [==============================] - 6398s - loss: 0.6117 - acc: 0.6401 - val_loss: 0.6251 - val_acc: 0.6275
2000/2000 [==============================] - 790s
Test score: 0.616591759771
Test accuracy: 0.628000003546
Saving model
End of this programme.
''' | w():
| identifier_name |
keras_cnn_pretrain-embedding_classification.py | #-*-coding:utf-8-*-
from __future__ import print_function,division
'''
Created on 2016年11月20日
ref:
https://github.com/fchollet/keras/blob/master/examples/pretrained_word_embeddings.py
notes can be found here http://keras-cn.readthedocs.io/en/latest/blog/word_embedding/
https://kiseliu.github.io/2016/08/03/using-pre-trained-word-embeddings-in-a-keras-model/
https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html
chines document http://keras-cn.readthedocs.io/en/latest/blog/word_embedding/
all of these can be found via Youdao noets
@author: RenaiC
'''
import os,sys,json,pickle,time
import numpy as np
import ReadData
np.random.seed(1337)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
N = 500
MAX_SEQUENCE_LENGTH = 10000
MAX_NB_WORDS = 250
EMBEDDING_DIM = 50
VALIDATION_SPLIT = 0.2
batch_size = 10
epoch_num = 2
TEXT_DATA_DIR = r'H:\network_diagnosis_data\cut-1000'
word2vec_results = r"H:\EclipseWorkspace\NetFault_Analysis\Pre-processing\data\vector_list_cut1000-all-50D-w2v.txt"
# first, build index mapping words in the embeddings set
# to their embedding vector
def cnn_raw():
'在训练的过程中训练embedding向量'
TEXT_DATA_DIR =r'H:\network_diagnosis_data\cut-500'
TEXT_DATA_DIR = r'H:\corpus_trained_model\imdb-large'
print('Indexing word vectors.')
embeddings_index = {} # embedding后的词典
f = open(word2vec_results,'r')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')
# texts,labels,word_dict = ReadData.ReadRaw2HierData(TEXT_DATA_DIR,50)
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path): # 是否是 目录
label_id = len(labels_index)
labels_index[name] = label_id
j = 0
for fname in sorted(os.listdir(path)):
if j < N:
fpath = os.path.join(path, fname)
f = open(fpath, 'r')
texts.append(f.read())
f.close()
labels.append(label_id)
j = j + 1
j = 0
print('Load %s texts.' % len(texts))
# finally, vectorize the text samples into a 2D integer tensor
# ref http://keras-cn.readthedocs.io/en/latest/preprocessing/text/
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS) # 选择前 MAX_NB_WORDS个高频词
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index # dict like hello:23
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)#保留的每个文档的最大单词数量,返回 nparray
labels = to_categorical(np.asarray(labels))# 转变成 0 1 序列
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set 先打乱 后拆分(不需要这么麻烦,已在其他地方实现)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_test = data[-nb_validation_samples:]
y_test = labels[-nb_validation_samples:]
print('Preparing embedding matrix.')
# prepare embedding matrix
nb_words = min(MAX_NB_WORDS, len(word_index)) # 每段多少词,总词unique个数中的小者
embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))# embedding 矩阵 ,初始化 为0。故没有的单词词向量是0
for word, i in word_index.items():
if i > MAX_NB_WORDS:
'词频外的不操作'
continue
embedding_vector = embeddings_index.get(word) # 该词对应的词向量
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector # 该行表示该词的词向量。注意行数i对应特定的词
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
# ref http://keras-cn.readthedocs.io/en/latest/layers/embedding_layer/
embedding_layer = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
input_length=MAX_SEQUENCE_LENGTH)
# embedding_layer = Embedding(nb_words + 1,
# EMBEDDING_DIM,
# weights=[embedding_matrix],
# input_length=MAX_SEQUENCE_LENGTH, | sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
class_num = len(np.unique(labels))
preds = Dense(len(labels_index), activation='softmax')(x)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
print('Fitting')
model.fit(x_train, y_train, validation_split=0.1,
nb_epoch=epoch_num, batch_size=batch_size, verbose=1)
score, acc = model.evaluate(x_test, y_test,
batch_size=batch_size, verbose=1)
print('Test score:', score)
print('Test accuracy:', acc)
print('Saving model')
from keras.utils.visualize_util import plot
data_today=time.strftime('%Y-%m-%d',time.localtime(time.time()))
plot(model, to_file=r'.\data\cnn-embedding-model'+data_today+'.png')
json_string = model.to_json() #等价于 json_string = model.get_config()
open('.\data\cnn-embedding-model'+data_today+'.json','w+').write(json_string)
model.save_weights('.\data\keras-cnn-embedding'+data_today+'.h5', overwrite=True)
# 可以跑通,本地耗时太长
#我们也可以测试下如果不使用预先训练好的词向量,而是从头开始初始化Embedding层,
#在训练的过程中学习它的值,准确率会如何?我们只需要用下列的代码替换Embedding层:
def cnn_w2v():
'使用固定的embedding weights'
N = 3000
MAX_SEQUENCE_LENGTH = 5000
MAX_NB_WORDS = 300
EMBEDDING_DIM = 50
VALIDATION_SPLIT = 0.2
batch_size = 10
epoch_num = 2
TEXT_DATA_DIR = r'H:\corpus_trained_model\movie_reviews'
TEXT_DATA_DIR = r'H:\corpus_trained_model\imdb-large'
TEXT_DATA_DIR = r'H:\network_diagnosis_data\cut-500'
# TEXT_DATA_DIR = r'H:\network_diagnosis_data\cut-500'
print('Indexing word vectors.')
embeddings_index = {} # embedding后的词典
word2vec_results=r"H:\corpus_trained_model\glove.6B\glove.6B.50d.txt"
word2vec_results = r"H:\EclipseWorkspace\NetFault_Analysis\Pre-processing\data\vector_list_cut1000-all-50D-w2v.txt"
f = open(word2vec_results,'r')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')
# texts,labels,word_dict = ReadData.ReadRaw2HierData(TEXT_DATA_DIR,50)
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path): # 是否是 目录
label_id = len(labels_index)
labels_index[name] = label_id
j = 0
for fname in sorted(os.listdir(path)):
if j < N:
fpath = os.path.join(path, fname)
f = open(fpath, 'r')
texts.append(f.read())
f.close()
labels.append(label_id)
j = j + 1
j = 0
print('Load %s texts.' % len(texts))
# finally, vectorize the text samples into a 2D integer tensor
# ref http://keras-cn.readthedocs.io/en/latest/preprocessing/text/
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS) # 选择前 MAX_NB_WORDS个高频词
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index # dict like hello:23
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)#保留的每个文档的最大单词数量,返回 nparray
labels = to_categorical(np.asarray(labels))# 转变成 0 1 序列
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set 先打乱 后拆分(不需要这么麻烦,已在其他地方实现)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_test = data[-nb_validation_samples:]
y_test = labels[-nb_validation_samples:]
print('Preparing embedding matrix.')
# prepare embedding matrix
nb_words = min(MAX_NB_WORDS, len(word_index)) # 每段多少词,总词unique个数中的小者
embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))# embedding 矩阵 ,初始化 为0。故没有的单词词向量是0
for word, i in word_index.items():
if i > MAX_NB_WORDS:
'词频外的不操作'
continue
embedding_vector = embeddings_index.get(word) # 该词对应的词向量
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector # 该行表示该词的词向量。注意行数i对应特定的词
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
# ref http://keras-cn.readthedocs.io/en/latest/layers/embedding_layer/
embedding_layer = Embedding(nb_words + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)# 注意weight 的输入形式
print('Training model.')
# train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
class_num = len(np.unique(labels))
preds = Dense(len(labels_index), activation='softmax')(x)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
print('Fitting')
model.fit(x_train, y_train, validation_split=0.1,
nb_epoch=epoch_num, batch_size=batch_size, verbose=1)
score, acc = model.evaluate(x_test, y_test,
batch_size=batch_size, verbose=1)
print('Test score:', score)
print('Test accuracy:', acc)
print('Saving model')
from keras.utils.visualize_util import plot
data_today=time.strftime('%Y-%m-%d',time.localtime(time.time()))
plot(model, to_file=r'.\data\cnn-embedding-model'+data_today+'.png')
json_string = model.to_json() #等价于 json_string = model.get_config()
open('.\data\cnn-embedding-model'+data_today+'.json','w+').write(json_string)
model.save_weights('.\data\keras-cnn-embedding'+data_today+'.h5', overwrite=True)
print('End of this programme.')
if __name__ == '__main__':
# cnn_raw()
cnn_w2v()
'''
结果:
Using Theano backend.
Indexing word vectors.
Found 400000 word vectors.
Processing text dataset
Load 10000 texts.
Found 56822 unique tokens.
Shape of data tensor: (10000L, 2000L)
Shape of label tensor: (10000L, 2L)
Preparing embedding matrix.
Training model.
Fitting
Train on 7200 samples, validate on 800 samples
Epoch 1/2
7200/7200 [==============================] - 6536s - loss: 0.6683 - acc: 0.5682 - val_loss: 0.6311 - val_acc: 0.6450
Epoch 2/2
7200/7200 [==============================] - 6398s - loss: 0.6117 - acc: 0.6401 - val_loss: 0.6251 - val_acc: 0.6275
2000/2000 [==============================] - 790s
Test score: 0.616591759771
Test accuracy: 0.628000003546
Saving model
End of this programme.
''' | # trainable=False)# 注意weight 的输入形式
print('Training model.')
# train a 1D convnet with global maxpooling | random_line_split |
keras_cnn_pretrain-embedding_classification.py | #-*-coding:utf-8-*-
from __future__ import print_function,division
'''
Created on 2016年11月20日
ref:
https://github.com/fchollet/keras/blob/master/examples/pretrained_word_embeddings.py
notes can be found here http://keras-cn.readthedocs.io/en/latest/blog/word_embedding/
https://kiseliu.github.io/2016/08/03/using-pre-trained-word-embeddings-in-a-keras-model/
https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html
chines document http://keras-cn.readthedocs.io/en/latest/blog/word_embedding/
all of these can be found via Youdao noets
@author: RenaiC
'''
import os,sys,json,pickle,time
import numpy as np
import ReadData
np.random.seed(1337)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
N = 500
MAX_SEQUENCE_LENGTH = 10000
MAX_NB_WORDS = 250
EMBEDDING_DIM = 50
VALIDATION_SPLIT = 0.2
batch_size = 10
epoch_num = 2
TEXT_DATA_DIR = r'H:\network_diagnosis_data\cut-1000'
word2vec_results = r"H:\EclipseWorkspace\NetFault_Analysis\Pre-processing\data\vector_list_cut1000-all-50D-w2v.txt"
# first, build index mapping words in the embeddings set
# to their embedding vector
def cnn_raw():
'在训练的过程中训练embedding向量'
TEXT_DATA_DIR =r'H:\network_diagnosis_data\cut-500'
TEXT_DATA_DIR = r'H:\corpus_trained_model\imdb-large'
print('Indexing word vectors.')
embeddings_index = {} # embedding后的词典
f = open(word2vec_results,'r')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')
# texts,labels,word_dict = ReadData.ReadRaw2HierData(TEXT_DATA_DIR,50)
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path): # 是否是 目录
label_id = len(labels_index)
labels_index[name] = label_id
j = 0
for fname in sorted(os.listdir(path)):
if j < N:
fpath = os.path.join(path, fname)
f = open(fpath, 'r')
texts.append(f.read())
f.close()
labels.append(label_id)
j = j + 1
j = 0
print('Load %s texts.' % len(texts))
# finally, vectorize the text samples into a 2D integer tensor
# ref http://keras-cn.readthedocs.io/en/latest/preprocessing/text/
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS) # 选择前 MAX_NB_WORDS个高频词
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index # dict like hello:23
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)#保留的每个文档的最大单词数量,返回 nparray
labels = to_categorical(np.asarray(labels))# 转变成 0 1 序列
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set 先打乱 后拆分(不需要这么麻烦,已在其他地方实现)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_test = data[-nb_validation_samples:]
y_test = labels[-nb_validation_samples:]
print('Preparing embedding matrix.')
# prepare embedding matrix
nb_words = min(MAX_NB_WORDS, len(word_index)) # 每段多少词,总词unique个数中的小者
embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))# embedding 矩阵 ,初始化 为0。故没有的单词词向量是0
for word, i in word_index.items():
if i > MAX_NB_WORDS:
'词频外的不操作'
continue
embedding_vector = embeddings_index.get(word) # 该词对应的词向量
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector # 该行表示该词的词向量。注意行数i对应特定的词
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
# ref http://keras-cn.readthedocs.io/en/latest/layers/embedding_layer/
embedding_layer = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
input_length=MAX_SEQUENCE_LENGTH)
# embedding_layer = Embedding(nb_words + 1,
# EMBEDDING_DIM,
# weights=[embedding_matrix],
# input_length=MAX_SEQUENCE_LENGTH,
# trainable=False)# 注意weight 的输入形式
print('Training model.')
# train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
class_num = len(np.unique(labels))
preds = Dense(len(labels_index), activation='softmax')(x)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
print('Fitting')
model.fit(x_train, y_train, validation_split=0.1,
nb_epoch=epoch_num, batch_size=batch_size, verbose=1)
score, acc = model.evaluate(x_test, y_test,
batch_size=batch_size, verbose=1)
print('Test score:', score)
print('Test accuracy:', acc)
print('Saving model')
from keras.utils.visualize_util import plot
data_today=time.strftime('%Y-%m-%d',time.localtime(time.time()))
plot(model, to_file=r'.\data\cnn-embedding-model'+data_today+'.png')
json_string = model.to_json() #等价于 json_string = model.get_config()
open('.\data\cnn-embedding-model'+data_today+'.json','w+').write(json_string)
model.save_weights('.\data\keras-cnn-embedding'+data_today+'.h5', overwrite=True)
# 可以跑通,本地耗时太长
#我们也可以测试下如果不使用预先训练好的词向量,而是从头开始初始化Embedding层,
#在训练的过程中学习它的值,准确率会如何?我们只需要用下列的代码替换Embedding层:
def cnn_w2v():
'使用固定的embedding weights'
N = 3000
MAX_SEQUENCE_LENGTH = 5000
MAX_NB_WORDS = 300
EMBEDDING_DIM = 50
VALIDATION_SPLIT = 0.2
batch_size = 10
epoch_num = 2
TEXT_DATA_DIR = r'H:\corpus_trained_model\movie_reviews'
TEXT_DATA_DIR = r'H:\corpus_trained_model\imdb-large'
TEXT_DATA_DIR = r'H:\network_diagnosis_data\cut-500'
# TEXT_DATA_DIR = r'H:\network_diagnosis_data\cut-500'
print('Indexing word vectors.')
embeddings_index = {} # embedding后的词典
word2vec_results=r"H:\corpus_trained_model\glove.6B\glove.6B.50d.txt"
word2vec_results = r"H:\EclipseWorkspace\NetFault_Analysis\Pre-processing\data\vector_list_cut1000-all-50D-w2v.txt"
f = open(word2vec_results,'r')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')
# texts,labels,word_dict = ReadData.ReadRaw2HierData(TEXT_DATA_DIR,50)
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(TEXT_DATA_DIR)):
path = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(path): # 是否是 目录
label_id = len(labels_index)
labels_index[name] = label_id
j = 0
for fname in sorted(os.listdir(path)):
if j < N:
fpath = os.path.join(path, fname)
f = open(fpath, 'r')
texts.append(f.read())
f.close()
labels.append(label_id)
j = j + 1
j = 0
print('Load %s texts.' % len(texts))
# finally, vectorize the text samples into a 2D integer tensor
# ref http://keras-cn.readthedocs.io/en/latest/preprocessing/text/
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS) # 选择前 MAX_NB_WORDS个高频词
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index # dict like hello:23
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)#保留的每个文档的最大单词数量,返回 nparray
labels = to_categorical(np.asarray(labels))# 转变成 0 1 序列
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
# split the data into a training set and a validation set 先打乱 后拆分(不需要这么麻烦,已在其他地方实现)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_test = data[-nb_validation_samples:]
y_test = labels[-nb_validation_samples:]
print('Preparing embedding matrix.')
# prepare embedding matrix
nb_words = min(MAX_NB_WORDS, len(word_index)) # 每段多少词,总词unique个数中的小者
embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))# embedding 矩阵 ,初始化 为0。故没有的单词词向量是0
for word, i in word_index.items():
if i > MAX_NB_WORDS:
'词频外的不操作'
continue
embedding_vector = embeddings_index.get(word) # 该词对应的词向量
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector # 该行表示该词的词向量。注意行数i对应特定的词
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
# ref http://keras-cn.readthedocs.io/en/latest/layers/embedding_layer/
embedding_layer = Embedding(nb_words + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)# 注意weight 的输入形式
print('Training model.')
# train a 1D convnet with global maxpooling
sequence_input = Input(shape=(MAX_SEQU | , 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x)
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
class_num = len(np.unique(labels))
preds = Dense(len(labels_index), activation='softmax')(x)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
print('Fitting')
model.fit(x_train, y_train, validation_split=0.1,
nb_epoch=epoch_num, batch_size=batch_size, verbose=1)
score, acc = model.evaluate(x_test, y_test,
batch_size=batch_size, verbose=1)
print('Test score:', score)
print('Test accuracy:', acc)
print('Saving model')
from keras.utils.visualize_util import plot
data_today=time.strftime('%Y-%m-%d',time.localtime(time.time()))
plot(model, to_file=r'.\data\cnn-embedding-model'+data_today+'.png')
json_string = model.to_json() #等价于 json_string = model.get_config()
open('.\data\cnn-embedding-model'+data_today+'.json','w+').write(json_string)
model.save_weights('.\data\keras-cnn-embedding'+data_today+'.h5', overwrite=True)
print('End of this programme.')
if __name__ == '__main__':
# cnn_raw()
cnn_w2v()
'''
结果:
Using Theano backend.
Indexing word vectors.
Found 400000 word vectors.
Processing text dataset
Load 10000 texts.
Found 56822 unique tokens.
Shape of data tensor: (10000L, 2000L)
Shape of label tensor: (10000L, 2L)
Preparing embedding matrix.
Training model.
Fitting
Train on 7200 samples, validate on 800 samples
Epoch 1/2
7200/7200 [==============================] - 6536s - loss: 0.6683 - acc: 0.5682 - val_loss: 0.6311 - val_acc: 0.6450
Epoch 2/2
7200/7200 [==============================] - 6398s - loss: 0.6117 - acc: 0.6401 - val_loss: 0.6251 - val_acc: 0.6275
2000/2000 [==============================] - 790s
Test score: 0.616591759771
Test accuracy: 0.628000003546
Saving model
End of this programme.
''' | ENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128 | conditional_block |
git.rs | //! Getting the Git status of files and directories.
use std::ffi::OsStr;
#[cfg(target_family = "unix")]
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::sync::Mutex;
use log::*;
use crate::fs::fields as f;
/// A **Git cache** is assembled based on the user’s input arguments.
///
/// This uses vectors to avoid the overhead of hashing: it’s not worth it when the
/// expected number of Git repositories per exa invocation is 0 or 1...
pub struct GitCache {
/// A list of discovered Git repositories and their paths.
repos: Vec<GitRepo>,
/// Paths that we’ve confirmed do not have Git repositories underneath them.
misses: Vec<PathBuf>,
}
impl GitCache {
pub fn has_anything_for(&self, index: &Path) -> bool {
self.repos.iter().any(|e| e.has_path(index))
}
pub fn get(&self, index: &Path, prefix_lookup: bool) -> f::Git {
self.repos.iter()
.find(|e| e.has_path(index))
.map(|repo| repo.search(index, prefix_lookup))
.unwrap_or_default()
}
}
use std::iter::FromIterator;
impl FromIterator<PathBuf> for GitCache {
fn from_iter<I>(iter: I) -> Self
where I: IntoIterator<Item=PathBuf>
{
let iter = iter.into_iter();
let mut git = Self {
repos: Vec::with_capacity(iter.size_hint().0),
misses: Vec::new(),
};
for path in iter {
if git.misses.contains(&path) {
debug!("Skipping {:?} because it already came back Gitless", path);
}
else if git.repos.iter().any(|e| e.has_path(&path)) {
debug!("Skipping {:?} because we already queried it", path);
}
else {
match GitRepo::discover(path) {
Ok(r) => {
if let Some(r2) = git.repos.iter_mut().find(|e| e.has_workdir(&r.workdir)) {
debug!("Adding to existing repo (workdir matches with {:?})", r2.workdir);
r2.extra_paths.push(r.original_path);
continue;
}
debug!("Discovered new Git repo");
git.repos.push(r);
}
Err(miss) => {
git.misses.push(miss)
}
}
}
}
git
}
}
/// A **Git repository** is one we’ve discovered somewhere on the filesystem.
pub struct GitRepo {
/// The queryable contents of the repository: either a `git2` repo, or the
/// cached results from when we queried it last time.
contents: Mutex<GitContents>,
/// The working directory of this repository.
/// This is used to check whether two repositories are the same.
workdir: PathBuf,
/// The path that was originally checked to discover this repository.
/// This is as important as the extra_paths (it gets checked first), but
/// is separate to avoid having to deal with a non-empty Vec.
original_path: PathBuf,
/// Any other paths that were checked only to result in this same
/// repository.
extra_paths: Vec<PathBuf>,
}
/// A repository’s queried state.
enum GitContents {
/// All the interesting Git stuff goes through this.
Before {
repo: git2::Repository,
},
/// Temporary value used in `repo_to_statuses` so we can move the
/// repository out of the `Before` variant.
Processing,
/// The data we’ve extracted from the repository, but only after we’ve
/// actually done so.
After {
statuses: Git,
},
}
impl GitRepo {
/// Searches through this repository for a path (to a file or directory,
/// depending on the prefix-lookup flag) and returns its Git status.
///
/// Actually querying the `git2` repository for the mapping of paths to
/// Git statuses is only done once, and gets cached so we don’t need to
/// re-query the entire repository the times after that.
///
/// The temporary `Processing` enum variant is used after the `git2`
/// repository is moved out, but before the results have been moved in!
/// See <https://stackoverflow.com/q/45985827/3484614>
fn search(&self, index: &Path, prefix_lookup: bool) -> f::Git {
use std::mem::replace;
let mut contents = self.contents.lock().unwrap();
if let GitContents::After { ref statuses } = *contents {
debug!("Git repo {:?} has been found in cache", &self.workdir);
return statuses.status(index, prefix_lookup);
}
debug!("Querying Git repo {:?} for the first time", &self.workdir);
let repo = replace(&mut *contents, GitContents::Processing).inner_repo();
let statuses = repo_to_statuses(&repo, &self.workdir);
let result = statuses.status(index, prefix_lookup);
let _processing = replace(&mut *contents, GitContents::After { statuses });
result
}
/// Whether this repository has the given working directory.
fn has_workdir(&self, path: &Path) -> bool {
self.workdir == path
}
/// Whether this repository cares about the given path at all.
fn has_path(&self, path: &Path) -> bool {
path.starts_with(&self.original_path) || self.extra_paths.iter().any(|e| path.starts_with(e))
}
/// Searches for a Git repository at any point above the given path.
/// Returns the original buffer if none is found.
fn discover(path: PathBuf) -> Result<Self, PathBuf> {
info!("Searching for Git repository above {:?}", path);
let repo = match git2::Repository::discover(&path) {
Ok(r) => r,
Err(e) => {
error!("Error discovering Git repositories: {:?}", e);
return Err(path);
}
};
if let Some(workdir) = repo.workdir() {
let workdir = workdir.to_path_buf();
let contents = Mutex::new(GitContents::Before { repo });
Ok(Self { contents, workdir, original_path: path, extra_paths: Vec::new() })
}
else {
warn!("Repository has no workdir?");
Err(path)
}
}
}
impl GitContents {
/// Assumes that the repository hasn’t been queried, and extracts it
/// (consuming the value) if it has. This is needed because the entire
/// enum variant gets replaced when a repo is queried (see above).
fn inner_repo(self) -> git2::Repository {
if let Self::Before { repo } = self {
repo
}
else {
unreachable!("Tried to extract a non-Repository")
}
}
}
/// Iterates through a repository’s statuses, consuming it and returning the
/// mapping of files to their Git status.
/// We will have already used the working directory at this point, so it gets
/// passed in rather than deriving it from the `Repository` again.
fn repo_to_statuses(repo: &git2::Repository, workdir: &Path) -> Git {
let mut statuses = Vec::new();
info!("Getting Git statuses for repo with workdir {:?}", workdir);
match repo.statuses(None) {
Ok(es) => {
for e in es.iter() {
#[cfg(target_family = "unix")]
let path = workdir.join(Path::new(OsStr::from_bytes(e.path_bytes())));
// TODO: handle non Unix systems better:
// https://github.com/ogham/exa/issues/698
#[cfg(not(target_family = "unix"))]
let path = workdir.join(Path::new(e.path().unwrap()));
let elem = (path, e.status());
statuses.push(elem);
}
}
Err(e) => {
error!("Error looking up Git statuses: {:?}", e);
}
}
Git { statuses }
}
// The `repo.statuses` call above takes a long time. exa debug output:
//
// 20.311276 INFO:exa::fs::feature::git: Getting Git statuses for repo with workdir "/vagrant/"
// 20.799610 DEBUG:exa::output::table: Getting Git status for file "./Cargo.toml"
//
// Even inserting another logging line immediately afterwards doesn’t make it
// look any faster.
/// Container of Git statuses for all the files in this folder’s Git repository.
struct Git {
statuses: Vec<(PathBuf, git2::Status)>,
}
impl Git {
/// Get either the file or directory status for the given path.
/// “Prefix lookup” means that it should report an aggregate status of all
/// paths starting with the given prefix (in other words, a directory).
fn status(&self, index: &Path, prefix_lookup: bool) -> f::Git {
if prefix_lookup { self.dir_status(index) }
else { self.file_status(index) }
}
/// Get the user-facing status of a file.
/// We check the statuses directly applying to a file, and for the ignored
/// status we check if any of its parents directories is ignored by git.
fn file_status(&self, file: &Path) -> f::Git {
let path = reorient(file);
let s = self.statuses.iter()
.filter(|p| if p.1 == git2::Status::IGNORED {
path.starts_with(&p.0)
} else {
p.0 == path
})
.fold(git2::Status::empty(), |a, b| a | b.1);
let staged = index_status(s);
let unstaged = working_tree_status(s);
f::Git { staged, unstaged }
}
/// Get the combined, user-facing status of a directory.
/// Statuses are aggregating (for example, a directory is considered
/// modified if any file under it has the status modified), except for
/// ignored status which applies to files under (for example, a directory
/// is considered ignored if one of its parent directories is ignored).
fn dir_status(&self, dir: &Path) -> f::Git {
let path = reorient(dir);
let s = self.statuses.iter()
.filter(|p| if p.1 == git2::Status::IGNORED {
path.starts_with(&p.0)
} else {
p.0.starts_with(&path)
})
.fold(git2::Status::empty(), |a, b| a | b.1);
let staged = index_status(s);
let unstaged = working_tree_status(s);
f::Git { staged, unstaged }
}
}
/// Converts a path to an absolute path based on the current directory.
/// Paths need to be absolute for them to be compared properly, otherwise
/// you’d ask a repo about “./README.md” but it only knows about
/// “/vagrant/README.md”, prefixed by the workdir.
#[cfg(unix)]
fn reorient(path: &Path) -> PathBuf {
use std::env::current_dir;
// TODO: I’m not 100% on this func tbh
let path = match current_dir() {
Err(_) => Path::new(".").join(&path),
Ok(dir) => dir.join(&path),
};
path.canonicalize().unwrap_or(path)
}
#[cfg(windows)]
fn reorient(path: &Path) -> PathBuf {
let unc_path = path.canonicalize().unwrap();
// On Windows UNC path is returned. We need to strip the prefix for it to work.
let normal_path = unc_path.as_os_str().to_str().unwrap().trim_left_matches("\\\\?\\");
return PathBuf::from(normal_path);
}
/// The character to display if the file has been modified, but not staged.
fn working_tree_status(status: git2::Status) -> f::GitStatus {
match status {
s if s.contains(git2::Status::WT_NEW) => f::GitStatus::New,
s if s.contains(git2::Status::WT_MODIFIED) => f::GitStatus::Modified,
s if s.contains(git2::Status::WT_DELETED) => f::GitStatus::Deleted,
s if s.contains(git2::Status::WT_RENAMED) => f::GitStatus::Renamed, | }
/// The character to display if the file has been modified and the change
/// has been staged.
fn index_status(status: git2::Status) -> f::GitStatus {
match status {
s if s.contains(git2::Status::INDEX_NEW) => f::GitStatus::New,
s if s.contains(git2::Status::INDEX_MODIFIED) => f::GitStatus::Modified,
s if s.contains(git2::Status::INDEX_DELETED) => f::GitStatus::Deleted,
s if s.contains(git2::Status::INDEX_RENAMED) => f::GitStatus::Renamed,
s if s.contains(git2::Status::INDEX_TYPECHANGE) => f::GitStatus::TypeChange,
_ => f::GitStatus::NotModified,
}
} | s if s.contains(git2::Status::WT_TYPECHANGE) => f::GitStatus::TypeChange,
s if s.contains(git2::Status::IGNORED) => f::GitStatus::Ignored,
s if s.contains(git2::Status::CONFLICTED) => f::GitStatus::Conflicted,
_ => f::GitStatus::NotModified,
} | random_line_split |
git.rs | //! Getting the Git status of files and directories.
use std::ffi::OsStr;
#[cfg(target_family = "unix")]
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::sync::Mutex;
use log::*;
use crate::fs::fields as f;
/// A **Git cache** is assembled based on the user’s input arguments.
///
/// This uses vectors to avoid the overhead of hashing: it’s not worth it when the
/// expected number of Git repositories per exa invocation is 0 or 1...
pub struct GitCache {
/// A list of discovered Git repositories and their paths.
repos: Vec<GitRepo>,
/// Paths that we’ve confirmed do not have Git repositories underneath them.
misses: Vec<PathBuf>,
}
impl GitCache {
pub fn has_anything_for(&self, index: &Path) -> bool {
self.repos.iter().any(|e| e.has_path(index))
}
pub fn get(&self, index: &Path, prefix_lookup: bool) -> f::Git {
self.repos.iter()
.find(|e| e.has_path(index))
.map(|repo| repo.search(index, prefix_lookup))
.unwrap_or_default()
}
}
use std::iter::FromIterator;
impl FromIterator<PathBuf> for GitCache {
fn from_iter<I>(iter: I) -> Self
where I: IntoIterator<Item=PathBuf>
{
let iter = iter.into_iter();
let mut git = Self {
repos: Vec::with_capacity(iter.size_hint().0),
misses: Vec::new(),
};
for path in iter {
if git.misses.contains(&path) {
debug!("Skipping {:?} because it already came back Gitless", path);
}
else if git.repos.iter().any(|e| e.has_path(&path)) {
debug!("Skipping {:?} because we already queried it", path);
}
else {
match GitRepo::discover(path) {
Ok(r) => {
if let Some(r2) = git.repos.iter_mut().find(|e| e.has_workdir(&r.workdir)) {
debug!("Adding to existing repo (workdir matches with {:?})", r2.workdir);
r2.extra_paths.push(r.original_path);
continue;
}
debug!("Discovered new Git repo");
git.repos.push(r);
}
Err(miss) => {
git.misses.push(miss)
}
}
}
}
git
}
}
/// A **Git repository** is one we’ve discovered somewhere on the filesystem.
pub struct GitRepo {
/// The queryable contents of the repository: either a `git2` repo, or the
/// cached results from when we queried it last time.
contents: Mutex<GitContents>,
/// The working directory of this repository.
/// This is used to check whether two repositories are the same.
workdir: PathBuf,
/// The path that was originally checked to discover this repository.
/// This is as important as the extra_paths (it gets checked first), but
/// is separate to avoid having to deal with a non-empty Vec.
original_path: PathBuf,
/// Any other paths that were checked only to result in this same
/// repository.
extra_paths: Vec<PathBuf>,
}
/// A repository’s queried state.
enum GitContents {
/// All the interesting Git stuff goes through this.
Before {
repo: git2::Repository,
},
/// Temporary value used in `repo_to_statuses` so we can move the
/// repository out of the `Before` variant.
Processing,
/// The data we’ve extracted from the repository, but only after we’ve
/// actually done so.
After {
statuses: Git,
},
}
impl GitRepo {
/// Searches through this repository for a path (to a file or directory,
/// depending on the prefix-lookup flag) and returns its Git status.
///
/// Actually querying the `git2` repository for the mapping of paths to
/// Git statuses is only done once, and gets cached so we don’t need to
/// re-query the entire repository the times after that.
///
/// The temporary `Processing` enum variant is used after the `git2`
/// repository is moved out, but before the results have been moved in!
/// See <https://stackoverflow.com/q/45985827/3484614>
fn search(&self, index: &Path, prefix_lookup: bool) -> f::Git {
use std::mem::replace;
let mut contents = self.contents.lock().unwrap();
if let GitContents::After { ref statuses } = *contents {
debug!("Git repo {:?} has been found in cache", &self.workdir);
return statuses.status(index, prefix_lookup);
}
debug!("Querying Git repo {:?} for the first time", &self.workdir);
let repo = replace(&mut *contents, GitContents::Processing).inner_repo();
let statuses = repo_to_statuses(&repo, &self.workdir);
let result = statuses.status(index, prefix_lookup);
let _processing = replace(&mut *contents, GitContents::After { statuses });
result
}
/// Whether this repository has the given working directory.
fn has_workdir(&self, path: &Path) -> bool {
self.workdir == path
}
/// Whether this repository cares about the given path at all.
fn has_path(&self, path: &Path) -> bool {
path.starts_with(&self.original_path) || self.extra_paths.iter().any(|e| path.starts_with(e))
}
/// Searches for a Git repository at any point above the given path.
/// Returns the original buffer if none is found.
fn discover(path: PathBuf) -> Result<Self, PathBuf> {
info!("Searching for Git repository above {:?}", path);
let repo = match git2::Repository::discover(&path) {
Ok(r) => r,
Err(e) => {
error!("Error discovering Git repositories: {:?}", e);
return Err(path);
}
};
if let Some(workdir) = repo.workdir() {
let workdir = workdir.to_path_buf();
let contents = Mutex::new(GitContents::Before { repo });
Ok(Self { contents, workdir, original_path: path, extra_paths: Vec::new() })
}
else {
warn!("Repository has no workdir?");
Err(path)
}
}
}
impl GitContents {
/// Assumes that the repository hasn’t been queried, and extracts it
/// (consuming the value) if it has. This is needed because the entire
/// enum variant gets replaced when a repo is queried (see above).
fn inner_repo(self) -> git2::Repository {
if let Self::Before { repo } = self {
repo
}
else {
unreachable!("Tried to extract a non-Repository")
}
}
}
/// Iterates through a repository’s statuses, consuming it and returning the
/// mapping of files to their Git status.
/// We will have already used the working directory at this point, so it gets
/// passed in rather than deriving it from the `Repository` again.
fn repo_to_statuses(repo: &git2::Repository, workdir: &Path) -> Git {
let mut statuses = Vec::new();
info!("Getting Git statuses for repo with workdir {:?}", workdir);
match repo.statuses(None) {
Ok(es) => {
for e in es.iter() {
#[cfg(target_family = "unix")]
let path = workdir.join(Path::new(OsStr::from_bytes(e.path_bytes())));
// TODO: handle non Unix systems better:
// https://github.com/ogham/exa/issues/698
#[cfg(not(target_family = "unix"))]
let path = workdir.join(Path::new(e.path().unwrap()));
let elem = (path, e.status());
statuses.push(elem);
}
}
Err(e) => {
error!("Error looking up Git statuses: {:?}", e);
}
}
Git { statuses }
}
// The `repo.statuses` call above takes a long time. exa debug output:
//
// 20.311276 INFO:exa::fs::feature::git: Getting Git statuses for repo with workdir "/vagrant/"
// 20.799610 DEBUG:exa::output::table: Getting Git status for file "./Cargo.toml"
//
// Even inserting another logging line immediately afterwards doesn’t make it
// look any faster.
/// Container of Git statuses for all the files in this folder’s Git repository.
struct Git {
statuses: Vec<(PathBuf, git2::Status)>,
}
impl Git {
/// Get either the file or directory status for the given path.
/// “Prefix lookup” means that it should report an aggregate status of all
/// paths starting with the given prefix (in other words, a directory).
fn status(&self, index: &Path, prefix_lookup: bool) -> f::Git {
if prefix_lookup { self.dir_status(index) }
else { self.file_status(index) }
}
/// Get the user-facing status of a file.
/// We check the statuses directly applying to a file, and for the ignored
/// status we check if any of its parents directories is ignored by git.
fn file_status(&self, file: &Path) -> f::Git {
let path = reorient(file);
let s = self.statuses.iter()
.filter(|p| if p.1 == git2::Status::IGNORED {
path.starts_with(&p.0)
} else {
p.0 == path
})
.fold(git2::Status::empty(), |a, b| a | b.1);
let staged = index_status(s);
let unstaged = working_tree_status(s);
f::Git { staged, unstaged }
}
/// Get the combined, user-facing status of a directory.
/// Statuses are aggregating (for example, a directory is considered
/// modified if any file under it has the status modified), except for
/// ignored status which applies to files under (for example, a directory
/// is considered ignored if one of its parent directories is ignored).
fn dir_status(&self, dir: &Path | t {
let path = reorient(dir);
let s = self.statuses.iter()
.filter(|p| if p.1 == git2::Status::IGNORED {
path.starts_with(&p.0)
} else {
p.0.starts_with(&path)
})
.fold(git2::Status::empty(), |a, b| a | b.1);
let staged = index_status(s);
let unstaged = working_tree_status(s);
f::Git { staged, unstaged }
}
}
/// Converts a path to an absolute path based on the current directory.
/// Paths need to be absolute for them to be compared properly, otherwise
/// you’d ask a repo about “./README.md” but it only knows about
/// “/vagrant/README.md”, prefixed by the workdir.
#[cfg(unix)]
fn reorient(path: &Path) -> PathBuf {
use std::env::current_dir;
// TODO: I’m not 100% on this func tbh
let path = match current_dir() {
Err(_) => Path::new(".").join(&path),
Ok(dir) => dir.join(&path),
};
path.canonicalize().unwrap_or(path)
}
#[cfg(windows)]
fn reorient(path: &Path) -> PathBuf {
let unc_path = path.canonicalize().unwrap();
// On Windows UNC path is returned. We need to strip the prefix for it to work.
let normal_path = unc_path.as_os_str().to_str().unwrap().trim_left_matches("\\\\?\\");
return PathBuf::from(normal_path);
}
/// The character to display if the file has been modified, but not staged.
fn working_tree_status(status: git2::Status) -> f::GitStatus {
match status {
s if s.contains(git2::Status::WT_NEW) => f::GitStatus::New,
s if s.contains(git2::Status::WT_MODIFIED) => f::GitStatus::Modified,
s if s.contains(git2::Status::WT_DELETED) => f::GitStatus::Deleted,
s if s.contains(git2::Status::WT_RENAMED) => f::GitStatus::Renamed,
s if s.contains(git2::Status::WT_TYPECHANGE) => f::GitStatus::TypeChange,
s if s.contains(git2::Status::IGNORED) => f::GitStatus::Ignored,
s if s.contains(git2::Status::CONFLICTED) => f::GitStatus::Conflicted,
_ => f::GitStatus::NotModified,
}
}
/// The character to display if the file has been modified and the change
/// has been staged.
fn index_status(status: git2::Status) -> f::GitStatus {
match status {
s if s.contains(git2::Status::INDEX_NEW) => f::GitStatus::New,
s if s.contains(git2::Status::INDEX_MODIFIED) => f::GitStatus::Modified,
s if s.contains(git2::Status::INDEX_DELETED) => f::GitStatus::Deleted,
s if s.contains(git2::Status::INDEX_RENAMED) => f::GitStatus::Renamed,
s if s.contains(git2::Status::INDEX_TYPECHANGE) => f::GitStatus::TypeChange,
_ => f::GitStatus::NotModified,
}
}
| ) -> f::Gi | identifier_name |
split.go | package critbit
import (
"fmt"
"sync"
)
// Split splits a tree into two trees, each having one half of the key-value pairs.
// If there is an odd number of keys, the right tree (the second returned tree)
// will have the extra key-value pair.
func (tree *Critbit) Split() (*Critbit, *Critbit) {
// Empty, or other trivial cases?
switch tree.numExternalRefs {
case 0:
return tree, New(0)
case 1:
return tree, New(0)
case 2:
return tree.splitTwoExternalRefs()
}
leftNumKeys := tree.numExternalRefs / 2
return tree.SplitAt(leftNumKeys)
}
func (tree *Critbit) splitTwoExternalRefs() (*Critbit, *Critbit) {
rootNode := &tree.internalNodes[tree.rootItem]
left := New(1)
leftRef := &tree.externalRefs[rootNode.child[0]]
leftRefNum, err := tree.addExternalRef(leftRef.key, leftRef.value)
// An error should not happen because of the size of the tree
if err != nil {
panic(err.Error())
}
left.rootItem = leftRefNum
right := New(1)
rightRef := &tree.externalRefs[rootNode.child[1]]
rightRefNum, err := tree.addExternalRef(rightRef.key, rightRef.value)
// An error should not happen because of the size of the tree
if err != nil {
panic(err.Error())
}
right.rootItem = rightRefNum
return left, right
}
// Split splits a tree into two arbitrarily sized trees. The leftNumKeys
// arguments indicates how many treees the left tree (the first returned tree)
// should have. The right tree (the second returned tree) will have the rest.
func (tree *Critbit) SplitAt(leftNumKeys int) (*Critbit, *Critbit) {
leftItemChan := make(chan *splitItem)
rightItemChan := make(chan *splitItem)
rightNumKeys := tree.numExternalRefs - leftNumKeys
if rightNumKeys < 0 {
rightNumKeys = 0
}
leftTree := New(leftNumKeys)
rightTree := New(rightNumKeys)
go tree.splitWalkTree(leftNumKeys, leftItemChan, rightItemChan)
var wg sync.WaitGroup
wg.Add(2)
go createLeftSplit(&wg, leftTree, leftItemChan)
go createRightSplit(&wg, rightTree, rightItemChan)
wg.Wait()
return leftTree, rightTree
}
func (tree *Critbit) splitWalkTree(leftNumKeys int,
leftItemChan chan *splitItem, rightItemChan chan *splitItem) {
defer close(leftItemChan)
defer close(rightItemChan)
state := &splitWalkerState{
// It's impossible to approximate the longest path in the tree,
// but we can use the # of external refs as a pseuco max
path: make([]*splitItem, 0, tree.numExternalRefs),
numLeftKeysRemaining: leftNumKeys,
channels: make([]chan *splitItem, 2),
feedingRight: leftNumKeys == 0,
}
state.channels[0] = leftItemChan
state.channels[1] = rightItemChan
tree.splitWalkTreeRecurse(state)
}
type splitWalkerState struct {
visitedRoot bool
path []*splitItem
numLeftKeysRemaining int
channels []chan *splitItem
feedingRight bool
channel chan *splitItem
}
func (tree *Critbit) splitWalkTreeRecurse(state *splitWalkerState) {
sendPopup := true
if state.feedingRight {
state.channel = state.channels[1]
} else {
state.channel = state.channels[0]
}
// Just started?
if !state.visitedRoot {
state.path = append(state.path, &splitItem{
metaType: kSplitItemTreeData,
itemType: kChildIntNode,
itemID: tree.rootItem,
offset: tree.internalNodes[tree.rootItem].offset,
bit: tree.internalNodes[tree.rootItem].bit,
})
state.visitedRoot = true
}
item := state.path[len(state.path)-1]
state.channel <- item
switch item.itemType {
case kChildIntNode:
state.path = append(state.path, tree.createSplitItemFromNodeChild(item.itemID, 0))
tree.splitWalkTreeRecurse(state)
state.path = state.path[:len(state.path)-1]
state.path = append(state.path, tree.createSplitItemFromNodeChild(item.itemID, 1))
tree.splitWalkTreeRecurse(state)
state.path = state.path[:len(state.path)-1]
case kChildExtRef:
if !state.feedingRight {
state.numLeftKeysRemaining--
if state.numLeftKeysRemaining == 0 {
state.feedingRight = true
state.channel = state.channels[1]
// Need to feed 'path' up to, but not including, the ext ref to the right tree,
// so popups make sense.
// But we need to make copies of each splitItem, as the leftTree will need
// its own copy
for _, pathItem := range state.path[0 : len(state.path)-1] {
clonedItem := pathItem.Clone()
state.channel <- clonedItem
}
sendPopup = false
}
}
}
// The left reader gets popups only until the # keys hasn't been reached;
if sendPopup && (state.feedingRight || state.numLeftKeysRemaining > 0) {
state.channel <- &popupItem
}
}
func (tree *Critbit) createSplitItemFromNodeChild(nodeNum uint32, childDirection byte) *splitItem {
node := &tree.internalNodes[nodeNum]
itemType := node.getChildType(childDirection)
switch itemType {
case kChildIntNode:
return &splitItem{
metaType: kSplitItemTreeData,
itemType: kChildIntNode,
itemID: node.child[childDirection],
direction: childDirection,
offset: node.offset,
bit: node.bit,
}
case kChildExtRef:
itemID := node.child[childDirection]
key := tree.externalRefs[itemID].key
value := tree.externalRefs[itemID].value
return &splitItem{
metaType: kSplitItemTreeData,
itemType: kChildExtRef,
itemID: itemID,
direction: childDirection,
key: key,
value: value,
}
default:
panic(fmt.Sprintf("Node %d has unexpected child type %d in direction %d",
nodeNum, node.getChildType(childDirection), childDirection))
}
}
const (
kSplitItemTreeData = 1
kSplitItemPopUp = 2
)
type splitItem struct {
metaType int
newTreeID uint32
itemType uint8
itemID uint32
direction byte
// If internalNode
offset uint16
bit uint8
// If externalRef
key string
value interface{}
}
// Clones, but clears newTreeID
func (item *splitItem) Clone() *splitItem {
return &splitItem{
metaType: item.metaType,
itemType: item.itemType,
itemID: item.itemID,
direction: item.direction,
offset: item.offset,
bit: item.bit,
key: item.key,
value: item.value,
}
}
var popupItem splitItem = splitItem{metaType: kSplitItemPopUp}
func createLeftSplit(wg *sync.WaitGroup, tree *Critbit, itemChan chan *splitItem) {
defer wg.Done()
// Populate the tree
_ = tree.populateFromSplitChannel("left", itemChan)
// Elide the root and sides
tree.postSplitElideRootIfNeeded(1)
tree.postSplitZipSide(1)
}
func createRightSplit(wg *sync.WaitGroup, tree *Critbit, itemChan chan *splitItem) {
defer wg.Done()
// Populate the tree
_ = tree.populateFromSplitChannel("right", itemChan)
// Elide the root and sides
tree.postSplitElideRootIfNeeded(0)
tree.postSplitZipSide(0)
}
func (tree *Critbit) populateFromSplitChannel(side string, itemChan chan *splitItem) []*splitItem {
var path []*splitItem
for item := range itemChan {
if item.metaType == kSplitItemPopUp {
path = path[:len(path)-1]
continue
}
switch item.itemType {
case kChildIntNode:
nodeNum, node := tree.addInternalNode()
item.newTreeID = nodeNum
node.offset = item.offset
node.bit = item.bit
case kChildExtRef:
refNum, err := tree.addExternalRef(item.key, item.value)
// An error should not happen because of the size of the tree
if err != nil {
panic(err.Error())
}
item.newTreeID = refNum
}
if len(path) == 0 {
if item.itemType != kChildIntNode {
panic(fmt.Sprintf("(%s) First node has type %d", side, item.itemType))
}
tree.rootItem = item.newTreeID
} else {
parentNode := &tree.internalNodes[path[len(path)-1].newTreeID]
parentNode.setChild(item.direction, item.newTreeID, item.itemType)
}
path = append(path, item)
}
return path
}
func (tree *Critbit) | (direction byte) {
// Walk down from the root, eliding the root as necessary.
if tree.rootItemType() == kChildIntNode {
rootNode := &tree.internalNodes[tree.rootItem]
for rootNode.getChildType(direction) == kChildNil {
prevRootItem := tree.rootItem
tree.rootItem = tree.internalNodes[prevRootItem].child[1-direction]
tree.deleteInternalNode(prevRootItem)
rootNode = &tree.internalNodes[tree.rootItem]
}
}
}
func (tree *Critbit) postSplitZipSide(direction byte) {
// Now that we know we don't need to elide the root, walk down
// from the root, towards the right, eliding as necessary
if tree.rootItemType() != kChildIntNode {
return
}
var prevNodeNum uint32 = kNilNode
var prevNode *internalNode
var nodeNum uint32 = tree.rootItem
var node *internalNode
pathNodeNums := make([]uint32, 0)
for keepGoing := true; keepGoing; {
node = &tree.internalNodes[nodeNum]
// Do something based on the right child type
switch node.getChildType(direction) {
case kChildExtRef:
keepGoing = false
break
case kChildIntNode:
pathNodeNums = append(pathNodeNums, nodeNum)
prevNodeNum = nodeNum
prevNode = node
nodeNum = node.child[direction]
continue
case kChildNil:
lchildType := node.getChildType(1 - direction)
if lchildType == kChildNil {
// The node is elided, but really the prevNode needs to be elided as this node
// is _completely unnecessary
tree.deleteInternalNode(nodeNum)
prevNode.setChild(direction, nodeNum, kChildNil)
// Go up
nodeNum = prevNodeNum
node = prevNode
if len(pathNodeNums) > 1 {
prevNodeNum = pathNodeNums[len(pathNodeNums)-2]
prevNode = &tree.internalNodes[prevNodeNum]
continue
} else {
tree.rootItem = prevNode.child[1-direction]
tree.deleteInternalNode(tree.rootItem)
keepGoing = false
break
}
}
lchildID := node.child[1-direction]
prevNode.setChild(direction, lchildID, lchildType)
tree.deleteInternalNode(nodeNum)
// Do something based on the left child type
switch lchildType {
case kChildExtRef:
keepGoing = false
break
case kChildIntNode:
// prevNode stays, but node changes because we just elided ourselves!
nodeNum = lchildID
continue
}
}
}
}
| postSplitElideRootIfNeeded | identifier_name |
split.go | package critbit
import (
"fmt"
"sync"
)
// Split splits a tree into two trees, each having one half of the key-value pairs.
// If there is an odd number of keys, the right tree (the second returned tree)
// will have the extra key-value pair.
func (tree *Critbit) Split() (*Critbit, *Critbit) {
// Empty, or other trivial cases?
switch tree.numExternalRefs {
case 0:
return tree, New(0)
case 1:
return tree, New(0)
case 2:
return tree.splitTwoExternalRefs()
}
leftNumKeys := tree.numExternalRefs / 2
return tree.SplitAt(leftNumKeys)
}
func (tree *Critbit) splitTwoExternalRefs() (*Critbit, *Critbit) {
rootNode := &tree.internalNodes[tree.rootItem]
left := New(1)
leftRef := &tree.externalRefs[rootNode.child[0]]
leftRefNum, err := tree.addExternalRef(leftRef.key, leftRef.value)
// An error should not happen because of the size of the tree
if err != nil {
panic(err.Error())
}
left.rootItem = leftRefNum
right := New(1)
rightRef := &tree.externalRefs[rootNode.child[1]]
rightRefNum, err := tree.addExternalRef(rightRef.key, rightRef.value)
// An error should not happen because of the size of the tree
if err != nil {
panic(err.Error())
}
right.rootItem = rightRefNum
return left, right
}
// Split splits a tree into two arbitrarily sized trees. The leftNumKeys
// arguments indicates how many treees the left tree (the first returned tree)
// should have. The right tree (the second returned tree) will have the rest.
func (tree *Critbit) SplitAt(leftNumKeys int) (*Critbit, *Critbit) {
leftItemChan := make(chan *splitItem)
rightItemChan := make(chan *splitItem)
rightNumKeys := tree.numExternalRefs - leftNumKeys
if rightNumKeys < 0 {
rightNumKeys = 0
}
leftTree := New(leftNumKeys)
rightTree := New(rightNumKeys)
go tree.splitWalkTree(leftNumKeys, leftItemChan, rightItemChan)
var wg sync.WaitGroup
wg.Add(2)
go createLeftSplit(&wg, leftTree, leftItemChan)
go createRightSplit(&wg, rightTree, rightItemChan)
wg.Wait()
return leftTree, rightTree
}
func (tree *Critbit) splitWalkTree(leftNumKeys int,
leftItemChan chan *splitItem, rightItemChan chan *splitItem) {
defer close(leftItemChan)
defer close(rightItemChan)
state := &splitWalkerState{
// It's impossible to approximate the longest path in the tree,
// but we can use the # of external refs as a pseuco max
path: make([]*splitItem, 0, tree.numExternalRefs),
numLeftKeysRemaining: leftNumKeys,
channels: make([]chan *splitItem, 2),
feedingRight: leftNumKeys == 0,
}
state.channels[0] = leftItemChan
state.channels[1] = rightItemChan
tree.splitWalkTreeRecurse(state)
}
type splitWalkerState struct {
visitedRoot bool
path []*splitItem
numLeftKeysRemaining int
channels []chan *splitItem
feedingRight bool
channel chan *splitItem
}
func (tree *Critbit) splitWalkTreeRecurse(state *splitWalkerState) {
sendPopup := true
if state.feedingRight {
state.channel = state.channels[1]
} else {
state.channel = state.channels[0]
}
// Just started?
if !state.visitedRoot {
state.path = append(state.path, &splitItem{
metaType: kSplitItemTreeData,
itemType: kChildIntNode,
itemID: tree.rootItem,
offset: tree.internalNodes[tree.rootItem].offset,
bit: tree.internalNodes[tree.rootItem].bit,
})
state.visitedRoot = true
}
item := state.path[len(state.path)-1]
state.channel <- item
switch item.itemType {
case kChildIntNode:
state.path = append(state.path, tree.createSplitItemFromNodeChild(item.itemID, 0))
tree.splitWalkTreeRecurse(state)
state.path = state.path[:len(state.path)-1]
state.path = append(state.path, tree.createSplitItemFromNodeChild(item.itemID, 1))
tree.splitWalkTreeRecurse(state)
state.path = state.path[:len(state.path)-1]
case kChildExtRef:
if !state.feedingRight {
state.numLeftKeysRemaining--
if state.numLeftKeysRemaining == 0 {
state.feedingRight = true
state.channel = state.channels[1]
// Need to feed 'path' up to, but not including, the ext ref to the right tree,
// so popups make sense.
// But we need to make copies of each splitItem, as the leftTree will need
// its own copy
for _, pathItem := range state.path[0 : len(state.path)-1] {
clonedItem := pathItem.Clone()
state.channel <- clonedItem
}
sendPopup = false
}
}
}
// The left reader gets popups only until the # keys hasn't been reached;
if sendPopup && (state.feedingRight || state.numLeftKeysRemaining > 0) {
state.channel <- &popupItem
}
}
func (tree *Critbit) createSplitItemFromNodeChild(nodeNum uint32, childDirection byte) *splitItem {
node := &tree.internalNodes[nodeNum]
itemType := node.getChildType(childDirection)
switch itemType {
case kChildIntNode:
return &splitItem{
metaType: kSplitItemTreeData,
itemType: kChildIntNode,
itemID: node.child[childDirection],
direction: childDirection,
offset: node.offset,
bit: node.bit,
}
case kChildExtRef:
itemID := node.child[childDirection]
key := tree.externalRefs[itemID].key
value := tree.externalRefs[itemID].value
return &splitItem{
metaType: kSplitItemTreeData,
itemType: kChildExtRef,
itemID: itemID,
direction: childDirection,
key: key,
value: value,
}
default:
panic(fmt.Sprintf("Node %d has unexpected child type %d in direction %d",
nodeNum, node.getChildType(childDirection), childDirection))
}
}
const (
kSplitItemTreeData = 1
kSplitItemPopUp = 2
)
type splitItem struct {
metaType int
newTreeID uint32
itemType uint8
itemID uint32
direction byte
// If internalNode
offset uint16
bit uint8
// If externalRef
key string
value interface{}
}
// Clones, but clears newTreeID
func (item *splitItem) Clone() *splitItem {
return &splitItem{
metaType: item.metaType,
itemType: item.itemType, | key: item.key,
value: item.value,
}
}
var popupItem splitItem = splitItem{metaType: kSplitItemPopUp}
func createLeftSplit(wg *sync.WaitGroup, tree *Critbit, itemChan chan *splitItem) {
defer wg.Done()
// Populate the tree
_ = tree.populateFromSplitChannel("left", itemChan)
// Elide the root and sides
tree.postSplitElideRootIfNeeded(1)
tree.postSplitZipSide(1)
}
func createRightSplit(wg *sync.WaitGroup, tree *Critbit, itemChan chan *splitItem) {
defer wg.Done()
// Populate the tree
_ = tree.populateFromSplitChannel("right", itemChan)
// Elide the root and sides
tree.postSplitElideRootIfNeeded(0)
tree.postSplitZipSide(0)
}
func (tree *Critbit) populateFromSplitChannel(side string, itemChan chan *splitItem) []*splitItem {
var path []*splitItem
for item := range itemChan {
if item.metaType == kSplitItemPopUp {
path = path[:len(path)-1]
continue
}
switch item.itemType {
case kChildIntNode:
nodeNum, node := tree.addInternalNode()
item.newTreeID = nodeNum
node.offset = item.offset
node.bit = item.bit
case kChildExtRef:
refNum, err := tree.addExternalRef(item.key, item.value)
// An error should not happen because of the size of the tree
if err != nil {
panic(err.Error())
}
item.newTreeID = refNum
}
if len(path) == 0 {
if item.itemType != kChildIntNode {
panic(fmt.Sprintf("(%s) First node has type %d", side, item.itemType))
}
tree.rootItem = item.newTreeID
} else {
parentNode := &tree.internalNodes[path[len(path)-1].newTreeID]
parentNode.setChild(item.direction, item.newTreeID, item.itemType)
}
path = append(path, item)
}
return path
}
func (tree *Critbit) postSplitElideRootIfNeeded(direction byte) {
// Walk down from the root, eliding the root as necessary.
if tree.rootItemType() == kChildIntNode {
rootNode := &tree.internalNodes[tree.rootItem]
for rootNode.getChildType(direction) == kChildNil {
prevRootItem := tree.rootItem
tree.rootItem = tree.internalNodes[prevRootItem].child[1-direction]
tree.deleteInternalNode(prevRootItem)
rootNode = &tree.internalNodes[tree.rootItem]
}
}
}
func (tree *Critbit) postSplitZipSide(direction byte) {
// Now that we know we don't need to elide the root, walk down
// from the root, towards the right, eliding as necessary
if tree.rootItemType() != kChildIntNode {
return
}
var prevNodeNum uint32 = kNilNode
var prevNode *internalNode
var nodeNum uint32 = tree.rootItem
var node *internalNode
pathNodeNums := make([]uint32, 0)
for keepGoing := true; keepGoing; {
node = &tree.internalNodes[nodeNum]
// Do something based on the right child type
switch node.getChildType(direction) {
case kChildExtRef:
keepGoing = false
break
case kChildIntNode:
pathNodeNums = append(pathNodeNums, nodeNum)
prevNodeNum = nodeNum
prevNode = node
nodeNum = node.child[direction]
continue
case kChildNil:
lchildType := node.getChildType(1 - direction)
if lchildType == kChildNil {
// The node is elided, but really the prevNode needs to be elided as this node
// is _completely unnecessary
tree.deleteInternalNode(nodeNum)
prevNode.setChild(direction, nodeNum, kChildNil)
// Go up
nodeNum = prevNodeNum
node = prevNode
if len(pathNodeNums) > 1 {
prevNodeNum = pathNodeNums[len(pathNodeNums)-2]
prevNode = &tree.internalNodes[prevNodeNum]
continue
} else {
tree.rootItem = prevNode.child[1-direction]
tree.deleteInternalNode(tree.rootItem)
keepGoing = false
break
}
}
lchildID := node.child[1-direction]
prevNode.setChild(direction, lchildID, lchildType)
tree.deleteInternalNode(nodeNum)
// Do something based on the left child type
switch lchildType {
case kChildExtRef:
keepGoing = false
break
case kChildIntNode:
// prevNode stays, but node changes because we just elided ourselves!
nodeNum = lchildID
continue
}
}
}
} | itemID: item.itemID,
direction: item.direction,
offset: item.offset,
bit: item.bit, | random_line_split |
split.go | package critbit
import (
"fmt"
"sync"
)
// Split splits a tree into two trees, each having one half of the key-value pairs.
// If there is an odd number of keys, the right tree (the second returned tree)
// will have the extra key-value pair.
func (tree *Critbit) Split() (*Critbit, *Critbit) {
// Empty, or other trivial cases?
switch tree.numExternalRefs {
case 0:
return tree, New(0)
case 1:
return tree, New(0)
case 2:
return tree.splitTwoExternalRefs()
}
leftNumKeys := tree.numExternalRefs / 2
return tree.SplitAt(leftNumKeys)
}
func (tree *Critbit) splitTwoExternalRefs() (*Critbit, *Critbit) {
rootNode := &tree.internalNodes[tree.rootItem]
left := New(1)
leftRef := &tree.externalRefs[rootNode.child[0]]
leftRefNum, err := tree.addExternalRef(leftRef.key, leftRef.value)
// An error should not happen because of the size of the tree
if err != nil {
panic(err.Error())
}
left.rootItem = leftRefNum
right := New(1)
rightRef := &tree.externalRefs[rootNode.child[1]]
rightRefNum, err := tree.addExternalRef(rightRef.key, rightRef.value)
// An error should not happen because of the size of the tree
if err != nil {
panic(err.Error())
}
right.rootItem = rightRefNum
return left, right
}
// Split splits a tree into two arbitrarily sized trees. The leftNumKeys
// arguments indicates how many treees the left tree (the first returned tree)
// should have. The right tree (the second returned tree) will have the rest.
func (tree *Critbit) SplitAt(leftNumKeys int) (*Critbit, *Critbit) {
leftItemChan := make(chan *splitItem)
rightItemChan := make(chan *splitItem)
rightNumKeys := tree.numExternalRefs - leftNumKeys
if rightNumKeys < 0 {
rightNumKeys = 0
}
leftTree := New(leftNumKeys)
rightTree := New(rightNumKeys)
go tree.splitWalkTree(leftNumKeys, leftItemChan, rightItemChan)
var wg sync.WaitGroup
wg.Add(2)
go createLeftSplit(&wg, leftTree, leftItemChan)
go createRightSplit(&wg, rightTree, rightItemChan)
wg.Wait()
return leftTree, rightTree
}
func (tree *Critbit) splitWalkTree(leftNumKeys int,
leftItemChan chan *splitItem, rightItemChan chan *splitItem) {
defer close(leftItemChan)
defer close(rightItemChan)
state := &splitWalkerState{
// It's impossible to approximate the longest path in the tree,
// but we can use the # of external refs as a pseuco max
path: make([]*splitItem, 0, tree.numExternalRefs),
numLeftKeysRemaining: leftNumKeys,
channels: make([]chan *splitItem, 2),
feedingRight: leftNumKeys == 0,
}
state.channels[0] = leftItemChan
state.channels[1] = rightItemChan
tree.splitWalkTreeRecurse(state)
}
type splitWalkerState struct {
visitedRoot bool
path []*splitItem
numLeftKeysRemaining int
channels []chan *splitItem
feedingRight bool
channel chan *splitItem
}
func (tree *Critbit) splitWalkTreeRecurse(state *splitWalkerState) {
sendPopup := true
if state.feedingRight {
state.channel = state.channels[1]
} else {
state.channel = state.channels[0]
}
// Just started?
if !state.visitedRoot {
state.path = append(state.path, &splitItem{
metaType: kSplitItemTreeData,
itemType: kChildIntNode,
itemID: tree.rootItem,
offset: tree.internalNodes[tree.rootItem].offset,
bit: tree.internalNodes[tree.rootItem].bit,
})
state.visitedRoot = true
}
item := state.path[len(state.path)-1]
state.channel <- item
switch item.itemType {
case kChildIntNode:
state.path = append(state.path, tree.createSplitItemFromNodeChild(item.itemID, 0))
tree.splitWalkTreeRecurse(state)
state.path = state.path[:len(state.path)-1]
state.path = append(state.path, tree.createSplitItemFromNodeChild(item.itemID, 1))
tree.splitWalkTreeRecurse(state)
state.path = state.path[:len(state.path)-1]
case kChildExtRef:
if !state.feedingRight {
state.numLeftKeysRemaining--
if state.numLeftKeysRemaining == 0 {
state.feedingRight = true
state.channel = state.channels[1]
// Need to feed 'path' up to, but not including, the ext ref to the right tree,
// so popups make sense.
// But we need to make copies of each splitItem, as the leftTree will need
// its own copy
for _, pathItem := range state.path[0 : len(state.path)-1] {
clonedItem := pathItem.Clone()
state.channel <- clonedItem
}
sendPopup = false
}
}
}
// The left reader gets popups only until the # keys hasn't been reached;
if sendPopup && (state.feedingRight || state.numLeftKeysRemaining > 0) {
state.channel <- &popupItem
}
}
func (tree *Critbit) createSplitItemFromNodeChild(nodeNum uint32, childDirection byte) *splitItem |
const (
kSplitItemTreeData = 1
kSplitItemPopUp = 2
)
type splitItem struct {
metaType int
newTreeID uint32
itemType uint8
itemID uint32
direction byte
// If internalNode
offset uint16
bit uint8
// If externalRef
key string
value interface{}
}
// Clones, but clears newTreeID
func (item *splitItem) Clone() *splitItem {
return &splitItem{
metaType: item.metaType,
itemType: item.itemType,
itemID: item.itemID,
direction: item.direction,
offset: item.offset,
bit: item.bit,
key: item.key,
value: item.value,
}
}
var popupItem splitItem = splitItem{metaType: kSplitItemPopUp}
func createLeftSplit(wg *sync.WaitGroup, tree *Critbit, itemChan chan *splitItem) {
defer wg.Done()
// Populate the tree
_ = tree.populateFromSplitChannel("left", itemChan)
// Elide the root and sides
tree.postSplitElideRootIfNeeded(1)
tree.postSplitZipSide(1)
}
func createRightSplit(wg *sync.WaitGroup, tree *Critbit, itemChan chan *splitItem) {
defer wg.Done()
// Populate the tree
_ = tree.populateFromSplitChannel("right", itemChan)
// Elide the root and sides
tree.postSplitElideRootIfNeeded(0)
tree.postSplitZipSide(0)
}
func (tree *Critbit) populateFromSplitChannel(side string, itemChan chan *splitItem) []*splitItem {
var path []*splitItem
for item := range itemChan {
if item.metaType == kSplitItemPopUp {
path = path[:len(path)-1]
continue
}
switch item.itemType {
case kChildIntNode:
nodeNum, node := tree.addInternalNode()
item.newTreeID = nodeNum
node.offset = item.offset
node.bit = item.bit
case kChildExtRef:
refNum, err := tree.addExternalRef(item.key, item.value)
// An error should not happen because of the size of the tree
if err != nil {
panic(err.Error())
}
item.newTreeID = refNum
}
if len(path) == 0 {
if item.itemType != kChildIntNode {
panic(fmt.Sprintf("(%s) First node has type %d", side, item.itemType))
}
tree.rootItem = item.newTreeID
} else {
parentNode := &tree.internalNodes[path[len(path)-1].newTreeID]
parentNode.setChild(item.direction, item.newTreeID, item.itemType)
}
path = append(path, item)
}
return path
}
func (tree *Critbit) postSplitElideRootIfNeeded(direction byte) {
// Walk down from the root, eliding the root as necessary.
if tree.rootItemType() == kChildIntNode {
rootNode := &tree.internalNodes[tree.rootItem]
for rootNode.getChildType(direction) == kChildNil {
prevRootItem := tree.rootItem
tree.rootItem = tree.internalNodes[prevRootItem].child[1-direction]
tree.deleteInternalNode(prevRootItem)
rootNode = &tree.internalNodes[tree.rootItem]
}
}
}
func (tree *Critbit) postSplitZipSide(direction byte) {
// Now that we know we don't need to elide the root, walk down
// from the root, towards the right, eliding as necessary
if tree.rootItemType() != kChildIntNode {
return
}
var prevNodeNum uint32 = kNilNode
var prevNode *internalNode
var nodeNum uint32 = tree.rootItem
var node *internalNode
pathNodeNums := make([]uint32, 0)
for keepGoing := true; keepGoing; {
node = &tree.internalNodes[nodeNum]
// Do something based on the right child type
switch node.getChildType(direction) {
case kChildExtRef:
keepGoing = false
break
case kChildIntNode:
pathNodeNums = append(pathNodeNums, nodeNum)
prevNodeNum = nodeNum
prevNode = node
nodeNum = node.child[direction]
continue
case kChildNil:
lchildType := node.getChildType(1 - direction)
if lchildType == kChildNil {
// The node is elided, but really the prevNode needs to be elided as this node
// is _completely unnecessary
tree.deleteInternalNode(nodeNum)
prevNode.setChild(direction, nodeNum, kChildNil)
// Go up
nodeNum = prevNodeNum
node = prevNode
if len(pathNodeNums) > 1 {
prevNodeNum = pathNodeNums[len(pathNodeNums)-2]
prevNode = &tree.internalNodes[prevNodeNum]
continue
} else {
tree.rootItem = prevNode.child[1-direction]
tree.deleteInternalNode(tree.rootItem)
keepGoing = false
break
}
}
lchildID := node.child[1-direction]
prevNode.setChild(direction, lchildID, lchildType)
tree.deleteInternalNode(nodeNum)
// Do something based on the left child type
switch lchildType {
case kChildExtRef:
keepGoing = false
break
case kChildIntNode:
// prevNode stays, but node changes because we just elided ourselves!
nodeNum = lchildID
continue
}
}
}
}
| {
node := &tree.internalNodes[nodeNum]
itemType := node.getChildType(childDirection)
switch itemType {
case kChildIntNode:
return &splitItem{
metaType: kSplitItemTreeData,
itemType: kChildIntNode,
itemID: node.child[childDirection],
direction: childDirection,
offset: node.offset,
bit: node.bit,
}
case kChildExtRef:
itemID := node.child[childDirection]
key := tree.externalRefs[itemID].key
value := tree.externalRefs[itemID].value
return &splitItem{
metaType: kSplitItemTreeData,
itemType: kChildExtRef,
itemID: itemID,
direction: childDirection,
key: key,
value: value,
}
default:
panic(fmt.Sprintf("Node %d has unexpected child type %d in direction %d",
nodeNum, node.getChildType(childDirection), childDirection))
}
} | identifier_body |
split.go | package critbit
import (
"fmt"
"sync"
)
// Split splits a tree into two trees, each having one half of the key-value pairs.
// If there is an odd number of keys, the right tree (the second returned tree)
// will have the extra key-value pair.
func (tree *Critbit) Split() (*Critbit, *Critbit) {
// Empty, or other trivial cases?
switch tree.numExternalRefs {
case 0:
return tree, New(0)
case 1:
return tree, New(0)
case 2:
return tree.splitTwoExternalRefs()
}
leftNumKeys := tree.numExternalRefs / 2
return tree.SplitAt(leftNumKeys)
}
func (tree *Critbit) splitTwoExternalRefs() (*Critbit, *Critbit) {
rootNode := &tree.internalNodes[tree.rootItem]
left := New(1)
leftRef := &tree.externalRefs[rootNode.child[0]]
leftRefNum, err := tree.addExternalRef(leftRef.key, leftRef.value)
// An error should not happen because of the size of the tree
if err != nil {
panic(err.Error())
}
left.rootItem = leftRefNum
right := New(1)
rightRef := &tree.externalRefs[rootNode.child[1]]
rightRefNum, err := tree.addExternalRef(rightRef.key, rightRef.value)
// An error should not happen because of the size of the tree
if err != nil {
panic(err.Error())
}
right.rootItem = rightRefNum
return left, right
}
// Split splits a tree into two arbitrarily sized trees. The leftNumKeys
// arguments indicates how many treees the left tree (the first returned tree)
// should have. The right tree (the second returned tree) will have the rest.
func (tree *Critbit) SplitAt(leftNumKeys int) (*Critbit, *Critbit) {
leftItemChan := make(chan *splitItem)
rightItemChan := make(chan *splitItem)
rightNumKeys := tree.numExternalRefs - leftNumKeys
if rightNumKeys < 0 {
rightNumKeys = 0
}
leftTree := New(leftNumKeys)
rightTree := New(rightNumKeys)
go tree.splitWalkTree(leftNumKeys, leftItemChan, rightItemChan)
var wg sync.WaitGroup
wg.Add(2)
go createLeftSplit(&wg, leftTree, leftItemChan)
go createRightSplit(&wg, rightTree, rightItemChan)
wg.Wait()
return leftTree, rightTree
}
func (tree *Critbit) splitWalkTree(leftNumKeys int,
leftItemChan chan *splitItem, rightItemChan chan *splitItem) {
defer close(leftItemChan)
defer close(rightItemChan)
state := &splitWalkerState{
// It's impossible to approximate the longest path in the tree,
// but we can use the # of external refs as a pseuco max
path: make([]*splitItem, 0, tree.numExternalRefs),
numLeftKeysRemaining: leftNumKeys,
channels: make([]chan *splitItem, 2),
feedingRight: leftNumKeys == 0,
}
state.channels[0] = leftItemChan
state.channels[1] = rightItemChan
tree.splitWalkTreeRecurse(state)
}
type splitWalkerState struct {
visitedRoot bool
path []*splitItem
numLeftKeysRemaining int
channels []chan *splitItem
feedingRight bool
channel chan *splitItem
}
func (tree *Critbit) splitWalkTreeRecurse(state *splitWalkerState) {
sendPopup := true
if state.feedingRight {
state.channel = state.channels[1]
} else {
state.channel = state.channels[0]
}
// Just started?
if !state.visitedRoot {
state.path = append(state.path, &splitItem{
metaType: kSplitItemTreeData,
itemType: kChildIntNode,
itemID: tree.rootItem,
offset: tree.internalNodes[tree.rootItem].offset,
bit: tree.internalNodes[tree.rootItem].bit,
})
state.visitedRoot = true
}
item := state.path[len(state.path)-1]
state.channel <- item
switch item.itemType {
case kChildIntNode:
state.path = append(state.path, tree.createSplitItemFromNodeChild(item.itemID, 0))
tree.splitWalkTreeRecurse(state)
state.path = state.path[:len(state.path)-1]
state.path = append(state.path, tree.createSplitItemFromNodeChild(item.itemID, 1))
tree.splitWalkTreeRecurse(state)
state.path = state.path[:len(state.path)-1]
case kChildExtRef:
if !state.feedingRight {
state.numLeftKeysRemaining--
if state.numLeftKeysRemaining == 0 {
state.feedingRight = true
state.channel = state.channels[1]
// Need to feed 'path' up to, but not including, the ext ref to the right tree,
// so popups make sense.
// But we need to make copies of each splitItem, as the leftTree will need
// its own copy
for _, pathItem := range state.path[0 : len(state.path)-1] {
clonedItem := pathItem.Clone()
state.channel <- clonedItem
}
sendPopup = false
}
}
}
// The left reader gets popups only until the # keys hasn't been reached;
if sendPopup && (state.feedingRight || state.numLeftKeysRemaining > 0) {
state.channel <- &popupItem
}
}
func (tree *Critbit) createSplitItemFromNodeChild(nodeNum uint32, childDirection byte) *splitItem {
node := &tree.internalNodes[nodeNum]
itemType := node.getChildType(childDirection)
switch itemType {
case kChildIntNode:
return &splitItem{
metaType: kSplitItemTreeData,
itemType: kChildIntNode,
itemID: node.child[childDirection],
direction: childDirection,
offset: node.offset,
bit: node.bit,
}
case kChildExtRef:
itemID := node.child[childDirection]
key := tree.externalRefs[itemID].key
value := tree.externalRefs[itemID].value
return &splitItem{
metaType: kSplitItemTreeData,
itemType: kChildExtRef,
itemID: itemID,
direction: childDirection,
key: key,
value: value,
}
default:
panic(fmt.Sprintf("Node %d has unexpected child type %d in direction %d",
nodeNum, node.getChildType(childDirection), childDirection))
}
}
const (
kSplitItemTreeData = 1
kSplitItemPopUp = 2
)
type splitItem struct {
metaType int
newTreeID uint32
itemType uint8
itemID uint32
direction byte
// If internalNode
offset uint16
bit uint8
// If externalRef
key string
value interface{}
}
// Clones, but clears newTreeID
func (item *splitItem) Clone() *splitItem {
return &splitItem{
metaType: item.metaType,
itemType: item.itemType,
itemID: item.itemID,
direction: item.direction,
offset: item.offset,
bit: item.bit,
key: item.key,
value: item.value,
}
}
var popupItem splitItem = splitItem{metaType: kSplitItemPopUp}
func createLeftSplit(wg *sync.WaitGroup, tree *Critbit, itemChan chan *splitItem) {
defer wg.Done()
// Populate the tree
_ = tree.populateFromSplitChannel("left", itemChan)
// Elide the root and sides
tree.postSplitElideRootIfNeeded(1)
tree.postSplitZipSide(1)
}
func createRightSplit(wg *sync.WaitGroup, tree *Critbit, itemChan chan *splitItem) {
defer wg.Done()
// Populate the tree
_ = tree.populateFromSplitChannel("right", itemChan)
// Elide the root and sides
tree.postSplitElideRootIfNeeded(0)
tree.postSplitZipSide(0)
}
func (tree *Critbit) populateFromSplitChannel(side string, itemChan chan *splitItem) []*splitItem {
var path []*splitItem
for item := range itemChan {
if item.metaType == kSplitItemPopUp |
switch item.itemType {
case kChildIntNode:
nodeNum, node := tree.addInternalNode()
item.newTreeID = nodeNum
node.offset = item.offset
node.bit = item.bit
case kChildExtRef:
refNum, err := tree.addExternalRef(item.key, item.value)
// An error should not happen because of the size of the tree
if err != nil {
panic(err.Error())
}
item.newTreeID = refNum
}
if len(path) == 0 {
if item.itemType != kChildIntNode {
panic(fmt.Sprintf("(%s) First node has type %d", side, item.itemType))
}
tree.rootItem = item.newTreeID
} else {
parentNode := &tree.internalNodes[path[len(path)-1].newTreeID]
parentNode.setChild(item.direction, item.newTreeID, item.itemType)
}
path = append(path, item)
}
return path
}
func (tree *Critbit) postSplitElideRootIfNeeded(direction byte) {
// Walk down from the root, eliding the root as necessary.
if tree.rootItemType() == kChildIntNode {
rootNode := &tree.internalNodes[tree.rootItem]
for rootNode.getChildType(direction) == kChildNil {
prevRootItem := tree.rootItem
tree.rootItem = tree.internalNodes[prevRootItem].child[1-direction]
tree.deleteInternalNode(prevRootItem)
rootNode = &tree.internalNodes[tree.rootItem]
}
}
}
func (tree *Critbit) postSplitZipSide(direction byte) {
// Now that we know we don't need to elide the root, walk down
// from the root, towards the right, eliding as necessary
if tree.rootItemType() != kChildIntNode {
return
}
var prevNodeNum uint32 = kNilNode
var prevNode *internalNode
var nodeNum uint32 = tree.rootItem
var node *internalNode
pathNodeNums := make([]uint32, 0)
for keepGoing := true; keepGoing; {
node = &tree.internalNodes[nodeNum]
// Do something based on the right child type
switch node.getChildType(direction) {
case kChildExtRef:
keepGoing = false
break
case kChildIntNode:
pathNodeNums = append(pathNodeNums, nodeNum)
prevNodeNum = nodeNum
prevNode = node
nodeNum = node.child[direction]
continue
case kChildNil:
lchildType := node.getChildType(1 - direction)
if lchildType == kChildNil {
// The node is elided, but really the prevNode needs to be elided as this node
// is _completely unnecessary
tree.deleteInternalNode(nodeNum)
prevNode.setChild(direction, nodeNum, kChildNil)
// Go up
nodeNum = prevNodeNum
node = prevNode
if len(pathNodeNums) > 1 {
prevNodeNum = pathNodeNums[len(pathNodeNums)-2]
prevNode = &tree.internalNodes[prevNodeNum]
continue
} else {
tree.rootItem = prevNode.child[1-direction]
tree.deleteInternalNode(tree.rootItem)
keepGoing = false
break
}
}
lchildID := node.child[1-direction]
prevNode.setChild(direction, lchildID, lchildType)
tree.deleteInternalNode(nodeNum)
// Do something based on the left child type
switch lchildType {
case kChildExtRef:
keepGoing = false
break
case kChildIntNode:
// prevNode stays, but node changes because we just elided ourselves!
nodeNum = lchildID
continue
}
}
}
}
| {
path = path[:len(path)-1]
continue
} | conditional_block |
usage.go | package handler
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/go-redis/redis/v8"
"github.com/google/uuid"
m3oauth "github.com/m3o/services/pkg/auth"
pb "github.com/m3o/services/usage/proto"
"github.com/micro/micro/v3/service"
"github.com/micro/micro/v3/service/auth"
"github.com/micro/micro/v3/service/config"
"github.com/micro/micro/v3/service/errors"
log "github.com/micro/micro/v3/service/logger"
"github.com/micro/micro/v3/service/store"
dbproto "github.com/micro/services/db/proto"
)
const (
prefixCounter = "usage-service/counter"
prefixUsageByCustomer = "usageByCustomer" // customer ID / date
counterTTL = 48 * time.Hour
)
type counter struct {
sync.RWMutex
redisClient *redis.Client
}
func (c *counter) incr(ctx context.Context, userID, path string, delta int64, t time.Time) (int64, error) {
t = t.UTC()
key := fmt.Sprintf("%s:%s:%s:%s", prefixCounter, userID, t.Format("20060102"), path)
pipe := c.redisClient.TxPipeline()
incr := pipe.IncrBy(ctx, key, delta)
pipe.Expire(ctx, key, counterTTL) // make sure we expire the counters
_, err := pipe.Exec(ctx)
if err != nil {
return 0, err
}
return incr.Result()
}
func (c *counter) decr(ctx context.Context, userID, path string, delta int64, t time.Time) (int64, error) {
t = t.UTC()
key := fmt.Sprintf("%s:%s:%s:%s", prefixCounter, userID, t.Format("20060102"), path)
pipe := c.redisClient.TxPipeline()
decr := pipe.DecrBy(ctx, key, delta)
pipe.Expire(ctx, key, counterTTL) // make sure we expire counters
_, err := pipe.Exec(ctx)
if err != nil {
return 0, err
}
return decr.Result()
}
func (c *counter) read(ctx context.Context, userID, path string, t time.Time) (int64, error) {
t = t.UTC()
ret, err := c.redisClient.Get(ctx, fmt.Sprintf("%s:%s:%s:%s", prefixCounter, userID, t.Format("20060102"), path)).Int64()
if err == redis.Nil {
return 0, nil
}
return ret, err
}
func (c *counter) deleteUser(ctx context.Context, userID string) error {
keys, err := c.redisClient.Keys(ctx, fmt.Sprintf("%s:%s:*", prefixCounter, userID)).Result()
if err != nil {
if err == redis.Nil {
return nil
}
return err
}
if len(keys) == 0 {
return nil
}
if err := c.redisClient.Del(ctx, keys...).Err(); err != nil && err != redis.Nil {
return err
}
return nil
}
type listEntry struct {
Service string
Count int64
}
func (c *counter) listForUser(userID string, t time.Time) ([]listEntry, error) {
ctx := context.Background()
keyPrefix := fmt.Sprintf("%s:%s:%s:", prefixCounter, userID, t.Format("20060102"))
sc := c.redisClient.Scan(ctx, 0, keyPrefix+"*", 0)
if err := sc.Err(); err != nil {
return nil, err
}
iter := sc.Iterator()
res := []listEntry{}
for {
if !iter.Next(ctx) {
break
}
key := iter.Val()
i, err := c.redisClient.Get(ctx, key).Int64()
if err != nil {
return nil, err
}
res = append(res, listEntry{
Service: strings.TrimPrefix(key, keyPrefix),
Count: i,
})
}
return res, iter.Err()
}
type UsageSvc struct {
c *counter
dbService dbproto.DbService
}
func NewHandler(svc *service.Service, dbService dbproto.DbService) *UsageSvc |
func (p *UsageSvc) Read(ctx context.Context, request *pb.ReadRequest, response *pb.ReadResponse) error {
acc, ok := auth.AccountFromContext(ctx)
if !ok {
return errors.Unauthorized("usage.Read", "Unauthorized")
}
if len(request.CustomerId) == 0 {
request.CustomerId = acc.ID
}
if acc.ID != request.CustomerId {
_, err := m3oauth.VerifyMicroAdmin(ctx, "usage.Read")
if err != nil {
return err
}
}
now := time.Now().UTC().Truncate(24 * time.Hour)
liveEntries, err := p.c.listForUser(request.CustomerId, now)
if err != nil {
log.Errorf("Error retrieving usage %s", err)
return errors.InternalServerError("usage.Read", "Error retrieving usage")
}
response.Usage = map[string]*pb.UsageHistory{}
// add live data on top of historical
keyPrefix := fmt.Sprintf("%s/%s/", prefixUsageByCustomer, request.CustomerId)
recs, err := store.Read(keyPrefix, store.ReadPrefix())
if err != nil {
log.Errorf("Error querying historical data %s", err)
return errors.InternalServerError("usage.Read", "Error retrieving usage")
}
addEntryToResponse := func(response *pb.ReadResponse, e listEntry, unixTime int64) {
// detailed view includes data for individual endpoints
if !request.Detail && strings.Contains(e.Service, "$") {
return
}
use := response.Usage[e.Service]
if use == nil {
use = &pb.UsageHistory{
ApiName: e.Service,
Records: []*pb.UsageRecord{},
}
}
use.Records = append(use.Records, &pb.UsageRecord{Date: unixTime, Requests: e.Count})
response.Usage[e.Service] = use
}
// add to slices
for _, rec := range recs {
date := strings.TrimPrefix(rec.Key, keyPrefix)
dateObj, err := time.Parse("20060102", date)
if err != nil {
log.Errorf("Error parsing date obj %s", err)
return errors.InternalServerError("usage.Read", "Error retrieving usage")
}
var de dateEntry
if err := json.Unmarshal(rec.Value, &de); err != nil {
log.Errorf("Error parsing date obj %s", err)
return errors.InternalServerError("usage.Read", "Error retrieving usage")
}
for _, e := range de.Entries {
addEntryToResponse(response, e, dateObj.Unix())
}
}
for _, e := range liveEntries {
addEntryToResponse(response, e, now.Unix())
}
// sort slices
for _, v := range response.Usage {
sort.Slice(v.Records, func(i, j int) bool {
if v.Records[i].Date == v.Records[j].Date {
return v.Records[i].Requests < v.Records[j].Requests
}
return v.Records[i].Date < v.Records[j].Date
})
}
// remove dupe
for k, v := range response.Usage {
lenRecs := len(v.Records)
if lenRecs < 2 {
continue
}
if v.Records[lenRecs-2].Date != v.Records[lenRecs-1].Date {
continue
}
response.Usage[k].Records = append(v.Records[:lenRecs-2], v.Records[lenRecs-1])
}
return nil
}
type dateEntry struct {
Entries []listEntry
}
func (p *UsageSvc) UsageCron() {
defer func() {
log.Infof("Usage sweep ended")
}()
log.Infof("Performing usage sweep")
// loop through counters and persist
ctx := context.Background()
sc := p.c.redisClient.Scan(ctx, 0, prefixCounter+":*", 0)
if err := sc.Err(); err != nil {
log.Errorf("Error running redis scan %s", err)
return
}
toPersist := map[string]map[string][]listEntry{} // userid->date->[]listEntry
it := sc.Iterator()
for {
if !it.Next(ctx) {
if err := it.Err(); err != nil {
log.Errorf("Error during iteration %s", err)
}
break
}
key := it.Val()
count, err := p.c.redisClient.Get(ctx, key).Int64()
if err != nil {
log.Errorf("Error retrieving value %s", err)
return
}
parts := strings.Split(strings.TrimPrefix(key, prefixCounter+":"), ":")
if len(parts) < 3 {
log.Errorf("Unexpected number of components in key %s", key)
continue
}
userID := parts[0]
date := parts[1]
service := parts[2]
dates := toPersist[userID]
if dates == nil {
dates = map[string][]listEntry{}
toPersist[userID] = dates
}
entries := dates[date]
if entries == nil {
entries = []listEntry{}
}
entries = append(entries, listEntry{
Service: service,
Count: count,
})
dates[date] = entries
}
for userID, v := range toPersist {
for date, entry := range v {
de := dateEntry{
Entries: entry,
}
b, err := json.Marshal(de)
if err != nil {
log.Errorf("Error marshalling entry %s", err)
return
}
store.Write(&store.Record{
Key: fmt.Sprintf("%s/%s/%s", prefixUsageByCustomer, userID, date),
Value: b,
})
}
}
}
func (p *UsageSvc) Sweep(ctx context.Context, request *pb.SweepRequest, response *pb.SweepResponse) error {
p.UsageCron()
return nil
}
func (p *UsageSvc) deleteUser(ctx context.Context, userID string) error {
if err := p.c.deleteUser(ctx, userID); err != nil {
return err
}
recs, err := store.Read(fmt.Sprintf("%s/%s/", prefixUsageByCustomer, userID), store.ReadPrefix())
if err != nil {
return err
}
for _, rec := range recs {
if err := store.Delete(rec.Key); err != nil {
return err
}
}
return nil
}
func (p *UsageSvc) DeleteCustomer(ctx context.Context, request *pb.DeleteCustomerRequest, response *pb.DeleteCustomerResponse) error {
if _, err := m3oauth.VerifyMicroAdmin(ctx, "usage.DeleteCustomer"); err != nil {
return err
}
if len(request.Id) == 0 {
return errors.BadRequest("usage.DeleteCustomer", "Error deleting customer")
}
if err := p.deleteUser(ctx, request.Id); err != nil {
log.Errorf("Error deleting customer %s", err)
return err
}
return nil
}
func (p *UsageSvc) SaveEvent(ctx context.Context, request *pb.SaveEventRequest, response *pb.SaveEventResponse) error {
if request.Event == nil {
return fmt.Errorf("event not provided")
}
if request.Event.Table == "" {
return fmt.Errorf("table not provided")
}
rec := request.Event.Record.AsMap()
if request.Event.Id == "" {
request.Event.Id = uuid.New().String()
}
rec["id"] = request.Event.Id
rec["createdAt"] = time.Now().Unix()
bs, err := json.Marshal(rec)
if err != nil {
return err
}
err = json.Unmarshal(bs, request.Event.Record)
if err != nil {
return err
}
_, err = p.dbService.Create(ctx, &dbproto.CreateRequest{
Table: request.Event.Table,
Record: request.Event.Record,
})
return err
}
func (p *UsageSvc) ListEvents(ctx context.Context, request *pb.ListEventsRequest, response *pb.ListEventsResponse) error {
if request.Table == "" {
return fmt.Errorf("no table provided")
}
resp, err := p.dbService.Read(ctx, &dbproto.ReadRequest{
Table: request.Table,
Query: "createdAt > 0",
OrderBy: "createdAt",
Order: "desc",
})
if err != nil {
return err
}
for _, v := range resp.Records {
response.Events = append(response.Events, &pb.Event{
Table: request.Table,
Record: v,
})
}
return nil
}
| {
redisConfig := struct {
Address string
User string
Password string
}{}
val, err := config.Get("micro.usage.redis")
if err != nil {
log.Fatalf("No redis config found %s", err)
}
if err := val.Scan(&redisConfig); err != nil {
log.Fatalf("Error parsing redis config %s", err)
}
if len(redisConfig.Password) == 0 || len(redisConfig.User) == 0 || len(redisConfig.Password) == 0 {
log.Fatalf("Missing redis config %s", err)
}
rc := redis.NewClient(&redis.Options{
Addr: redisConfig.Address,
Username: redisConfig.User,
Password: redisConfig.Password,
TLSConfig: &tls.Config{
InsecureSkipVerify: false,
},
})
p := &UsageSvc{
c: &counter{redisClient: rc},
dbService: dbService,
}
go p.consumeEvents()
return p
} | identifier_body |
usage.go | package handler
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/go-redis/redis/v8"
"github.com/google/uuid"
m3oauth "github.com/m3o/services/pkg/auth"
pb "github.com/m3o/services/usage/proto"
"github.com/micro/micro/v3/service"
"github.com/micro/micro/v3/service/auth"
"github.com/micro/micro/v3/service/config"
"github.com/micro/micro/v3/service/errors"
log "github.com/micro/micro/v3/service/logger"
"github.com/micro/micro/v3/service/store"
dbproto "github.com/micro/services/db/proto"
)
const (
prefixCounter = "usage-service/counter"
prefixUsageByCustomer = "usageByCustomer" // customer ID / date
counterTTL = 48 * time.Hour
)
type counter struct {
sync.RWMutex
redisClient *redis.Client
}
func (c *counter) incr(ctx context.Context, userID, path string, delta int64, t time.Time) (int64, error) {
t = t.UTC()
key := fmt.Sprintf("%s:%s:%s:%s", prefixCounter, userID, t.Format("20060102"), path)
pipe := c.redisClient.TxPipeline()
incr := pipe.IncrBy(ctx, key, delta)
pipe.Expire(ctx, key, counterTTL) // make sure we expire the counters
_, err := pipe.Exec(ctx)
if err != nil {
return 0, err
}
return incr.Result()
}
func (c *counter) decr(ctx context.Context, userID, path string, delta int64, t time.Time) (int64, error) {
t = t.UTC()
key := fmt.Sprintf("%s:%s:%s:%s", prefixCounter, userID, t.Format("20060102"), path)
pipe := c.redisClient.TxPipeline()
decr := pipe.DecrBy(ctx, key, delta)
pipe.Expire(ctx, key, counterTTL) // make sure we expire counters
_, err := pipe.Exec(ctx)
if err != nil {
return 0, err
}
return decr.Result()
}
func (c *counter) read(ctx context.Context, userID, path string, t time.Time) (int64, error) {
t = t.UTC()
ret, err := c.redisClient.Get(ctx, fmt.Sprintf("%s:%s:%s:%s", prefixCounter, userID, t.Format("20060102"), path)).Int64()
if err == redis.Nil {
return 0, nil
}
return ret, err
}
func (c *counter) deleteUser(ctx context.Context, userID string) error {
keys, err := c.redisClient.Keys(ctx, fmt.Sprintf("%s:%s:*", prefixCounter, userID)).Result()
if err != nil {
if err == redis.Nil {
return nil
}
return err
}
if len(keys) == 0 {
return nil
}
if err := c.redisClient.Del(ctx, keys...).Err(); err != nil && err != redis.Nil {
return err
}
return nil
}
type listEntry struct {
Service string
Count int64
}
func (c *counter) listForUser(userID string, t time.Time) ([]listEntry, error) {
ctx := context.Background()
keyPrefix := fmt.Sprintf("%s:%s:%s:", prefixCounter, userID, t.Format("20060102"))
sc := c.redisClient.Scan(ctx, 0, keyPrefix+"*", 0)
if err := sc.Err(); err != nil {
return nil, err
}
iter := sc.Iterator()
res := []listEntry{}
for {
if !iter.Next(ctx) {
break
}
key := iter.Val()
i, err := c.redisClient.Get(ctx, key).Int64()
if err != nil {
return nil, err
}
res = append(res, listEntry{
Service: strings.TrimPrefix(key, keyPrefix),
Count: i,
})
}
return res, iter.Err()
}
type UsageSvc struct {
c *counter
dbService dbproto.DbService
}
func NewHandler(svc *service.Service, dbService dbproto.DbService) *UsageSvc {
redisConfig := struct {
Address string
User string
Password string
}{}
val, err := config.Get("micro.usage.redis")
if err != nil {
log.Fatalf("No redis config found %s", err)
}
if err := val.Scan(&redisConfig); err != nil {
log.Fatalf("Error parsing redis config %s", err)
}
if len(redisConfig.Password) == 0 || len(redisConfig.User) == 0 || len(redisConfig.Password) == 0 {
log.Fatalf("Missing redis config %s", err)
}
rc := redis.NewClient(&redis.Options{
Addr: redisConfig.Address,
Username: redisConfig.User,
Password: redisConfig.Password,
TLSConfig: &tls.Config{
InsecureSkipVerify: false,
},
})
p := &UsageSvc{
c: &counter{redisClient: rc},
dbService: dbService,
}
go p.consumeEvents()
return p
}
func (p *UsageSvc) Read(ctx context.Context, request *pb.ReadRequest, response *pb.ReadResponse) error {
acc, ok := auth.AccountFromContext(ctx)
if !ok {
return errors.Unauthorized("usage.Read", "Unauthorized")
}
if len(request.CustomerId) == 0 {
request.CustomerId = acc.ID
}
if acc.ID != request.CustomerId {
_, err := m3oauth.VerifyMicroAdmin(ctx, "usage.Read")
if err != nil {
return err
}
}
now := time.Now().UTC().Truncate(24 * time.Hour)
liveEntries, err := p.c.listForUser(request.CustomerId, now)
if err != nil {
log.Errorf("Error retrieving usage %s", err)
return errors.InternalServerError("usage.Read", "Error retrieving usage")
}
response.Usage = map[string]*pb.UsageHistory{}
// add live data on top of historical
keyPrefix := fmt.Sprintf("%s/%s/", prefixUsageByCustomer, request.CustomerId)
recs, err := store.Read(keyPrefix, store.ReadPrefix())
if err != nil {
log.Errorf("Error querying historical data %s", err)
return errors.InternalServerError("usage.Read", "Error retrieving usage")
}
addEntryToResponse := func(response *pb.ReadResponse, e listEntry, unixTime int64) {
// detailed view includes data for individual endpoints
if !request.Detail && strings.Contains(e.Service, "$") {
return
}
use := response.Usage[e.Service]
if use == nil {
use = &pb.UsageHistory{
ApiName: e.Service,
Records: []*pb.UsageRecord{},
}
}
use.Records = append(use.Records, &pb.UsageRecord{Date: unixTime, Requests: e.Count})
response.Usage[e.Service] = use
}
// add to slices
for _, rec := range recs {
date := strings.TrimPrefix(rec.Key, keyPrefix)
dateObj, err := time.Parse("20060102", date)
if err != nil {
log.Errorf("Error parsing date obj %s", err)
return errors.InternalServerError("usage.Read", "Error retrieving usage")
}
var de dateEntry
if err := json.Unmarshal(rec.Value, &de); err != nil {
log.Errorf("Error parsing date obj %s", err)
return errors.InternalServerError("usage.Read", "Error retrieving usage")
}
for _, e := range de.Entries {
addEntryToResponse(response, e, dateObj.Unix())
}
}
for _, e := range liveEntries {
addEntryToResponse(response, e, now.Unix())
}
// sort slices
for _, v := range response.Usage {
sort.Slice(v.Records, func(i, j int) bool {
if v.Records[i].Date == v.Records[j].Date {
return v.Records[i].Requests < v.Records[j].Requests
}
return v.Records[i].Date < v.Records[j].Date
})
}
// remove dupe
for k, v := range response.Usage {
lenRecs := len(v.Records)
if lenRecs < 2 {
continue
}
if v.Records[lenRecs-2].Date != v.Records[lenRecs-1].Date {
continue
}
response.Usage[k].Records = append(v.Records[:lenRecs-2], v.Records[lenRecs-1])
}
return nil
}
type dateEntry struct {
Entries []listEntry
}
func (p *UsageSvc) UsageCron() {
defer func() {
log.Infof("Usage sweep ended")
}()
log.Infof("Performing usage sweep")
// loop through counters and persist
ctx := context.Background()
sc := p.c.redisClient.Scan(ctx, 0, prefixCounter+":*", 0)
if err := sc.Err(); err != nil {
log.Errorf("Error running redis scan %s", err)
return
}
toPersist := map[string]map[string][]listEntry{} // userid->date->[]listEntry
it := sc.Iterator()
for {
if !it.Next(ctx) {
if err := it.Err(); err != nil {
log.Errorf("Error during iteration %s", err)
}
break
}
key := it.Val()
count, err := p.c.redisClient.Get(ctx, key).Int64()
if err != nil {
log.Errorf("Error retrieving value %s", err)
return
}
parts := strings.Split(strings.TrimPrefix(key, prefixCounter+":"), ":")
if len(parts) < 3 {
log.Errorf("Unexpected number of components in key %s", key)
continue
}
userID := parts[0]
date := parts[1]
service := parts[2]
dates := toPersist[userID]
if dates == nil {
dates = map[string][]listEntry{}
toPersist[userID] = dates
}
entries := dates[date]
if entries == nil {
entries = []listEntry{}
}
entries = append(entries, listEntry{
Service: service,
Count: count,
})
dates[date] = entries
}
for userID, v := range toPersist {
for date, entry := range v {
de := dateEntry{
Entries: entry,
}
b, err := json.Marshal(de)
if err != nil {
log.Errorf("Error marshalling entry %s", err)
return
}
store.Write(&store.Record{
Key: fmt.Sprintf("%s/%s/%s", prefixUsageByCustomer, userID, date),
Value: b,
})
}
}
}
func (p *UsageSvc) Sweep(ctx context.Context, request *pb.SweepRequest, response *pb.SweepResponse) error {
p.UsageCron()
return nil
}
func (p *UsageSvc) deleteUser(ctx context.Context, userID string) error {
if err := p.c.deleteUser(ctx, userID); err != nil {
return err
}
recs, err := store.Read(fmt.Sprintf("%s/%s/", prefixUsageByCustomer, userID), store.ReadPrefix())
if err != nil {
return err
}
for _, rec := range recs {
if err := store.Delete(rec.Key); err != nil {
return err
}
}
return nil
}
func (p *UsageSvc) DeleteCustomer(ctx context.Context, request *pb.DeleteCustomerRequest, response *pb.DeleteCustomerResponse) error {
if _, err := m3oauth.VerifyMicroAdmin(ctx, "usage.DeleteCustomer"); err != nil {
return err
}
if len(request.Id) == 0 {
return errors.BadRequest("usage.DeleteCustomer", "Error deleting customer")
}
if err := p.deleteUser(ctx, request.Id); err != nil {
log.Errorf("Error deleting customer %s", err)
return err
}
return nil
}
func (p *UsageSvc) SaveEvent(ctx context.Context, request *pb.SaveEventRequest, response *pb.SaveEventResponse) error {
if request.Event == nil {
return fmt.Errorf("event not provided")
}
if request.Event.Table == "" {
return fmt.Errorf("table not provided")
}
rec := request.Event.Record.AsMap()
if request.Event.Id == "" {
request.Event.Id = uuid.New().String()
}
rec["id"] = request.Event.Id
rec["createdAt"] = time.Now().Unix()
bs, err := json.Marshal(rec)
if err != nil {
return err
}
err = json.Unmarshal(bs, request.Event.Record)
if err != nil |
_, err = p.dbService.Create(ctx, &dbproto.CreateRequest{
Table: request.Event.Table,
Record: request.Event.Record,
})
return err
}
func (p *UsageSvc) ListEvents(ctx context.Context, request *pb.ListEventsRequest, response *pb.ListEventsResponse) error {
if request.Table == "" {
return fmt.Errorf("no table provided")
}
resp, err := p.dbService.Read(ctx, &dbproto.ReadRequest{
Table: request.Table,
Query: "createdAt > 0",
OrderBy: "createdAt",
Order: "desc",
})
if err != nil {
return err
}
for _, v := range resp.Records {
response.Events = append(response.Events, &pb.Event{
Table: request.Table,
Record: v,
})
}
return nil
}
| {
return err
} | conditional_block |
usage.go | package handler
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/go-redis/redis/v8"
"github.com/google/uuid"
m3oauth "github.com/m3o/services/pkg/auth"
pb "github.com/m3o/services/usage/proto"
"github.com/micro/micro/v3/service"
"github.com/micro/micro/v3/service/auth"
"github.com/micro/micro/v3/service/config"
"github.com/micro/micro/v3/service/errors"
log "github.com/micro/micro/v3/service/logger"
"github.com/micro/micro/v3/service/store"
dbproto "github.com/micro/services/db/proto"
)
const (
prefixCounter = "usage-service/counter"
prefixUsageByCustomer = "usageByCustomer" // customer ID / date
counterTTL = 48 * time.Hour
)
type counter struct {
sync.RWMutex
redisClient *redis.Client
}
func (c *counter) incr(ctx context.Context, userID, path string, delta int64, t time.Time) (int64, error) {
t = t.UTC()
key := fmt.Sprintf("%s:%s:%s:%s", prefixCounter, userID, t.Format("20060102"), path)
pipe := c.redisClient.TxPipeline()
incr := pipe.IncrBy(ctx, key, delta)
pipe.Expire(ctx, key, counterTTL) // make sure we expire the counters
_, err := pipe.Exec(ctx)
if err != nil {
return 0, err
}
return incr.Result()
}
func (c *counter) decr(ctx context.Context, userID, path string, delta int64, t time.Time) (int64, error) {
t = t.UTC()
key := fmt.Sprintf("%s:%s:%s:%s", prefixCounter, userID, t.Format("20060102"), path)
pipe := c.redisClient.TxPipeline()
decr := pipe.DecrBy(ctx, key, delta)
pipe.Expire(ctx, key, counterTTL) // make sure we expire counters
_, err := pipe.Exec(ctx)
if err != nil {
return 0, err
}
return decr.Result()
}
func (c *counter) read(ctx context.Context, userID, path string, t time.Time) (int64, error) {
t = t.UTC()
ret, err := c.redisClient.Get(ctx, fmt.Sprintf("%s:%s:%s:%s", prefixCounter, userID, t.Format("20060102"), path)).Int64()
if err == redis.Nil {
return 0, nil
}
return ret, err
}
func (c *counter) deleteUser(ctx context.Context, userID string) error {
keys, err := c.redisClient.Keys(ctx, fmt.Sprintf("%s:%s:*", prefixCounter, userID)).Result() | return err
}
if len(keys) == 0 {
return nil
}
if err := c.redisClient.Del(ctx, keys...).Err(); err != nil && err != redis.Nil {
return err
}
return nil
}
type listEntry struct {
Service string
Count int64
}
func (c *counter) listForUser(userID string, t time.Time) ([]listEntry, error) {
ctx := context.Background()
keyPrefix := fmt.Sprintf("%s:%s:%s:", prefixCounter, userID, t.Format("20060102"))
sc := c.redisClient.Scan(ctx, 0, keyPrefix+"*", 0)
if err := sc.Err(); err != nil {
return nil, err
}
iter := sc.Iterator()
res := []listEntry{}
for {
if !iter.Next(ctx) {
break
}
key := iter.Val()
i, err := c.redisClient.Get(ctx, key).Int64()
if err != nil {
return nil, err
}
res = append(res, listEntry{
Service: strings.TrimPrefix(key, keyPrefix),
Count: i,
})
}
return res, iter.Err()
}
type UsageSvc struct {
c *counter
dbService dbproto.DbService
}
func NewHandler(svc *service.Service, dbService dbproto.DbService) *UsageSvc {
redisConfig := struct {
Address string
User string
Password string
}{}
val, err := config.Get("micro.usage.redis")
if err != nil {
log.Fatalf("No redis config found %s", err)
}
if err := val.Scan(&redisConfig); err != nil {
log.Fatalf("Error parsing redis config %s", err)
}
if len(redisConfig.Password) == 0 || len(redisConfig.User) == 0 || len(redisConfig.Password) == 0 {
log.Fatalf("Missing redis config %s", err)
}
rc := redis.NewClient(&redis.Options{
Addr: redisConfig.Address,
Username: redisConfig.User,
Password: redisConfig.Password,
TLSConfig: &tls.Config{
InsecureSkipVerify: false,
},
})
p := &UsageSvc{
c: &counter{redisClient: rc},
dbService: dbService,
}
go p.consumeEvents()
return p
}
func (p *UsageSvc) Read(ctx context.Context, request *pb.ReadRequest, response *pb.ReadResponse) error {
acc, ok := auth.AccountFromContext(ctx)
if !ok {
return errors.Unauthorized("usage.Read", "Unauthorized")
}
if len(request.CustomerId) == 0 {
request.CustomerId = acc.ID
}
if acc.ID != request.CustomerId {
_, err := m3oauth.VerifyMicroAdmin(ctx, "usage.Read")
if err != nil {
return err
}
}
now := time.Now().UTC().Truncate(24 * time.Hour)
liveEntries, err := p.c.listForUser(request.CustomerId, now)
if err != nil {
log.Errorf("Error retrieving usage %s", err)
return errors.InternalServerError("usage.Read", "Error retrieving usage")
}
response.Usage = map[string]*pb.UsageHistory{}
// add live data on top of historical
keyPrefix := fmt.Sprintf("%s/%s/", prefixUsageByCustomer, request.CustomerId)
recs, err := store.Read(keyPrefix, store.ReadPrefix())
if err != nil {
log.Errorf("Error querying historical data %s", err)
return errors.InternalServerError("usage.Read", "Error retrieving usage")
}
addEntryToResponse := func(response *pb.ReadResponse, e listEntry, unixTime int64) {
// detailed view includes data for individual endpoints
if !request.Detail && strings.Contains(e.Service, "$") {
return
}
use := response.Usage[e.Service]
if use == nil {
use = &pb.UsageHistory{
ApiName: e.Service,
Records: []*pb.UsageRecord{},
}
}
use.Records = append(use.Records, &pb.UsageRecord{Date: unixTime, Requests: e.Count})
response.Usage[e.Service] = use
}
// add to slices
for _, rec := range recs {
date := strings.TrimPrefix(rec.Key, keyPrefix)
dateObj, err := time.Parse("20060102", date)
if err != nil {
log.Errorf("Error parsing date obj %s", err)
return errors.InternalServerError("usage.Read", "Error retrieving usage")
}
var de dateEntry
if err := json.Unmarshal(rec.Value, &de); err != nil {
log.Errorf("Error parsing date obj %s", err)
return errors.InternalServerError("usage.Read", "Error retrieving usage")
}
for _, e := range de.Entries {
addEntryToResponse(response, e, dateObj.Unix())
}
}
for _, e := range liveEntries {
addEntryToResponse(response, e, now.Unix())
}
// sort slices
for _, v := range response.Usage {
sort.Slice(v.Records, func(i, j int) bool {
if v.Records[i].Date == v.Records[j].Date {
return v.Records[i].Requests < v.Records[j].Requests
}
return v.Records[i].Date < v.Records[j].Date
})
}
// remove dupe
for k, v := range response.Usage {
lenRecs := len(v.Records)
if lenRecs < 2 {
continue
}
if v.Records[lenRecs-2].Date != v.Records[lenRecs-1].Date {
continue
}
response.Usage[k].Records = append(v.Records[:lenRecs-2], v.Records[lenRecs-1])
}
return nil
}
type dateEntry struct {
Entries []listEntry
}
func (p *UsageSvc) UsageCron() {
defer func() {
log.Infof("Usage sweep ended")
}()
log.Infof("Performing usage sweep")
// loop through counters and persist
ctx := context.Background()
sc := p.c.redisClient.Scan(ctx, 0, prefixCounter+":*", 0)
if err := sc.Err(); err != nil {
log.Errorf("Error running redis scan %s", err)
return
}
toPersist := map[string]map[string][]listEntry{} // userid->date->[]listEntry
it := sc.Iterator()
for {
if !it.Next(ctx) {
if err := it.Err(); err != nil {
log.Errorf("Error during iteration %s", err)
}
break
}
key := it.Val()
count, err := p.c.redisClient.Get(ctx, key).Int64()
if err != nil {
log.Errorf("Error retrieving value %s", err)
return
}
parts := strings.Split(strings.TrimPrefix(key, prefixCounter+":"), ":")
if len(parts) < 3 {
log.Errorf("Unexpected number of components in key %s", key)
continue
}
userID := parts[0]
date := parts[1]
service := parts[2]
dates := toPersist[userID]
if dates == nil {
dates = map[string][]listEntry{}
toPersist[userID] = dates
}
entries := dates[date]
if entries == nil {
entries = []listEntry{}
}
entries = append(entries, listEntry{
Service: service,
Count: count,
})
dates[date] = entries
}
for userID, v := range toPersist {
for date, entry := range v {
de := dateEntry{
Entries: entry,
}
b, err := json.Marshal(de)
if err != nil {
log.Errorf("Error marshalling entry %s", err)
return
}
store.Write(&store.Record{
Key: fmt.Sprintf("%s/%s/%s", prefixUsageByCustomer, userID, date),
Value: b,
})
}
}
}
func (p *UsageSvc) Sweep(ctx context.Context, request *pb.SweepRequest, response *pb.SweepResponse) error {
p.UsageCron()
return nil
}
func (p *UsageSvc) deleteUser(ctx context.Context, userID string) error {
if err := p.c.deleteUser(ctx, userID); err != nil {
return err
}
recs, err := store.Read(fmt.Sprintf("%s/%s/", prefixUsageByCustomer, userID), store.ReadPrefix())
if err != nil {
return err
}
for _, rec := range recs {
if err := store.Delete(rec.Key); err != nil {
return err
}
}
return nil
}
func (p *UsageSvc) DeleteCustomer(ctx context.Context, request *pb.DeleteCustomerRequest, response *pb.DeleteCustomerResponse) error {
if _, err := m3oauth.VerifyMicroAdmin(ctx, "usage.DeleteCustomer"); err != nil {
return err
}
if len(request.Id) == 0 {
return errors.BadRequest("usage.DeleteCustomer", "Error deleting customer")
}
if err := p.deleteUser(ctx, request.Id); err != nil {
log.Errorf("Error deleting customer %s", err)
return err
}
return nil
}
func (p *UsageSvc) SaveEvent(ctx context.Context, request *pb.SaveEventRequest, response *pb.SaveEventResponse) error {
if request.Event == nil {
return fmt.Errorf("event not provided")
}
if request.Event.Table == "" {
return fmt.Errorf("table not provided")
}
rec := request.Event.Record.AsMap()
if request.Event.Id == "" {
request.Event.Id = uuid.New().String()
}
rec["id"] = request.Event.Id
rec["createdAt"] = time.Now().Unix()
bs, err := json.Marshal(rec)
if err != nil {
return err
}
err = json.Unmarshal(bs, request.Event.Record)
if err != nil {
return err
}
_, err = p.dbService.Create(ctx, &dbproto.CreateRequest{
Table: request.Event.Table,
Record: request.Event.Record,
})
return err
}
func (p *UsageSvc) ListEvents(ctx context.Context, request *pb.ListEventsRequest, response *pb.ListEventsResponse) error {
if request.Table == "" {
return fmt.Errorf("no table provided")
}
resp, err := p.dbService.Read(ctx, &dbproto.ReadRequest{
Table: request.Table,
Query: "createdAt > 0",
OrderBy: "createdAt",
Order: "desc",
})
if err != nil {
return err
}
for _, v := range resp.Records {
response.Events = append(response.Events, &pb.Event{
Table: request.Table,
Record: v,
})
}
return nil
} | if err != nil {
if err == redis.Nil {
return nil
} | random_line_split |
usage.go | package handler
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/go-redis/redis/v8"
"github.com/google/uuid"
m3oauth "github.com/m3o/services/pkg/auth"
pb "github.com/m3o/services/usage/proto"
"github.com/micro/micro/v3/service"
"github.com/micro/micro/v3/service/auth"
"github.com/micro/micro/v3/service/config"
"github.com/micro/micro/v3/service/errors"
log "github.com/micro/micro/v3/service/logger"
"github.com/micro/micro/v3/service/store"
dbproto "github.com/micro/services/db/proto"
)
const (
	prefixCounter         = "usage-service/counter" // redis key prefix; full key: prefix:userID:date:service
	prefixUsageByCustomer = "usageByCustomer"       // store key prefix; full key: prefix/customerID/date
	counterTTL            = 48 * time.Hour          // live redis counters expire after two days
)
// counter tracks per-user, per-day, per-path request counts in redis.
type counter struct {
	sync.RWMutex
	redisClient *redis.Client
}
// incr atomically increases the daily request counter for the given user
// and path by delta, refreshing the key's TTL in the same transaction.
// The supplied time selects which day's counter is touched (keys are
// bucketed by UTC date). Returns the counter value after the increment.
func (c *counter) incr(ctx context.Context, userID, path string, delta int64, t time.Time) (int64, error) {
	day := t.UTC().Format("20060102")
	counterKey := fmt.Sprintf("%s:%s:%s:%s", prefixCounter, userID, day, path)
	tx := c.redisClient.TxPipeline()
	result := tx.IncrBy(ctx, counterKey, delta)
	tx.Expire(ctx, counterKey, counterTTL) // make sure we expire the counters
	if _, err := tx.Exec(ctx); err != nil {
		return 0, err
	}
	return result.Result()
}
func (c *counter) | (ctx context.Context, userID, path string, delta int64, t time.Time) (int64, error) {
t = t.UTC()
key := fmt.Sprintf("%s:%s:%s:%s", prefixCounter, userID, t.Format("20060102"), path)
pipe := c.redisClient.TxPipeline()
decr := pipe.DecrBy(ctx, key, delta)
pipe.Expire(ctx, key, counterTTL) // make sure we expire counters
_, err := pipe.Exec(ctx)
if err != nil {
return 0, err
}
return decr.Result()
}
// read returns the counter value for the given user/path on the day of t,
// treating a missing key as zero usage.
func (c *counter) read(ctx context.Context, userID, path string, t time.Time) (int64, error) {
	day := t.UTC().Format("20060102")
	key := fmt.Sprintf("%s:%s:%s:%s", prefixCounter, userID, day, path)
	val, err := c.redisClient.Get(ctx, key).Int64()
	if err == redis.Nil {
		// an absent counter simply means no recorded usage
		return 0, nil
	}
	return val, err
}
// deleteUser removes every live counter key belonging to userID.
// NOTE(review): KEYS is O(N) over the whole keyspace and blocks redis;
// consider SCAN if key volume is large — confirm expected scale.
func (c *counter) deleteUser(ctx context.Context, userID string) error {
	keys, err := c.redisClient.Keys(ctx, fmt.Sprintf("%s:%s:*", prefixCounter, userID)).Result()
	if err != nil {
		if err == redis.Nil {
			// no keys for this user — nothing to delete
			return nil
		}
		return err
	}
	if len(keys) == 0 {
		return nil
	}
	// redis.Nil from DEL is treated as success (keys already gone)
	if err := c.redisClient.Del(ctx, keys...).Err(); err != nil && err != redis.Nil {
		return err
	}
	return nil
}
// listEntry is one service's request count for a single day.
// Serialized to JSON inside dateEntry when persisted to the store.
type listEntry struct {
	Service string
	Count   int64
}
// listForUser returns one entry per service counter recorded for userID on
// the day of t, by scanning the user+date key prefix and reading each value.
func (c *counter) listForUser(userID string, t time.Time) ([]listEntry, error) {
	ctx := context.Background()
	// NOTE(review): t is not normalised with t.UTC() here, unlike incr/read —
	// confirm callers always pass a UTC time (Read does).
	keyPrefix := fmt.Sprintf("%s:%s:%s:", prefixCounter, userID, t.Format("20060102"))
	sc := c.redisClient.Scan(ctx, 0, keyPrefix+"*", 0)
	if err := sc.Err(); err != nil {
		return nil, err
	}
	iter := sc.Iterator()
	res := []listEntry{}
	for {
		if !iter.Next(ctx) {
			break
		}
		key := iter.Val()
		i, err := c.redisClient.Get(ctx, key).Int64()
		if err != nil {
			return nil, err
		}
		// the service name is whatever follows the user/date prefix
		res = append(res, listEntry{
			Service: strings.TrimPrefix(key, keyPrefix),
			Count:   i,
		})
	}
	return res, iter.Err()
}
// UsageSvc implements the usage service RPC handlers.
type UsageSvc struct {
	c         *counter          // live per-day counters in redis
	dbService dbproto.DbService // backend used for event storage (SaveEvent/ListEvents)
}
// NewHandler constructs the usage service handler. It loads the redis
// connection details from config key "micro.usage.redis", validates them,
// connects to redis, and starts the background event consumer goroutine.
// Missing or malformed configuration is fatal.
func NewHandler(svc *service.Service, dbService dbproto.DbService) *UsageSvc {
	redisConfig := struct {
		Address  string
		User     string
		Password string
	}{}
	val, err := config.Get("micro.usage.redis")
	if err != nil {
		log.Fatalf("No redis config found %s", err)
	}
	if err := val.Scan(&redisConfig); err != nil {
		log.Fatalf("Error parsing redis config %s", err)
	}
	// Bug fix: the original condition tested Password twice and never
	// validated Address. Also drop the %s verb — err is nil at this point.
	if len(redisConfig.Address) == 0 || len(redisConfig.User) == 0 || len(redisConfig.Password) == 0 {
		log.Fatalf("Missing redis config")
	}
	rc := redis.NewClient(&redis.Options{
		Addr:     redisConfig.Address,
		Username: redisConfig.User,
		Password: redisConfig.Password,
		TLSConfig: &tls.Config{
			InsecureSkipVerify: false,
		},
	})
	p := &UsageSvc{
		c:         &counter{redisClient: rc},
		dbService: dbService,
	}
	go p.consumeEvents()
	return p
}
// Read returns usage history for a customer. A caller may read their own
// usage; reading another customer's usage requires micro admin rights.
// The response merges persisted per-day records from the store with today's
// live counters from redis, keyed by API/service name.
func (p *UsageSvc) Read(ctx context.Context, request *pb.ReadRequest, response *pb.ReadResponse) error {
	acc, ok := auth.AccountFromContext(ctx)
	if !ok {
		return errors.Unauthorized("usage.Read", "Unauthorized")
	}
	// default to the caller's own account
	if len(request.CustomerId) == 0 {
		request.CustomerId = acc.ID
	}
	// reading someone else's usage is admin-only
	if acc.ID != request.CustomerId {
		_, err := m3oauth.VerifyMicroAdmin(ctx, "usage.Read")
		if err != nil {
			return err
		}
	}
	// start of today (UTC) — the bucket covered by live redis counters
	now := time.Now().UTC().Truncate(24 * time.Hour)
	liveEntries, err := p.c.listForUser(request.CustomerId, now)
	if err != nil {
		log.Errorf("Error retrieving usage %s", err)
		return errors.InternalServerError("usage.Read", "Error retrieving usage")
	}
	response.Usage = map[string]*pb.UsageHistory{}
	// add live data on top of historical
	keyPrefix := fmt.Sprintf("%s/%s/", prefixUsageByCustomer, request.CustomerId)
	recs, err := store.Read(keyPrefix, store.ReadPrefix())
	if err != nil {
		log.Errorf("Error querying historical data %s", err)
		return errors.InternalServerError("usage.Read", "Error retrieving usage")
	}
	addEntryToResponse := func(response *pb.ReadResponse, e listEntry, unixTime int64) {
		// detailed view includes data for individual endpoints; entries whose
		// service name contains "$" are per-endpoint and hidden by default
		if !request.Detail && strings.Contains(e.Service, "$") {
			return
		}
		use := response.Usage[e.Service]
		if use == nil {
			use = &pb.UsageHistory{
				ApiName: e.Service,
				Records: []*pb.UsageRecord{},
			}
		}
		use.Records = append(use.Records, &pb.UsageRecord{Date: unixTime, Requests: e.Count})
		response.Usage[e.Service] = use
	}
	// fold every persisted day's entries into the response
	for _, rec := range recs {
		// the store key suffix is the YYYYMMDD date
		date := strings.TrimPrefix(rec.Key, keyPrefix)
		dateObj, err := time.Parse("20060102", date)
		if err != nil {
			log.Errorf("Error parsing date obj %s", err)
			return errors.InternalServerError("usage.Read", "Error retrieving usage")
		}
		var de dateEntry
		if err := json.Unmarshal(rec.Value, &de); err != nil {
			log.Errorf("Error parsing date obj %s", err)
			return errors.InternalServerError("usage.Read", "Error retrieving usage")
		}
		for _, e := range de.Entries {
			addEntryToResponse(response, e, dateObj.Unix())
		}
	}
	// then today's live counters
	for _, e := range liveEntries {
		addEntryToResponse(response, e, now.Unix())
	}
	// sort each API's records by date, ties broken by request count
	for _, v := range response.Usage {
		sort.Slice(v.Records, func(i, j int) bool {
			if v.Records[i].Date == v.Records[j].Date {
				return v.Records[i].Requests < v.Records[j].Requests
			}
			return v.Records[i].Date < v.Records[j].Date
		})
	}
	// today may appear twice (persisted sweep + live counter); keep only
	// the last (larger, per the sort above) record for the duplicated date
	for k, v := range response.Usage {
		lenRecs := len(v.Records)
		if lenRecs < 2 {
			continue
		}
		if v.Records[lenRecs-2].Date != v.Records[lenRecs-1].Date {
			continue
		}
		response.Usage[k].Records = append(v.Records[:lenRecs-2], v.Records[lenRecs-1])
	}
	return nil
}
// dateEntry is the value persisted to the store: all counter entries for
// one customer on one day.
type dateEntry struct {
	Entries []listEntry
}
// UsageCron sweeps all live redis counters and persists them to the store,
// grouped per customer per day. Invoked on a schedule and manually via
// Sweep. Redis read errors abort the sweep; per-record store-write
// failures are now logged and skipped so one bad record does not silently
// lose the rest (the original ignored store.Write's error entirely).
func (p *UsageSvc) UsageCron() {
	defer func() {
		log.Infof("Usage sweep ended")
	}()
	log.Infof("Performing usage sweep")
	// loop through counters and persist
	ctx := context.Background()
	sc := p.c.redisClient.Scan(ctx, 0, prefixCounter+":*", 0)
	if err := sc.Err(); err != nil {
		log.Errorf("Error running redis scan %s", err)
		return
	}
	toPersist := map[string]map[string][]listEntry{} // userid->date->[]listEntry
	it := sc.Iterator()
	for {
		if !it.Next(ctx) {
			if err := it.Err(); err != nil {
				log.Errorf("Error during iteration %s", err)
			}
			break
		}
		key := it.Val()
		count, err := p.c.redisClient.Get(ctx, key).Int64()
		if err != nil {
			log.Errorf("Error retrieving value %s", err)
			return
		}
		// key layout: prefix:userID:date:service
		parts := strings.Split(strings.TrimPrefix(key, prefixCounter+":"), ":")
		if len(parts) < 3 {
			log.Errorf("Unexpected number of components in key %s", key)
			continue
		}
		userID := parts[0]
		date := parts[1]
		service := parts[2]
		dates := toPersist[userID]
		if dates == nil {
			dates = map[string][]listEntry{}
			toPersist[userID] = dates
		}
		// append works on a nil slice, so no explicit initialisation needed
		dates[date] = append(dates[date], listEntry{
			Service: service,
			Count:   count,
		})
	}
	// persist one store record per user per day
	for userID, dates := range toPersist {
		for date, entries := range dates {
			b, err := json.Marshal(dateEntry{Entries: entries})
			if err != nil {
				log.Errorf("Error marshalling entry %s", err)
				return
			}
			// Bug fix: check and log the previously-ignored write error.
			if err := store.Write(&store.Record{
				Key:   fmt.Sprintf("%s/%s/%s", prefixUsageByCustomer, userID, date),
				Value: b,
			}); err != nil {
				log.Errorf("Error persisting usage for %s/%s: %s", userID, date, err)
			}
		}
	}
}
// Sweep triggers an immediate usage sweep (see UsageCron).
// NOTE(review): no auth check here, unlike DeleteCustomer — confirm
// whether this endpoint is intended to be admin-only.
func (p *UsageSvc) Sweep(ctx context.Context, request *pb.SweepRequest, response *pb.SweepResponse) error {
	p.UsageCron()
	return nil
}
// deleteUser purges all usage data for userID: first the live redis
// counters, then every persisted per-day record in the store.
func (p *UsageSvc) deleteUser(ctx context.Context, userID string) error {
	if err := p.c.deleteUser(ctx, userID); err != nil {
		return err
	}
	prefix := fmt.Sprintf("%s/%s/", prefixUsageByCustomer, userID)
	records, err := store.Read(prefix, store.ReadPrefix())
	if err != nil {
		return err
	}
	for _, record := range records {
		if delErr := store.Delete(record.Key); delErr != nil {
			return delErr
		}
	}
	return nil
}
// DeleteCustomer removes all usage data for the given customer ID.
// Restricted to micro admins.
func (p *UsageSvc) DeleteCustomer(ctx context.Context, request *pb.DeleteCustomerRequest, response *pb.DeleteCustomerResponse) error {
	if _, err := m3oauth.VerifyMicroAdmin(ctx, "usage.DeleteCustomer"); err != nil {
		return err
	}
	if request.Id == "" {
		return errors.BadRequest("usage.DeleteCustomer", "Error deleting customer")
	}
	err := p.deleteUser(ctx, request.Id)
	if err != nil {
		log.Errorf("Error deleting customer %s", err)
	}
	return err
}
// SaveEvent stores a single event record in the requested table, stamping
// it with an id (generated when absent) and a creation timestamp. The id
// and createdAt fields are injected via a JSON round-trip through a map
// because the record is an opaque protobuf struct.
func (p *UsageSvc) SaveEvent(ctx context.Context, request *pb.SaveEventRequest, response *pb.SaveEventResponse) error {
	if request.Event == nil {
		return fmt.Errorf("event not provided")
	}
	if request.Event.Table == "" {
		return fmt.Errorf("table not provided")
	}
	if request.Event.Id == "" {
		request.Event.Id = uuid.New().String()
	}
	fields := request.Event.Record.AsMap()
	fields["id"] = request.Event.Id
	fields["createdAt"] = time.Now().Unix()
	encoded, err := json.Marshal(fields)
	if err != nil {
		return err
	}
	if err := json.Unmarshal(encoded, request.Event.Record); err != nil {
		return err
	}
	_, err = p.dbService.Create(ctx, &dbproto.CreateRequest{
		Table:  request.Event.Table,
		Record: request.Event.Record,
	})
	return err
}
// ListEvents returns all events stored in the given table, newest first.
func (p *UsageSvc) ListEvents(ctx context.Context, request *pb.ListEventsRequest, response *pb.ListEventsResponse) error {
	if request.Table == "" {
		return fmt.Errorf("no table provided")
	}
	readReq := &dbproto.ReadRequest{
		Table:   request.Table,
		Query:   "createdAt > 0",
		OrderBy: "createdAt",
		Order:   "desc",
	}
	resp, err := p.dbService.Read(ctx, readReq)
	if err != nil {
		return err
	}
	for _, record := range resp.Records {
		event := &pb.Event{
			Table:  request.Table,
			Record: record,
		}
		response.Events = append(response.Events, event)
	}
	return nil
}
| decr | identifier_name |
peer.go | package p2p
import (
"errors"
"fmt"
"github.com/DSiSc/craft/log"
"github.com/DSiSc/p2p/common"
"github.com/DSiSc/p2p/config"
"github.com/DSiSc/p2p/message"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
)
// Limits applied to peer connections.
const (
	MAX_BUF_LEN = 1024 * 256 // maximum buffer size for receiving a message, in bytes
	WRITE_DEADLINE = 60 // deadline for a single conn write, in seconds
)
// PeerCom provides the basic information of a peer.
type PeerCom struct {
	version string // version info
	addr *common.NetAddress // peer address
	state uint64 // current state of this peer
	outBound atomic.Value // whether peer is an outbound peer (stores bool)
	persistent bool // whether peer is a persistent peer
	service config.ServiceFlag // services this peer supports
}
// Peer represent the peer
type Peer struct {
PeerCom
serverInfo *PeerCom
conn *PeerConn //connection To this peer
internalChan chan message.Message
sendChan chan *InternalMsg
recvChan chan<- *InternalMsg
quitChan chan interface{}
lock sync.RWMutex
isRunning int32
knownMsgs *common.RingBuffer
}
// NewInboundPeer new inbound peer instance
func NewInboundPeer(serverInfo *PeerCom, addr *common.NetAddress, msgChan chan<- *InternalMsg, conn net.Conn) *Peer {
return newPeer(serverInfo, addr, false, false, msgChan, conn)
}
// NewInboundPeer new outbound peer instance
func NewOutboundPeer(serverInfo *PeerCom, addr *common.NetAddress, persistent bool, msgChan chan<- *InternalMsg) *Peer {
return newPeer(serverInfo, addr, true, persistent, msgChan, nil)
}
// create a peer instance.
func newPeer(serverInfo *PeerCom, addr *common.NetAddress, outBound, persistent bool, msgChan chan<- *InternalMsg, conn net.Conn) *Peer {
peer := &Peer{
PeerCom: PeerCom{
addr: addr,
persistent: persistent,
},
serverInfo: serverInfo,
internalChan: make(chan message.Message),
sendChan: make(chan *InternalMsg),
recvChan: msgChan,
quitChan: make(chan interface{}),
knownMsgs: common.NewRingBuffer(1024),
isRunning: 0,
}
peer.outBound.Store(outBound)
if !outBound && conn != nil {
peer.conn = NewPeerConn(conn, peer.internalChan)
}
return peer
}
// Start connect To peer and send message To each other
func (peer *Peer) Start() error {
peer.lock.Lock()
defer peer.lock.Unlock()
if peer.isRunning != 0 {
log.Error("peer %s has been started", peer.addr.ToString())
return fmt.Errorf("peer %s has been started", peer.addr.ToString())
}
if peer.outBound.Load().(bool) {
log.Info("Start outbound peer %s", peer.addr.ToString())
err := peer.initConn()
if err != nil {
return err
}
peer.conn.Start()
err = peer.handShakeWithOutBoundPeer()
if err != nil {
log.Info("failed to hand shake with outbound peer %s, as: %v", peer.addr.ToString(), err)
peer.conn.Stop()
return err
}
} else {
log.Info("Start inbound peer %s", peer.addr.ToString())
if peer.conn == nil {
return errors.New("have no established connection")
}
peer.conn.Start()
err := peer.handShakeWithInBoundPeer()
if err != nil {
log.Info("failed to hand shake with inbound peer %s, as: %v", peer.addr.ToString(), err)
peer.conn.Stop()
return err
}
}
go peer.recvHandler()
go peer.sendHandler()
peer.isRunning = 1
return nil
}
// start handshake with outbound peer.
func (peer *Peer) handShakeWithOutBoundPeer() error {
//send version message
err := peer.sendVersionMessage()
if err != nil {
return err
}
// read version message
err = peer.readVersionMessage()
if err != nil {
return err
}
// send version ack message
err = peer.sendVersionAckMessage()
if err != nil {
return err
}
// read version ack message
return peer.readVersionAckMessage()
}
// start handshake with inbound peer.
func (peer *Peer) handShakeWithInBoundPeer() error {
// read version message
err := peer.readVersionMessage()
if err != nil {
return err
}
//send version message
err = peer.sendVersionMessage()
if err != nil {
return err
}
// read version ack message
err = peer.readVersionAckMessage()
if err != nil {
return err
}
// send version ack message
return peer.sendVersionAckMessage()
}
// send version message To this peer.
func (peer *Peer) sendVersionMessage() error {
vmsg := &message.Version{
Version: peer.serverInfo.version,
PortMe: peer.serverInfo.addr.Port,
Service: peer.serverInfo.service,
}
return peer.conn.SendMessage(vmsg)
}
// send version ack message To this peer.
func (peer *Peer) sendVersionAckMessage() error {
vackmsg := &message.VersionAck{}
return peer.conn.SendMessage(vackmsg)
}
// read version message
func (peer *Peer) readVersionMessage() error {
msg, err := peer.readMessageWithType(message.VERSION_TYPE)
if err != nil {
return err
}
vmsg := msg.(*message.Version)
if vmsg.Service != peer.serverInfo.service {
return errors.New("Incompatible service ")
}
if !peer.outBound.Load().(bool) {
peer.addr.Port = vmsg.PortMe
}
return nil
}
// read version ack message
func (peer *Peer) readVersionAckMessage() error {
_, err := peer.readMessageWithType(message.VERACK_TYPE)
if err != nil {
return err
}
return nil
}
// read specified type message From peer.
func (peer *Peer) readMessageWithType(msgType message.MessageType) (message.Message, error) {
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case msg := <-peer.internalChan:
if msg.MsgType() == msgType {
return msg, nil
} else {
log.Warn("error type message received From peer %s, expected: %v, actual: %v", peer.addr.ToString(), msgType, msg.MsgType())
return nil, fmt.Errorf("error type message received From peer %s, expected: %v, actual: %v", peer.addr.ToString(), msgType, msg.MsgType())
}
case <-timer.C:
log.Warn("read %v type message From peer %s time out", msgType, peer.addr.ToString())
return nil, fmt.Errorf("read %v type message From peer %s time out", msgType, peer.addr.ToString())
}
}
// Stop stop peer.
func (peer *Peer) Stop() {
log.Info("Stop peer %s", peer.GetAddr().ToString())
peer.lock.Lock()
defer peer.lock.Unlock()
if peer.isRunning == 0 {
return
}
if peer.conn != nil {
peer.conn.Stop()
}
close(peer.quitChan)
peer.isRunning = 0
}
// initConnection init the connection To peer.
func (peer *Peer) initConn() error {
log.Debug("start init the connection To peer %s", peer.addr.ToString())
dialAddr := peer.addr.IP + ":" + strconv.Itoa(int(peer.addr.Port))
conn, err := net.Dial("tcp", dialAddr)
if err != nil {
log.Info("failed To dial To peer %s, as : %v", peer.addr.ToString(), err)
return fmt.Errorf("failed To dial To peer %s, as : %v", peer.addr.ToString(), err)
}
peer.conn = NewPeerConn(conn, peer.internalChan)
return nil
}
// message receive handler
func (peer *Peer) recvHandler() {
for {
var msg message.Message
select {
case msg = <-peer.internalChan:
log.Debug("receive %v type message From peer %s", msg.MsgType(), peer.GetAddr().ToString())
if msg.MsgId() != message.EmptyHash |
case <-peer.quitChan:
return
}
switch msg.(type) {
case *message.Version:
reject := &message.RejectMsg{
Reason: "invalid message, as version messages can only be sent once ",
}
peer.conn.SendMessage(reject)
peer.disconnectNotify(errors.New("receive an invalid message From remote"))
return
case *message.VersionAck:
reject := &message.RejectMsg{
Reason: "invalid message, as version ack messages can only be sent once ",
}
peer.conn.SendMessage(reject)
peer.disconnectNotify(errors.New("receive an invalid message From remote"))
return
case *message.RejectMsg:
rejectMsg := msg.(*message.RejectMsg)
log.Error("receive a reject message From remote, reject reason: %s", rejectMsg.Reason)
peer.disconnectNotify(errors.New(rejectMsg.Reason))
return
default:
imsg := &InternalMsg{
From: peer.addr,
To: peer.serverInfo.addr,
Payload: msg,
}
peer.receivedMsg(imsg)
log.Debug("peer %s send %v type message To message channel", peer.GetAddr().ToString(), msg.MsgType())
}
}
}
// message send handler
func (peer *Peer) sendHandler() {
for {
select {
case msg := <-peer.sendChan:
if msg.Payload.MsgId() != message.EmptyHash {
peer.knownMsgs.AddElement(msg.Payload.MsgId(), struct{}{})
}
err := peer.conn.SendMessage(msg.Payload)
if msg.RespTo != nil {
if err != nil {
msg.RespTo <- err
} else {
msg.RespTo <- nilError
}
}
case <-peer.quitChan:
return
}
}
}
// IsPersistent return true if this peer is a persistent peer
func (peer *Peer) IsPersistent() bool {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.persistent
}
// GetAddr get peer's address
func (peer *Peer) GetAddr() *common.NetAddress {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.addr
}
// CurrentState get current state of this peer.
func (peer *Peer) CurrentState() uint64 {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.state
}
// Channel get peer's send channel
func (peer *Peer) SendMsg(msg *InternalMsg) error {
select {
case peer.sendChan <- msg:
return nil
case <-peer.quitChan:
return fmt.Errorf("peer %s have stopped", peer.GetAddr().ToString())
}
}
// SetState update peer's state
func (peer *Peer) SetState(state uint64) {
peer.lock.Lock()
defer peer.lock.Unlock()
peer.state = state
}
// SetState update peer's state
func (peer *Peer) GetState() uint64 {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.state
}
// KnownMsg check whether the peer already known this message
func (peer *Peer) KnownMsg(msg message.Message) bool {
return peer.knownMsgs.Exist(msg.MsgId())
}
// IsOutBound check whether the peer is outbound peer.
func (peer *Peer) IsOutBound() bool {
return peer.outBound.Load().(bool)
}
//disconnectNotify push disconnect msg To channel
func (peer *Peer) disconnectNotify(err error) {
log.Debug("[p2p]call disconnectNotify for %s, as: %v", peer.GetAddr().ToString(), err)
disconnectMsg := &peerDisconnecMsg{
err,
}
msg := &InternalMsg{
From: peer.addr,
To: peer.serverInfo.addr,
Payload: disconnectMsg,
}
peer.receivedMsg(msg)
}
// received a message from remote
func (peer *Peer) receivedMsg(msg *InternalMsg) {
select {
case peer.recvChan <- msg:
case <-peer.quitChan:
log.Warn("Peer have been closed")
}
}
| {
peer.knownMsgs.AddElement(msg.MsgId(), struct{}{})
} | conditional_block |
peer.go | package p2p
import (
"errors"
"fmt"
"github.com/DSiSc/craft/log"
"github.com/DSiSc/p2p/common"
"github.com/DSiSc/p2p/config"
"github.com/DSiSc/p2p/message"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
)
const (
MAX_BUF_LEN = 1024 * 256 //the maximum buffer To receive message
WRITE_DEADLINE = 60 //deadline of conn write
)
// PeerCom provides the basic information of a peer
type PeerCom struct {
version string // version info
addr *common.NetAddress // peer address
state uint64 //current state of this peer
outBound atomic.Value // whether peer is out bound peer
persistent bool // whether peer is persistent peer
service config.ServiceFlag // service peer supported
}
// Peer represent the peer
type Peer struct {
PeerCom
serverInfo *PeerCom
conn *PeerConn //connection To this peer
internalChan chan message.Message
sendChan chan *InternalMsg
recvChan chan<- *InternalMsg
quitChan chan interface{}
lock sync.RWMutex
isRunning int32
knownMsgs *common.RingBuffer
}
// NewInboundPeer new inbound peer instance
func NewInboundPeer(serverInfo *PeerCom, addr *common.NetAddress, msgChan chan<- *InternalMsg, conn net.Conn) *Peer {
return newPeer(serverInfo, addr, false, false, msgChan, conn)
}
// NewInboundPeer new outbound peer instance
func NewOutboundPeer(serverInfo *PeerCom, addr *common.NetAddress, persistent bool, msgChan chan<- *InternalMsg) *Peer {
return newPeer(serverInfo, addr, true, persistent, msgChan, nil)
}
// create a peer instance.
func newPeer(serverInfo *PeerCom, addr *common.NetAddress, outBound, persistent bool, msgChan chan<- *InternalMsg, conn net.Conn) *Peer {
peer := &Peer{
PeerCom: PeerCom{
addr: addr,
persistent: persistent,
},
serverInfo: serverInfo,
internalChan: make(chan message.Message),
sendChan: make(chan *InternalMsg),
recvChan: msgChan,
quitChan: make(chan interface{}),
knownMsgs: common.NewRingBuffer(1024),
isRunning: 0,
}
peer.outBound.Store(outBound)
if !outBound && conn != nil {
peer.conn = NewPeerConn(conn, peer.internalChan)
}
return peer
}
// Start connect To peer and send message To each other
func (peer *Peer) Start() error {
peer.lock.Lock()
defer peer.lock.Unlock()
if peer.isRunning != 0 {
log.Error("peer %s has been started", peer.addr.ToString())
return fmt.Errorf("peer %s has been started", peer.addr.ToString())
}
if peer.outBound.Load().(bool) {
log.Info("Start outbound peer %s", peer.addr.ToString())
err := peer.initConn()
if err != nil {
return err
}
peer.conn.Start()
err = peer.handShakeWithOutBoundPeer()
if err != nil {
log.Info("failed to hand shake with outbound peer %s, as: %v", peer.addr.ToString(), err)
peer.conn.Stop()
return err
}
} else {
log.Info("Start inbound peer %s", peer.addr.ToString())
if peer.conn == nil {
return errors.New("have no established connection")
}
peer.conn.Start()
err := peer.handShakeWithInBoundPeer()
if err != nil {
log.Info("failed to hand shake with inbound peer %s, as: %v", peer.addr.ToString(), err)
peer.conn.Stop()
return err
}
}
go peer.recvHandler()
go peer.sendHandler()
peer.isRunning = 1
return nil
}
// start handshake with outbound peer.
func (peer *Peer) handShakeWithOutBoundPeer() error {
//send version message
err := peer.sendVersionMessage()
if err != nil {
return err
}
// read version message
err = peer.readVersionMessage()
if err != nil {
return err
}
// send version ack message
err = peer.sendVersionAckMessage()
if err != nil {
return err
}
// read version ack message
return peer.readVersionAckMessage()
}
// start handshake with inbound peer.
func (peer *Peer) handShakeWithInBoundPeer() error {
// read version message
err := peer.readVersionMessage()
if err != nil {
return err
}
//send version message
err = peer.sendVersionMessage()
if err != nil {
return err
}
// read version ack message
err = peer.readVersionAckMessage()
if err != nil {
return err
}
// send version ack message
return peer.sendVersionAckMessage()
}
// send version message To this peer.
func (peer *Peer) sendVersionMessage() error {
vmsg := &message.Version{
Version: peer.serverInfo.version,
PortMe: peer.serverInfo.addr.Port,
Service: peer.serverInfo.service,
}
return peer.conn.SendMessage(vmsg)
}
// send version ack message To this peer.
func (peer *Peer) sendVersionAckMessage() error {
vackmsg := &message.VersionAck{}
return peer.conn.SendMessage(vackmsg)
}
// read version message
func (peer *Peer) readVersionMessage() error {
msg, err := peer.readMessageWithType(message.VERSION_TYPE)
if err != nil {
return err
}
vmsg := msg.(*message.Version)
if vmsg.Service != peer.serverInfo.service {
return errors.New("Incompatible service ")
}
if !peer.outBound.Load().(bool) {
peer.addr.Port = vmsg.PortMe
}
return nil
}
// read version ack message
func (peer *Peer) readVersionAckMessage() error {
_, err := peer.readMessageWithType(message.VERACK_TYPE)
if err != nil {
return err
}
return nil
}
// read specified type message From peer.
func (peer *Peer) readMessageWithType(msgType message.MessageType) (message.Message, error) {
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case msg := <-peer.internalChan:
if msg.MsgType() == msgType {
return msg, nil
} else {
log.Warn("error type message received From peer %s, expected: %v, actual: %v", peer.addr.ToString(), msgType, msg.MsgType())
return nil, fmt.Errorf("error type message received From peer %s, expected: %v, actual: %v", peer.addr.ToString(), msgType, msg.MsgType())
}
case <-timer.C:
log.Warn("read %v type message From peer %s time out", msgType, peer.addr.ToString())
return nil, fmt.Errorf("read %v type message From peer %s time out", msgType, peer.addr.ToString())
}
}
// Stop stop peer.
func (peer *Peer) Stop() {
log.Info("Stop peer %s", peer.GetAddr().ToString())
peer.lock.Lock()
defer peer.lock.Unlock()
if peer.isRunning == 0 {
return
}
if peer.conn != nil {
peer.conn.Stop()
}
close(peer.quitChan)
peer.isRunning = 0
}
// initConnection init the connection To peer.
func (peer *Peer) initConn() error |
// message receive handler
func (peer *Peer) recvHandler() {
for {
var msg message.Message
select {
case msg = <-peer.internalChan:
log.Debug("receive %v type message From peer %s", msg.MsgType(), peer.GetAddr().ToString())
if msg.MsgId() != message.EmptyHash {
peer.knownMsgs.AddElement(msg.MsgId(), struct{}{})
}
case <-peer.quitChan:
return
}
switch msg.(type) {
case *message.Version:
reject := &message.RejectMsg{
Reason: "invalid message, as version messages can only be sent once ",
}
peer.conn.SendMessage(reject)
peer.disconnectNotify(errors.New("receive an invalid message From remote"))
return
case *message.VersionAck:
reject := &message.RejectMsg{
Reason: "invalid message, as version ack messages can only be sent once ",
}
peer.conn.SendMessage(reject)
peer.disconnectNotify(errors.New("receive an invalid message From remote"))
return
case *message.RejectMsg:
rejectMsg := msg.(*message.RejectMsg)
log.Error("receive a reject message From remote, reject reason: %s", rejectMsg.Reason)
peer.disconnectNotify(errors.New(rejectMsg.Reason))
return
default:
imsg := &InternalMsg{
From: peer.addr,
To: peer.serverInfo.addr,
Payload: msg,
}
peer.receivedMsg(imsg)
log.Debug("peer %s send %v type message To message channel", peer.GetAddr().ToString(), msg.MsgType())
}
}
}
// message send handler
func (peer *Peer) sendHandler() {
for {
select {
case msg := <-peer.sendChan:
if msg.Payload.MsgId() != message.EmptyHash {
peer.knownMsgs.AddElement(msg.Payload.MsgId(), struct{}{})
}
err := peer.conn.SendMessage(msg.Payload)
if msg.RespTo != nil {
if err != nil {
msg.RespTo <- err
} else {
msg.RespTo <- nilError
}
}
case <-peer.quitChan:
return
}
}
}
// IsPersistent return true if this peer is a persistent peer
func (peer *Peer) IsPersistent() bool {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.persistent
}
// GetAddr get peer's address
func (peer *Peer) GetAddr() *common.NetAddress {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.addr
}
// CurrentState get current state of this peer.
func (peer *Peer) CurrentState() uint64 {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.state
}
// Channel get peer's send channel
func (peer *Peer) SendMsg(msg *InternalMsg) error {
select {
case peer.sendChan <- msg:
return nil
case <-peer.quitChan:
return fmt.Errorf("peer %s have stopped", peer.GetAddr().ToString())
}
}
// SetState update peer's state
func (peer *Peer) SetState(state uint64) {
peer.lock.Lock()
defer peer.lock.Unlock()
peer.state = state
}
// SetState update peer's state
func (peer *Peer) GetState() uint64 {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.state
}
// KnownMsg check whether the peer already known this message
func (peer *Peer) KnownMsg(msg message.Message) bool {
return peer.knownMsgs.Exist(msg.MsgId())
}
// IsOutBound check whether the peer is outbound peer.
func (peer *Peer) IsOutBound() bool {
return peer.outBound.Load().(bool)
}
//disconnectNotify push disconnect msg To channel
func (peer *Peer) disconnectNotify(err error) {
log.Debug("[p2p]call disconnectNotify for %s, as: %v", peer.GetAddr().ToString(), err)
disconnectMsg := &peerDisconnecMsg{
err,
}
msg := &InternalMsg{
From: peer.addr,
To: peer.serverInfo.addr,
Payload: disconnectMsg,
}
peer.receivedMsg(msg)
}
// received a message from remote
func (peer *Peer) receivedMsg(msg *InternalMsg) {
select {
case peer.recvChan <- msg:
case <-peer.quitChan:
log.Warn("Peer have been closed")
}
}
| {
log.Debug("start init the connection To peer %s", peer.addr.ToString())
dialAddr := peer.addr.IP + ":" + strconv.Itoa(int(peer.addr.Port))
conn, err := net.Dial("tcp", dialAddr)
if err != nil {
log.Info("failed To dial To peer %s, as : %v", peer.addr.ToString(), err)
return fmt.Errorf("failed To dial To peer %s, as : %v", peer.addr.ToString(), err)
}
peer.conn = NewPeerConn(conn, peer.internalChan)
return nil
} | identifier_body |
peer.go | package p2p
import (
"errors"
"fmt"
"github.com/DSiSc/craft/log"
"github.com/DSiSc/p2p/common"
"github.com/DSiSc/p2p/config" | "github.com/DSiSc/p2p/message"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
)
const (
MAX_BUF_LEN = 1024 * 256 //the maximum buffer To receive message
WRITE_DEADLINE = 60 //deadline of conn write
)
// PeerCom provides the basic information of a peer
type PeerCom struct {
version string // version info
addr *common.NetAddress // peer address
state uint64 //current state of this peer
outBound atomic.Value // whether peer is out bound peer
persistent bool // whether peer is persistent peer
service config.ServiceFlag // service peer supported
}
// Peer represent the peer
type Peer struct {
PeerCom
serverInfo *PeerCom
conn *PeerConn //connection To this peer
internalChan chan message.Message
sendChan chan *InternalMsg
recvChan chan<- *InternalMsg
quitChan chan interface{}
lock sync.RWMutex
isRunning int32
knownMsgs *common.RingBuffer
}
// NewInboundPeer new inbound peer instance
func NewInboundPeer(serverInfo *PeerCom, addr *common.NetAddress, msgChan chan<- *InternalMsg, conn net.Conn) *Peer {
return newPeer(serverInfo, addr, false, false, msgChan, conn)
}
// NewInboundPeer new outbound peer instance
func NewOutboundPeer(serverInfo *PeerCom, addr *common.NetAddress, persistent bool, msgChan chan<- *InternalMsg) *Peer {
return newPeer(serverInfo, addr, true, persistent, msgChan, nil)
}
// create a peer instance.
func newPeer(serverInfo *PeerCom, addr *common.NetAddress, outBound, persistent bool, msgChan chan<- *InternalMsg, conn net.Conn) *Peer {
peer := &Peer{
PeerCom: PeerCom{
addr: addr,
persistent: persistent,
},
serverInfo: serverInfo,
internalChan: make(chan message.Message),
sendChan: make(chan *InternalMsg),
recvChan: msgChan,
quitChan: make(chan interface{}),
knownMsgs: common.NewRingBuffer(1024),
isRunning: 0,
}
peer.outBound.Store(outBound)
if !outBound && conn != nil {
peer.conn = NewPeerConn(conn, peer.internalChan)
}
return peer
}
// Start connect To peer and send message To each other
func (peer *Peer) Start() error {
peer.lock.Lock()
defer peer.lock.Unlock()
if peer.isRunning != 0 {
log.Error("peer %s has been started", peer.addr.ToString())
return fmt.Errorf("peer %s has been started", peer.addr.ToString())
}
if peer.outBound.Load().(bool) {
log.Info("Start outbound peer %s", peer.addr.ToString())
err := peer.initConn()
if err != nil {
return err
}
peer.conn.Start()
err = peer.handShakeWithOutBoundPeer()
if err != nil {
log.Info("failed to hand shake with outbound peer %s, as: %v", peer.addr.ToString(), err)
peer.conn.Stop()
return err
}
} else {
log.Info("Start inbound peer %s", peer.addr.ToString())
if peer.conn == nil {
return errors.New("have no established connection")
}
peer.conn.Start()
err := peer.handShakeWithInBoundPeer()
if err != nil {
log.Info("failed to hand shake with inbound peer %s, as: %v", peer.addr.ToString(), err)
peer.conn.Stop()
return err
}
}
go peer.recvHandler()
go peer.sendHandler()
peer.isRunning = 1
return nil
}
// start handshake with outbound peer.
func (peer *Peer) handShakeWithOutBoundPeer() error {
//send version message
err := peer.sendVersionMessage()
if err != nil {
return err
}
// read version message
err = peer.readVersionMessage()
if err != nil {
return err
}
// send version ack message
err = peer.sendVersionAckMessage()
if err != nil {
return err
}
// read version ack message
return peer.readVersionAckMessage()
}
// start handshake with inbound peer.
func (peer *Peer) handShakeWithInBoundPeer() error {
// read version message
err := peer.readVersionMessage()
if err != nil {
return err
}
//send version message
err = peer.sendVersionMessage()
if err != nil {
return err
}
// read version ack message
err = peer.readVersionAckMessage()
if err != nil {
return err
}
// send version ack message
return peer.sendVersionAckMessage()
}
// send version message To this peer.
func (peer *Peer) sendVersionMessage() error {
vmsg := &message.Version{
Version: peer.serverInfo.version,
PortMe: peer.serverInfo.addr.Port,
Service: peer.serverInfo.service,
}
return peer.conn.SendMessage(vmsg)
}
// send version ack message To this peer.
func (peer *Peer) sendVersionAckMessage() error {
vackmsg := &message.VersionAck{}
return peer.conn.SendMessage(vackmsg)
}
// read version message
func (peer *Peer) readVersionMessage() error {
msg, err := peer.readMessageWithType(message.VERSION_TYPE)
if err != nil {
return err
}
vmsg := msg.(*message.Version)
if vmsg.Service != peer.serverInfo.service {
return errors.New("Incompatible service ")
}
if !peer.outBound.Load().(bool) {
peer.addr.Port = vmsg.PortMe
}
return nil
}
// read version ack message
func (peer *Peer) readVersionAckMessage() error {
_, err := peer.readMessageWithType(message.VERACK_TYPE)
if err != nil {
return err
}
return nil
}
// read specified type message From peer.
func (peer *Peer) readMessageWithType(msgType message.MessageType) (message.Message, error) {
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case msg := <-peer.internalChan:
if msg.MsgType() == msgType {
return msg, nil
} else {
log.Warn("error type message received From peer %s, expected: %v, actual: %v", peer.addr.ToString(), msgType, msg.MsgType())
return nil, fmt.Errorf("error type message received From peer %s, expected: %v, actual: %v", peer.addr.ToString(), msgType, msg.MsgType())
}
case <-timer.C:
log.Warn("read %v type message From peer %s time out", msgType, peer.addr.ToString())
return nil, fmt.Errorf("read %v type message From peer %s time out", msgType, peer.addr.ToString())
}
}
// Stop stop peer.
func (peer *Peer) Stop() {
log.Info("Stop peer %s", peer.GetAddr().ToString())
peer.lock.Lock()
defer peer.lock.Unlock()
if peer.isRunning == 0 {
return
}
if peer.conn != nil {
peer.conn.Stop()
}
close(peer.quitChan)
peer.isRunning = 0
}
// initConnection init the connection To peer.
func (peer *Peer) initConn() error {
log.Debug("start init the connection To peer %s", peer.addr.ToString())
dialAddr := peer.addr.IP + ":" + strconv.Itoa(int(peer.addr.Port))
conn, err := net.Dial("tcp", dialAddr)
if err != nil {
log.Info("failed To dial To peer %s, as : %v", peer.addr.ToString(), err)
return fmt.Errorf("failed To dial To peer %s, as : %v", peer.addr.ToString(), err)
}
peer.conn = NewPeerConn(conn, peer.internalChan)
return nil
}
// message receive handler
func (peer *Peer) recvHandler() {
for {
var msg message.Message
select {
case msg = <-peer.internalChan:
log.Debug("receive %v type message From peer %s", msg.MsgType(), peer.GetAddr().ToString())
if msg.MsgId() != message.EmptyHash {
peer.knownMsgs.AddElement(msg.MsgId(), struct{}{})
}
case <-peer.quitChan:
return
}
switch msg.(type) {
case *message.Version:
reject := &message.RejectMsg{
Reason: "invalid message, as version messages can only be sent once ",
}
peer.conn.SendMessage(reject)
peer.disconnectNotify(errors.New("receive an invalid message From remote"))
return
case *message.VersionAck:
reject := &message.RejectMsg{
Reason: "invalid message, as version ack messages can only be sent once ",
}
peer.conn.SendMessage(reject)
peer.disconnectNotify(errors.New("receive an invalid message From remote"))
return
case *message.RejectMsg:
rejectMsg := msg.(*message.RejectMsg)
log.Error("receive a reject message From remote, reject reason: %s", rejectMsg.Reason)
peer.disconnectNotify(errors.New(rejectMsg.Reason))
return
default:
imsg := &InternalMsg{
From: peer.addr,
To: peer.serverInfo.addr,
Payload: msg,
}
peer.receivedMsg(imsg)
log.Debug("peer %s send %v type message To message channel", peer.GetAddr().ToString(), msg.MsgType())
}
}
}
// message send handler
func (peer *Peer) sendHandler() {
for {
select {
case msg := <-peer.sendChan:
if msg.Payload.MsgId() != message.EmptyHash {
peer.knownMsgs.AddElement(msg.Payload.MsgId(), struct{}{})
}
err := peer.conn.SendMessage(msg.Payload)
if msg.RespTo != nil {
if err != nil {
msg.RespTo <- err
} else {
msg.RespTo <- nilError
}
}
case <-peer.quitChan:
return
}
}
}
// IsPersistent return true if this peer is a persistent peer
func (peer *Peer) IsPersistent() bool {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.persistent
}
// GetAddr get peer's address
func (peer *Peer) GetAddr() *common.NetAddress {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.addr
}
// CurrentState get current state of this peer.
func (peer *Peer) CurrentState() uint64 {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.state
}
// Channel get peer's send channel
func (peer *Peer) SendMsg(msg *InternalMsg) error {
select {
case peer.sendChan <- msg:
return nil
case <-peer.quitChan:
return fmt.Errorf("peer %s have stopped", peer.GetAddr().ToString())
}
}
// SetState update peer's state
func (peer *Peer) SetState(state uint64) {
peer.lock.Lock()
defer peer.lock.Unlock()
peer.state = state
}
// SetState update peer's state
func (peer *Peer) GetState() uint64 {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.state
}
// KnownMsg check whether the peer already known this message
func (peer *Peer) KnownMsg(msg message.Message) bool {
return peer.knownMsgs.Exist(msg.MsgId())
}
// IsOutBound check whether the peer is outbound peer.
func (peer *Peer) IsOutBound() bool {
return peer.outBound.Load().(bool)
}
//disconnectNotify push disconnect msg To channel
func (peer *Peer) disconnectNotify(err error) {
log.Debug("[p2p]call disconnectNotify for %s, as: %v", peer.GetAddr().ToString(), err)
disconnectMsg := &peerDisconnecMsg{
err,
}
msg := &InternalMsg{
From: peer.addr,
To: peer.serverInfo.addr,
Payload: disconnectMsg,
}
peer.receivedMsg(msg)
}
// received a message from remote
func (peer *Peer) receivedMsg(msg *InternalMsg) {
select {
case peer.recvChan <- msg:
case <-peer.quitChan:
log.Warn("Peer have been closed")
}
} | random_line_split | |
peer.go | package p2p
import (
"errors"
"fmt"
"github.com/DSiSc/craft/log"
"github.com/DSiSc/p2p/common"
"github.com/DSiSc/p2p/config"
"github.com/DSiSc/p2p/message"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
)
const (
MAX_BUF_LEN = 1024 * 256 //the maximum buffer To receive message
WRITE_DEADLINE = 60 //deadline of conn write
)
// PeerCom provides the basic information of a peer
type PeerCom struct {
version string // version info
addr *common.NetAddress // peer address
state uint64 //current state of this peer
outBound atomic.Value // whether peer is out bound peer
persistent bool // whether peer is persistent peer
service config.ServiceFlag // service peer supported
}
// Peer represent the peer
type Peer struct {
PeerCom
serverInfo *PeerCom
conn *PeerConn //connection To this peer
internalChan chan message.Message
sendChan chan *InternalMsg
recvChan chan<- *InternalMsg
quitChan chan interface{}
lock sync.RWMutex
isRunning int32
knownMsgs *common.RingBuffer
}
// NewInboundPeer new inbound peer instance
func NewInboundPeer(serverInfo *PeerCom, addr *common.NetAddress, msgChan chan<- *InternalMsg, conn net.Conn) *Peer {
return newPeer(serverInfo, addr, false, false, msgChan, conn)
}
// NewInboundPeer new outbound peer instance
func NewOutboundPeer(serverInfo *PeerCom, addr *common.NetAddress, persistent bool, msgChan chan<- *InternalMsg) *Peer {
return newPeer(serverInfo, addr, true, persistent, msgChan, nil)
}
// create a peer instance.
func newPeer(serverInfo *PeerCom, addr *common.NetAddress, outBound, persistent bool, msgChan chan<- *InternalMsg, conn net.Conn) *Peer {
peer := &Peer{
PeerCom: PeerCom{
addr: addr,
persistent: persistent,
},
serverInfo: serverInfo,
internalChan: make(chan message.Message),
sendChan: make(chan *InternalMsg),
recvChan: msgChan,
quitChan: make(chan interface{}),
knownMsgs: common.NewRingBuffer(1024),
isRunning: 0,
}
peer.outBound.Store(outBound)
if !outBound && conn != nil {
peer.conn = NewPeerConn(conn, peer.internalChan)
}
return peer
}
// Start connect To peer and send message To each other
func (peer *Peer) Start() error {
peer.lock.Lock()
defer peer.lock.Unlock()
if peer.isRunning != 0 {
log.Error("peer %s has been started", peer.addr.ToString())
return fmt.Errorf("peer %s has been started", peer.addr.ToString())
}
if peer.outBound.Load().(bool) {
log.Info("Start outbound peer %s", peer.addr.ToString())
err := peer.initConn()
if err != nil {
return err
}
peer.conn.Start()
err = peer.handShakeWithOutBoundPeer()
if err != nil {
log.Info("failed to hand shake with outbound peer %s, as: %v", peer.addr.ToString(), err)
peer.conn.Stop()
return err
}
} else {
log.Info("Start inbound peer %s", peer.addr.ToString())
if peer.conn == nil {
return errors.New("have no established connection")
}
peer.conn.Start()
err := peer.handShakeWithInBoundPeer()
if err != nil {
log.Info("failed to hand shake with inbound peer %s, as: %v", peer.addr.ToString(), err)
peer.conn.Stop()
return err
}
}
go peer.recvHandler()
go peer.sendHandler()
peer.isRunning = 1
return nil
}
// start handshake with outbound peer.
func (peer *Peer) handShakeWithOutBoundPeer() error {
//send version message
err := peer.sendVersionMessage()
if err != nil {
return err
}
// read version message
err = peer.readVersionMessage()
if err != nil {
return err
}
// send version ack message
err = peer.sendVersionAckMessage()
if err != nil {
return err
}
// read version ack message
return peer.readVersionAckMessage()
}
// start handshake with inbound peer.
func (peer *Peer) handShakeWithInBoundPeer() error {
// read version message
err := peer.readVersionMessage()
if err != nil {
return err
}
//send version message
err = peer.sendVersionMessage()
if err != nil {
return err
}
// read version ack message
err = peer.readVersionAckMessage()
if err != nil {
return err
}
// send version ack message
return peer.sendVersionAckMessage()
}
// send version message To this peer.
func (peer *Peer) sendVersionMessage() error {
vmsg := &message.Version{
Version: peer.serverInfo.version,
PortMe: peer.serverInfo.addr.Port,
Service: peer.serverInfo.service,
}
return peer.conn.SendMessage(vmsg)
}
// send version ack message To this peer.
func (peer *Peer) sendVersionAckMessage() error {
vackmsg := &message.VersionAck{}
return peer.conn.SendMessage(vackmsg)
}
// read version message
func (peer *Peer) readVersionMessage() error {
msg, err := peer.readMessageWithType(message.VERSION_TYPE)
if err != nil {
return err
}
vmsg := msg.(*message.Version)
if vmsg.Service != peer.serverInfo.service {
return errors.New("Incompatible service ")
}
if !peer.outBound.Load().(bool) {
peer.addr.Port = vmsg.PortMe
}
return nil
}
// read version ack message
func (peer *Peer) readVersionAckMessage() error {
_, err := peer.readMessageWithType(message.VERACK_TYPE)
if err != nil {
return err
}
return nil
}
// read specified type message From peer.
func (peer *Peer) readMessageWithType(msgType message.MessageType) (message.Message, error) {
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case msg := <-peer.internalChan:
if msg.MsgType() == msgType {
return msg, nil
} else {
log.Warn("error type message received From peer %s, expected: %v, actual: %v", peer.addr.ToString(), msgType, msg.MsgType())
return nil, fmt.Errorf("error type message received From peer %s, expected: %v, actual: %v", peer.addr.ToString(), msgType, msg.MsgType())
}
case <-timer.C:
log.Warn("read %v type message From peer %s time out", msgType, peer.addr.ToString())
return nil, fmt.Errorf("read %v type message From peer %s time out", msgType, peer.addr.ToString())
}
}
// Stop stop peer.
func (peer *Peer) Stop() {
log.Info("Stop peer %s", peer.GetAddr().ToString())
peer.lock.Lock()
defer peer.lock.Unlock()
if peer.isRunning == 0 {
return
}
if peer.conn != nil {
peer.conn.Stop()
}
close(peer.quitChan)
peer.isRunning = 0
}
// initConnection init the connection To peer.
func (peer *Peer) initConn() error {
log.Debug("start init the connection To peer %s", peer.addr.ToString())
dialAddr := peer.addr.IP + ":" + strconv.Itoa(int(peer.addr.Port))
conn, err := net.Dial("tcp", dialAddr)
if err != nil {
log.Info("failed To dial To peer %s, as : %v", peer.addr.ToString(), err)
return fmt.Errorf("failed To dial To peer %s, as : %v", peer.addr.ToString(), err)
}
peer.conn = NewPeerConn(conn, peer.internalChan)
return nil
}
// message receive handler
func (peer *Peer) recvHandler() {
for {
var msg message.Message
select {
case msg = <-peer.internalChan:
log.Debug("receive %v type message From peer %s", msg.MsgType(), peer.GetAddr().ToString())
if msg.MsgId() != message.EmptyHash {
peer.knownMsgs.AddElement(msg.MsgId(), struct{}{})
}
case <-peer.quitChan:
return
}
switch msg.(type) {
case *message.Version:
reject := &message.RejectMsg{
Reason: "invalid message, as version messages can only be sent once ",
}
peer.conn.SendMessage(reject)
peer.disconnectNotify(errors.New("receive an invalid message From remote"))
return
case *message.VersionAck:
reject := &message.RejectMsg{
Reason: "invalid message, as version ack messages can only be sent once ",
}
peer.conn.SendMessage(reject)
peer.disconnectNotify(errors.New("receive an invalid message From remote"))
return
case *message.RejectMsg:
rejectMsg := msg.(*message.RejectMsg)
log.Error("receive a reject message From remote, reject reason: %s", rejectMsg.Reason)
peer.disconnectNotify(errors.New(rejectMsg.Reason))
return
default:
imsg := &InternalMsg{
From: peer.addr,
To: peer.serverInfo.addr,
Payload: msg,
}
peer.receivedMsg(imsg)
log.Debug("peer %s send %v type message To message channel", peer.GetAddr().ToString(), msg.MsgType())
}
}
}
// message send handler
func (peer *Peer) sendHandler() {
for {
select {
case msg := <-peer.sendChan:
if msg.Payload.MsgId() != message.EmptyHash {
peer.knownMsgs.AddElement(msg.Payload.MsgId(), struct{}{})
}
err := peer.conn.SendMessage(msg.Payload)
if msg.RespTo != nil {
if err != nil {
msg.RespTo <- err
} else {
msg.RespTo <- nilError
}
}
case <-peer.quitChan:
return
}
}
}
// IsPersistent return true if this peer is a persistent peer
func (peer *Peer) | () bool {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.persistent
}
// GetAddr get peer's address
func (peer *Peer) GetAddr() *common.NetAddress {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.addr
}
// CurrentState get current state of this peer.
func (peer *Peer) CurrentState() uint64 {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.state
}
// Channel get peer's send channel
func (peer *Peer) SendMsg(msg *InternalMsg) error {
select {
case peer.sendChan <- msg:
return nil
case <-peer.quitChan:
return fmt.Errorf("peer %s have stopped", peer.GetAddr().ToString())
}
}
// SetState update peer's state
func (peer *Peer) SetState(state uint64) {
peer.lock.Lock()
defer peer.lock.Unlock()
peer.state = state
}
// SetState update peer's state
func (peer *Peer) GetState() uint64 {
peer.lock.RLock()
defer peer.lock.RUnlock()
return peer.state
}
// KnownMsg check whether the peer already known this message
func (peer *Peer) KnownMsg(msg message.Message) bool {
return peer.knownMsgs.Exist(msg.MsgId())
}
// IsOutBound check whether the peer is outbound peer.
func (peer *Peer) IsOutBound() bool {
return peer.outBound.Load().(bool)
}
//disconnectNotify push disconnect msg To channel
func (peer *Peer) disconnectNotify(err error) {
log.Debug("[p2p]call disconnectNotify for %s, as: %v", peer.GetAddr().ToString(), err)
disconnectMsg := &peerDisconnecMsg{
err,
}
msg := &InternalMsg{
From: peer.addr,
To: peer.serverInfo.addr,
Payload: disconnectMsg,
}
peer.receivedMsg(msg)
}
// received a message from remote
func (peer *Peer) receivedMsg(msg *InternalMsg) {
select {
case peer.recvChan <- msg:
case <-peer.quitChan:
log.Warn("Peer have been closed")
}
}
| IsPersistent | identifier_name |
data_plt.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor
from sklearn.svm import SVR, LinearSVR
from sklearn.linear_model import ElasticNet, SGDRegressor, BayesianRidge,LinearRegression,Ridge,Lasso
from sklearn.kernel_ridge import KernelRidge
from xgboost import XGBRegressor
import lightgbm as lgb
import xgboost as xgb
from sklearn.model_selection import KFold, RepeatedKFold, cross_val_score, GridSearchCV
from sklearn.preprocessing import OneHotEncoder
from scipy import sparse
import warnings
import re
import plotly.offline as py
py.init_notebook_mode(connected=True)
from sklearn.metrics import mean_squared_error
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns',None)
pd.set_option('max_colwidth',100)
train = pd.read_csv('./jinnan_round1_train_20181227.csv')
test = pd.read_csv('./jinnan_round1_testB_20190121.csv')
class grid():
def __init__(self, model):
|
def grid_get(self, X, y, param_grid):
grid_search = GridSearchCV(self.model, param_grid, cv=5, scoring="neg_mean_squared_error")
grid_search.fit(X, y)
print(grid_search.best_params_, np.sqrt(-grid_search.best_score_))
grid_search.cv_results_['mean_test_score'] = np.sqrt(-grid_search.cv_results_['mean_test_score'])
print(pd.DataFrame(grid_search.cv_results_)[['params', 'mean_test_score', 'std_test_score']])
def get_phase(t1,t2):
try:
h1, m1, s1=t1.split(':')
h2, m2, s2=t2.split(':')
except:
if t1 == -1 or t2 == -1:
return -1
if int(h2) >= int(h1):
tm = (int(h2) * 3600 + int(m2) * 60 - int(m1) * 60 - int(h1) * 3600) / 3600
else:
tm = (int(h2) * 3600 + int(m2) * 60 - int(m1) * 60 - int(h1) * 3600) / 3600 + 24
return tm
def timeTranSecond(t):
try:
t, m, s = t.split(":")
except:
if t == '1900/1/9 7:00':
return 7 * 3600 / 3600
elif t == '1900/1/1 2:30':
return (2 * 3600 + 30 * 60) / 3600
elif t == -1:
return -1
else:
return 0
try:
tm = (int(t) * 3600 + int(m) * 60 + int(s)) / 3600
except:
return (30 * 60) / 3600
return tm
def getDuration(se):
try:
sh, sm, eh, em = re.findall(r"\d+\.?\d*", se)
except:
if se == -1:
return -1
try:
if int(sh) > int(eh):
tm = (int(eh) * 3600 + int(em) * 60 - int(sm) * 60 - int(sh) * 3600) / 3600 + 24
else:
tm = (int(eh) * 3600 + int(em) * 60 - int(sm) * 60 - int(sh) * 3600) / 3600
except:
if se == '19:-20:05':
return 1
elif se == '15:00-1600':
return 1
return tm
def rmse_cv(model,X,y):
rmse = np.sqrt(-cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=5))
return rmse
train.loc[train['B14'] == 40, 'B14'] = 400
train.drop(train[train['收率'] < 0.87].index, inplace=True)
full = pd.concat([train, test], ignore_index=True)
cols = ["A2", "A3", "A4"]
for col in cols:
full[col].fillna(0, inplace=True)
cols1 = ["A7", "A8", "B10", "B11", "A20", "A24", "A26"]
for col in cols1:
full[col].fillna(-1, inplace=True)
cols2 = ["B1", "B2", "B3", "B8", "B12", "B13", "A21", "A23"]
for col in cols2:
full[col].fillna(full[col].mode()[0], inplace=True)
full['a21_a22_a23'] = full['A21']+full['A22']+full['A23']
cols3 = ["A25", "A27"]
for col in cols3:
full[col] = full.groupby(['a21_a22_a23'])[col].transform(lambda x: x.fillna(x.median()))
full['a1_a3_a4']=full['A1']+full['A3']+full['A4']
full['a1_a3']=full['A1']+full['A3']
full['a1_a4']=full['A1']+full['A4']
full['a10_a6']=full['A10']-full['A6']
full['a12_a10']=full['A12']-full['A10']
full['a15_a12']=full['A15']-full['A12']
full['a17_a15']=full['A17']-full['A15']
full['a27_a25']=full['A27']-full['A25']
full['b6_b8']=full['B6']-full['B8']
full['a10_a6/a9_a5']=(full['A10']-full['A6'])/full.apply(lambda df:get_phase(df['A5'],df['A9']),axis=1)
full['a12_a10/a11_a9']=(full['A12']-full['A10'])/full.apply(lambda df:get_phase(df['A9'],df['A11']),axis=1)
full['a15_a12/a14_a11']=(full['A15']-full['A12'])/full.apply(lambda df:get_phase(df['A11'],df['A14']),axis=1)
full['a17_a15/a16_a14']=(full['A17']-full['A15'])/full.apply(lambda df:get_phase(df['A14'],df['A16']),axis=1)
full['a27_a25/a26_a24']=(full['A27']-full['A25'])/full.apply(lambda df:get_phase(df['A24'],df['A26']),axis=1)
full['b6_b8/b7_b5']=(full['B6']-full['B8'])/full.apply(lambda df:get_phase(df['B5'],df['B7']),axis=1)
full['b14/a1_a3_a4_a19_b1_b12'] = full['B14']/(full['A1']+full['A3']+full['A4']+full['A19']+full['B1']+full['B12'])
full['b14/a1_a3_a4_a19_b1_b12_b14'] = full['B12']/(full['A1']+full['A3']+full['A4']+full['A19']+full['B1']+full['B14'])
for f in ['A5', 'A7', 'A9', 'A11', 'A14', 'A16', 'A24', 'A26', 'B5', 'B7']:
try:
full[f] = full[f].apply(timeTranSecond)
except:
print(f, '应该在前面被删除了!')
for f in ['A20', 'A28', 'B4', 'B9', 'B10', 'B11']:
full[f] = full.apply(lambda df: getDuration(df[f]), axis=1)
full['样本id'] = full['样本id'].apply(lambda x: int(x.split('_')[1]))
good_cols=list(full.columns)
good_cols.remove('样本id')
good_cols.remove('收率')
# for f in good_cols:
# full[f] = full[f].map(dict(zip(full[f].unique(), range(0, full[f].nunique()))))
n_train=train.shape[0]
X = full[:n_train]
test_X = full[n_train:]
y= X.收率
X.drop(['收率'], axis=1, inplace=True)
test_X.drop(['收率'], axis=1, inplace=True)
# X_train = X[list(X.columns)].values
# X_test = test_X[list(X.columns)].values
# y_train = y.values
# # grid(Lasso()).grid_get(X,y,{'alpha': [0.02,0.0002,0.000222,0.0000224],'max_iter':[10000]})
# grid(xgb.XGBRegressor()).grid_get(X_train,y_train,{'num_leaves': [100],
# 'min_data_in_leaf': [9],
# 'objective': ['regression'],
# 'max_depth': [-1],
# 'learning_rate': [0.01],
# 'min_child_samples': [15],
# "boosting": ['gbdt'],
# "feature_fraction": [0.9],
# "bagging_freq": [1],
# "bagging_fraction": [0.9],
# "bagging_seed": [5,13,40,50],
# "metric": ['mse'],
# "lambda_l1": [0.000001],
# 'verbosity': [-1]})
# grid(xgb.XGBRegressor()).grid_get(X_train,y_train,{'eta': [0.1], 'max_depth': [6], 'subsample': [0.9],
# 'colsample_bytree': [0.5],'objective': ['reg:linear'],
# 'eval_metric': ['rmse'], 'silent': [True], 'nthread': [3]})
X_train = X[list(X.columns)].values
X_test = test_X[list(X.columns)].values
# one hot
enc = OneHotEncoder()
# for f in good_cols:
# enc.fit(full[f].values.reshape(-1, 1))
# X_train = sparse.hstack((X_train, enc.transform(X[f].values.reshape(-1, 1))), 'csr')
# X_test = sparse.hstack((X_test, enc.transform(test_X[f].values.reshape(-1, 1))), 'csr')
print(X_train.shape)
print(X_test.shape)
y_train = y.values
#
param = {'num_leaves': 100,
'min_data_in_leaf': 9,
'objective': 'regression',
'max_depth': -1,
'learning_rate': 0.01,
"min_child_samples": 15,
"boosting": "gbdt",
"feature_fraction": 0.9,
"bagging_freq": 1,
"bagging_fraction": 0.9,
"bagging_seed": 13,
"metric": 'mse',
"lambda_l1": 0.000001,
"verbosity": -1}
folds = KFold(n_splits=5, shuffle=True, random_state=2018)
oof_lgb = np.zeros(len(train))
predictions_lgb = np.zeros(len(test))
for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)):
print("fold n°{}".format(fold_ + 1))
trn_data = lgb.Dataset(X_train[trn_idx], y_train[trn_idx])
val_data = lgb.Dataset(X_train[val_idx], y_train[val_idx])
num_round = 3000
clf = lgb.train(param, trn_data, num_round, valid_sets=[trn_data, val_data], verbose_eval=200,
early_stopping_rounds=100)
oof_lgb[val_idx] = clf.predict(X_train[val_idx], num_iteration=clf.best_iteration)
predictions_lgb += clf.predict(X_test, num_iteration=clf.best_iteration) / folds.n_splits
print("CV score: {:<8.8f}".format(mean_squared_error(oof_lgb, y)))
##### xgb
xgb_params = {'eta': 0.1, 'max_depth': 6, 'subsample': 0.9, 'colsample_bytree': 0.5,
'objective': 'reg:linear', 'eval_metric': 'rmse', 'silent': True, 'nthread': 3}
folds = KFold(n_splits=5, shuffle=True, random_state=2018)
oof_xgb = np.zeros(len(train))
predictions_xgb = np.zeros(len(test))
for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)):
print("fold n°{}".format(fold_ + 1))
trn_data = xgb.DMatrix(X_train[trn_idx], y_train[trn_idx])
val_data = xgb.DMatrix(X_train[val_idx], y_train[val_idx])
watchlist = [(trn_data, 'train'), (val_data, 'valid_data')]
clf = xgb.train(dtrain=trn_data, num_boost_round=20000, evals=watchlist, early_stopping_rounds=200,
verbose_eval=100, params=xgb_params)
oof_xgb[val_idx] = clf.predict(xgb.DMatrix(X_train[val_idx]), ntree_limit=clf.best_ntree_limit)
predictions_xgb += clf.predict(xgb.DMatrix(X_test), ntree_limit=clf.best_ntree_limit) / folds.n_splits
print("CV score: {:<8.8f}".format(mean_squared_error(oof_xgb, y)))
# 将lgb和xgb的结果进行stacking
train_stack = np.vstack([oof_lgb, oof_xgb]).transpose()
test_stack = np.vstack([predictions_lgb, predictions_xgb]).transpose()
folds_stack = RepeatedKFold(n_splits=5, n_repeats=2, random_state=4590)
oof_stack1 = np.zeros(train_stack.shape[0])
predictions1 = np.zeros(test_stack.shape[0])
for fold_, (trn_idx, val_idx) in enumerate(folds_stack.split(train_stack, y)):
print("fold {}".format(fold_))
trn_data, trn_y = train_stack[trn_idx], y.iloc[trn_idx].values
val_data, val_y = train_stack[val_idx], y.iloc[val_idx].values
clf_3 = BayesianRidge()
clf_3.fit(trn_data, trn_y)
oof_stack1[val_idx] = clf_3.predict(val_data)
predictions1 += clf_3.predict(test_stack) / 10
print("CV score: {:<8.8f}".format(mean_squared_error(y.values, oof_stack1)))
sub_df = pd.DataFrame()
sub_df[0] = pd.read_csv('./jinnan_round1_testB_20190121.csv', header=None)[0][1:]
sub_df[1] = predictions1
sub_df[1] = sub_df[1].apply(lambda x:round(x, 3))
sub_df.to_csv('./prediction.csv', index=False, header=None)
| self.model = model | identifier_body |
data_plt.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor
from sklearn.svm import SVR, LinearSVR
from sklearn.linear_model import ElasticNet, SGDRegressor, BayesianRidge,LinearRegression,Ridge,Lasso
from sklearn.kernel_ridge import KernelRidge
from xgboost import XGBRegressor
import lightgbm as lgb
import xgboost as xgb
from sklearn.model_selection import KFold, RepeatedKFold, cross_val_score, GridSearchCV
from sklearn.preprocessing import OneHotEncoder
from scipy import sparse
import warnings
import re
import plotly.offline as py
py.init_notebook_mode(connected=True)
from sklearn.metrics import mean_squared_error
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns',None)
pd.set_option('max_colwidth',100)
train = pd.read_csv('./jinnan_round1_train_20181227.csv')
test = pd.read_csv('./jinnan_round1_testB_20190121.csv')
class grid():
def | (self, model):
self.model = model
def grid_get(self, X, y, param_grid):
grid_search = GridSearchCV(self.model, param_grid, cv=5, scoring="neg_mean_squared_error")
grid_search.fit(X, y)
print(grid_search.best_params_, np.sqrt(-grid_search.best_score_))
grid_search.cv_results_['mean_test_score'] = np.sqrt(-grid_search.cv_results_['mean_test_score'])
print(pd.DataFrame(grid_search.cv_results_)[['params', 'mean_test_score', 'std_test_score']])
def get_phase(t1,t2):
try:
h1, m1, s1=t1.split(':')
h2, m2, s2=t2.split(':')
except:
if t1 == -1 or t2 == -1:
return -1
if int(h2) >= int(h1):
tm = (int(h2) * 3600 + int(m2) * 60 - int(m1) * 60 - int(h1) * 3600) / 3600
else:
tm = (int(h2) * 3600 + int(m2) * 60 - int(m1) * 60 - int(h1) * 3600) / 3600 + 24
return tm
def timeTranSecond(t):
try:
t, m, s = t.split(":")
except:
if t == '1900/1/9 7:00':
return 7 * 3600 / 3600
elif t == '1900/1/1 2:30':
return (2 * 3600 + 30 * 60) / 3600
elif t == -1:
return -1
else:
return 0
try:
tm = (int(t) * 3600 + int(m) * 60 + int(s)) / 3600
except:
return (30 * 60) / 3600
return tm
def getDuration(se):
try:
sh, sm, eh, em = re.findall(r"\d+\.?\d*", se)
except:
if se == -1:
return -1
try:
if int(sh) > int(eh):
tm = (int(eh) * 3600 + int(em) * 60 - int(sm) * 60 - int(sh) * 3600) / 3600 + 24
else:
tm = (int(eh) * 3600 + int(em) * 60 - int(sm) * 60 - int(sh) * 3600) / 3600
except:
if se == '19:-20:05':
return 1
elif se == '15:00-1600':
return 1
return tm
def rmse_cv(model,X,y):
rmse = np.sqrt(-cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=5))
return rmse
train.loc[train['B14'] == 40, 'B14'] = 400
train.drop(train[train['收率'] < 0.87].index, inplace=True)
full = pd.concat([train, test], ignore_index=True)
cols = ["A2", "A3", "A4"]
for col in cols:
full[col].fillna(0, inplace=True)
cols1 = ["A7", "A8", "B10", "B11", "A20", "A24", "A26"]
for col in cols1:
full[col].fillna(-1, inplace=True)
cols2 = ["B1", "B2", "B3", "B8", "B12", "B13", "A21", "A23"]
for col in cols2:
full[col].fillna(full[col].mode()[0], inplace=True)
full['a21_a22_a23'] = full['A21']+full['A22']+full['A23']
cols3 = ["A25", "A27"]
for col in cols3:
full[col] = full.groupby(['a21_a22_a23'])[col].transform(lambda x: x.fillna(x.median()))
full['a1_a3_a4']=full['A1']+full['A3']+full['A4']
full['a1_a3']=full['A1']+full['A3']
full['a1_a4']=full['A1']+full['A4']
full['a10_a6']=full['A10']-full['A6']
full['a12_a10']=full['A12']-full['A10']
full['a15_a12']=full['A15']-full['A12']
full['a17_a15']=full['A17']-full['A15']
full['a27_a25']=full['A27']-full['A25']
full['b6_b8']=full['B6']-full['B8']
full['a10_a6/a9_a5']=(full['A10']-full['A6'])/full.apply(lambda df:get_phase(df['A5'],df['A9']),axis=1)
full['a12_a10/a11_a9']=(full['A12']-full['A10'])/full.apply(lambda df:get_phase(df['A9'],df['A11']),axis=1)
full['a15_a12/a14_a11']=(full['A15']-full['A12'])/full.apply(lambda df:get_phase(df['A11'],df['A14']),axis=1)
full['a17_a15/a16_a14']=(full['A17']-full['A15'])/full.apply(lambda df:get_phase(df['A14'],df['A16']),axis=1)
full['a27_a25/a26_a24']=(full['A27']-full['A25'])/full.apply(lambda df:get_phase(df['A24'],df['A26']),axis=1)
full['b6_b8/b7_b5']=(full['B6']-full['B8'])/full.apply(lambda df:get_phase(df['B5'],df['B7']),axis=1)
full['b14/a1_a3_a4_a19_b1_b12'] = full['B14']/(full['A1']+full['A3']+full['A4']+full['A19']+full['B1']+full['B12'])
full['b14/a1_a3_a4_a19_b1_b12_b14'] = full['B12']/(full['A1']+full['A3']+full['A4']+full['A19']+full['B1']+full['B14'])
for f in ['A5', 'A7', 'A9', 'A11', 'A14', 'A16', 'A24', 'A26', 'B5', 'B7']:
try:
full[f] = full[f].apply(timeTranSecond)
except:
print(f, '应该在前面被删除了!')
for f in ['A20', 'A28', 'B4', 'B9', 'B10', 'B11']:
full[f] = full.apply(lambda df: getDuration(df[f]), axis=1)
full['样本id'] = full['样本id'].apply(lambda x: int(x.split('_')[1]))
good_cols=list(full.columns)
good_cols.remove('样本id')
good_cols.remove('收率')
# for f in good_cols:
# full[f] = full[f].map(dict(zip(full[f].unique(), range(0, full[f].nunique()))))
n_train=train.shape[0]
X = full[:n_train]
test_X = full[n_train:]
y= X.收率
X.drop(['收率'], axis=1, inplace=True)
test_X.drop(['收率'], axis=1, inplace=True)
# X_train = X[list(X.columns)].values
# X_test = test_X[list(X.columns)].values
# y_train = y.values
# # grid(Lasso()).grid_get(X,y,{'alpha': [0.02,0.0002,0.000222,0.0000224],'max_iter':[10000]})
# grid(xgb.XGBRegressor()).grid_get(X_train,y_train,{'num_leaves': [100],
# 'min_data_in_leaf': [9],
# 'objective': ['regression'],
# 'max_depth': [-1],
# 'learning_rate': [0.01],
# 'min_child_samples': [15],
# "boosting": ['gbdt'],
# "feature_fraction": [0.9],
# "bagging_freq": [1],
# "bagging_fraction": [0.9],
# "bagging_seed": [5,13,40,50],
# "metric": ['mse'],
# "lambda_l1": [0.000001],
# 'verbosity': [-1]})
# grid(xgb.XGBRegressor()).grid_get(X_train,y_train,{'eta': [0.1], 'max_depth': [6], 'subsample': [0.9],
# 'colsample_bytree': [0.5],'objective': ['reg:linear'],
# 'eval_metric': ['rmse'], 'silent': [True], 'nthread': [3]})
X_train = X[list(X.columns)].values
X_test = test_X[list(X.columns)].values
# one hot
enc = OneHotEncoder()
# for f in good_cols:
# enc.fit(full[f].values.reshape(-1, 1))
# X_train = sparse.hstack((X_train, enc.transform(X[f].values.reshape(-1, 1))), 'csr')
# X_test = sparse.hstack((X_test, enc.transform(test_X[f].values.reshape(-1, 1))), 'csr')
print(X_train.shape)
print(X_test.shape)
y_train = y.values
#
param = {'num_leaves': 100,
'min_data_in_leaf': 9,
'objective': 'regression',
'max_depth': -1,
'learning_rate': 0.01,
"min_child_samples": 15,
"boosting": "gbdt",
"feature_fraction": 0.9,
"bagging_freq": 1,
"bagging_fraction": 0.9,
"bagging_seed": 13,
"metric": 'mse',
"lambda_l1": 0.000001,
"verbosity": -1}
folds = KFold(n_splits=5, shuffle=True, random_state=2018)
oof_lgb = np.zeros(len(train))
predictions_lgb = np.zeros(len(test))
for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)):
print("fold n°{}".format(fold_ + 1))
trn_data = lgb.Dataset(X_train[trn_idx], y_train[trn_idx])
val_data = lgb.Dataset(X_train[val_idx], y_train[val_idx])
num_round = 3000
clf = lgb.train(param, trn_data, num_round, valid_sets=[trn_data, val_data], verbose_eval=200,
early_stopping_rounds=100)
oof_lgb[val_idx] = clf.predict(X_train[val_idx], num_iteration=clf.best_iteration)
predictions_lgb += clf.predict(X_test, num_iteration=clf.best_iteration) / folds.n_splits
print("CV score: {:<8.8f}".format(mean_squared_error(oof_lgb, y)))
##### xgb
xgb_params = {'eta': 0.1, 'max_depth': 6, 'subsample': 0.9, 'colsample_bytree': 0.5,
'objective': 'reg:linear', 'eval_metric': 'rmse', 'silent': True, 'nthread': 3}
folds = KFold(n_splits=5, shuffle=True, random_state=2018)
oof_xgb = np.zeros(len(train))
predictions_xgb = np.zeros(len(test))
for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)):
print("fold n°{}".format(fold_ + 1))
trn_data = xgb.DMatrix(X_train[trn_idx], y_train[trn_idx])
val_data = xgb.DMatrix(X_train[val_idx], y_train[val_idx])
watchlist = [(trn_data, 'train'), (val_data, 'valid_data')]
clf = xgb.train(dtrain=trn_data, num_boost_round=20000, evals=watchlist, early_stopping_rounds=200,
verbose_eval=100, params=xgb_params)
oof_xgb[val_idx] = clf.predict(xgb.DMatrix(X_train[val_idx]), ntree_limit=clf.best_ntree_limit)
predictions_xgb += clf.predict(xgb.DMatrix(X_test), ntree_limit=clf.best_ntree_limit) / folds.n_splits
print("CV score: {:<8.8f}".format(mean_squared_error(oof_xgb, y)))
# 将lgb和xgb的结果进行stacking
train_stack = np.vstack([oof_lgb, oof_xgb]).transpose()
test_stack = np.vstack([predictions_lgb, predictions_xgb]).transpose()
folds_stack = RepeatedKFold(n_splits=5, n_repeats=2, random_state=4590)
oof_stack1 = np.zeros(train_stack.shape[0])
predictions1 = np.zeros(test_stack.shape[0])
for fold_, (trn_idx, val_idx) in enumerate(folds_stack.split(train_stack, y)):
print("fold {}".format(fold_))
trn_data, trn_y = train_stack[trn_idx], y.iloc[trn_idx].values
val_data, val_y = train_stack[val_idx], y.iloc[val_idx].values
clf_3 = BayesianRidge()
clf_3.fit(trn_data, trn_y)
oof_stack1[val_idx] = clf_3.predict(val_data)
predictions1 += clf_3.predict(test_stack) / 10
print("CV score: {:<8.8f}".format(mean_squared_error(y.values, oof_stack1)))
sub_df = pd.DataFrame()
sub_df[0] = pd.read_csv('./jinnan_round1_testB_20190121.csv', header=None)[0][1:]
sub_df[1] = predictions1
sub_df[1] = sub_df[1].apply(lambda x:round(x, 3))
sub_df.to_csv('./prediction.csv', index=False, header=None)
| __init__ | identifier_name |
data_plt.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor
from sklearn.svm import SVR, LinearSVR
from sklearn.linear_model import ElasticNet, SGDRegressor, BayesianRidge,LinearRegression,Ridge,Lasso
from sklearn.kernel_ridge import KernelRidge
from xgboost import XGBRegressor
import lightgbm as lgb
import xgboost as xgb
from sklearn.model_selection import KFold, RepeatedKFold, cross_val_score, GridSearchCV
from sklearn.preprocessing import OneHotEncoder
from scipy import sparse
import warnings
import re
import plotly.offline as py
py.init_notebook_mode(connected=True)
from sklearn.metrics import mean_squared_error
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns',None)
pd.set_option('max_colwidth',100)
train = pd.read_csv('./jinnan_round1_train_20181227.csv')
test = pd.read_csv('./jinnan_round1_testB_20190121.csv')
class grid():
def __init__(self, model):
self.model = model
def grid_get(self, X, y, param_grid):
grid_search = GridSearchCV(self.model, param_grid, cv=5, scoring="neg_mean_squared_error")
grid_search.fit(X, y)
print(grid_search.best_params_, np.sqrt(-grid_search.best_score_))
grid_search.cv_results_['mean_test_score'] = np.sqrt(-grid_search.cv_results_['mean_test_score'])
print(pd.DataFrame(grid_search.cv_results_)[['params', 'mean_test_score', 'std_test_score']])
def get_phase(t1,t2):
try:
h1, m1, s1=t1.split(':')
h2, m2, s2=t2.split(':')
except:
if t1 == -1 or t2 == -1:
return -1
if int(h2) >= int(h1):
tm = (int(h2) * 3600 + int(m2) * 60 - int(m1) * 60 - int(h1) * 3600) / 3600
else:
tm = (int(h2) * 3600 + int(m2) * 60 - int(m1) * 60 - int(h1) * 3600) / 3600 + 24
return tm
def timeTranSecond(t):
try:
t, m, s = t.split(":")
except:
if t == '1900/1/9 7:00':
return 7 * 3600 / 3600
elif t == '1900/1/1 2:30':
return (2 * 3600 + 30 * 60) / 3600
elif t == -1:
return -1
else:
return 0
try:
tm = (int(t) * 3600 + int(m) * 60 + int(s)) / 3600
except:
return (30 * 60) / 3600
return tm
def getDuration(se):
try:
sh, sm, eh, em = re.findall(r"\d+\.?\d*", se)
except:
if se == -1:
return -1
try:
if int(sh) > int(eh):
tm = (int(eh) * 3600 + int(em) * 60 - int(sm) * 60 - int(sh) * 3600) / 3600 + 24
else:
tm = (int(eh) * 3600 + int(em) * 60 - int(sm) * 60 - int(sh) * 3600) / 3600
except:
if se == '19:-20:05':
return 1
elif se == '15:00-1600':
return 1
return tm
def rmse_cv(model, X, y):
    """Return the 5-fold cross-validated RMSE of `model` on (X, y), one value per fold."""
    scores = cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=5)
    return np.sqrt(-scores)
# --- Data cleaning -------------------------------------------------------
# Fix an apparent typo in B14 (40 -> 400) and drop low-yield outliers
# (收率 = yield) before combining train and test for joint preprocessing.
train.loc[train['B14'] == 40, 'B14'] = 400
train.drop(train[train['收率'] < 0.87].index, inplace=True)
full = pd.concat([train, test], ignore_index=True)
# Missing values in these columns are treated as "amount zero".
cols = ["A2", "A3", "A4"]
for col in cols:
    full[col].fillna(0, inplace=True)
# Missing values in these columns become the -1 sentinel understood by the
# time-parsing helpers above.
cols1 = ["A7", "A8", "B10", "B11", "A20", "A24", "A26"]
for col in cols1:
    # BUG FIX: this loop body and the cols2 assignment were garbled into a
    # single invalid line; reconstructed from the surrounding context.
    full[col].fillna(-1, inplace=True)
# These columns are imputed with their mode.
cols2 = ["B1", "B2", "B3", "B8", "B12", "B13", "A21", "A23"]
for col in cols2:
    full[col].fillna(full[col].mode()[0], inplace=True)
# Composite key used to group rows for median-based imputation below.
full['a21_a22_a23'] = full['A21']+full['A22']+full['A23']
cols3 = ["A25", "A27"]
for col in cols3:
    full[col] = full.groupby(['a21_a22_a23'])[col].transform(lambda x: x.fillna(x.median()))
# --- Feature engineering -------------------------------------------------
# Sums over A1/A3/A4 (presumably input material quantities — confirm
# against the competition data dictionary).
full['a1_a3_a4']=full['A1']+full['A3']+full['A4']
full['a1_a3']=full['A1']+full['A3']
full['a1_a4']=full['A1']+full['A4']
# Differences between consecutive process measurements.
full['a10_a6']=full['A10']-full['A6']
full['a12_a10']=full['A12']-full['A10']
full['a15_a12']=full['A15']-full['A12']
full['a17_a15']=full['A17']-full['A15']
full['a27_a25']=full['A27']-full['A25']
full['b6_b8']=full['B6']-full['B8']
# Same differences divided by the elapsed time between the paired timestamp
# columns (get_phase returns hours, or -1 for missing/bad timestamps — so
# these can divide by -1 or 0; values with those sentinels are not filtered).
full['a10_a6/a9_a5']=(full['A10']-full['A6'])/full.apply(lambda df:get_phase(df['A5'],df['A9']),axis=1)
full['a12_a10/a11_a9']=(full['A12']-full['A10'])/full.apply(lambda df:get_phase(df['A9'],df['A11']),axis=1)
full['a15_a12/a14_a11']=(full['A15']-full['A12'])/full.apply(lambda df:get_phase(df['A11'],df['A14']),axis=1)
full['a17_a15/a16_a14']=(full['A17']-full['A15'])/full.apply(lambda df:get_phase(df['A14'],df['A16']),axis=1)
full['a27_a25/a26_a24']=(full['A27']-full['A25'])/full.apply(lambda df:get_phase(df['A24'],df['A26']),axis=1)
full['b6_b8/b7_b5']=(full['B6']-full['B8'])/full.apply(lambda df:get_phase(df['B5'],df['B7']),axis=1)
# Ratios of B14 (resp. B12) to a sum of quantity columns.
full['b14/a1_a3_a4_a19_b1_b12'] = full['B14']/(full['A1']+full['A3']+full['A4']+full['A19']+full['B1']+full['B12'])
# NOTE(review): the feature name below says b14/..._b14 but the formula uses
# B12 in the numerator and B14 in the denominator — verify this swap is
# intentional and not a copy-paste slip.
full['b14/a1_a3_a4_a19_b1_b12_b14'] = full['B12']/(full['A1']+full['A3']+full['A4']+full['A19']+full['B1']+full['B14'])
# --- Convert raw time columns to numeric hours ---------------------------
# Point-in-time columns -> hours since midnight.
for f in ['A5', 'A7', 'A9', 'A11', 'A14', 'A16', 'A24', 'A26', 'B5', 'B7']:
    try:
        full[f] = full[f].apply(timeTranSecond)
    except:
        # Message: "should have been dropped earlier!" — column missing.
        print(f, '应该在前面被删除了!')
# Interval columns ('HH:MM-HH:MM') -> duration in hours.
for f in ['A20', 'A28', 'B4', 'B9', 'B10', 'B11']:
    full[f] = full.apply(lambda df: getDuration(df[f]), axis=1)
# 样本id = sample id; keep only the numeric part after the underscore.
full['样本id'] = full['样本id'].apply(lambda x: int(x.split('_')[1]))
# Candidate feature columns: everything except the id and the target (收率).
good_cols=list(full.columns)
good_cols.remove('样本id')
good_cols.remove('收率')
# Disabled label-encoding experiment kept for reference.
# for f in good_cols:
#     full[f] = full[f].map(dict(zip(full[f].unique(), range(0, full[f].nunique()))))
# Split the combined frame back into train/test by the original train length
# (valid because pd.concat preserved row order with ignore_index=True).
n_train=train.shape[0]
X = full[:n_train]
test_X = full[n_train:]
y= X.收率
# NOTE(review): X/test_X are slices of `full`; dropping inplace triggers
# pandas SettingWithCopy warnings but works here.
X.drop(['收率'], axis=1, inplace=True)
test_X.drop(['收率'], axis=1, inplace=True)
# Disabled grid-search experiments kept for reference.
# X_train = X[list(X.columns)].values
# X_test = test_X[list(X.columns)].values
# y_train = y.values
# # grid(Lasso()).grid_get(X,y,{'alpha': [0.02,0.0002,0.000222,0.0000224],'max_iter':[10000]})
# grid(xgb.XGBRegressor()).grid_get(X_train,y_train,{'num_leaves': [100],
#                                                    'min_data_in_leaf': [9],
#                                                    'objective': ['regression'],
#                                                    'max_depth': [-1],
#                                                    'learning_rate': [0.01],
#                                                    'min_child_samples': [15],
#                                                    "boosting": ['gbdt'],
#                                                    "feature_fraction": [0.9],
#                                                    "bagging_freq": [1],
#                                                    "bagging_fraction": [0.9],
#                                                    "bagging_seed": [5,13,40,50],
#                                                    "metric": ['mse'],
#                                                    "lambda_l1": [0.000001],
#                                                    'verbosity': [-1]})
# grid(xgb.XGBRegressor()).grid_get(X_train,y_train,{'eta': [0.1], 'max_depth': [6], 'subsample': [0.9],
#                                                    'colsample_bytree': [0.5],'objective': ['reg:linear'],
#                                                    'eval_metric': ['rmse'], 'silent': [True], 'nthread': [3]})
# --- LightGBM: 5-fold CV training ----------------------------------------
# Dense numpy matrices for the boosters.
X_train = X[list(X.columns)].values
X_test = test_X[list(X.columns)].values
# one hot
# Disabled one-hot-encoding experiment kept for reference (enc is created
# but unused while the loop below is commented out).
enc = OneHotEncoder()
# for f in good_cols:
#     enc.fit(full[f].values.reshape(-1, 1))
#     X_train = sparse.hstack((X_train, enc.transform(X[f].values.reshape(-1, 1))), 'csr')
#     X_test = sparse.hstack((X_test, enc.transform(test_X[f].values.reshape(-1, 1))), 'csr')
print(X_train.shape)
print(X_test.shape)
y_train = y.values
#
# LightGBM hyper-parameters (regression on MSE, mild bagging/feature
# subsampling, tiny L1).
param = {'num_leaves': 100,
         'min_data_in_leaf': 9,
         'objective': 'regression',
         'max_depth': -1,
         'learning_rate': 0.01,
         "min_child_samples": 15,
         "boosting": "gbdt",
         "feature_fraction": 0.9,
         "bagging_freq": 1,
         "bagging_fraction": 0.9,
         "bagging_seed": 13,
         "metric": 'mse',
         "lambda_l1": 0.000001,
         "verbosity": -1}
folds = KFold(n_splits=5, shuffle=True, random_state=2018)
# Out-of-fold predictions on train; averaged fold predictions on test.
oof_lgb = np.zeros(len(train))
predictions_lgb = np.zeros(len(test))
for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)):
    print("fold n°{}".format(fold_ + 1))
    trn_data = lgb.Dataset(X_train[trn_idx], y_train[trn_idx])
    val_data = lgb.Dataset(X_train[val_idx], y_train[val_idx])
    num_round = 3000
    # Early stopping on the validation fold; best_iteration is reused below.
    clf = lgb.train(param, trn_data, num_round, valid_sets=[trn_data, val_data], verbose_eval=200,
                    early_stopping_rounds=100)
    oof_lgb[val_idx] = clf.predict(X_train[val_idx], num_iteration=clf.best_iteration)
    # Each fold contributes 1/n_splits of the final test prediction.
    predictions_lgb += clf.predict(X_test, num_iteration=clf.best_iteration) / folds.n_splits
print("CV score: {:<8.8f}".format(mean_squared_error(oof_lgb, y)))
##### xgb
# --- XGBoost: same 5-fold CV scheme as LightGBM above --------------------
xgb_params = {'eta': 0.1, 'max_depth': 6, 'subsample': 0.9, 'colsample_bytree': 0.5,
              'objective': 'reg:linear', 'eval_metric': 'rmse', 'silent': True, 'nthread': 3}
# Same seed as the LightGBM folds, so both models see identical splits.
folds = KFold(n_splits=5, shuffle=True, random_state=2018)
oof_xgb = np.zeros(len(train))
predictions_xgb = np.zeros(len(test))
for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)):
    print("fold n°{}".format(fold_ + 1))
    trn_data = xgb.DMatrix(X_train[trn_idx], y_train[trn_idx])
    val_data = xgb.DMatrix(X_train[val_idx], y_train[val_idx])
    watchlist = [(trn_data, 'train'), (val_data, 'valid_data')]
    clf = xgb.train(dtrain=trn_data, num_boost_round=20000, evals=watchlist, early_stopping_rounds=200,
                    verbose_eval=100, params=xgb_params)
    # Predict with the early-stopped best tree count.
    oof_xgb[val_idx] = clf.predict(xgb.DMatrix(X_train[val_idx]), ntree_limit=clf.best_ntree_limit)
    predictions_xgb += clf.predict(xgb.DMatrix(X_test), ntree_limit=clf.best_ntree_limit) / folds.n_splits
print("CV score: {:<8.8f}".format(mean_squared_error(oof_xgb, y)))
# 将lgb和xgb的结果进行stacking
# --- Stack the LightGBM and XGBoost predictions with BayesianRidge -------
# Meta-features: one column per base model (shape: n_samples x 2).
train_stack = np.vstack([oof_lgb, oof_xgb]).transpose()
test_stack = np.vstack([predictions_lgb, predictions_xgb]).transpose()
folds_stack = RepeatedKFold(n_splits=5, n_repeats=2, random_state=4590)
oof_stack1 = np.zeros(train_stack.shape[0])
predictions1 = np.zeros(test_stack.shape[0])
for fold_, (trn_idx, val_idx) in enumerate(folds_stack.split(train_stack, y)):
    print("fold {}".format(fold_))
    trn_data, trn_y = train_stack[trn_idx], y.iloc[trn_idx].values
    val_data, val_y = train_stack[val_idx], y.iloc[val_idx].values
    clf_3 = BayesianRidge()
    clf_3.fit(trn_data, trn_y)
    # NOTE(review): with 2 repeats each sample appears in validation twice,
    # so oof_stack1 keeps only the second repeat's prediction per sample.
    oof_stack1[val_idx] = clf_3.predict(val_data)
    # /10 averages over the 10 fits (5 splits x 2 repeats).
    predictions1 += clf_3.predict(test_stack) / 10
print("CV score: {:<8.8f}".format(mean_squared_error(y.values, oof_stack1)))
# --- Write the submission file: sample id, rounded prediction ------------
sub_df = pd.DataFrame()
# Column 0: sample ids from the raw test file (skip the header row).
sub_df[0] = pd.read_csv('./jinnan_round1_testB_20190121.csv', header=None)[0][1:]
sub_df[1] = predictions1
sub_df[1] = sub_df[1].apply(lambda x:round(x, 3))
sub_df.to_csv('./prediction.csv', index=False, header=None)
| [col].fillna(-1, inplace=True)
col | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.