file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
test1.go | // Copyright 2018 Alexander S.Kresin <alex@kresin.ru>, http://www.kresin.ru
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
egui "github.com/alkresin/external"
"io/ioutil"
"strconv"
"time"
)
const (
CLR_LBLUE = 16759929
CLR_LBLUE0 = 12164479
CLR_LBLUE2 = 16770002
CLR_LBLUE3 = 16772062
CLR_LBLUE4 = 16775920
CLR_GRAY = 0x333333
CLR_LGRAY1 = 0xeeeeee
)
var arr = [][]string{{"Alex", "17", "1200", "f", "1"}, {"Victor", "42", "1600", "f", "2"},
{"John", "31", "1000", "f", "3"}}
// main connects to the GUI backend, registers reusable widget styles,
// builds the main window (menu bar, top button panel, a label, a button
// and a bottom status panel) and runs the event loop until the user exits.
func main() {
var sInit string
{
// Read optional init settings; an empty init string is used
// when test.ini is absent (the read error is intentionally ignored).
b, err := ioutil.ReadFile("test.ini")
if err != nil {
sInit = ""
} else {
sInit = string(b)
}
}
// Non-zero result: the GUI backend could not be started — nothing to do.
if egui.Init(sInit) != 0 {
return
}
// Styles st1..st4 are referenced by name from the widgets created below.
egui.CreateStyle(&(egui.Style{Name: "st1", Orient: 1, Colors: []int32{CLR_LBLUE, CLR_LBLUE3}}))
egui.CreateStyle(&(egui.Style{Name: "st2", Colors: []int32{CLR_LBLUE}, BorderW: 3}))
egui.CreateStyle(&(egui.Style{Name: "st3", Colors: []int32{CLR_LBLUE},
BorderW: 2, BorderClr: CLR_LBLUE0}))
egui.CreateStyle(&(egui.Style{Name: "st4", Colors: []int32{CLR_LBLUE2, CLR_LBLUE3},
BorderW: 1, BorderClr: CLR_LBLUE}))
pWindow := &egui.Widget{X: 100, Y: 100, W: 400, H: 280, Title: "External"}
egui.InitMainWindow(pWindow)
// Menu bar construction; the bare brace blocks are cosmetic grouping only.
egui.Menu("")
{
egui.Menu("File")
{
egui.AddMenuItem("Set text to label", 0,
func(p []string) string { egui.Widg("main.l1").SetText(p[1]); return "" }, "fsett2", "Bye...1")
egui.AddMenuSeparator()
egui.AddMenuItem("Printing", 0, fprint, "fprint")
egui.AddMenuSeparator()
// nil handler: the command string is executed by the GUI side itself.
egui.AddMenuItem("Exit", 0, nil, "hwg_EndWindow()")
}
egui.EndMenu()
egui.Menu("Dialogs")
{
egui.AddMenuItem("Open dialog", 0, fsett3, "fsett3")
egui.AddMenuItem("Test Tab", 0, ftab, "ftab")
egui.AddMenuItem("Test browse", 0, fbrowse, "fbrowse")
}
egui.EndMenu()
egui.Menu("Standard dialogs")
{
egui.AddMenuItem("Message boxes", 0, fmbox1, "fmbox1")
egui.AddMenuItem("MsgGet box", 0, fmbox2, "fmbox2")
egui.AddMenuItem("Choice", 0, fmbox3, "fmbox3")
egui.AddMenuItem("Select color", 0, fsele_color, "fsele_color")
egui.AddMenuItem("Select font", 0, fsele_font, "fsele_font")
egui.AddMenuItem("Select file", 0, fsele_file, "fsele_file")
}
egui.EndMenu()
egui.Menu("Help")
{
egui.AddMenuItem("About", 0, nil, "hwg_MsgInfo(hb_version()+chr(10)+chr(13)+hwg_version(),\"About\")")
}
egui.EndMenu()
}
egui.EndMenu()
// Top panel with three owner-drawn buttons; the first two are handled
// entirely on the GUI side, the third opens the Go-side dialog fsett3.
pPanel := pWindow.AddWidget(&egui.Widget{Type: "paneltop", H: 40,
AProps: map[string]string{"HStyle": "st1"}})
pPanel.AddWidget(&egui.Widget{Type: "ownbtn", X: 0, Y: 0, W: 56, H: 40, Title: "Date",
AProps: map[string]string{"HStyles": egui.ToString("st1", "st2", "st3")}})
egui.PLastWidget.SetCallBackProc("onclick", nil, "hwg_WriteStatus(HWindow():GetMain(),1,Dtoc(Date()),.T.)")
pPanel.AddWidget(&egui.Widget{Type: "ownbtn", X: 56, Y: 0, W: 56, H: 40, Title: "Time",
AProps: map[string]string{"HStyles": egui.ToString("st1", "st2", "st3")}})
egui.PLastWidget.SetCallBackProc("onclick", nil, "hwg_WriteStatus(HWindow():GetMain(),2,Time(),.T.)")
pPanel.AddWidget(&egui.Widget{Type: "ownbtn", X: 112, Y: 0, W: 56, H: 40, Title: "Get",
AProps: map[string]string{"HStyles": egui.ToString("st1", "st2", "st3")}})
egui.PLastWidget.SetCallBackProc("onclick", fsett3, "fsett3")
pWindow.AddWidget(&egui.Widget{Type: "label", Name: "l1",
X: 20, Y: 60, W: 180, H: 24, Title: "Test of a label",
AProps: map[string]string{"Transpa": "t"}})
pWindow.AddWidget(&egui.Widget{Type: "button", X: 200, Y: 56, W: 100, H: 32, Title: "SetText"})
egui.PLastWidget.SetCallBackProc("onclick", fsett1, "fsett1", "first parameter")
// Bottom status panel split into three parts (widths 120, 120, rest).
pWindow.AddWidget(&egui.Widget{Type: "panelbot", H: 32,
AProps: map[string]string{"HStyle": "st4", "AParts": egui.ToString(120, 120, 0)}})
// Activate blocks until the main window is closed.
pWindow.Activate()
egui.Exit()
}
// fsett1 is the "SetText" button callback: it prints the current text of
// the main window's label to stdout, then replaces it with the callback's
// first extra parameter.
func fsett1(p []string) string {
	lbl := egui.Widg("main.l1")
	fmt.Println(lbl.GetText())
	lbl.SetText(p[1])
	return ""
}
// fsett3 builds and shows the "Dialog Test" modal dialog with edits,
// a combo box, an up-down counter, check boxes and a radio group.
// All creation commands are batched into one packet for efficiency.
func fsett3([]string) string {
egui.BeginPacket()
egui.SetDateFormat("DD.MM.YYYY")
pFont := egui.CreateFont(&egui.Font{Name: "f1", Family: "Georgia", Height: 16})
pDlg := &egui.Widget{Name: "dlg", X: 300, Y: 200, W: 400, H: 260, Title: "Dialog Test", Font: pFont}
egui.InitDialog(pDlg)
pDlg.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 10, W: 160, H: 24, Title: "Identifier:"})
// "Picture" is an input mask applied by the GUI side.
pDlg.AddWidget(&egui.Widget{Type: "edit", Name: "edi1", X: 20, Y: 32, W: 160, H: 24,
AProps: map[string]string{"Picture": "@!R /XXX:XXX/"}})
pDlg.AddWidget(&egui.Widget{Type: "label", X: 220, Y: 10, W: 160, H: 24, Title: "Date:"})
// Pre-fill the date edit with today's date in YYYYMMDD form.
pDlg.AddWidget(&egui.Widget{Type: "edit", Name: "edi2", X: 220, Y: 32, W: 120, H: 24,
Title: time.Now().Format("20060102"), AProps: map[string]string{"Picture": "D@D"}})
pDlg.AddWidget(&egui.Widget{Type: "combo", Name: "comb", X: 20, Y: 68, W: 160, H: 24,
AProps: map[string]string{"AItems": egui.ToString("first", "second", "third")}})
pDlg.AddWidget(&egui.Widget{Type: "label", X: 220, Y: 68, W: 80, H: 24, Title: "Age:"})
pDlg.AddWidget(&egui.Widget{Type: "updown", Name: "upd1", X: 280, Y: 68, W: 60, H: 24})
pDlg.AddWidget(&egui.Widget{Type: "group", X: 10, Y: 110, W: 180, H: 76, Title: "Check"})
pDlg.AddWidget(&egui.Widget{Type: "check", Name: "chk1", X: 24, Y: 124, W: 150, H: 24, Title: "Married"})
pDlg.AddWidget(&egui.Widget{Type: "check", Name: "chk2", X: 24, Y: 148, W: 150, H: 24, Title: "Has children"})
pGroup := pDlg.AddWidget(&egui.Widget{Type: "radiogr", Name: "rg", X: 200, Y: 110, W: 180, H: 76, Title: "Radio"})
pDlg.AddWidget(&egui.Widget{Type: "radio", X: 224, Y: 124, W: 150, H: 24, Title: "Male"})
pDlg.AddWidget(&egui.Widget{Type: "radio", X: 224, Y: 148, W: 150, H: 24, Title: "Female"})
// Close the radio group, selecting entry 1 by default.
egui.RadioEnd(pGroup, 1)
pDlg.AddWidget(&egui.Widget{Type: "button", X: 150, Y: 220, W: 100, H: 32, Title: "Ok"})
egui.PLastWidget.SetCallBackProc("onclick", fsett4, "fsett4")
pDlg.Activate()
egui.EndPacket()
return ""
}
// fsett4 is the Ok handler for the "dlg" dialog: it collects the values
// of the named input widgets, closes the dialog and shows a summary box.
func fsett4([]string) string {
// NOTE: this local arr shadows the package-level arr on purpose.
arr := egui.GetValues(egui.Wnd("dlg"), []string{"edi1", "edi2", "comb", "chk1", "chk2", "rg", "upd1"})
egui.PLastWindow.Close()
egui.MsgInfo("Id: "+arr[0]+"\r\n"+"Date: "+arr[1]+"\r\n"+"Combo: "+arr[2]+"\r\n"+
"Married: "+arr[3]+"\r\n"+"Has children: "+arr[4]+"\r\n"+"Sex: "+arr[5]+"\r\n"+
"Age: "+arr[6], "Result", nil, "", "")
return ""
}
// ftab shows a two-page tab dialog ("First": name/surname edits,
// "Second": age/profession edits) with an Ok button that closes it.
// NoExitOnEsc/NoCloseAble force the user to leave via the Ok button.
func ftab([]string) string {
egui.BeginPacket()
pFont := egui.CreateFont(&egui.Font{Name: "f1", Family: "Georgia", Height: 16})
pDlg := &egui.Widget{Name: "dlg2", X: 300, Y: 200, W: 200, H: 340, Title: "Tab", Font: pFont,
AProps: map[string]string{"NoExitOnEsc": "t","NoCloseAble": "t"}}
egui.InitDialog(pDlg)
pTab := pDlg.AddWidget(&egui.Widget{Type: "tab", X: 10, Y: 10, W: 180, H: 280})
// Widgets added between TabPage and TabPageEnd land on that page.
egui.TabPage(pTab, "First")
pTab.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 30, W: 140, H: 24, Title: "Name:"})
pTab.AddWidget(&egui.Widget{Type: "edit", X: 20, Y: 52, W: 140, H: 24})
pTab.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 84, W: 140, H: 24, Title: "Surname:"})
pTab.AddWidget(&egui.Widget{Type: "edit", X: 20, Y: 106, W: 140, H: 24})
egui.TabPageEnd(pTab)
egui.TabPage(pTab, "Second")
pTab.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 40, W: 140, H: 24, Title: "Age:"})
pTab.AddWidget(&egui.Widget{Type: "edit", X: 20, Y: 62, W: 140, H: 24})
pTab.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 94, W: 140, H: 24, Title: "Profession:"})
pTab.AddWidget(&egui.Widget{Type: "edit", X: 20, Y: 116, W: 140, H: 24})
egui.TabPageEnd(pTab)
pDlg.AddWidget(&egui.Widget{Type: "button", X: 60, Y: 300, W: 100, H: 32, Title: "Ok"})
egui.PLastWidget.SetCallBackProc("onclick", ftabclose, "ftabclose")
pDlg.Activate()
egui.EndPacket()
return ""
}
// ftabclose is the tab dialog's Ok handler: it closes the most recently
// activated window (the dialog itself).
func ftabclose([]string) string {
egui.PLastWindow.Close()
return ""
}
// fbrowse shows a dialog with an array browse bound to the package-level
// arr, configures its colors/columns, wires position-change and
// right-click callbacks, and adds a label plus an Ok button.
func fbrowse([]string) string {
egui.BeginPacket()
pFont := egui.CreateFont(&egui.Font{Name: "f1", Family: "Georgia", Height: 16})
pDlg := &egui.Widget{Name: "dlg2", X: 300, Y: 200, W: 280, H: 240, Title: "browse", Font: pFont}
egui.InitDialog(pDlg)
pBrw := pDlg.AddWidget(&egui.Widget{Type: "browse", Name: "brw", X: 10, Y: 10, W: 260, H: 140})
// Visual parameters of the browse (header style, text/background colors).
pBrw.SetParam("oStyleHead", egui.GetStyle("st1"))
pBrw.SetParam("tColor", CLR_GRAY)
pBrw.SetParam("bColorSel", CLR_LGRAY1)
pBrw.SetParam("htbColor", CLR_LGRAY1)
pBrw.SetParam("tColorSel", 0)
pBrw.SetParam("httColor", 0)
pBrw.SetParam("lInFocus", true)
// Bind the shared data array, drop the 5th column, and only column 3
// ("Salary") is left editable.
egui.BrwSetArray(pBrw, &arr)
egui.BrwDelColumn(pBrw, 5)
egui.BrwSetColumn(pBrw, 1, "Name", 1, 0, false, 0)
egui.BrwSetColumn(pBrw, 2, "Age", 1, 0, false, 0)
egui.BrwSetColumn(pBrw, 3, "Salary", 1, 0, true, 0)
egui.BrwSetColumnEx(pBrw, 2, "bColor", CLR_LBLUE3)
egui.BrwSetColumnEx(pBrw, 2, "lResizable", false)
pBrw.SetCallBackProc("onposchanged", fbrwpc, "fbrwpc")
pBrw.SetCallBackProc("onrclick", fbrwrc, "fbrwrc")
pDlg.AddWidget(&egui.Widget{Type: "label", Name: "l1",
X: 90, Y: 160, W: 100, H: 24, Winstyle: egui.DT_CENTER})
pDlg.AddWidget(&egui.Widget{Type: "button", X: 90, Y: 200, W: 100, H: 32, Title: "Ok"})
egui.PLastWidget.SetCallBackProc("onclick", fbrwclose, "fbrwclose")
pDlg.Activate()
egui.EndPacket()
return ""
}
// fbrwclose is the browse dialog's Ok handler. It fetches the (possibly
// edited) array back from the browse, closes the dialog and reports every
// row whose Salary (index 2) differs from the original package-level arr.
func fbrwclose([]string) string {
	arrNew := egui.BrwGetArray(egui.Widg("dlg2.brw"))
	egui.PLastWindow.Close()
	s := ""
	for i, a := range arrNew {
		// Guard against a row-count mismatch between the returned data
		// and the original arr; indexing past arr would panic.
		if i >= len(arr) {
			break
		}
		if a[2] != arr[i][2] {
			s += a[0] + " => " + a[2] + "\r\n"
		}
	}
	// Only pop up the summary when something actually changed.
	if s != "" {
		egui.MsgInfo(s, "Changes", nil, "", "")
	}
	return ""
}
// fbrwpc is the browse "onposchanged" callback. p[1] carries the new
// 1-based row number; the matching person's name (column 0 of arr) is
// shown in the dialog's label. Out-of-range rows are ignored.
func fbrwpc(p []string) string {
	pLabel := egui.Widg("dlg2.l1")
	if len(p) > 1 {
		i, _ := strconv.Atoi(p[1])
		// Bound by the actual data length instead of the previous
		// hard-coded 3 so the callback stays correct if arr changes.
		if i > 0 && i <= len(arr) {
			pLabel.SetText(arr[i-1][0])
		}
	}
	return ""
}
// fbrwrc is the browse "onrclick" callback: it reports the clicked
// position. p[1] is the column and p[2] the row, as delivered by the GUI.
func fbrwrc(p []string) string {
if len(p) > 2 {
egui.MsgInfo(p[0]+" Row: "+p[2]+" Col: "+p[1], "Right click position", nil, "", "")
}
return ""
}
// fmbox1 demonstrates MsgYesNo. It is invoked twice: first from the menu
// (p[0] == "menu") to open the box, then re-entered with marker "mm1"
// carrying the answer in p[1] ("t" means Yes).
func fmbox1(p []string) string {
if p[0] == "menu" {
egui.MsgYesNo("Yes or No???", "MsgBox", fmbox1, "fmbox1", "mm1")
} else if p[0] == "mm1" {
if p[1] == "t" {
egui.MsgInfo("Yes!", "Answer", nil, "", "")
} else {
egui.MsgInfo("No...", "Answer", nil, "", "")
}
}
return ""
}
func | (p []string) string {
if p[0] == "menu" {
egui.MsgGet("Input something:", "MsgGet", 0, fmbox2, "fmbox2", "mm1")
} else if p[0] == "mm1" {
egui.MsgInfo(p[1], "Answer", nil, "", "")
}
return ""
}
// fmbox3 demonstrates the Choice list dialog: invoked from the menu it
// opens the list; re-entered with marker "mm1" it echoes the selection.
func fmbox3(p []string) string {
if p[0] == "menu" {
arr := []string{"Alex Petrov", "Serg Lama", "Jimmy Hendrix", "Dorian Gray", "Victor Peti"}
egui.Choice(arr, "Select from a list", fmbox3, "fmbox3", "mm1")
} else if p[0] == "mm1" {
egui.MsgInfo(p[1], "Answer", nil, "", "")
}
return ""
}
// fsele_color opens the standard color-selection dialog (menu entry) and,
// on re-entry, applies the chosen color to the main window's label.
func fsele_color(p []string) string {
if p[0] == "menu" {
egui.SelectColor(0, fsele_color, "fsele_color", "mm1")
} else {
// assumes the dialog always supplies the color value in p[1] —
// TODO(review): confirm against the library's callback contract.
iColor, _ := strconv.Atoi(p[1])
egui.Widg("main.l1").SetColor(int32(iColor), -1)
}
return ""
}
// fsele_font opens the standard font-selection dialog (menu entry) and,
// on re-entry, applies the selected font to the main window's label.
func fsele_font(p []string) string {
	if p[0] == "menu" {
		egui.SelectFont(fsele_font, "fsele_font", "")
		return ""
	}
	fmt.Println("font id: ", p[0])
	if pFont := egui.GetFont(p[0]); pFont != nil {
		// The family name is only present when the dialog returned the
		// full result set (8+ fields) — same behavior as before, with
		// the former empty if-branch inverted away.
		if len(p) >= 8 {
			fmt.Println("font fam: ", p[1])
		}
		egui.Widg("main.l1").SetFont(pFont)
	}
	return ""
}
// fsele_file opens the standard file-selection dialog (menu entry) and,
// on re-entry with marker "mm1", reports the chosen path or its absence.
func fsele_file(p []string) string {
if p[0] == "menu" {
egui.SelectFile("", fsele_file, "fsele_file", "mm1")
} else {
if p[1] == "" {
egui.MsgInfo("Nothing selected", "Result", nil, "", "")
} else {
egui.MsgInfo(p[1], "File selected", nil, "", "")
}
}
return ""
}
// fprint demonstrates printing. Invoked from the menu it opens the
// printer (preview mode); on re-entry it draws one page of boxes, lines
// and centered text, then finishes the print job.
func fprint(p []string) string {
if p[0] == "menu" {
egui.InitPrinter(&egui.Printer{SPrinter: "...", BPreview: true}, "fprint", fprint, "mm1")
} else {
pPrinter := egui.PLastPrinter
pFont := pPrinter.AddFont(&egui.Font{Family: "Times New Roman", Height: 10})
pPrinter.StartPage()
pPrinter.SetFont(pFont)
// Page frame, header box with a centered title, and a footer box.
pPrinter.Box(5, 5, 200, 282)
pPrinter.Say(50, 10, 165, 26, "Printing first sample !", egui.DT_CENTER)
pPrinter.Line(45, 30, 170, 30)
pPrinter.Line(45, 5, 45, 30)
pPrinter.Line(170, 5, 170, 30)
pPrinter.Say(50, 120, 150, 132, "----------", egui.DT_CENTER)
pPrinter.Box(50, 134, 160, 146)
pPrinter.Say(50, 135, 160, 146, "End Of Report", egui.DT_CENTER)
pPrinter.EndPage()
// End finishes the job and, with BPreview set, opens the preview.
pPrinter.End()
}
return ""
}
| fmbox2 | identifier_name |
test1.go | // Copyright 2018 Alexander S.Kresin <alex@kresin.ru>, http://www.kresin.ru
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
egui "github.com/alkresin/external"
"io/ioutil"
"strconv"
"time"
)
const (
CLR_LBLUE = 16759929
CLR_LBLUE0 = 12164479
CLR_LBLUE2 = 16770002
CLR_LBLUE3 = 16772062
CLR_LBLUE4 = 16775920
CLR_GRAY = 0x333333
CLR_LGRAY1 = 0xeeeeee
)
var arr = [][]string{{"Alex", "17", "1200", "f", "1"}, {"Victor", "42", "1600", "f", "2"},
{"John", "31", "1000", "f", "3"}}
func main() {
var sInit string
{
b, err := ioutil.ReadFile("test.ini")
if err != nil {
sInit = ""
} else {
sInit = string(b)
}
}
if egui.Init(sInit) != 0 {
return
}
egui.CreateStyle(&(egui.Style{Name: "st1", Orient: 1, Colors: []int32{CLR_LBLUE, CLR_LBLUE3}}))
egui.CreateStyle(&(egui.Style{Name: "st2", Colors: []int32{CLR_LBLUE}, BorderW: 3}))
egui.CreateStyle(&(egui.Style{Name: "st3", Colors: []int32{CLR_LBLUE},
BorderW: 2, BorderClr: CLR_LBLUE0}))
egui.CreateStyle(&(egui.Style{Name: "st4", Colors: []int32{CLR_LBLUE2, CLR_LBLUE3},
BorderW: 1, BorderClr: CLR_LBLUE}))
pWindow := &egui.Widget{X: 100, Y: 100, W: 400, H: 280, Title: "External"}
egui.InitMainWindow(pWindow)
egui.Menu("")
{
egui.Menu("File")
{
egui.AddMenuItem("Set text to label", 0,
func(p []string) string { egui.Widg("main.l1").SetText(p[1]); return "" }, "fsett2", "Bye...1")
egui.AddMenuSeparator()
egui.AddMenuItem("Printing", 0, fprint, "fprint")
egui.AddMenuSeparator()
egui.AddMenuItem("Exit", 0, nil, "hwg_EndWindow()")
}
egui.EndMenu()
egui.Menu("Dialogs")
{
egui.AddMenuItem("Open dialog", 0, fsett3, "fsett3")
egui.AddMenuItem("Test Tab", 0, ftab, "ftab")
egui.AddMenuItem("Test browse", 0, fbrowse, "fbrowse")
}
egui.EndMenu()
egui.Menu("Standard dialogs")
{
egui.AddMenuItem("Message boxes", 0, fmbox1, "fmbox1")
egui.AddMenuItem("MsgGet box", 0, fmbox2, "fmbox2")
egui.AddMenuItem("Choice", 0, fmbox3, "fmbox3")
egui.AddMenuItem("Select color", 0, fsele_color, "fsele_color")
egui.AddMenuItem("Select font", 0, fsele_font, "fsele_font")
egui.AddMenuItem("Select file", 0, fsele_file, "fsele_file")
}
egui.EndMenu()
egui.Menu("Help")
{
egui.AddMenuItem("About", 0, nil, "hwg_MsgInfo(hb_version()+chr(10)+chr(13)+hwg_version(),\"About\")")
}
egui.EndMenu()
}
egui.EndMenu()
pPanel := pWindow.AddWidget(&egui.Widget{Type: "paneltop", H: 40,
AProps: map[string]string{"HStyle": "st1"}})
pPanel.AddWidget(&egui.Widget{Type: "ownbtn", X: 0, Y: 0, W: 56, H: 40, Title: "Date",
AProps: map[string]string{"HStyles": egui.ToString("st1", "st2", "st3")}})
egui.PLastWidget.SetCallBackProc("onclick", nil, "hwg_WriteStatus(HWindow():GetMain(),1,Dtoc(Date()),.T.)")
pPanel.AddWidget(&egui.Widget{Type: "ownbtn", X: 56, Y: 0, W: 56, H: 40, Title: "Time",
AProps: map[string]string{"HStyles": egui.ToString("st1", "st2", "st3")}})
egui.PLastWidget.SetCallBackProc("onclick", nil, "hwg_WriteStatus(HWindow():GetMain(),2,Time(),.T.)")
pPanel.AddWidget(&egui.Widget{Type: "ownbtn", X: 112, Y: 0, W: 56, H: 40, Title: "Get",
AProps: map[string]string{"HStyles": egui.ToString("st1", "st2", "st3")}})
egui.PLastWidget.SetCallBackProc("onclick", fsett3, "fsett3")
pWindow.AddWidget(&egui.Widget{Type: "label", Name: "l1",
X: 20, Y: 60, W: 180, H: 24, Title: "Test of a label",
AProps: map[string]string{"Transpa": "t"}})
pWindow.AddWidget(&egui.Widget{Type: "button", X: 200, Y: 56, W: 100, H: 32, Title: "SetText"})
egui.PLastWidget.SetCallBackProc("onclick", fsett1, "fsett1", "first parameter")
pWindow.AddWidget(&egui.Widget{Type: "panelbot", H: 32,
AProps: map[string]string{"HStyle": "st4", "AParts": egui.ToString(120, 120, 0)}})
pWindow.Activate()
egui.Exit()
}
func fsett1(p []string) string {
pLabel := egui.Widg("main.l1")
fmt.Println(pLabel.GetText())
pLabel.SetText(p[1])
return ""
}
func fsett3([]string) string {
egui.BeginPacket()
egui.SetDateFormat("DD.MM.YYYY")
pFont := egui.CreateFont(&egui.Font{Name: "f1", Family: "Georgia", Height: 16})
pDlg := &egui.Widget{Name: "dlg", X: 300, Y: 200, W: 400, H: 260, Title: "Dialog Test", Font: pFont}
egui.InitDialog(pDlg)
pDlg.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 10, W: 160, H: 24, Title: "Identifier:"})
pDlg.AddWidget(&egui.Widget{Type: "edit", Name: "edi1", X: 20, Y: 32, W: 160, H: 24,
AProps: map[string]string{"Picture": "@!R /XXX:XXX/"}})
pDlg.AddWidget(&egui.Widget{Type: "label", X: 220, Y: 10, W: 160, H: 24, Title: "Date:"})
pDlg.AddWidget(&egui.Widget{Type: "edit", Name: "edi2", X: 220, Y: 32, W: 120, H: 24,
Title: time.Now().Format("20060102"), AProps: map[string]string{"Picture": "D@D"}}) | pDlg.AddWidget(&egui.Widget{Type: "updown", Name: "upd1", X: 280, Y: 68, W: 60, H: 24})
pDlg.AddWidget(&egui.Widget{Type: "group", X: 10, Y: 110, W: 180, H: 76, Title: "Check"})
pDlg.AddWidget(&egui.Widget{Type: "check", Name: "chk1", X: 24, Y: 124, W: 150, H: 24, Title: "Married"})
pDlg.AddWidget(&egui.Widget{Type: "check", Name: "chk2", X: 24, Y: 148, W: 150, H: 24, Title: "Has children"})
pGroup := pDlg.AddWidget(&egui.Widget{Type: "radiogr", Name: "rg", X: 200, Y: 110, W: 180, H: 76, Title: "Radio"})
pDlg.AddWidget(&egui.Widget{Type: "radio", X: 224, Y: 124, W: 150, H: 24, Title: "Male"})
pDlg.AddWidget(&egui.Widget{Type: "radio", X: 224, Y: 148, W: 150, H: 24, Title: "Female"})
egui.RadioEnd(pGroup, 1)
pDlg.AddWidget(&egui.Widget{Type: "button", X: 150, Y: 220, W: 100, H: 32, Title: "Ok"})
egui.PLastWidget.SetCallBackProc("onclick", fsett4, "fsett4")
pDlg.Activate()
egui.EndPacket()
return ""
}
func fsett4([]string) string {
arr := egui.GetValues(egui.Wnd("dlg"), []string{"edi1", "edi2", "comb", "chk1", "chk2", "rg", "upd1"})
egui.PLastWindow.Close()
egui.MsgInfo("Id: "+arr[0]+"\r\n"+"Date: "+arr[1]+"\r\n"+"Combo: "+arr[2]+"\r\n"+
"Married: "+arr[3]+"\r\n"+"Has children: "+arr[4]+"\r\n"+"Sex: "+arr[5]+"\r\n"+
"Age: "+arr[6], "Result", nil, "", "")
return ""
}
func ftab([]string) string {
egui.BeginPacket()
pFont := egui.CreateFont(&egui.Font{Name: "f1", Family: "Georgia", Height: 16})
pDlg := &egui.Widget{Name: "dlg2", X: 300, Y: 200, W: 200, H: 340, Title: "Tab", Font: pFont,
AProps: map[string]string{"NoExitOnEsc": "t","NoCloseAble": "t"}}
egui.InitDialog(pDlg)
pTab := pDlg.AddWidget(&egui.Widget{Type: "tab", X: 10, Y: 10, W: 180, H: 280})
egui.TabPage(pTab, "First")
pTab.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 30, W: 140, H: 24, Title: "Name:"})
pTab.AddWidget(&egui.Widget{Type: "edit", X: 20, Y: 52, W: 140, H: 24})
pTab.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 84, W: 140, H: 24, Title: "Surname:"})
pTab.AddWidget(&egui.Widget{Type: "edit", X: 20, Y: 106, W: 140, H: 24})
egui.TabPageEnd(pTab)
egui.TabPage(pTab, "Second")
pTab.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 40, W: 140, H: 24, Title: "Age:"})
pTab.AddWidget(&egui.Widget{Type: "edit", X: 20, Y: 62, W: 140, H: 24})
pTab.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 94, W: 140, H: 24, Title: "Profession:"})
pTab.AddWidget(&egui.Widget{Type: "edit", X: 20, Y: 116, W: 140, H: 24})
egui.TabPageEnd(pTab)
pDlg.AddWidget(&egui.Widget{Type: "button", X: 60, Y: 300, W: 100, H: 32, Title: "Ok"})
egui.PLastWidget.SetCallBackProc("onclick", ftabclose, "ftabclose")
pDlg.Activate()
egui.EndPacket()
return ""
}
func ftabclose([]string) string {
egui.PLastWindow.Close()
return ""
}
func fbrowse([]string) string {
egui.BeginPacket()
pFont := egui.CreateFont(&egui.Font{Name: "f1", Family: "Georgia", Height: 16})
pDlg := &egui.Widget{Name: "dlg2", X: 300, Y: 200, W: 280, H: 240, Title: "browse", Font: pFont}
egui.InitDialog(pDlg)
pBrw := pDlg.AddWidget(&egui.Widget{Type: "browse", Name: "brw", X: 10, Y: 10, W: 260, H: 140})
pBrw.SetParam("oStyleHead", egui.GetStyle("st1"))
pBrw.SetParam("tColor", CLR_GRAY)
pBrw.SetParam("bColorSel", CLR_LGRAY1)
pBrw.SetParam("htbColor", CLR_LGRAY1)
pBrw.SetParam("tColorSel", 0)
pBrw.SetParam("httColor", 0)
pBrw.SetParam("lInFocus", true)
egui.BrwSetArray(pBrw, &arr)
egui.BrwDelColumn(pBrw, 5)
egui.BrwSetColumn(pBrw, 1, "Name", 1, 0, false, 0)
egui.BrwSetColumn(pBrw, 2, "Age", 1, 0, false, 0)
egui.BrwSetColumn(pBrw, 3, "Salary", 1, 0, true, 0)
egui.BrwSetColumnEx(pBrw, 2, "bColor", CLR_LBLUE3)
egui.BrwSetColumnEx(pBrw, 2, "lResizable", false)
pBrw.SetCallBackProc("onposchanged", fbrwpc, "fbrwpc")
pBrw.SetCallBackProc("onrclick", fbrwrc, "fbrwrc")
pDlg.AddWidget(&egui.Widget{Type: "label", Name: "l1",
X: 90, Y: 160, W: 100, H: 24, Winstyle: egui.DT_CENTER})
pDlg.AddWidget(&egui.Widget{Type: "button", X: 90, Y: 200, W: 100, H: 32, Title: "Ok"})
egui.PLastWidget.SetCallBackProc("onclick", fbrwclose, "fbrwclose")
pDlg.Activate()
egui.EndPacket()
return ""
}
func fbrwclose([]string) string {
arrNew := egui.BrwGetArray(egui.Widg("dlg2.brw"))
egui.PLastWindow.Close()
s := ""
for i, a := range arrNew {
if a[2] != arr[i][2] {
s += a[0] + " => " + a[2] + "\r\n"
}
}
if s != "" {
egui.MsgInfo(s, "Changes", nil, "", "")
}
return ""
}
func fbrwpc(p []string) string {
pLabel := egui.Widg("dlg2.l1")
if len(p) > 1 {
i, _ := strconv.Atoi(p[1])
if i > 0 && i <= 3 {
pLabel.SetText(arr[i-1][0])
}
}
return ""
}
func fbrwrc(p []string) string {
if len(p) > 2 {
egui.MsgInfo(p[0]+" Row: "+p[2]+" Col: "+p[1], "Right click position", nil, "", "")
}
return ""
}
func fmbox1(p []string) string {
if p[0] == "menu" {
egui.MsgYesNo("Yes or No???", "MsgBox", fmbox1, "fmbox1", "mm1")
} else if p[0] == "mm1" {
if p[1] == "t" {
egui.MsgInfo("Yes!", "Answer", nil, "", "")
} else {
egui.MsgInfo("No...", "Answer", nil, "", "")
}
}
return ""
}
func fmbox2(p []string) string {
if p[0] == "menu" {
egui.MsgGet("Input something:", "MsgGet", 0, fmbox2, "fmbox2", "mm1")
} else if p[0] == "mm1" {
egui.MsgInfo(p[1], "Answer", nil, "", "")
}
return ""
}
func fmbox3(p []string) string {
if p[0] == "menu" {
arr := []string{"Alex Petrov", "Serg Lama", "Jimmy Hendrix", "Dorian Gray", "Victor Peti"}
egui.Choice(arr, "Select from a list", fmbox3, "fmbox3", "mm1")
} else if p[0] == "mm1" {
egui.MsgInfo(p[1], "Answer", nil, "", "")
}
return ""
}
func fsele_color(p []string) string {
if p[0] == "menu" {
egui.SelectColor(0, fsele_color, "fsele_color", "mm1")
} else {
iColor, _ := strconv.Atoi(p[1])
egui.Widg("main.l1").SetColor(int32(iColor), -1)
}
return ""
}
func fsele_font(p []string) string {
if p[0] == "menu" {
egui.SelectFont(fsele_font, "fsele_font", "")
} else {
fmt.Println("font id: ", p[0])
if pFont := egui.GetFont(p[0]); pFont != nil {
if len(p) < 8 {
} else {
fmt.Println("font fam: ", p[1])
}
egui.Widg("main.l1").SetFont( pFont );
}
}
return ""
}
func fsele_file(p []string) string {
if p[0] == "menu" {
egui.SelectFile("", fsele_file, "fsele_file", "mm1")
} else {
if p[1] == "" {
egui.MsgInfo("Nothing selected", "Result", nil, "", "")
} else {
egui.MsgInfo(p[1], "File selected", nil, "", "")
}
}
return ""
}
func fprint(p []string) string {
if p[0] == "menu" {
egui.InitPrinter(&egui.Printer{SPrinter: "...", BPreview: true}, "fprint", fprint, "mm1")
} else {
pPrinter := egui.PLastPrinter
pFont := pPrinter.AddFont(&egui.Font{Family: "Times New Roman", Height: 10})
pPrinter.StartPage()
pPrinter.SetFont(pFont)
pPrinter.Box(5, 5, 200, 282)
pPrinter.Say(50, 10, 165, 26, "Printing first sample !", egui.DT_CENTER)
pPrinter.Line(45, 30, 170, 30)
pPrinter.Line(45, 5, 45, 30)
pPrinter.Line(170, 5, 170, 30)
pPrinter.Say(50, 120, 150, 132, "----------", egui.DT_CENTER)
pPrinter.Box(50, 134, 160, 146)
pPrinter.Say(50, 135, 160, 146, "End Of Report", egui.DT_CENTER)
pPrinter.EndPage()
pPrinter.End()
}
return ""
} |
pDlg.AddWidget(&egui.Widget{Type: "combo", Name: "comb", X: 20, Y: 68, W: 160, H: 24,
AProps: map[string]string{"AItems": egui.ToString("first", "second", "third")}})
pDlg.AddWidget(&egui.Widget{Type: "label", X: 220, Y: 68, W: 80, H: 24, Title: "Age:"}) | random_line_split |
test1.go | // Copyright 2018 Alexander S.Kresin <alex@kresin.ru>, http://www.kresin.ru
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
egui "github.com/alkresin/external"
"io/ioutil"
"strconv"
"time"
)
const (
CLR_LBLUE = 16759929
CLR_LBLUE0 = 12164479
CLR_LBLUE2 = 16770002
CLR_LBLUE3 = 16772062
CLR_LBLUE4 = 16775920
CLR_GRAY = 0x333333
CLR_LGRAY1 = 0xeeeeee
)
var arr = [][]string{{"Alex", "17", "1200", "f", "1"}, {"Victor", "42", "1600", "f", "2"},
{"John", "31", "1000", "f", "3"}}
func main() {
var sInit string
{
b, err := ioutil.ReadFile("test.ini")
if err != nil {
sInit = ""
} else {
sInit = string(b)
}
}
if egui.Init(sInit) != 0 {
return
}
egui.CreateStyle(&(egui.Style{Name: "st1", Orient: 1, Colors: []int32{CLR_LBLUE, CLR_LBLUE3}}))
egui.CreateStyle(&(egui.Style{Name: "st2", Colors: []int32{CLR_LBLUE}, BorderW: 3}))
egui.CreateStyle(&(egui.Style{Name: "st3", Colors: []int32{CLR_LBLUE},
BorderW: 2, BorderClr: CLR_LBLUE0}))
egui.CreateStyle(&(egui.Style{Name: "st4", Colors: []int32{CLR_LBLUE2, CLR_LBLUE3},
BorderW: 1, BorderClr: CLR_LBLUE}))
pWindow := &egui.Widget{X: 100, Y: 100, W: 400, H: 280, Title: "External"}
egui.InitMainWindow(pWindow)
egui.Menu("")
{
egui.Menu("File")
{
egui.AddMenuItem("Set text to label", 0,
func(p []string) string { egui.Widg("main.l1").SetText(p[1]); return "" }, "fsett2", "Bye...1")
egui.AddMenuSeparator()
egui.AddMenuItem("Printing", 0, fprint, "fprint")
egui.AddMenuSeparator()
egui.AddMenuItem("Exit", 0, nil, "hwg_EndWindow()")
}
egui.EndMenu()
egui.Menu("Dialogs")
{
egui.AddMenuItem("Open dialog", 0, fsett3, "fsett3")
egui.AddMenuItem("Test Tab", 0, ftab, "ftab")
egui.AddMenuItem("Test browse", 0, fbrowse, "fbrowse")
}
egui.EndMenu()
egui.Menu("Standard dialogs")
{
egui.AddMenuItem("Message boxes", 0, fmbox1, "fmbox1")
egui.AddMenuItem("MsgGet box", 0, fmbox2, "fmbox2")
egui.AddMenuItem("Choice", 0, fmbox3, "fmbox3")
egui.AddMenuItem("Select color", 0, fsele_color, "fsele_color")
egui.AddMenuItem("Select font", 0, fsele_font, "fsele_font")
egui.AddMenuItem("Select file", 0, fsele_file, "fsele_file")
}
egui.EndMenu()
egui.Menu("Help")
{
egui.AddMenuItem("About", 0, nil, "hwg_MsgInfo(hb_version()+chr(10)+chr(13)+hwg_version(),\"About\")")
}
egui.EndMenu()
}
egui.EndMenu()
pPanel := pWindow.AddWidget(&egui.Widget{Type: "paneltop", H: 40,
AProps: map[string]string{"HStyle": "st1"}})
pPanel.AddWidget(&egui.Widget{Type: "ownbtn", X: 0, Y: 0, W: 56, H: 40, Title: "Date",
AProps: map[string]string{"HStyles": egui.ToString("st1", "st2", "st3")}})
egui.PLastWidget.SetCallBackProc("onclick", nil, "hwg_WriteStatus(HWindow():GetMain(),1,Dtoc(Date()),.T.)")
pPanel.AddWidget(&egui.Widget{Type: "ownbtn", X: 56, Y: 0, W: 56, H: 40, Title: "Time",
AProps: map[string]string{"HStyles": egui.ToString("st1", "st2", "st3")}})
egui.PLastWidget.SetCallBackProc("onclick", nil, "hwg_WriteStatus(HWindow():GetMain(),2,Time(),.T.)")
pPanel.AddWidget(&egui.Widget{Type: "ownbtn", X: 112, Y: 0, W: 56, H: 40, Title: "Get",
AProps: map[string]string{"HStyles": egui.ToString("st1", "st2", "st3")}})
egui.PLastWidget.SetCallBackProc("onclick", fsett3, "fsett3")
pWindow.AddWidget(&egui.Widget{Type: "label", Name: "l1",
X: 20, Y: 60, W: 180, H: 24, Title: "Test of a label",
AProps: map[string]string{"Transpa": "t"}})
pWindow.AddWidget(&egui.Widget{Type: "button", X: 200, Y: 56, W: 100, H: 32, Title: "SetText"})
egui.PLastWidget.SetCallBackProc("onclick", fsett1, "fsett1", "first parameter")
pWindow.AddWidget(&egui.Widget{Type: "panelbot", H: 32,
AProps: map[string]string{"HStyle": "st4", "AParts": egui.ToString(120, 120, 0)}})
pWindow.Activate()
egui.Exit()
}
func fsett1(p []string) string {
pLabel := egui.Widg("main.l1")
fmt.Println(pLabel.GetText())
pLabel.SetText(p[1])
return ""
}
func fsett3([]string) string {
egui.BeginPacket()
egui.SetDateFormat("DD.MM.YYYY")
pFont := egui.CreateFont(&egui.Font{Name: "f1", Family: "Georgia", Height: 16})
pDlg := &egui.Widget{Name: "dlg", X: 300, Y: 200, W: 400, H: 260, Title: "Dialog Test", Font: pFont}
egui.InitDialog(pDlg)
pDlg.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 10, W: 160, H: 24, Title: "Identifier:"})
pDlg.AddWidget(&egui.Widget{Type: "edit", Name: "edi1", X: 20, Y: 32, W: 160, H: 24,
AProps: map[string]string{"Picture": "@!R /XXX:XXX/"}})
pDlg.AddWidget(&egui.Widget{Type: "label", X: 220, Y: 10, W: 160, H: 24, Title: "Date:"})
pDlg.AddWidget(&egui.Widget{Type: "edit", Name: "edi2", X: 220, Y: 32, W: 120, H: 24,
Title: time.Now().Format("20060102"), AProps: map[string]string{"Picture": "D@D"}})
pDlg.AddWidget(&egui.Widget{Type: "combo", Name: "comb", X: 20, Y: 68, W: 160, H: 24,
AProps: map[string]string{"AItems": egui.ToString("first", "second", "third")}})
pDlg.AddWidget(&egui.Widget{Type: "label", X: 220, Y: 68, W: 80, H: 24, Title: "Age:"})
pDlg.AddWidget(&egui.Widget{Type: "updown", Name: "upd1", X: 280, Y: 68, W: 60, H: 24})
pDlg.AddWidget(&egui.Widget{Type: "group", X: 10, Y: 110, W: 180, H: 76, Title: "Check"})
pDlg.AddWidget(&egui.Widget{Type: "check", Name: "chk1", X: 24, Y: 124, W: 150, H: 24, Title: "Married"})
pDlg.AddWidget(&egui.Widget{Type: "check", Name: "chk2", X: 24, Y: 148, W: 150, H: 24, Title: "Has children"})
pGroup := pDlg.AddWidget(&egui.Widget{Type: "radiogr", Name: "rg", X: 200, Y: 110, W: 180, H: 76, Title: "Radio"})
pDlg.AddWidget(&egui.Widget{Type: "radio", X: 224, Y: 124, W: 150, H: 24, Title: "Male"})
pDlg.AddWidget(&egui.Widget{Type: "radio", X: 224, Y: 148, W: 150, H: 24, Title: "Female"})
egui.RadioEnd(pGroup, 1)
pDlg.AddWidget(&egui.Widget{Type: "button", X: 150, Y: 220, W: 100, H: 32, Title: "Ok"})
egui.PLastWidget.SetCallBackProc("onclick", fsett4, "fsett4")
pDlg.Activate()
egui.EndPacket()
return ""
}
func fsett4([]string) string {
arr := egui.GetValues(egui.Wnd("dlg"), []string{"edi1", "edi2", "comb", "chk1", "chk2", "rg", "upd1"})
egui.PLastWindow.Close()
egui.MsgInfo("Id: "+arr[0]+"\r\n"+"Date: "+arr[1]+"\r\n"+"Combo: "+arr[2]+"\r\n"+
"Married: "+arr[3]+"\r\n"+"Has children: "+arr[4]+"\r\n"+"Sex: "+arr[5]+"\r\n"+
"Age: "+arr[6], "Result", nil, "", "")
return ""
}
func ftab([]string) string {
egui.BeginPacket()
pFont := egui.CreateFont(&egui.Font{Name: "f1", Family: "Georgia", Height: 16})
pDlg := &egui.Widget{Name: "dlg2", X: 300, Y: 200, W: 200, H: 340, Title: "Tab", Font: pFont,
AProps: map[string]string{"NoExitOnEsc": "t","NoCloseAble": "t"}}
egui.InitDialog(pDlg)
pTab := pDlg.AddWidget(&egui.Widget{Type: "tab", X: 10, Y: 10, W: 180, H: 280})
egui.TabPage(pTab, "First")
pTab.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 30, W: 140, H: 24, Title: "Name:"})
pTab.AddWidget(&egui.Widget{Type: "edit", X: 20, Y: 52, W: 140, H: 24})
pTab.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 84, W: 140, H: 24, Title: "Surname:"})
pTab.AddWidget(&egui.Widget{Type: "edit", X: 20, Y: 106, W: 140, H: 24})
egui.TabPageEnd(pTab)
egui.TabPage(pTab, "Second")
pTab.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 40, W: 140, H: 24, Title: "Age:"})
pTab.AddWidget(&egui.Widget{Type: "edit", X: 20, Y: 62, W: 140, H: 24})
pTab.AddWidget(&egui.Widget{Type: "label", X: 20, Y: 94, W: 140, H: 24, Title: "Profession:"})
pTab.AddWidget(&egui.Widget{Type: "edit", X: 20, Y: 116, W: 140, H: 24})
egui.TabPageEnd(pTab)
pDlg.AddWidget(&egui.Widget{Type: "button", X: 60, Y: 300, W: 100, H: 32, Title: "Ok"})
egui.PLastWidget.SetCallBackProc("onclick", ftabclose, "ftabclose")
pDlg.Activate()
egui.EndPacket()
return ""
}
func ftabclose([]string) string {
egui.PLastWindow.Close()
return ""
}
func fbrowse([]string) string {
egui.BeginPacket()
pFont := egui.CreateFont(&egui.Font{Name: "f1", Family: "Georgia", Height: 16})
pDlg := &egui.Widget{Name: "dlg2", X: 300, Y: 200, W: 280, H: 240, Title: "browse", Font: pFont}
egui.InitDialog(pDlg)
pBrw := pDlg.AddWidget(&egui.Widget{Type: "browse", Name: "brw", X: 10, Y: 10, W: 260, H: 140})
pBrw.SetParam("oStyleHead", egui.GetStyle("st1"))
pBrw.SetParam("tColor", CLR_GRAY)
pBrw.SetParam("bColorSel", CLR_LGRAY1)
pBrw.SetParam("htbColor", CLR_LGRAY1)
pBrw.SetParam("tColorSel", 0)
pBrw.SetParam("httColor", 0)
pBrw.SetParam("lInFocus", true)
egui.BrwSetArray(pBrw, &arr)
egui.BrwDelColumn(pBrw, 5)
egui.BrwSetColumn(pBrw, 1, "Name", 1, 0, false, 0)
egui.BrwSetColumn(pBrw, 2, "Age", 1, 0, false, 0)
egui.BrwSetColumn(pBrw, 3, "Salary", 1, 0, true, 0)
egui.BrwSetColumnEx(pBrw, 2, "bColor", CLR_LBLUE3)
egui.BrwSetColumnEx(pBrw, 2, "lResizable", false)
pBrw.SetCallBackProc("onposchanged", fbrwpc, "fbrwpc")
pBrw.SetCallBackProc("onrclick", fbrwrc, "fbrwrc")
pDlg.AddWidget(&egui.Widget{Type: "label", Name: "l1",
X: 90, Y: 160, W: 100, H: 24, Winstyle: egui.DT_CENTER})
pDlg.AddWidget(&egui.Widget{Type: "button", X: 90, Y: 200, W: 100, H: 32, Title: "Ok"})
egui.PLastWidget.SetCallBackProc("onclick", fbrwclose, "fbrwclose")
pDlg.Activate()
egui.EndPacket()
return ""
}
func fbrwclose([]string) string {
arrNew := egui.BrwGetArray(egui.Widg("dlg2.brw"))
egui.PLastWindow.Close()
s := ""
for i, a := range arrNew {
if a[2] != arr[i][2] {
s += a[0] + " => " + a[2] + "\r\n"
}
}
if s != "" {
egui.MsgInfo(s, "Changes", nil, "", "")
}
return ""
}
func fbrwpc(p []string) string {
pLabel := egui.Widg("dlg2.l1")
if len(p) > 1 {
i, _ := strconv.Atoi(p[1])
if i > 0 && i <= 3 {
pLabel.SetText(arr[i-1][0])
}
}
return ""
}
func fbrwrc(p []string) string {
if len(p) > 2 {
egui.MsgInfo(p[0]+" Row: "+p[2]+" Col: "+p[1], "Right click position", nil, "", "")
}
return ""
}
func fmbox1(p []string) string {
if p[0] == "menu" {
egui.MsgYesNo("Yes or No???", "MsgBox", fmbox1, "fmbox1", "mm1")
} else if p[0] == "mm1" {
if p[1] == "t" {
egui.MsgInfo("Yes!", "Answer", nil, "", "")
} else {
egui.MsgInfo("No...", "Answer", nil, "", "")
}
}
return ""
}
func fmbox2(p []string) string {
if p[0] == "menu" {
egui.MsgGet("Input something:", "MsgGet", 0, fmbox2, "fmbox2", "mm1")
} else if p[0] == "mm1" {
egui.MsgInfo(p[1], "Answer", nil, "", "")
}
return ""
}
func fmbox3(p []string) string {
if p[0] == "menu" {
arr := []string{"Alex Petrov", "Serg Lama", "Jimmy Hendrix", "Dorian Gray", "Victor Peti"}
egui.Choice(arr, "Select from a list", fmbox3, "fmbox3", "mm1")
} else if p[0] == "mm1" {
egui.MsgInfo(p[1], "Answer", nil, "", "")
}
return ""
}
func fsele_color(p []string) string {
if p[0] == "menu" | else {
iColor, _ := strconv.Atoi(p[1])
egui.Widg("main.l1").SetColor(int32(iColor), -1)
}
return ""
}
func fsele_font(p []string) string {
if p[0] == "menu" {
egui.SelectFont(fsele_font, "fsele_font", "")
} else {
fmt.Println("font id: ", p[0])
if pFont := egui.GetFont(p[0]); pFont != nil {
if len(p) < 8 {
} else {
fmt.Println("font fam: ", p[1])
}
egui.Widg("main.l1").SetFont( pFont );
}
}
return ""
}
func fsele_file(p []string) string {
if p[0] == "menu" {
egui.SelectFile("", fsele_file, "fsele_file", "mm1")
} else {
if p[1] == "" {
egui.MsgInfo("Nothing selected", "Result", nil, "", "")
} else {
egui.MsgInfo(p[1], "File selected", nil, "", "")
}
}
return ""
}
func fprint(p []string) string {
if p[0] == "menu" {
egui.InitPrinter(&egui.Printer{SPrinter: "...", BPreview: true}, "fprint", fprint, "mm1")
} else {
pPrinter := egui.PLastPrinter
pFont := pPrinter.AddFont(&egui.Font{Family: "Times New Roman", Height: 10})
pPrinter.StartPage()
pPrinter.SetFont(pFont)
pPrinter.Box(5, 5, 200, 282)
pPrinter.Say(50, 10, 165, 26, "Printing first sample !", egui.DT_CENTER)
pPrinter.Line(45, 30, 170, 30)
pPrinter.Line(45, 5, 45, 30)
pPrinter.Line(170, 5, 170, 30)
pPrinter.Say(50, 120, 150, 132, "----------", egui.DT_CENTER)
pPrinter.Box(50, 134, 160, 146)
pPrinter.Say(50, 135, 160, 146, "End Of Report", egui.DT_CENTER)
pPrinter.EndPage()
pPrinter.End()
}
return ""
}
| {
egui.SelectColor(0, fsele_color, "fsele_color", "mm1")
} | conditional_block |
serial_play.py | #!/usr/bin/env python
#
# Reads a vgm or vgz file and parses its contents.
# The file must have extension either .vgm for uncompressed
# files or .vgz for compressed files. After opening the file it sends music
# music data to serial port specified by first parameter.
# USB speed is configured at 9600 bps by default.
#
import gzip
import struct
import os
import sys
import time
import serial
# Serial transmission speed. Must match bps set in Serial.begin(bps)
# at the 'vgm2149_vgm_player.ino' file.
#BAUD = 115200
BAUD = 57600
WAIT60TH = 1.0/60 # delay 60th frame
WAIT50TH = 1.0/50 # delay 50th frame
class VGMReader(object):
def __parse_gd3_info(self):
# See:
# http://vgmrips.net/wiki/GD3_Specification
#
def readcstr():
chars = []
while True:
c = self.__fd.read(2)
# bytes(2) means two repetitions of value zero
if c == bytes(2):
return ("".join(chars))
chars.append(c.decode('utf-16'))
# Seek to start of string data
self.__fd.seek (self.__header['gd3_offset'] + 12)
self.__header['track_name'] = readcstr()
self.__header['track_name_jpn'] = readcstr()
self.__header['game_name'] = readcstr()
self.__header['game_name_jpn'] = readcstr()
self.__header['system_name'] = readcstr()
self.__header['system_name_jpn'] = readcstr()
self.__header['author_name'] = readcstr()
self.__header['author_name_jpn'] = readcstr()
self.__header['date'] = readcstr()
def __parse_header(self):
# See:
# http://vgmrips.net/wiki/VGM_Specification
#
# Read from header offsets 0x00 to 0x20
#
vgm_header = '< 4s I I I I I I I'
s = self.__fd.read(struct.calcsize(vgm_header))
d = {}
(d['id'],
d['eof_offset'],
d['version'],
d['clk_sn76489'],
d['clk_ym2413'],
d['gd3_offset'],
d['total_samples'],
d['loop_offset'],
) = struct.unpack(vgm_header, s)
# Store absolute offset of gd3_offset
d['gd3_offset'] += 0x14
# Read the relative offset to VGM data stream
self.__fd.seek(0x34)
s = self.__fd.read(4)
# Store absolute offset (0x34 + vgm_data_offset)
d['vgm_data_offset'] = struct.unpack('< I', s)[0] + 0x34
# Store loop offset relative to vgm song data
d['loop_offset'] += 0x1C - d['vgm_data_offset']
# Seek to ay8910 clock info (absolute offset 0x74)
# but only if version > 1.50
d['clk_ay8910'] = 0
if d['version'] > 0x150:
self.__fd.seek(0x74)
s = self.__fd.read (4)
d['clk_ay8910'] = struct.unpack('< I', s)[0]
self.__header = d
# In python3 everything we read from a binary file are bytes
# so we need to decode these to str to show them correctly.
d['id'] = d['id'].decode()
# Get version in string format 'maj.min'
d['str_version'] = self.__get_str_version()
self.__parse_gd3_info()
def __get_str_version (self):
high, low = divmod (self.__header['version'], 0x100)
str_version = format(high, 'x') + '.' + format(low, 'x')
return str_version
def __read_data_interleaved(self):
cnt = self.__header['nb_frames']
regs = [self.__fd.read(cnt) for i in range(16)]
self.__data = [f for f in zip(*regs)]
def __read_data(self):
|
def __init__(self, fd):
self.__fd = fd
self.__parse_header()
self.__data = []
def dump_header(self):
print("\x1b[2J")
for k in ('id', 'str_version', 'total_samples',
'track_name', 'game_name', 'system_name','author_name', 'date'):
print("\x1b[36;1m{:>20}\x1b[0m: \x1b[37;1m{}\x1b[0m".format(k, self.__header[k]))
# Print sound chips used in this VGM file
snd_chips = {
'clk_sn76489' : 'SN_76489',
'clk_ym2413' : 'YM_2413',
'clk_ay8910' : 'AY_3_8910'
}
str_chips = ""
for key, snd_chip in snd_chips.items():
if self.__header[key]:
str_chips += '[{} @ {:01.2f} MHz]\n'.format(snd_chip, self.__header[key]/1000000.0)
print ("\x1b[36;1m{:>20}\x1b[0m: \x1b[37;1m{}\x1b[0m ".format('Sound chip', str_chips))
def dump_data(self):
toHex = lambda x: "".join("{:02X} ".format(c) for c in x)
print (toHex (self.__data))
def get_header(self):
return self.__header
def get_data(self):
if not self.__data:
self.__read_data()
return self.__data
def to_minsec(frames, frames_rate):
secs = frames // frames_rate
mins = secs // 60
secs = secs % 60
return (mins, secs)
def send_data(ser, data, current_pos, nbytes):
#print(data[current_pos:current_pos+nbytes].hex())
ser.write(data[current_pos:current_pos+nbytes])
#print()
def vgm_play(data, header):
samples_played = 0;
song_min, song_sec = to_minsec(header['total_samples'], 44100)
with serial.Serial(sys.argv[1], BAUD) as ser:
print("\n\x1b[33;1mIninitalizing USB serial...\x1b[0m", end='')
time.sleep(2) # Wait for Arduino reset
frame_t = time.time()
print("\x1b[32;1m Ok\x1b[0m")
print("\x1b[31;1mPlaying...\x1b[0m")
try:
i = 0
# Interpret vgm sound data until we read end of
# sound data (0x66)
while (True):
while data[i] != 0x66:
# 0x50 dd: SN76489, write value dd
if data[i] == 0x50:
send_data(ser, data, i, 2) # Send 2 bytes to USB serial: '0x50 dd'
i += 2
# 0xA0 aa dd: AY-3-8910, write value dd to register aa
elif data[i] == 0xA0:
send_data(ser, data, i, 3) # Send 3 bytes to USB serial: '0xA0 aa dd'
i += 3
# 0x61 nn nn: Wait n samples, n from 0..65535 (approx 1.49 seconds)
elif data[i] == 0x61:
wait_value = struct.unpack('< H', data[i+1:i+3])[0]
samples_played += wait_value
#print(wait_value)
# Substract time spent in code
wait_value = 1.0 * wait_value / 44100 - (time.time() - frame_t)
time.sleep( wait_value if wait_value >= 0 else 0)
frame_t = time.time()
i += 3
# 0x62: Wait 1/60th second
elif data[i] == 0x62:
wait_value = WAIT60TH - (time.time() - frame_t)
time.sleep(wait_value if wait_value > 0 else 0)
frame_t = time.time()
i += 1
samples_played += 735
# 0x63: Wait 1/50th second
elif data[i] == 0x63:
wait_value = WAIT50TH - (time.time() - frame_t)
time.sleep(wait_value if wait_value > 0 else 0)
frame_t = time.time()
i += 1
samples_played += 882
# 0x7n: Wait n+1 samples, n can range from 0 to 15.
elif data[i] in range (0x70, 0x80):
#print(hex(data[i]))
wait_value = data[i] & 0x0F
samples_played += wait_value
time.sleep( 1.0 * wait_value / 44100)
i += 1
# Unknown VGM Command
else:
print("Unknown cmd {:x} at offset {:x} ".format(data[i], i))
i += 1
# Additionnal processing
cur_min, cur_sec = to_minsec(samples_played, 44100)
#sys.stdout.write(
# "\x1b[2K\rPlaying \x1b[36;1m{0:02}:{1:02} \x1b[0m/ \x1b[37;1m{2:02}:{3:02}\x1b[0m".format(
# cur_min, cur_sec, song_min, song_sec))
#sys.stdout.flush()
# 0x66: End of Sound Data
new_offset = header['loop_offset']
i = new_offset if new_offset >= 0 else 0
# Clear vgm2149 registers
#ser.write(16) # Write 16 bytes set to 0x00
#print("")
except KeyboardInterrupt:
# Clear vgm2149 registers
ser.write(bytes([0xFF]))
print("Aborted")
def main():
header = None
data = None
if len(sys.argv) != 3:
print("Syntax is: {} <output_device> <vgm_filepath>".format(
sys.argv[0]))
exit(0)
#
# Utiliza gzip.open si el archivo está comprimido
#
if (os.path.splitext (sys.argv[2])[1] == '.vgz'):
with gzip.open (sys.argv[2], 'rb') as fd:
vgm = VGMReader(fd)
vgm.dump_header()
header = vgm.get_header()
data = vgm.get_data()
else:
with open(sys.argv[2], 'rb') as fd:
vgm = VGMReader(fd)
vgm.dump_header()
header = vgm.get_header()
data = vgm.get_data()
vgm_play(data, header)
if __name__ == '__main__':
main() | cnt = self.__header['gd3_offset'] - self.__header['vgm_data_offset']
self.__fd.seek(self.__header['vgm_data_offset'])
self.__data = self.__fd.read(cnt) | identifier_body |
serial_play.py | #!/usr/bin/env python
#
# Reads a vgm or vgz file and parses its contents.
# The file must have extension either .vgm for uncompressed
# files or .vgz for compressed files. After opening the file it sends music
# music data to serial port specified by first parameter.
# USB speed is configured at 9600 bps by default.
#
import gzip
import struct
import os
import sys
import time
import serial
# Serial transmission speed. Must match bps set in Serial.begin(bps)
# at the 'vgm2149_vgm_player.ino' file.
#BAUD = 115200
BAUD = 57600
WAIT60TH = 1.0/60 # delay 60th frame
WAIT50TH = 1.0/50 # delay 50th frame
class VGMReader(object):
def __parse_gd3_info(self):
# See:
# http://vgmrips.net/wiki/GD3_Specification
#
def readcstr():
chars = []
while True:
c = self.__fd.read(2)
# bytes(2) means two repetitions of value zero
if c == bytes(2):
return ("".join(chars))
chars.append(c.decode('utf-16'))
# Seek to start of string data
self.__fd.seek (self.__header['gd3_offset'] + 12)
self.__header['track_name'] = readcstr()
self.__header['track_name_jpn'] = readcstr()
self.__header['game_name'] = readcstr()
self.__header['game_name_jpn'] = readcstr()
self.__header['system_name'] = readcstr()
self.__header['system_name_jpn'] = readcstr()
self.__header['author_name'] = readcstr()
self.__header['author_name_jpn'] = readcstr()
self.__header['date'] = readcstr()
def __parse_header(self):
# See:
# http://vgmrips.net/wiki/VGM_Specification
#
# Read from header offsets 0x00 to 0x20
#
vgm_header = '< 4s I I I I I I I'
s = self.__fd.read(struct.calcsize(vgm_header))
d = {}
(d['id'],
d['eof_offset'],
d['version'],
d['clk_sn76489'],
d['clk_ym2413'],
d['gd3_offset'],
d['total_samples'],
d['loop_offset'],
) = struct.unpack(vgm_header, s)
# Store absolute offset of gd3_offset
d['gd3_offset'] += 0x14
# Read the relative offset to VGM data stream
self.__fd.seek(0x34)
s = self.__fd.read(4)
# Store absolute offset (0x34 + vgm_data_offset)
d['vgm_data_offset'] = struct.unpack('< I', s)[0] + 0x34
# Store loop offset relative to vgm song data
d['loop_offset'] += 0x1C - d['vgm_data_offset']
# Seek to ay8910 clock info (absolute offset 0x74)
# but only if version > 1.50
d['clk_ay8910'] = 0
if d['version'] > 0x150:
self.__fd.seek(0x74)
s = self.__fd.read (4)
d['clk_ay8910'] = struct.unpack('< I', s)[0]
self.__header = d
# In python3 everything we read from a binary file are bytes
# so we need to decode these to str to show them correctly.
d['id'] = d['id'].decode()
# Get version in string format 'maj.min'
d['str_version'] = self.__get_str_version()
self.__parse_gd3_info()
def __get_str_version (self):
high, low = divmod (self.__header['version'], 0x100)
str_version = format(high, 'x') + '.' + format(low, 'x')
return str_version
def __read_data_interleaved(self):
cnt = self.__header['nb_frames']
regs = [self.__fd.read(cnt) for i in range(16)]
self.__data = [f for f in zip(*regs)]
def __read_data(self):
cnt = self.__header['gd3_offset'] - self.__header['vgm_data_offset']
self.__fd.seek(self.__header['vgm_data_offset'])
self.__data = self.__fd.read(cnt)
def __init__(self, fd):
self.__fd = fd
self.__parse_header()
self.__data = []
def dump_header(self):
print("\x1b[2J")
for k in ('id', 'str_version', 'total_samples',
'track_name', 'game_name', 'system_name','author_name', 'date'):
print("\x1b[36;1m{:>20}\x1b[0m: \x1b[37;1m{}\x1b[0m".format(k, self.__header[k]))
# Print sound chips used in this VGM file
snd_chips = {
'clk_sn76489' : 'SN_76489',
'clk_ym2413' : 'YM_2413',
'clk_ay8910' : 'AY_3_8910'
}
str_chips = ""
for key, snd_chip in snd_chips.items():
if self.__header[key]:
str_chips += '[{} @ {:01.2f} MHz]\n'.format(snd_chip, self.__header[key]/1000000.0)
print ("\x1b[36;1m{:>20}\x1b[0m: \x1b[37;1m{}\x1b[0m ".format('Sound chip', str_chips))
def dump_data(self):
toHex = lambda x: "".join("{:02X} ".format(c) for c in x)
print (toHex (self.__data))
def get_header(self):
return self.__header
def get_data(self):
if not self.__data:
self.__read_data()
return self.__data
def to_minsec(frames, frames_rate):
secs = frames // frames_rate
mins = secs // 60
secs = secs % 60
return (mins, secs)
def send_data(ser, data, current_pos, nbytes):
#print(data[current_pos:current_pos+nbytes].hex())
ser.write(data[current_pos:current_pos+nbytes])
#print()
def vgm_play(data, header):
samples_played = 0;
song_min, song_sec = to_minsec(header['total_samples'], 44100)
with serial.Serial(sys.argv[1], BAUD) as ser:
print("\n\x1b[33;1mIninitalizing USB serial...\x1b[0m", end='')
time.sleep(2) # Wait for Arduino reset
frame_t = time.time()
print("\x1b[32;1m Ok\x1b[0m")
print("\x1b[31;1mPlaying...\x1b[0m")
try:
i = 0
# Interpret vgm sound data until we read end of
# sound data (0x66)
while (True):
while data[i] != 0x66:
# 0x50 dd: SN76489, write value dd
if data[i] == 0x50:
send_data(ser, data, i, 2) # Send 2 bytes to USB serial: '0x50 dd'
i += 2
# 0xA0 aa dd: AY-3-8910, write value dd to register aa
elif data[i] == 0xA0:
send_data(ser, data, i, 3) # Send 3 bytes to USB serial: '0xA0 aa dd'
i += 3
# 0x61 nn nn: Wait n samples, n from 0..65535 (approx 1.49 seconds)
elif data[i] == 0x61:
|
# 0x62: Wait 1/60th second
elif data[i] == 0x62:
wait_value = WAIT60TH - (time.time() - frame_t)
time.sleep(wait_value if wait_value > 0 else 0)
frame_t = time.time()
i += 1
samples_played += 735
# 0x63: Wait 1/50th second
elif data[i] == 0x63:
wait_value = WAIT50TH - (time.time() - frame_t)
time.sleep(wait_value if wait_value > 0 else 0)
frame_t = time.time()
i += 1
samples_played += 882
# 0x7n: Wait n+1 samples, n can range from 0 to 15.
elif data[i] in range (0x70, 0x80):
#print(hex(data[i]))
wait_value = data[i] & 0x0F
samples_played += wait_value
time.sleep( 1.0 * wait_value / 44100)
i += 1
# Unknown VGM Command
else:
print("Unknown cmd {:x} at offset {:x} ".format(data[i], i))
i += 1
# Additionnal processing
cur_min, cur_sec = to_minsec(samples_played, 44100)
#sys.stdout.write(
# "\x1b[2K\rPlaying \x1b[36;1m{0:02}:{1:02} \x1b[0m/ \x1b[37;1m{2:02}:{3:02}\x1b[0m".format(
# cur_min, cur_sec, song_min, song_sec))
#sys.stdout.flush()
# 0x66: End of Sound Data
new_offset = header['loop_offset']
i = new_offset if new_offset >= 0 else 0
# Clear vgm2149 registers
#ser.write(16) # Write 16 bytes set to 0x00
#print("")
except KeyboardInterrupt:
# Clear vgm2149 registers
ser.write(bytes([0xFF]))
print("Aborted")
def main():
header = None
data = None
if len(sys.argv) != 3:
print("Syntax is: {} <output_device> <vgm_filepath>".format(
sys.argv[0]))
exit(0)
#
# Utiliza gzip.open si el archivo está comprimido
#
if (os.path.splitext (sys.argv[2])[1] == '.vgz'):
with gzip.open (sys.argv[2], 'rb') as fd:
vgm = VGMReader(fd)
vgm.dump_header()
header = vgm.get_header()
data = vgm.get_data()
else:
with open(sys.argv[2], 'rb') as fd:
vgm = VGMReader(fd)
vgm.dump_header()
header = vgm.get_header()
data = vgm.get_data()
vgm_play(data, header)
if __name__ == '__main__':
main() | wait_value = struct.unpack('< H', data[i+1:i+3])[0]
samples_played += wait_value
#print(wait_value)
# Substract time spent in code
wait_value = 1.0 * wait_value / 44100 - (time.time() - frame_t)
time.sleep( wait_value if wait_value >= 0 else 0)
frame_t = time.time()
i += 3 | conditional_block |
serial_play.py | #!/usr/bin/env python
#
# Reads a vgm or vgz file and parses its contents.
# The file must have extension either .vgm for uncompressed
# files or .vgz for compressed files. After opening the file it sends music
# music data to serial port specified by first parameter.
# USB speed is configured at 9600 bps by default.
#
import gzip
import struct
import os
import sys
import time
import serial
# Serial transmission speed. Must match bps set in Serial.begin(bps)
# at the 'vgm2149_vgm_player.ino' file.
#BAUD = 115200
BAUD = 57600
WAIT60TH = 1.0/60 # delay 60th frame
WAIT50TH = 1.0/50 # delay 50th frame
class VGMReader(object):
def __parse_gd3_info(self):
# See:
# http://vgmrips.net/wiki/GD3_Specification
#
def readcstr():
chars = []
while True:
c = self.__fd.read(2)
# bytes(2) means two repetitions of value zero
if c == bytes(2):
return ("".join(chars))
chars.append(c.decode('utf-16'))
# Seek to start of string data
self.__fd.seek (self.__header['gd3_offset'] + 12)
self.__header['track_name'] = readcstr()
self.__header['track_name_jpn'] = readcstr()
self.__header['game_name'] = readcstr()
self.__header['game_name_jpn'] = readcstr()
self.__header['system_name'] = readcstr()
self.__header['system_name_jpn'] = readcstr()
self.__header['author_name'] = readcstr()
self.__header['author_name_jpn'] = readcstr()
self.__header['date'] = readcstr()
def __parse_header(self):
# See:
# http://vgmrips.net/wiki/VGM_Specification
#
# Read from header offsets 0x00 to 0x20
#
vgm_header = '< 4s I I I I I I I'
s = self.__fd.read(struct.calcsize(vgm_header))
d = {}
(d['id'],
d['eof_offset'],
d['version'],
d['clk_sn76489'],
d['clk_ym2413'],
d['gd3_offset'],
d['total_samples'],
d['loop_offset'],
) = struct.unpack(vgm_header, s)
# Store absolute offset of gd3_offset
d['gd3_offset'] += 0x14
# Read the relative offset to VGM data stream
self.__fd.seek(0x34)
s = self.__fd.read(4)
# Store absolute offset (0x34 + vgm_data_offset)
d['vgm_data_offset'] = struct.unpack('< I', s)[0] + 0x34
# Store loop offset relative to vgm song data
d['loop_offset'] += 0x1C - d['vgm_data_offset']
# Seek to ay8910 clock info (absolute offset 0x74)
# but only if version > 1.50
d['clk_ay8910'] = 0
if d['version'] > 0x150:
self.__fd.seek(0x74)
s = self.__fd.read (4)
d['clk_ay8910'] = struct.unpack('< I', s)[0]
self.__header = d
# In python3 everything we read from a binary file are bytes
# so we need to decode these to str to show them correctly.
d['id'] = d['id'].decode()
# Get version in string format 'maj.min'
d['str_version'] = self.__get_str_version()
self.__parse_gd3_info()
def __get_str_version (self):
high, low = divmod (self.__header['version'], 0x100)
str_version = format(high, 'x') + '.' + format(low, 'x')
return str_version
def __read_data_interleaved(self):
cnt = self.__header['nb_frames']
regs = [self.__fd.read(cnt) for i in range(16)]
self.__data = [f for f in zip(*regs)]
def __read_data(self):
cnt = self.__header['gd3_offset'] - self.__header['vgm_data_offset']
self.__fd.seek(self.__header['vgm_data_offset'])
self.__data = self.__fd.read(cnt)
def __init__(self, fd):
self.__fd = fd
self.__parse_header()
self.__data = []
def dump_header(self):
print("\x1b[2J")
for k in ('id', 'str_version', 'total_samples',
'track_name', 'game_name', 'system_name','author_name', 'date'):
print("\x1b[36;1m{:>20}\x1b[0m: \x1b[37;1m{}\x1b[0m".format(k, self.__header[k]))
# Print sound chips used in this VGM file
snd_chips = {
'clk_sn76489' : 'SN_76489',
'clk_ym2413' : 'YM_2413',
'clk_ay8910' : 'AY_3_8910'
}
str_chips = ""
for key, snd_chip in snd_chips.items():
if self.__header[key]:
str_chips += '[{} @ {:01.2f} MHz]\n'.format(snd_chip, self.__header[key]/1000000.0)
print ("\x1b[36;1m{:>20}\x1b[0m: \x1b[37;1m{}\x1b[0m ".format('Sound chip', str_chips))
def dump_data(self):
toHex = lambda x: "".join("{:02X} ".format(c) for c in x)
print (toHex (self.__data))
def get_header(self):
return self.__header
def get_data(self):
if not self.__data:
self.__read_data()
return self.__data
def to_minsec(frames, frames_rate):
secs = frames // frames_rate
mins = secs // 60
secs = secs % 60
return (mins, secs)
def send_data(ser, data, current_pos, nbytes):
#print(data[current_pos:current_pos+nbytes].hex())
ser.write(data[current_pos:current_pos+nbytes])
#print()
def vgm_play(data, header):
samples_played = 0;
song_min, song_sec = to_minsec(header['total_samples'], 44100)
with serial.Serial(sys.argv[1], BAUD) as ser:
print("\n\x1b[33;1mIninitalizing USB serial...\x1b[0m", end='')
time.sleep(2) # Wait for Arduino reset
frame_t = time.time()
print("\x1b[32;1m Ok\x1b[0m")
print("\x1b[31;1mPlaying...\x1b[0m")
try:
i = 0
# Interpret vgm sound data until we read end of
# sound data (0x66)
while (True):
while data[i] != 0x66:
# 0x50 dd: SN76489, write value dd
if data[i] == 0x50:
send_data(ser, data, i, 2) # Send 2 bytes to USB serial: '0x50 dd'
i += 2
# 0xA0 aa dd: AY-3-8910, write value dd to register aa
elif data[i] == 0xA0:
send_data(ser, data, i, 3) # Send 3 bytes to USB serial: '0xA0 aa dd'
i += 3
# 0x61 nn nn: Wait n samples, n from 0..65535 (approx 1.49 seconds)
elif data[i] == 0x61:
wait_value = struct.unpack('< H', data[i+1:i+3])[0]
samples_played += wait_value
#print(wait_value)
# Substract time spent in code
wait_value = 1.0 * wait_value / 44100 - (time.time() - frame_t)
time.sleep( wait_value if wait_value >= 0 else 0)
frame_t = time.time()
i += 3
# 0x62: Wait 1/60th second
elif data[i] == 0x62:
wait_value = WAIT60TH - (time.time() - frame_t)
|
# 0x63: Wait 1/50th second
elif data[i] == 0x63:
wait_value = WAIT50TH - (time.time() - frame_t)
time.sleep(wait_value if wait_value > 0 else 0)
frame_t = time.time()
i += 1
samples_played += 882
# 0x7n: Wait n+1 samples, n can range from 0 to 15.
elif data[i] in range (0x70, 0x80):
#print(hex(data[i]))
wait_value = data[i] & 0x0F
samples_played += wait_value
time.sleep( 1.0 * wait_value / 44100)
i += 1
# Unknown VGM Command
else:
print("Unknown cmd {:x} at offset {:x} ".format(data[i], i))
i += 1
# Additionnal processing
cur_min, cur_sec = to_minsec(samples_played, 44100)
#sys.stdout.write(
# "\x1b[2K\rPlaying \x1b[36;1m{0:02}:{1:02} \x1b[0m/ \x1b[37;1m{2:02}:{3:02}\x1b[0m".format(
# cur_min, cur_sec, song_min, song_sec))
#sys.stdout.flush()
# 0x66: End of Sound Data
new_offset = header['loop_offset']
i = new_offset if new_offset >= 0 else 0
# Clear vgm2149 registers
#ser.write(16) # Write 16 bytes set to 0x00
#print("")
except KeyboardInterrupt:
# Clear vgm2149 registers
ser.write(bytes([0xFF]))
print("Aborted")
def main():
header = None
data = None
if len(sys.argv) != 3:
print("Syntax is: {} <output_device> <vgm_filepath>".format(
sys.argv[0]))
exit(0)
#
# Utiliza gzip.open si el archivo está comprimido
#
if (os.path.splitext (sys.argv[2])[1] == '.vgz'):
with gzip.open (sys.argv[2], 'rb') as fd:
vgm = VGMReader(fd)
vgm.dump_header()
header = vgm.get_header()
data = vgm.get_data()
else:
with open(sys.argv[2], 'rb') as fd:
vgm = VGMReader(fd)
vgm.dump_header()
header = vgm.get_header()
data = vgm.get_data()
vgm_play(data, header)
if __name__ == '__main__':
main() | time.sleep(wait_value if wait_value > 0 else 0)
frame_t = time.time()
i += 1
samples_played += 735
| random_line_split |
serial_play.py | #!/usr/bin/env python
#
# Reads a vgm or vgz file and parses its contents.
# The file must have extension either .vgm for uncompressed
# files or .vgz for compressed files. After opening the file it sends music
# music data to serial port specified by first parameter.
# USB speed is configured at 9600 bps by default.
#
import gzip
import struct
import os
import sys
import time
import serial
# Serial transmission speed. Must match bps set in Serial.begin(bps)
# at the 'vgm2149_vgm_player.ino' file.
#BAUD = 115200
BAUD = 57600
WAIT60TH = 1.0/60 # delay 60th frame
WAIT50TH = 1.0/50 # delay 50th frame
class VGMReader(object):
def __parse_gd3_info(self):
# See:
# http://vgmrips.net/wiki/GD3_Specification
#
def readcstr():
chars = []
while True:
c = self.__fd.read(2)
# bytes(2) means two repetitions of value zero
if c == bytes(2):
return ("".join(chars))
chars.append(c.decode('utf-16'))
# Seek to start of string data
self.__fd.seek (self.__header['gd3_offset'] + 12)
self.__header['track_name'] = readcstr()
self.__header['track_name_jpn'] = readcstr()
self.__header['game_name'] = readcstr()
self.__header['game_name_jpn'] = readcstr()
self.__header['system_name'] = readcstr()
self.__header['system_name_jpn'] = readcstr()
self.__header['author_name'] = readcstr()
self.__header['author_name_jpn'] = readcstr()
self.__header['date'] = readcstr()
def __parse_header(self):
# See:
# http://vgmrips.net/wiki/VGM_Specification
#
# Read from header offsets 0x00 to 0x20
#
vgm_header = '< 4s I I I I I I I'
s = self.__fd.read(struct.calcsize(vgm_header))
d = {}
(d['id'],
d['eof_offset'],
d['version'],
d['clk_sn76489'],
d['clk_ym2413'],
d['gd3_offset'],
d['total_samples'],
d['loop_offset'],
) = struct.unpack(vgm_header, s)
# Store absolute offset of gd3_offset
d['gd3_offset'] += 0x14
# Read the relative offset to VGM data stream
self.__fd.seek(0x34)
s = self.__fd.read(4)
# Store absolute offset (0x34 + vgm_data_offset)
d['vgm_data_offset'] = struct.unpack('< I', s)[0] + 0x34
# Store loop offset relative to vgm song data
d['loop_offset'] += 0x1C - d['vgm_data_offset']
# Seek to ay8910 clock info (absolute offset 0x74)
# but only if version > 1.50
d['clk_ay8910'] = 0
if d['version'] > 0x150:
self.__fd.seek(0x74)
s = self.__fd.read (4)
d['clk_ay8910'] = struct.unpack('< I', s)[0]
self.__header = d
# In python3 everything we read from a binary file are bytes
# so we need to decode these to str to show them correctly.
d['id'] = d['id'].decode()
# Get version in string format 'maj.min'
d['str_version'] = self.__get_str_version()
self.__parse_gd3_info()
def __get_str_version (self):
high, low = divmod (self.__header['version'], 0x100)
str_version = format(high, 'x') + '.' + format(low, 'x')
return str_version
def __read_data_interleaved(self):
cnt = self.__header['nb_frames']
regs = [self.__fd.read(cnt) for i in range(16)]
self.__data = [f for f in zip(*regs)]
def __read_data(self):
cnt = self.__header['gd3_offset'] - self.__header['vgm_data_offset']
self.__fd.seek(self.__header['vgm_data_offset'])
self.__data = self.__fd.read(cnt)
def __init__(self, fd):
self.__fd = fd
self.__parse_header()
self.__data = []
def dump_header(self):
print("\x1b[2J")
for k in ('id', 'str_version', 'total_samples',
'track_name', 'game_name', 'system_name','author_name', 'date'):
print("\x1b[36;1m{:>20}\x1b[0m: \x1b[37;1m{}\x1b[0m".format(k, self.__header[k]))
# Print sound chips used in this VGM file
snd_chips = {
'clk_sn76489' : 'SN_76489',
'clk_ym2413' : 'YM_2413',
'clk_ay8910' : 'AY_3_8910'
}
str_chips = ""
for key, snd_chip in snd_chips.items():
if self.__header[key]:
str_chips += '[{} @ {:01.2f} MHz]\n'.format(snd_chip, self.__header[key]/1000000.0)
print ("\x1b[36;1m{:>20}\x1b[0m: \x1b[37;1m{}\x1b[0m ".format('Sound chip', str_chips))
def dump_data(self):
toHex = lambda x: "".join("{:02X} ".format(c) for c in x)
print (toHex (self.__data))
def get_header(self):
return self.__header
def get_data(self):
if not self.__data:
self.__read_data()
return self.__data
def to_minsec(frames, frames_rate):
secs = frames // frames_rate
mins = secs // 60
secs = secs % 60
return (mins, secs)
def send_data(ser, data, current_pos, nbytes):
#print(data[current_pos:current_pos+nbytes].hex())
ser.write(data[current_pos:current_pos+nbytes])
#print()
def | (data, header):
samples_played = 0;
song_min, song_sec = to_minsec(header['total_samples'], 44100)
with serial.Serial(sys.argv[1], BAUD) as ser:
print("\n\x1b[33;1mIninitalizing USB serial...\x1b[0m", end='')
time.sleep(2) # Wait for Arduino reset
frame_t = time.time()
print("\x1b[32;1m Ok\x1b[0m")
print("\x1b[31;1mPlaying...\x1b[0m")
try:
i = 0
# Interpret vgm sound data until we read end of
# sound data (0x66)
while (True):
while data[i] != 0x66:
# 0x50 dd: SN76489, write value dd
if data[i] == 0x50:
send_data(ser, data, i, 2) # Send 2 bytes to USB serial: '0x50 dd'
i += 2
# 0xA0 aa dd: AY-3-8910, write value dd to register aa
elif data[i] == 0xA0:
send_data(ser, data, i, 3) # Send 3 bytes to USB serial: '0xA0 aa dd'
i += 3
# 0x61 nn nn: Wait n samples, n from 0..65535 (approx 1.49 seconds)
elif data[i] == 0x61:
wait_value = struct.unpack('< H', data[i+1:i+3])[0]
samples_played += wait_value
#print(wait_value)
# Substract time spent in code
wait_value = 1.0 * wait_value / 44100 - (time.time() - frame_t)
time.sleep( wait_value if wait_value >= 0 else 0)
frame_t = time.time()
i += 3
# 0x62: Wait 1/60th second
elif data[i] == 0x62:
wait_value = WAIT60TH - (time.time() - frame_t)
time.sleep(wait_value if wait_value > 0 else 0)
frame_t = time.time()
i += 1
samples_played += 735
# 0x63: Wait 1/50th second
elif data[i] == 0x63:
wait_value = WAIT50TH - (time.time() - frame_t)
time.sleep(wait_value if wait_value > 0 else 0)
frame_t = time.time()
i += 1
samples_played += 882
# 0x7n: Wait n+1 samples, n can range from 0 to 15.
elif data[i] in range (0x70, 0x80):
#print(hex(data[i]))
wait_value = data[i] & 0x0F
samples_played += wait_value
time.sleep( 1.0 * wait_value / 44100)
i += 1
# Unknown VGM Command
else:
print("Unknown cmd {:x} at offset {:x} ".format(data[i], i))
i += 1
# Additionnal processing
cur_min, cur_sec = to_minsec(samples_played, 44100)
#sys.stdout.write(
# "\x1b[2K\rPlaying \x1b[36;1m{0:02}:{1:02} \x1b[0m/ \x1b[37;1m{2:02}:{3:02}\x1b[0m".format(
# cur_min, cur_sec, song_min, song_sec))
#sys.stdout.flush()
# 0x66: End of Sound Data
new_offset = header['loop_offset']
i = new_offset if new_offset >= 0 else 0
# Clear vgm2149 registers
#ser.write(16) # Write 16 bytes set to 0x00
#print("")
except KeyboardInterrupt:
# Clear vgm2149 registers
ser.write(bytes([0xFF]))
print("Aborted")
def main():
header = None
data = None
if len(sys.argv) != 3:
print("Syntax is: {} <output_device> <vgm_filepath>".format(
sys.argv[0]))
exit(0)
#
# Utiliza gzip.open si el archivo está comprimido
#
if (os.path.splitext (sys.argv[2])[1] == '.vgz'):
with gzip.open (sys.argv[2], 'rb') as fd:
vgm = VGMReader(fd)
vgm.dump_header()
header = vgm.get_header()
data = vgm.get_data()
else:
with open(sys.argv[2], 'rb') as fd:
vgm = VGMReader(fd)
vgm.dump_header()
header = vgm.get_header()
data = vgm.get_data()
vgm_play(data, header)
if __name__ == '__main__':
main() | vgm_play | identifier_name |
test_utils.py | #!/usr/bin/python
from __future__ import division, generators
from hwgrader.utils import TestError
from hwgrader.module_utils import _IOW
from hwgrader import MMLOG_DEVICE_PATH, MMLOG_MODULE_LOAD, MMLOG_MODULE_UNLOAD
import fcntl
import pickle
import shutil
import time
import errno
import sys
import os
import re
__all__ = [
'KERN_IDLE',
'KERN_EMERG',
'KERN_ALERT',
'KERN_CRIT',
'KERN_ERR',
'KERN_WARNING',
'KERN_NOTICE',
'KERN_INFO',
'KERN_DEBUG',
'ESRCH',
'ENOMEM',
'EFAULT',
'EINVAL',
'LOG_BUF_LEN',
'stubTestcase',
'memTrack',
'tfork',
'tfork2',
'enumerate',
'count'
]
#
# Kernel printk log levels.
#
KERN_IDLE = -1
KERN_EMERG = 0 # system is unusable
KERN_ALERT = 1 # action must be taken immediately
KERN_CRIT = 2 # critical conditions
KERN_ERR = 3 # error conditions
KERN_WARNING = 4 # warning conditions
KERN_NOTICE = 5 # normal but significant condition
KERN_INFO = 6 # informational
KERN_DEBUG = 7 # debug-level messages
#
# Error codes
#
ESRCH = 3 # No such process
ENOMEM = 12 # Out of memory
EFAULT = 14 # Bad address
EINVAL = 22 # Invalid argument
#
# The size of the ring buffer is increased (modified 'printk.c')
# Therefore we need to read more using the dmesg command.
#
LOG_BUF_LEN = 1048576
TEMP_FOLDER = 'temp'
class stubTestcase(object):
"""Class that enbles running the memTrack tests outside a unittest"""
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise Exception, msg
def assert_(self, expr, msg=None):
"""Fail the test unless the expression is true."""
if not expr: raise Exception, msg
class memTrack(object):
"""Track kernel memory allocation and release."""
def __init__(self):
#
# Calculate the ioctl cmd number
#
MMLOG_MAGIC = 'r'
self.MMLOG_SET_LOG_LEVEL = _IOW(MMLOG_MAGIC, 0, 'int')
#
# Just in case:
#
try:
os.system(MMLOG_MODULE_UNLOAD)
except:
pass
out = os.popen('dmesg -c -s %d' % LOG_BUF_LEN)
out.close()
def open(self):
#
# Load the module
#
if os.system(MMLOG_MODULE_LOAD):
raise Exception('Failed loading the module\n')
self.f_dev = os.open(MMLOG_DEVICE_PATH, os.O_RDWR)
def __del__(self):
if self.f_dev:
self.close()
def close(self):
if self.f_dev:
fcntl.ioctl(self.f_dev, self.MMLOG_SET_LOG_LEVEL, KERN_IDLE)
os.close(self.f_dev)
self.f_dev = None
try:
os.system(MMLOG_MODULE_UNLOAD)
except:
pass
def track_func(self, func, args):
"""Track KMALLOC/KFREE calls during the exection of func with args"""
try:
self.start_track()
ret = func(*args)
finally:
self.end_track()
return ret
def start_track(self):
"""Start tracking KMALLOC/KFREE calls"""
self.open()
fcntl.ioctl(self.f_dev, self.MMLOG_SET_LOG_LEVEL, KERN_ALERT)
def end_track(self):
"""Stop tracking KMALLOC/KFREE calls"""
fcntl.ioctl(self.f_dev, self.MMLOG_SET_LOG_LEVEL, KERN_IDLE)
self.close()
def validate(self, tracked_pids, test_case=stubTestcase, debug=False):
"""Validate (previous) KMALLOC/KFREE calls of a set of tasks (pids)"""
out = os.popen('dmesg -c -s %d' % LOG_BUF_LEN)
dmesg_lines = out.readlines()
out.close()
allocations = []
memory_allocated = False
if debug:
f = open('mm_debug.txt', 'w+')
f.write('All KMALLOC/KFREE messages:\n\n')
f.write(''.join(dmesg_lines))
f.write('\nTracked pids: %s\nOnly relevant KMALLOC/KFREE messages:\n' % repr(tracked_pids))
for line in dmesg_lines:
re_result = re.search(r'.*?(KMALLOC|KFREE) (\d*) (\w*)', line)
if not re_result:
continue
action = re_result.group(1)
pid = int(re_result.group(2))
address = re_result.group(3)
if pid not in tracked_pids:
continue
f.write(line)
f.write('\nProcessing KMALLOC/KFREE messages:\n')
try:
for line in dmesg_lines:
re_result = re.search(r'.*?(KMALLOC|KFREE) (\d*) (\w*)', line)
if not re_result:
continue
action = re_result.group(1)
pid = int(re_result.group(2))
address = re_result.group(3)
if pid not in tracked_pids:
continue
if debug:
f.write(line)
if action == 'KMALLOC':
memory_allocated = True
if address in allocations:
test_case.fail('Same address, %s, allocated twice without release.' % address)
break
allocations.append(address)
if action == 'KFREE':
if address not in allocations:
test_case.fail('Freeing a non allocated address, %s.' % address)
break
allocations.remove(address)
else:
test_case.assert_(memory_allocated, 'No memory allocated during execution.')
test_case.assert_(not allocations, 'Failed to free some of the allocated memory, left %d:\n%s' % (len(allocations), '\n'.join(allocations)))
finally:
if debug:
f.close()
class tfork(object):
"""A convenient fork"""
def __init__(self):
"""Create a fork"""
self._ppid = os.getpid()
self._cpid = os.fork()
self._inchild = self._cpid == 0
self._fork_time = time.time()
self._exit_time = 0
self._exit_code = 0
if self._inchild:
self._cpid = os.getpid()
def _isChild(self):
return self._inchild
isChild = property(fget=_isChild)
def _get_cpid(self):
return self._cpid
cpid = property(fget=_get_cpid)
def _get_ppid(self):
return self._ppid
ppid = property(fget=_get_ppid)
def exit(self, status=0):
if not self._inchild:
raise TestError('Requested exit() in parent')
os._exit(status)
def wait(self):
if self._inchild:
raise TestError('Requested wait() in child')
pid, exit_code = os.wait()
self._exit_time = time.time()
self._exit_code = exit_code
def _get_fork_time(self):
return self._fork_time
fork_time = property(fget=_get_fork_time)
def _get_exit_time(self):
if self._inchild:
raise TestError('Exit time is available only in parent')
return self._exit_time
exit_time = property(fget=_get_exit_time)
def _get_exit_code(self):
if self._inchild:
raise TestError('Exit code is available only in parent')
return self._exit_code
exit_code = property(fget=_get_exit_code)
def safe_close(fd):
try:
os.close(fd)
except:
pass
class tfork2(tfork):
"""A convenient fork with two way pipe communication"""
RELEASE_MSG = 'release'
def __init__(self):
"""Create a fork"""
#
# One set of pipes is used for synchronization
# The general pipe is used for general communication of the test
#
self._pr_parent, self._pw_child = os.pipe()
self._pr_child, self._pw_parent = os.pipe()
self._general_pr_parent, self._general_pw_child = os.pipe()
self._general_pr_child, self._general_pw_parent = os.pipe()
super(tfork2, self).__init__()
self.close_other()
def close_self(self):
if self._inchild:
safe_close(self._pr_child)
safe_close(self._pw_child)
safe_close(self._general_pr_child)
safe_close(self._general_pw_child)
else:
safe_close(self._pr_parent)
safe_close(self._pw_parent)
safe_close(self._general_pr_parent)
safe_close(self._general_pw_parent)
def close_other(self):
if self._inchild:
safe_close(self._pr_parent)
safe_close(self._pw_parent)
safe_close(self._general_pr_parent)
safe_close(self._general_pw_parent)
else:
safe_close(self._pr_child)
safe_close(self._pw_child)
safe_close(self._general_pr_child)
safe_close(self._general_pw_child)
def exit(self, status=0):
if not self._inchild:
raise TestError('Requested exit() in parent')
self.close_self()
os._exit(status) | raise TestError('Requested wait() in child')
if self_close:
self.close_self()
pid, exit_code = os.wait()
self._exit_time = time.time()
self._exit_code = exit_code
def sync(self):
"""Wait for the other side of the fork to release this side"""
if self._inchild:
os.read(self._pr_child, len(self.RELEASE_MSG))
else:
os.read(self._pr_parent, len(self.RELEASE_MSG))
def release(self):
"""Release for the other side of the fork that is syncing this side"""
if self._inchild:
os.write(self._pw_child, self.RELEASE_MSG)
else:
os.write(self._pw_parent, self.RELEASE_MSG)
def send(self, message):
"""Send from this side of the pipe to the other side"""
if self._inchild:
os.write(self._general_pw_child, message)
else:
os.write(self._general_pw_parent, message)
def receive(self, message_length=100):
"""Receive a message sent from the other side of the pipe to the this side"""
if self._inchild:
return os.read(self._general_pr_child, message_length)
else:
return os.read(self._general_pr_parent, message_length)
def send2(self, message):
"""Send from this side of the pipe to the other side, synchronously (the function returns only when the other party signaled receive)"""
self.send(message)
self.sync(message)
def receive2(self, message_length=100):
"""Receive a message sent from the other side of the pipe to the this side, synchronously (let the other party know that recieved)"""
self.receive(message)
self.release(message)
def _wpipe(self):
"""Return the correct write side of the general pipe"""
if self._inchild:
return self._general_pw_child
else:
return self._general_pw_parent
def _rpipe(self):
"""Return the correct read side of the general pipe"""
if self._inchild:
return self._general_pr_child
else:
return self._general_pr_parent
wpipe = property(fget=_wpipe)
rpipe = property(fget=_rpipe)
def pickle_dump(self, data):
"""Dump data through the pipe. The data is sent using pickle binary format."""
os.write(self.wpipe, pickle.dumps(data, bin=True))
def pickle_load(self):
"""Load data from the pipe. The data is sent using pickle binary format."""
#
# I am wrapping the file descriptor because this way pickle
# returns on each data send separately (allowing for sending
# multiple data before reading).
# I close the file descriptor or else for some reason just
# closing the write side of the pipe doesn't raise an EOF
# in the read side.
#
if not hasattr(self, '_rf'):
self._rf = os.fdopen(os.dup(self.rpipe), 'r')
data = pickle.load(self._rf)
return data
def compile_extension(
test_folder,
submission_folder,
_globals
):
#
# Prepare a temporary folder with all necessary files.
import tempfile
temp_folder = tempfile.mktemp()
os.mkdir(temp_folder)
shutil.copy(os.path.join(submission_folder, _globals['__header_file__']), temp_folder)
for file in _globals['__extension_files__']:
shutil.copy(os.path.join(test_folder, file), temp_folder)
shutil.copy(os.path.abspath(sys.argv[0]), temp_folder)
#
# Compile the extension module and import it into the modules namespace
# Note:
# I am saving the sys.argv because the run_setup script overwrites them
# due to a bug
#
os.chdir(temp_folder)
from distutils.core import run_setup
save_argv = list(sys.argv)
run_setup('setup.py', script_args=['build_ext', '-b', temp_folder])
sys.argv = save_argv
if os.path.exists(os.path.join(temp_folder, _globals['__module_name__']+'.so')):
compile_success = True
sys.path.append(temp_folder)
else:
compile_success = False
sys.path.append(test_folder)
_globals.update({_globals['__module_name__']: __import__(_globals['__module_name__'])})
del sys.path[-1]
return compile_success
class ErrnoError(Exception):
def __init__(self, e, msg):
self.errno = e.errno
self.msg = msg
def __str__(self):
return '%s %s' % (errno.errorcode[self.errno], self.msg)
def enumerate(collection):
'Generates an indexed series: (0,coll[0]), (1,coll[1]) ...'
i = 0
it = iter(collection)
while 1:
yield (i, it.next())
i += 1
COUNT_FILE_NAME = os.path.join(os.path.expanduser('~'), 'last_count.txt')
def count(start=0, step=1, persistent=False):
# count(10) --> 10 11 12 13 14 ...
# count(2.5, 0.5) -> 2.5 3.0 3.5 ...
if persistent:
if os.path.exists(COUNT_FILE_NAME):
f = open(COUNT_FILE_NAME, mode='r')
start = int(f.read().strip()) + 1
f.close()
n = start
while True:
if persistent:
f = open(COUNT_FILE_NAME, mode='w')
f.write('%d' % n)
f.close()
yield n
n += step |
def wait(self, self_close=True):
if self._inchild: | random_line_split |
test_utils.py | #!/usr/bin/python
from __future__ import division, generators
from hwgrader.utils import TestError
from hwgrader.module_utils import _IOW
from hwgrader import MMLOG_DEVICE_PATH, MMLOG_MODULE_LOAD, MMLOG_MODULE_UNLOAD
import fcntl
import pickle
import shutil
import time
import errno
import sys
import os
import re
__all__ = [
'KERN_IDLE',
'KERN_EMERG',
'KERN_ALERT',
'KERN_CRIT',
'KERN_ERR',
'KERN_WARNING',
'KERN_NOTICE',
'KERN_INFO',
'KERN_DEBUG',
'ESRCH',
'ENOMEM',
'EFAULT',
'EINVAL',
'LOG_BUF_LEN',
'stubTestcase',
'memTrack',
'tfork',
'tfork2',
'enumerate',
'count'
]
#
# Kernel printk log levels.
#
KERN_IDLE = -1
KERN_EMERG = 0 # system is unusable
KERN_ALERT = 1 # action must be taken immediately
KERN_CRIT = 2 # critical conditions
KERN_ERR = 3 # error conditions
KERN_WARNING = 4 # warning conditions
KERN_NOTICE = 5 # normal but significant condition
KERN_INFO = 6 # informational
KERN_DEBUG = 7 # debug-level messages
#
# Error codes
#
ESRCH = 3 # No such process
ENOMEM = 12 # Out of memory
EFAULT = 14 # Bad address
EINVAL = 22 # Invalid argument
#
# The size of the ring buffer is increased (modified 'printk.c')
# Therefore we need to read more using the dmesg command.
#
LOG_BUF_LEN = 1048576
TEMP_FOLDER = 'temp'
class stubTestcase(object):
"""Class that enbles running the memTrack tests outside a unittest"""
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise Exception, msg
def assert_(self, expr, msg=None):
"""Fail the test unless the expression is true."""
if not expr: raise Exception, msg
class memTrack(object):
"""Track kernel memory allocation and release."""
def __init__(self):
#
# Calculate the ioctl cmd number
#
MMLOG_MAGIC = 'r'
self.MMLOG_SET_LOG_LEVEL = _IOW(MMLOG_MAGIC, 0, 'int')
#
# Just in case:
#
try:
os.system(MMLOG_MODULE_UNLOAD)
except:
pass
out = os.popen('dmesg -c -s %d' % LOG_BUF_LEN)
out.close()
def open(self):
#
# Load the module
#
if os.system(MMLOG_MODULE_LOAD):
raise Exception('Failed loading the module\n')
self.f_dev = os.open(MMLOG_DEVICE_PATH, os.O_RDWR)
def __del__(self):
if self.f_dev:
self.close()
def close(self):
if self.f_dev:
fcntl.ioctl(self.f_dev, self.MMLOG_SET_LOG_LEVEL, KERN_IDLE)
os.close(self.f_dev)
self.f_dev = None
try:
os.system(MMLOG_MODULE_UNLOAD)
except:
pass
def track_func(self, func, args):
"""Track KMALLOC/KFREE calls during the exection of func with args"""
try:
self.start_track()
ret = func(*args)
finally:
self.end_track()
return ret
def start_track(self):
"""Start tracking KMALLOC/KFREE calls"""
self.open()
fcntl.ioctl(self.f_dev, self.MMLOG_SET_LOG_LEVEL, KERN_ALERT)
def end_track(self):
"""Stop tracking KMALLOC/KFREE calls"""
fcntl.ioctl(self.f_dev, self.MMLOG_SET_LOG_LEVEL, KERN_IDLE)
self.close()
def validate(self, tracked_pids, test_case=stubTestcase, debug=False):
"""Validate (previous) KMALLOC/KFREE calls of a set of tasks (pids)"""
out = os.popen('dmesg -c -s %d' % LOG_BUF_LEN)
dmesg_lines = out.readlines()
out.close()
allocations = []
memory_allocated = False
if debug:
f = open('mm_debug.txt', 'w+')
f.write('All KMALLOC/KFREE messages:\n\n')
f.write(''.join(dmesg_lines))
f.write('\nTracked pids: %s\nOnly relevant KMALLOC/KFREE messages:\n' % repr(tracked_pids))
for line in dmesg_lines:
re_result = re.search(r'.*?(KMALLOC|KFREE) (\d*) (\w*)', line)
if not re_result:
continue
action = re_result.group(1)
pid = int(re_result.group(2))
address = re_result.group(3)
if pid not in tracked_pids:
continue
f.write(line)
f.write('\nProcessing KMALLOC/KFREE messages:\n')
try:
for line in dmesg_lines:
re_result = re.search(r'.*?(KMALLOC|KFREE) (\d*) (\w*)', line)
if not re_result:
continue
action = re_result.group(1)
pid = int(re_result.group(2))
address = re_result.group(3)
if pid not in tracked_pids:
continue
if debug:
f.write(line)
if action == 'KMALLOC':
memory_allocated = True
if address in allocations:
test_case.fail('Same address, %s, allocated twice without release.' % address)
break
allocations.append(address)
if action == 'KFREE':
if address not in allocations:
test_case.fail('Freeing a non allocated address, %s.' % address)
break
allocations.remove(address)
else:
test_case.assert_(memory_allocated, 'No memory allocated during execution.')
test_case.assert_(not allocations, 'Failed to free some of the allocated memory, left %d:\n%s' % (len(allocations), '\n'.join(allocations)))
finally:
if debug:
f.close()
class tfork(object):
"""A convenient fork"""
def __init__(self):
"""Create a fork"""
self._ppid = os.getpid()
self._cpid = os.fork()
self._inchild = self._cpid == 0
self._fork_time = time.time()
self._exit_time = 0
self._exit_code = 0
if self._inchild:
self._cpid = os.getpid()
def _isChild(self):
return self._inchild
isChild = property(fget=_isChild)
def _get_cpid(self):
return self._cpid
cpid = property(fget=_get_cpid)
def _get_ppid(self):
return self._ppid
ppid = property(fget=_get_ppid)
def exit(self, status=0):
if not self._inchild:
raise TestError('Requested exit() in parent')
os._exit(status)
def wait(self):
|
def _get_fork_time(self):
return self._fork_time
fork_time = property(fget=_get_fork_time)
def _get_exit_time(self):
if self._inchild:
raise TestError('Exit time is available only in parent')
return self._exit_time
exit_time = property(fget=_get_exit_time)
def _get_exit_code(self):
if self._inchild:
raise TestError('Exit code is available only in parent')
return self._exit_code
exit_code = property(fget=_get_exit_code)
def safe_close(fd):
try:
os.close(fd)
except:
pass
class tfork2(tfork):
"""A convenient fork with two way pipe communication"""
RELEASE_MSG = 'release'
def __init__(self):
"""Create a fork"""
#
# One set of pipes is used for synchronization
# The general pipe is used for general communication of the test
#
self._pr_parent, self._pw_child = os.pipe()
self._pr_child, self._pw_parent = os.pipe()
self._general_pr_parent, self._general_pw_child = os.pipe()
self._general_pr_child, self._general_pw_parent = os.pipe()
super(tfork2, self).__init__()
self.close_other()
def close_self(self):
if self._inchild:
safe_close(self._pr_child)
safe_close(self._pw_child)
safe_close(self._general_pr_child)
safe_close(self._general_pw_child)
else:
safe_close(self._pr_parent)
safe_close(self._pw_parent)
safe_close(self._general_pr_parent)
safe_close(self._general_pw_parent)
def close_other(self):
if self._inchild:
safe_close(self._pr_parent)
safe_close(self._pw_parent)
safe_close(self._general_pr_parent)
safe_close(self._general_pw_parent)
else:
safe_close(self._pr_child)
safe_close(self._pw_child)
safe_close(self._general_pr_child)
safe_close(self._general_pw_child)
def exit(self, status=0):
if not self._inchild:
raise TestError('Requested exit() in parent')
self.close_self()
os._exit(status)
def wait(self, self_close=True):
if self._inchild:
raise TestError('Requested wait() in child')
if self_close:
self.close_self()
pid, exit_code = os.wait()
self._exit_time = time.time()
self._exit_code = exit_code
def sync(self):
"""Wait for the other side of the fork to release this side"""
if self._inchild:
os.read(self._pr_child, len(self.RELEASE_MSG))
else:
os.read(self._pr_parent, len(self.RELEASE_MSG))
def release(self):
"""Release for the other side of the fork that is syncing this side"""
if self._inchild:
os.write(self._pw_child, self.RELEASE_MSG)
else:
os.write(self._pw_parent, self.RELEASE_MSG)
def send(self, message):
"""Send from this side of the pipe to the other side"""
if self._inchild:
os.write(self._general_pw_child, message)
else:
os.write(self._general_pw_parent, message)
def receive(self, message_length=100):
"""Receive a message sent from the other side of the pipe to the this side"""
if self._inchild:
return os.read(self._general_pr_child, message_length)
else:
return os.read(self._general_pr_parent, message_length)
def send2(self, message):
"""Send from this side of the pipe to the other side, synchronously (the function returns only when the other party signaled receive)"""
self.send(message)
self.sync(message)
def receive2(self, message_length=100):
"""Receive a message sent from the other side of the pipe to the this side, synchronously (let the other party know that recieved)"""
self.receive(message)
self.release(message)
def _wpipe(self):
"""Return the correct write side of the general pipe"""
if self._inchild:
return self._general_pw_child
else:
return self._general_pw_parent
def _rpipe(self):
"""Return the correct read side of the general pipe"""
if self._inchild:
return self._general_pr_child
else:
return self._general_pr_parent
wpipe = property(fget=_wpipe)
rpipe = property(fget=_rpipe)
def pickle_dump(self, data):
"""Dump data through the pipe. The data is sent using pickle binary format."""
os.write(self.wpipe, pickle.dumps(data, bin=True))
def pickle_load(self):
"""Load data from the pipe. The data is sent using pickle binary format."""
#
# I am wrapping the file descriptor because this way pickle
# returns on each data send separately (allowing for sending
# multiple data before reading).
# I close the file descriptor or else for some reason just
# closing the write side of the pipe doesn't raise an EOF
# in the read side.
#
if not hasattr(self, '_rf'):
self._rf = os.fdopen(os.dup(self.rpipe), 'r')
data = pickle.load(self._rf)
return data
def compile_extension(
test_folder,
submission_folder,
_globals
):
#
# Prepare a temporary folder with all necessary files.
import tempfile
temp_folder = tempfile.mktemp()
os.mkdir(temp_folder)
shutil.copy(os.path.join(submission_folder, _globals['__header_file__']), temp_folder)
for file in _globals['__extension_files__']:
shutil.copy(os.path.join(test_folder, file), temp_folder)
shutil.copy(os.path.abspath(sys.argv[0]), temp_folder)
#
# Compile the extension module and import it into the modules namespace
# Note:
# I am saving the sys.argv because the run_setup script overwrites them
# due to a bug
#
os.chdir(temp_folder)
from distutils.core import run_setup
save_argv = list(sys.argv)
run_setup('setup.py', script_args=['build_ext', '-b', temp_folder])
sys.argv = save_argv
if os.path.exists(os.path.join(temp_folder, _globals['__module_name__']+'.so')):
compile_success = True
sys.path.append(temp_folder)
else:
compile_success = False
sys.path.append(test_folder)
_globals.update({_globals['__module_name__']: __import__(_globals['__module_name__'])})
del sys.path[-1]
return compile_success
class ErrnoError(Exception):
def __init__(self, e, msg):
self.errno = e.errno
self.msg = msg
def __str__(self):
return '%s %s' % (errno.errorcode[self.errno], self.msg)
def enumerate(collection):
'Generates an indexed series: (0,coll[0]), (1,coll[1]) ...'
i = 0
it = iter(collection)
while 1:
yield (i, it.next())
i += 1
COUNT_FILE_NAME = os.path.join(os.path.expanduser('~'), 'last_count.txt')
def count(start=0, step=1, persistent=False):
# count(10) --> 10 11 12 13 14 ...
# count(2.5, 0.5) -> 2.5 3.0 3.5 ...
if persistent:
if os.path.exists(COUNT_FILE_NAME):
f = open(COUNT_FILE_NAME, mode='r')
start = int(f.read().strip()) + 1
f.close()
n = start
while True:
if persistent:
f = open(COUNT_FILE_NAME, mode='w')
f.write('%d' % n)
f.close()
yield n
n += step
| if self._inchild:
raise TestError('Requested wait() in child')
pid, exit_code = os.wait()
self._exit_time = time.time()
self._exit_code = exit_code | identifier_body |
test_utils.py | #!/usr/bin/python
from __future__ import division, generators
from hwgrader.utils import TestError
from hwgrader.module_utils import _IOW
from hwgrader import MMLOG_DEVICE_PATH, MMLOG_MODULE_LOAD, MMLOG_MODULE_UNLOAD
import fcntl
import pickle
import shutil
import time
import errno
import sys
import os
import re
__all__ = [
'KERN_IDLE',
'KERN_EMERG',
'KERN_ALERT',
'KERN_CRIT',
'KERN_ERR',
'KERN_WARNING',
'KERN_NOTICE',
'KERN_INFO',
'KERN_DEBUG',
'ESRCH',
'ENOMEM',
'EFAULT',
'EINVAL',
'LOG_BUF_LEN',
'stubTestcase',
'memTrack',
'tfork',
'tfork2',
'enumerate',
'count'
]
#
# Kernel printk log levels.
#
KERN_IDLE = -1
KERN_EMERG = 0 # system is unusable
KERN_ALERT = 1 # action must be taken immediately
KERN_CRIT = 2 # critical conditions
KERN_ERR = 3 # error conditions
KERN_WARNING = 4 # warning conditions
KERN_NOTICE = 5 # normal but significant condition
KERN_INFO = 6 # informational
KERN_DEBUG = 7 # debug-level messages
#
# Error codes
#
ESRCH = 3 # No such process
ENOMEM = 12 # Out of memory
EFAULT = 14 # Bad address
EINVAL = 22 # Invalid argument
#
# The size of the ring buffer is increased (modified 'printk.c')
# Therefore we need to read more using the dmesg command.
#
LOG_BUF_LEN = 1048576
TEMP_FOLDER = 'temp'
class stubTestcase(object):
"""Class that enbles running the memTrack tests outside a unittest"""
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise Exception, msg
def assert_(self, expr, msg=None):
"""Fail the test unless the expression is true."""
if not expr: raise Exception, msg
class memTrack(object):
"""Track kernel memory allocation and release."""
def __init__(self):
#
# Calculate the ioctl cmd number
#
MMLOG_MAGIC = 'r'
self.MMLOG_SET_LOG_LEVEL = _IOW(MMLOG_MAGIC, 0, 'int')
#
# Just in case:
#
try:
os.system(MMLOG_MODULE_UNLOAD)
except:
pass
out = os.popen('dmesg -c -s %d' % LOG_BUF_LEN)
out.close()
def open(self):
#
# Load the module
#
if os.system(MMLOG_MODULE_LOAD):
raise Exception('Failed loading the module\n')
self.f_dev = os.open(MMLOG_DEVICE_PATH, os.O_RDWR)
def __del__(self):
if self.f_dev:
self.close()
def close(self):
if self.f_dev:
fcntl.ioctl(self.f_dev, self.MMLOG_SET_LOG_LEVEL, KERN_IDLE)
os.close(self.f_dev)
self.f_dev = None
try:
os.system(MMLOG_MODULE_UNLOAD)
except:
pass
def track_func(self, func, args):
"""Track KMALLOC/KFREE calls during the exection of func with args"""
try:
self.start_track()
ret = func(*args)
finally:
self.end_track()
return ret
def start_track(self):
"""Start tracking KMALLOC/KFREE calls"""
self.open()
fcntl.ioctl(self.f_dev, self.MMLOG_SET_LOG_LEVEL, KERN_ALERT)
def end_track(self):
"""Stop tracking KMALLOC/KFREE calls"""
fcntl.ioctl(self.f_dev, self.MMLOG_SET_LOG_LEVEL, KERN_IDLE)
self.close()
def validate(self, tracked_pids, test_case=stubTestcase, debug=False):
"""Validate (previous) KMALLOC/KFREE calls of a set of tasks (pids)"""
out = os.popen('dmesg -c -s %d' % LOG_BUF_LEN)
dmesg_lines = out.readlines()
out.close()
allocations = []
memory_allocated = False
if debug:
f = open('mm_debug.txt', 'w+')
f.write('All KMALLOC/KFREE messages:\n\n')
f.write(''.join(dmesg_lines))
f.write('\nTracked pids: %s\nOnly relevant KMALLOC/KFREE messages:\n' % repr(tracked_pids))
for line in dmesg_lines:
re_result = re.search(r'.*?(KMALLOC|KFREE) (\d*) (\w*)', line)
if not re_result:
|
action = re_result.group(1)
pid = int(re_result.group(2))
address = re_result.group(3)
if pid not in tracked_pids:
continue
f.write(line)
f.write('\nProcessing KMALLOC/KFREE messages:\n')
try:
for line in dmesg_lines:
re_result = re.search(r'.*?(KMALLOC|KFREE) (\d*) (\w*)', line)
if not re_result:
continue
action = re_result.group(1)
pid = int(re_result.group(2))
address = re_result.group(3)
if pid not in tracked_pids:
continue
if debug:
f.write(line)
if action == 'KMALLOC':
memory_allocated = True
if address in allocations:
test_case.fail('Same address, %s, allocated twice without release.' % address)
break
allocations.append(address)
if action == 'KFREE':
if address not in allocations:
test_case.fail('Freeing a non allocated address, %s.' % address)
break
allocations.remove(address)
else:
test_case.assert_(memory_allocated, 'No memory allocated during execution.')
test_case.assert_(not allocations, 'Failed to free some of the allocated memory, left %d:\n%s' % (len(allocations), '\n'.join(allocations)))
finally:
if debug:
f.close()
class tfork(object):
"""A convenient fork"""
def __init__(self):
"""Create a fork"""
self._ppid = os.getpid()
self._cpid = os.fork()
self._inchild = self._cpid == 0
self._fork_time = time.time()
self._exit_time = 0
self._exit_code = 0
if self._inchild:
self._cpid = os.getpid()
def _isChild(self):
return self._inchild
isChild = property(fget=_isChild)
def _get_cpid(self):
return self._cpid
cpid = property(fget=_get_cpid)
def _get_ppid(self):
return self._ppid
ppid = property(fget=_get_ppid)
def exit(self, status=0):
if not self._inchild:
raise TestError('Requested exit() in parent')
os._exit(status)
def wait(self):
if self._inchild:
raise TestError('Requested wait() in child')
pid, exit_code = os.wait()
self._exit_time = time.time()
self._exit_code = exit_code
def _get_fork_time(self):
return self._fork_time
fork_time = property(fget=_get_fork_time)
def _get_exit_time(self):
if self._inchild:
raise TestError('Exit time is available only in parent')
return self._exit_time
exit_time = property(fget=_get_exit_time)
def _get_exit_code(self):
if self._inchild:
raise TestError('Exit code is available only in parent')
return self._exit_code
exit_code = property(fget=_get_exit_code)
def safe_close(fd):
try:
os.close(fd)
except:
pass
class tfork2(tfork):
"""A convenient fork with two way pipe communication"""
RELEASE_MSG = 'release'
def __init__(self):
"""Create a fork"""
#
# One set of pipes is used for synchronization
# The general pipe is used for general communication of the test
#
self._pr_parent, self._pw_child = os.pipe()
self._pr_child, self._pw_parent = os.pipe()
self._general_pr_parent, self._general_pw_child = os.pipe()
self._general_pr_child, self._general_pw_parent = os.pipe()
super(tfork2, self).__init__()
self.close_other()
def close_self(self):
if self._inchild:
safe_close(self._pr_child)
safe_close(self._pw_child)
safe_close(self._general_pr_child)
safe_close(self._general_pw_child)
else:
safe_close(self._pr_parent)
safe_close(self._pw_parent)
safe_close(self._general_pr_parent)
safe_close(self._general_pw_parent)
def close_other(self):
if self._inchild:
safe_close(self._pr_parent)
safe_close(self._pw_parent)
safe_close(self._general_pr_parent)
safe_close(self._general_pw_parent)
else:
safe_close(self._pr_child)
safe_close(self._pw_child)
safe_close(self._general_pr_child)
safe_close(self._general_pw_child)
def exit(self, status=0):
if not self._inchild:
raise TestError('Requested exit() in parent')
self.close_self()
os._exit(status)
def wait(self, self_close=True):
if self._inchild:
raise TestError('Requested wait() in child')
if self_close:
self.close_self()
pid, exit_code = os.wait()
self._exit_time = time.time()
self._exit_code = exit_code
def sync(self):
"""Wait for the other side of the fork to release this side"""
if self._inchild:
os.read(self._pr_child, len(self.RELEASE_MSG))
else:
os.read(self._pr_parent, len(self.RELEASE_MSG))
def release(self):
"""Release for the other side of the fork that is syncing this side"""
if self._inchild:
os.write(self._pw_child, self.RELEASE_MSG)
else:
os.write(self._pw_parent, self.RELEASE_MSG)
def send(self, message):
"""Send from this side of the pipe to the other side"""
if self._inchild:
os.write(self._general_pw_child, message)
else:
os.write(self._general_pw_parent, message)
def receive(self, message_length=100):
"""Receive a message sent from the other side of the pipe to the this side"""
if self._inchild:
return os.read(self._general_pr_child, message_length)
else:
return os.read(self._general_pr_parent, message_length)
def send2(self, message):
"""Send from this side of the pipe to the other side, synchronously (the function returns only when the other party signaled receive)"""
self.send(message)
self.sync(message)
def receive2(self, message_length=100):
"""Receive a message sent from the other side of the pipe to the this side, synchronously (let the other party know that recieved)"""
self.receive(message)
self.release(message)
def _wpipe(self):
"""Return the correct write side of the general pipe"""
if self._inchild:
return self._general_pw_child
else:
return self._general_pw_parent
def _rpipe(self):
"""Return the correct read side of the general pipe"""
if self._inchild:
return self._general_pr_child
else:
return self._general_pr_parent
wpipe = property(fget=_wpipe)
rpipe = property(fget=_rpipe)
def pickle_dump(self, data):
"""Dump data through the pipe. The data is sent using pickle binary format."""
os.write(self.wpipe, pickle.dumps(data, bin=True))
def pickle_load(self):
"""Load data from the pipe. The data is sent using pickle binary format."""
#
# I am wrapping the file descriptor because this way pickle
# returns on each data send separately (allowing for sending
# multiple data before reading).
# I close the file descriptor or else for some reason just
# closing the write side of the pipe doesn't raise an EOF
# in the read side.
#
if not hasattr(self, '_rf'):
self._rf = os.fdopen(os.dup(self.rpipe), 'r')
data = pickle.load(self._rf)
return data
def compile_extension(
test_folder,
submission_folder,
_globals
):
#
# Prepare a temporary folder with all necessary files.
import tempfile
temp_folder = tempfile.mktemp()
os.mkdir(temp_folder)
shutil.copy(os.path.join(submission_folder, _globals['__header_file__']), temp_folder)
for file in _globals['__extension_files__']:
shutil.copy(os.path.join(test_folder, file), temp_folder)
shutil.copy(os.path.abspath(sys.argv[0]), temp_folder)
#
# Compile the extension module and import it into the modules namespace
# Note:
# I am saving the sys.argv because the run_setup script overwrites them
# due to a bug
#
os.chdir(temp_folder)
from distutils.core import run_setup
save_argv = list(sys.argv)
run_setup('setup.py', script_args=['build_ext', '-b', temp_folder])
sys.argv = save_argv
if os.path.exists(os.path.join(temp_folder, _globals['__module_name__']+'.so')):
compile_success = True
sys.path.append(temp_folder)
else:
compile_success = False
sys.path.append(test_folder)
_globals.update({_globals['__module_name__']: __import__(_globals['__module_name__'])})
del sys.path[-1]
return compile_success
class ErrnoError(Exception):
def __init__(self, e, msg):
self.errno = e.errno
self.msg = msg
def __str__(self):
return '%s %s' % (errno.errorcode[self.errno], self.msg)
def enumerate(collection):
'Generates an indexed series: (0,coll[0]), (1,coll[1]) ...'
i = 0
it = iter(collection)
while 1:
yield (i, it.next())
i += 1
COUNT_FILE_NAME = os.path.join(os.path.expanduser('~'), 'last_count.txt')
def count(start=0, step=1, persistent=False):
# count(10) --> 10 11 12 13 14 ...
# count(2.5, 0.5) -> 2.5 3.0 3.5 ...
if persistent:
if os.path.exists(COUNT_FILE_NAME):
f = open(COUNT_FILE_NAME, mode='r')
start = int(f.read().strip()) + 1
f.close()
n = start
while True:
if persistent:
f = open(COUNT_FILE_NAME, mode='w')
f.write('%d' % n)
f.close()
yield n
n += step
| continue | conditional_block |
test_utils.py | #!/usr/bin/python
from __future__ import division, generators
from hwgrader.utils import TestError
from hwgrader.module_utils import _IOW
from hwgrader import MMLOG_DEVICE_PATH, MMLOG_MODULE_LOAD, MMLOG_MODULE_UNLOAD
import fcntl
import pickle
import shutil
import time
import errno
import sys
import os
import re
__all__ = [
'KERN_IDLE',
'KERN_EMERG',
'KERN_ALERT',
'KERN_CRIT',
'KERN_ERR',
'KERN_WARNING',
'KERN_NOTICE',
'KERN_INFO',
'KERN_DEBUG',
'ESRCH',
'ENOMEM',
'EFAULT',
'EINVAL',
'LOG_BUF_LEN',
'stubTestcase',
'memTrack',
'tfork',
'tfork2',
'enumerate',
'count'
]
#
# Kernel printk log levels.
#
KERN_IDLE = -1
KERN_EMERG = 0 # system is unusable
KERN_ALERT = 1 # action must be taken immediately
KERN_CRIT = 2 # critical conditions
KERN_ERR = 3 # error conditions
KERN_WARNING = 4 # warning conditions
KERN_NOTICE = 5 # normal but significant condition
KERN_INFO = 6 # informational
KERN_DEBUG = 7 # debug-level messages
#
# Error codes
#
ESRCH = 3 # No such process
ENOMEM = 12 # Out of memory
EFAULT = 14 # Bad address
EINVAL = 22 # Invalid argument
#
# The size of the ring buffer is increased (modified 'printk.c')
# Therefore we need to read more using the dmesg command.
#
LOG_BUF_LEN = 1048576
TEMP_FOLDER = 'temp'
class stubTestcase(object):
"""Class that enbles running the memTrack tests outside a unittest"""
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise Exception, msg
def assert_(self, expr, msg=None):
"""Fail the test unless the expression is true."""
if not expr: raise Exception, msg
class memTrack(object):
"""Track kernel memory allocation and release."""
def __init__(self):
#
# Calculate the ioctl cmd number
#
MMLOG_MAGIC = 'r'
self.MMLOG_SET_LOG_LEVEL = _IOW(MMLOG_MAGIC, 0, 'int')
#
# Just in case:
#
try:
os.system(MMLOG_MODULE_UNLOAD)
except:
pass
out = os.popen('dmesg -c -s %d' % LOG_BUF_LEN)
out.close()
def open(self):
#
# Load the module
#
if os.system(MMLOG_MODULE_LOAD):
raise Exception('Failed loading the module\n')
self.f_dev = os.open(MMLOG_DEVICE_PATH, os.O_RDWR)
def __del__(self):
if self.f_dev:
self.close()
def close(self):
if self.f_dev:
fcntl.ioctl(self.f_dev, self.MMLOG_SET_LOG_LEVEL, KERN_IDLE)
os.close(self.f_dev)
self.f_dev = None
try:
os.system(MMLOG_MODULE_UNLOAD)
except:
pass
def track_func(self, func, args):
"""Track KMALLOC/KFREE calls during the exection of func with args"""
try:
self.start_track()
ret = func(*args)
finally:
self.end_track()
return ret
def start_track(self):
"""Start tracking KMALLOC/KFREE calls"""
self.open()
fcntl.ioctl(self.f_dev, self.MMLOG_SET_LOG_LEVEL, KERN_ALERT)
def end_track(self):
"""Stop tracking KMALLOC/KFREE calls"""
fcntl.ioctl(self.f_dev, self.MMLOG_SET_LOG_LEVEL, KERN_IDLE)
self.close()
def validate(self, tracked_pids, test_case=stubTestcase, debug=False):
"""Validate (previous) KMALLOC/KFREE calls of a set of tasks (pids)"""
out = os.popen('dmesg -c -s %d' % LOG_BUF_LEN)
dmesg_lines = out.readlines()
out.close()
allocations = []
memory_allocated = False
if debug:
f = open('mm_debug.txt', 'w+')
f.write('All KMALLOC/KFREE messages:\n\n')
f.write(''.join(dmesg_lines))
f.write('\nTracked pids: %s\nOnly relevant KMALLOC/KFREE messages:\n' % repr(tracked_pids))
for line in dmesg_lines:
re_result = re.search(r'.*?(KMALLOC|KFREE) (\d*) (\w*)', line)
if not re_result:
continue
action = re_result.group(1)
pid = int(re_result.group(2))
address = re_result.group(3)
if pid not in tracked_pids:
continue
f.write(line)
f.write('\nProcessing KMALLOC/KFREE messages:\n')
try:
for line in dmesg_lines:
re_result = re.search(r'.*?(KMALLOC|KFREE) (\d*) (\w*)', line)
if not re_result:
continue
action = re_result.group(1)
pid = int(re_result.group(2))
address = re_result.group(3)
if pid not in tracked_pids:
continue
if debug:
f.write(line)
if action == 'KMALLOC':
memory_allocated = True
if address in allocations:
test_case.fail('Same address, %s, allocated twice without release.' % address)
break
allocations.append(address)
if action == 'KFREE':
if address not in allocations:
test_case.fail('Freeing a non allocated address, %s.' % address)
break
allocations.remove(address)
else:
test_case.assert_(memory_allocated, 'No memory allocated during execution.')
test_case.assert_(not allocations, 'Failed to free some of the allocated memory, left %d:\n%s' % (len(allocations), '\n'.join(allocations)))
finally:
if debug:
f.close()
class tfork(object):
"""A convenient fork"""
def __init__(self):
"""Create a fork"""
self._ppid = os.getpid()
self._cpid = os.fork()
self._inchild = self._cpid == 0
self._fork_time = time.time()
self._exit_time = 0
self._exit_code = 0
if self._inchild:
self._cpid = os.getpid()
def _isChild(self):
return self._inchild
isChild = property(fget=_isChild)
def | (self):
return self._cpid
cpid = property(fget=_get_cpid)
def _get_ppid(self):
return self._ppid
ppid = property(fget=_get_ppid)
def exit(self, status=0):
if not self._inchild:
raise TestError('Requested exit() in parent')
os._exit(status)
def wait(self):
if self._inchild:
raise TestError('Requested wait() in child')
pid, exit_code = os.wait()
self._exit_time = time.time()
self._exit_code = exit_code
def _get_fork_time(self):
return self._fork_time
fork_time = property(fget=_get_fork_time)
def _get_exit_time(self):
if self._inchild:
raise TestError('Exit time is available only in parent')
return self._exit_time
exit_time = property(fget=_get_exit_time)
def _get_exit_code(self):
if self._inchild:
raise TestError('Exit code is available only in parent')
return self._exit_code
exit_code = property(fget=_get_exit_code)
def safe_close(fd):
try:
os.close(fd)
except:
pass
class tfork2(tfork):
"""A convenient fork with two way pipe communication"""
RELEASE_MSG = 'release'
def __init__(self):
"""Create a fork"""
#
# One set of pipes is used for synchronization
# The general pipe is used for general communication of the test
#
self._pr_parent, self._pw_child = os.pipe()
self._pr_child, self._pw_parent = os.pipe()
self._general_pr_parent, self._general_pw_child = os.pipe()
self._general_pr_child, self._general_pw_parent = os.pipe()
super(tfork2, self).__init__()
self.close_other()
def close_self(self):
if self._inchild:
safe_close(self._pr_child)
safe_close(self._pw_child)
safe_close(self._general_pr_child)
safe_close(self._general_pw_child)
else:
safe_close(self._pr_parent)
safe_close(self._pw_parent)
safe_close(self._general_pr_parent)
safe_close(self._general_pw_parent)
def close_other(self):
if self._inchild:
safe_close(self._pr_parent)
safe_close(self._pw_parent)
safe_close(self._general_pr_parent)
safe_close(self._general_pw_parent)
else:
safe_close(self._pr_child)
safe_close(self._pw_child)
safe_close(self._general_pr_child)
safe_close(self._general_pw_child)
def exit(self, status=0):
if not self._inchild:
raise TestError('Requested exit() in parent')
self.close_self()
os._exit(status)
def wait(self, self_close=True):
if self._inchild:
raise TestError('Requested wait() in child')
if self_close:
self.close_self()
pid, exit_code = os.wait()
self._exit_time = time.time()
self._exit_code = exit_code
def sync(self):
"""Wait for the other side of the fork to release this side"""
if self._inchild:
os.read(self._pr_child, len(self.RELEASE_MSG))
else:
os.read(self._pr_parent, len(self.RELEASE_MSG))
def release(self):
"""Release for the other side of the fork that is syncing this side"""
if self._inchild:
os.write(self._pw_child, self.RELEASE_MSG)
else:
os.write(self._pw_parent, self.RELEASE_MSG)
def send(self, message):
"""Send from this side of the pipe to the other side"""
if self._inchild:
os.write(self._general_pw_child, message)
else:
os.write(self._general_pw_parent, message)
def receive(self, message_length=100):
"""Receive a message sent from the other side of the pipe to the this side"""
if self._inchild:
return os.read(self._general_pr_child, message_length)
else:
return os.read(self._general_pr_parent, message_length)
def send2(self, message):
"""Send from this side of the pipe to the other side, synchronously (the function returns only when the other party signaled receive)"""
self.send(message)
self.sync(message)
def receive2(self, message_length=100):
"""Receive a message sent from the other side of the pipe to the this side, synchronously (let the other party know that recieved)"""
self.receive(message)
self.release(message)
def _wpipe(self):
"""Return the correct write side of the general pipe"""
if self._inchild:
return self._general_pw_child
else:
return self._general_pw_parent
def _rpipe(self):
"""Return the correct read side of the general pipe"""
if self._inchild:
return self._general_pr_child
else:
return self._general_pr_parent
wpipe = property(fget=_wpipe)
rpipe = property(fget=_rpipe)
def pickle_dump(self, data):
"""Dump data through the pipe. The data is sent using pickle binary format."""
os.write(self.wpipe, pickle.dumps(data, bin=True))
def pickle_load(self):
"""Load data from the pipe. The data is sent using pickle binary format."""
#
# I am wrapping the file descriptor because this way pickle
# returns on each data send separately (allowing for sending
# multiple data before reading).
# I close the file descriptor or else for some reason just
# closing the write side of the pipe doesn't raise an EOF
# in the read side.
#
if not hasattr(self, '_rf'):
self._rf = os.fdopen(os.dup(self.rpipe), 'r')
data = pickle.load(self._rf)
return data
def compile_extension(
test_folder,
submission_folder,
_globals
):
#
# Prepare a temporary folder with all necessary files.
import tempfile
temp_folder = tempfile.mktemp()
os.mkdir(temp_folder)
shutil.copy(os.path.join(submission_folder, _globals['__header_file__']), temp_folder)
for file in _globals['__extension_files__']:
shutil.copy(os.path.join(test_folder, file), temp_folder)
shutil.copy(os.path.abspath(sys.argv[0]), temp_folder)
#
# Compile the extension module and import it into the modules namespace
# Note:
# I am saving the sys.argv because the run_setup script overwrites them
# due to a bug
#
os.chdir(temp_folder)
from distutils.core import run_setup
save_argv = list(sys.argv)
run_setup('setup.py', script_args=['build_ext', '-b', temp_folder])
sys.argv = save_argv
if os.path.exists(os.path.join(temp_folder, _globals['__module_name__']+'.so')):
compile_success = True
sys.path.append(temp_folder)
else:
compile_success = False
sys.path.append(test_folder)
_globals.update({_globals['__module_name__']: __import__(_globals['__module_name__'])})
del sys.path[-1]
return compile_success
class ErrnoError(Exception):
def __init__(self, e, msg):
self.errno = e.errno
self.msg = msg
def __str__(self):
return '%s %s' % (errno.errorcode[self.errno], self.msg)
def enumerate(collection):
'Generates an indexed series: (0,coll[0]), (1,coll[1]) ...'
i = 0
it = iter(collection)
while 1:
yield (i, it.next())
i += 1
COUNT_FILE_NAME = os.path.join(os.path.expanduser('~'), 'last_count.txt')
def count(start=0, step=1, persistent=False):
# count(10) --> 10 11 12 13 14 ...
# count(2.5, 0.5) -> 2.5 3.0 3.5 ...
if persistent:
if os.path.exists(COUNT_FILE_NAME):
f = open(COUNT_FILE_NAME, mode='r')
start = int(f.read().strip()) + 1
f.close()
n = start
while True:
if persistent:
f = open(COUNT_FILE_NAME, mode='w')
f.write('%d' % n)
f.close()
yield n
n += step
| _get_cpid | identifier_name |
network.rs | extern crate reactivers;
extern crate rand;
use reactivers::engine::process::*;
use reactivers::engine::signal::*;
use reactivers::engine::signal::spmc_signal::*;
use reactivers::engine::signal::mpsc_signal::*;
use std::sync::Arc;
use std::fs::File;
use std::io::prelude::*;
use self::rand::Rng;
use super::graph::*;
use super::car::*;
use super::road::*;
// These constant directions are used to index roads and nodes at a crossroad.
const NORTH: usize = 0;
const EAST: usize = 1;
const SOUTH: usize = 2;
const WEST: usize = 3;
pub type Side = usize;
const LEFT: usize = 0;
const RIGHT: usize = 1;
/// Global update network information.
pub struct GlobalInfo {
pub weights: EdgesWeight, // Last estimation of the edges weights.
pub moves: Vec<Move>, // Moves of all the cars.
}
/// Move of a car.
#[derive(Copy, Clone)]
pub enum Move {
NONE, // None happened.
SPAWN(RoadInfo, usize, CrossroadId), // The car has spawned at specified road, position,
// with specified destination crossroad.
STEP(i32), // The car performs a step of specified length.
VANISH, // The car vanished.
CROSS(RoadInfo), // The car crossed and is now on specified road.
}
/// Network structure containing all the information relative to crossroads and roads.
#[derive(Clone)]
pub struct Network {
pub width: usize, // Width of the network.
pub height: usize, // Height of the network.
pub car_count: usize, // Number of cars.
pub cars_per_unit: i32, // Number of cars between two centers of crossroads.
pub cars_per_crossroad: i32, // Number of cars fitting in a crossroad.
grid: Vec<Vec<Option<Crossroad>>>, // Grid containing the crossroads.
pub roads: Vec<Road>, // Vector containing the roads.
graph: Graph, // Corresponding abstract graph.
car_graph: Option<Arc<Graph>>, // Shared reference to the same graph.
pub crossroads: Vec<CrossroadId>, // Vector containing all the coordinates of existing
// crossroads.
}
/// Crossroad Coordinates.
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct CrossroadId {
pub x: usize, // Abscissa
pub y: usize, // Ordinate
}
use std::ops::{ Index, IndexMut };
/// Allows indexing the grid by the crossroad coordinates.
impl Index<CrossroadId> for Vec<Vec<Option<Crossroad>>> {
type Output = Option<Crossroad>;
#[inline]
fn index(&self, index: CrossroadId) -> &Option<Crossroad> {
&self[index.y][index.x]
}
}
/// Allows mutable indexing of the grid by the crossroad coordinates.
impl IndexMut<CrossroadId> for Vec<Vec<Option<Crossroad>>> {
#[inline]
fn index_mut(&mut self, index: CrossroadId) -> &mut Option<Crossroad> {
&mut self[index.y][index.x]
}
}
use std::fmt;
impl fmt::Display for CrossroadId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
use std::ops::Add;
/// Allows to add some move to crossroad coordinates.
impl Add<(i32, i32)> for CrossroadId {
type Output = CrossroadId;
fn add(self, (x, y): (i32, i32)) -> CrossroadId {
CrossroadId {
x: (self.x as i32 + x) as usize,
y: (self.y as i32 + y) as usize,
}
}
}
impl CrossroadId {
/// Creates new crossroad identifier.
pub fn new(x: usize, y: usize) -> CrossroadId {
CrossroadId { x, y }
}
/// Computes the unit move (dx, dy) and the length to join the destination crossroad.
pub fn join(&self, dest: CrossroadId) -> (i32, i32, i32) {
if self.x == dest.x {
let dy = (dest.y as i32) - (self.y as i32);
let len = i32::abs(dy);
(0, dy / len, len)
}
else if self.y == dest.y {
let dx = (dest.x as i32) - (self.x as i32);
let len = i32::abs(dx);
(dx / len, 0, len)
}
else {
panic!("Crossroads {} and {} are not linkable.", self, dest);
}
}
}
/// A Crossroad.
#[derive(Clone)]
pub struct Crossroad {
id: CrossroadId, // Coordinates
pub nodes: Vec<NodeId>, // Vector of its 4 quarter nodes.
// They are indexed by direction.
roads: Vec<Vec<Option<RoadId>>>, // Roads leaving this crossroad.
// They are indexed by direction and side.
roads_arriving: Vec<Vec<Option<RoadId>>>, // Roads arriving at this crossroad.
// They are indexed by direction and side.
}
impl Crossroad {
/// Creates a new crossroad with four nodes without any roads.
pub fn new(id: CrossroadId, g: &mut Graph) -> Crossroad {
let mut c = Crossroad {
id,
nodes: vec!(),
roads: none_array(4, 2),
roads_arriving: none_array(4, 2),
};
for _ in 0..4 {
c.nodes.push(g.add_node(c.id));
}
c
}
/// Enables some roads. Only the cars from enabled roads are able to cross a crossroad.
fn enable_path(&self, roads: &mut Vec<Road>) {
// First policy: we enable the most loaded road with some guy waiting.
// let mut max = -1;
// let mut r_max = 0;
// for r in self.existing_roads_arriving() {
// if roads[r].is_waiting() && roads[r].get_car_count() > max {
// r_max = r;
// max = roads[r].get_car_count();
// }
// }
// roads[r_max].enable();
// Second policy: we enable the most loaded roads with guys waiting, but in pairs.
// We compute the pair of compatible roads with the maximum cumulated load.
let mut max_pair = ((NORTH, LEFT), (NORTH, LEFT));
let mut max_load = 0;
for d in 0..4 {
for s in 0..2 {
for x in 0..2 {
let (d2, s2) = {
if x == 0 {
(d, 1 - s)
}
else {
((d + 2) % 4, s)
}
};
let load = self.compute_load(d, s, roads) +
self.compute_load(d2, s2, roads);
if load > max_load {
max_load = load;
max_pair = ((d, s), (d2, s2));
}
}
}
}
let ((d1, s1), (d2, s2)) = max_pair;
if self.roads_arriving[d1][s1].is_some() {
roads[self.roads_arriving[d1][s1].unwrap()].enable();
}
if self.roads_arriving[d2][s2].is_some() {
roads[self.roads_arriving[d2][s2].unwrap()].enable();
}
}
/// Computes the load of a road, i.e. the numbers of cars on this road.
/// If there is no car ready to cross, returns 0.
fn compute_load(&self, direction: usize, side: usize, roads: &mut Vec<Road>) -> i32 {
let r = self.roads_arriving[direction][side];
if r.is_none() || !roads[r.unwrap()].is_waiting() {
return 0;
}
return roads[r.unwrap()].get_car_count();
}
}
impl Network {
/// Creates a new empty Network, with specified width and heights.
pub fn new(width: usize, height: usize) -> Network {
Network {
width,
height,
car_count: 0,
cars_per_unit: 10,
cars_per_crossroad: 4,
grid: none_array(height, width),
roads: vec!(),
graph: Graph::new(),
car_graph: None,
crossroads: vec!(),
}
}
/// Adds a crossroad to specified location.
pub fn add_crossroad(&mut self, x: usize, y: usize) {
let c = CrossroadId::new(x, y);
// We check the crossroad does not exist.
self.assert_crossroad_not_exists(c);
// We add it to the graph and update the network.
self.grid[c] = Some(Crossroad::new(c, &mut self.graph));
self.crossroads.push(c);
}
/// Adds a new specific road.
pub fn new_road(&mut self, src: CrossroadId, dest: CrossroadId, side: Side){
// We get the parameters of the road.
let (dx, dy, length) = src.join(dest);
let length = length * self.cars_per_unit - self.cars_per_crossroad;
let (d1, d2) = compute_directions(dx, dy, side);
let id = self.roads.len();
// First, it builds the road in the network.
let road_info = RoadInfo {
id,
start: src,
end: dest,
side,
destination: self.crossroad(dest).nodes[d2],
length: length as usize,
};
// Then, we add it to the crossroads and the roads.
let road = Road::new(road_info);
self.roads.push(road);
self.crossroad_mut(src).roads[d1][side] = Some(id);
self.crossroad_mut(dest).roads_arriving[d1][side] = Some(id);
// Then, it builds the two corresponding edges in the graph.
let (n1, n2) = {
let c = self.crossroad(src);
(c.nodes[d1], c.nodes[previous_direction(d1)])
};
let n3 = self.crossroad(dest).nodes[d2];
self.graph.add_edge(n1, n3, id);
self.graph.add_edge(n2, n3, id);
}
/// Add the two road linking the first crossroad to the second one.
pub fn add_road(&mut self, (src_x, src_y): (usize, usize), (dest_x, dest_y): (usize, usize)) {
let (src, dest) =
(CrossroadId::new(src_x, src_y), CrossroadId::new(dest_x, dest_y));
// Checks the source and destination crossroads exist.
self.assert_crossroad_exists(src);
self.assert_crossroad_exists(dest);
// Checks that they are aligned.
let (dx, dy, length) = src.join(dest);
// Checks that the road can be built between the two crossroads, i.e. that it does not
// generate any collision.
for k in 1..length {
self.assert_crossroad_not_exists(src + (k*dx, k*dy));
}
// Creates both roads.
self.new_road(src, dest, LEFT);
self.new_road(src, dest, RIGHT);
}
/// Adds all roads between the crossroads `c1` and `c2`.
pub fn add_all_roads(&mut self, c1: (usize, usize), c2: (usize, usize)) {
self.add_road(c1, c2);
self.add_road(c2, c1);
}
/// Panics if the crossroad exists.
pub fn assert_crossroad_exists(&self, c: CrossroadId) {
if self.grid[c].is_none() {
panic!("This crossroad {} does not exist.", c);
}
}
/// Panics if the crossroad does not exist.
pub fn assert_crossroad_not_exists(&self, c: CrossroadId) {
if self.grid[c].is_some() {
panic!("This crossroad {} already exists.", c);
}
}
/// Retrieves the specified crossroad. Panics if it does not exist.
pub fn crossroad(&self, c: CrossroadId) -> &Crossroad {
self.grid[c].as_ref().unwrap()
}
/// Retrieves a mutable reference to the specified crossroad. Panics if it does not exist.
pub fn crossroad_mut(&mut self, c: CrossroadId) -> &mut Crossroad {
self.grid[c].as_mut().unwrap()
}
/// Creates a new car. It transfers the current graph to the car, with a fresh identifier.
pub fn create_car(&mut self) -> Car {
if self.car_graph.is_none() {
// If needed, we generate this shared reference.
self.car_graph = Some(Arc::new(self.clone_graph()));
}
let id = self.car_count;
self.car_count += 1;
Car::new(id, 0, CrossroadId::new(0, 0), self.car_graph.clone().unwrap())
}
/// Spawns a car on a random road, and finds a random destination.
pub fn generate_request(&mut self, id: CarId) -> (RoadInfo, usize, CrossroadId) {
// First, it finds a road to spawn the car.
let mut rng = rand::thread_rng();
let mut road_id = rng.gen_range(0, self.roads.len());
let mut pos = self.roads[road_id].spawn_car(id);
while pos == -1 {
road_id = rng.gen_range(0, self.roads.len());
pos = self.roads[road_id].spawn_car(id);
}
// Then, it gets the crossroad at the end of this road.
let road_info = self.roads[road_id].info();
let source_c = road_info.end;
// It randomly chooses a crossroad different from the previous crossroad.
let mut destination = self.random_crossroad();
while destination == source_c {
destination = self.random_crossroad();
}
// Returns the final spawn position and destination.
(road_info, pos as usize, destination)
}
/// Spawns all the car that requested to be. Updates the move vector with the resulting spawns.
pub fn spawn_cars(&mut self, actions: Vec<Action>, moves: &mut Vec<Move>) {
for (i, a) in actions.iter().enumerate() {
if let Action::SPAWN = *a {
let (road_info, pos, destination) = self.generate_request(i);
moves[i] = Move::SPAWN(road_info, pos, destination);
}
}
}
/// Makes the crossroads enable some roads.
pub fn enable_paths(&mut self) {
for &c in &self.crossroads {
self.grid[c].as_ref().unwrap().enable_path(&mut self.roads);
}
}
/// Performs an update step on all roads, based on the Actions and Speeds vector.
/// Updates the resulting Moves vector, and returns the EdgesWeight estimation.
pub fn roads_step(&mut self, actions: &mut Vec<Action>, moves: &mut Vec<Move>, speeds: &Vec<Speed>)
-> EdgesWeight
{
let roads = &mut self.roads;
// All the possibles enabled paths are tried.
for i in 0..roads.len() {
// Each roads tries to make its first car cross, if enabled.
Road::deliver(i, actions, moves, roads);
}
// We make a step for all remaining cars, and get the weights estimations.
let mut weights = vec!();
for i in 0..roads.len() {
weights.push(roads[i].step_forward(moves, speeds));
}
let edges_weight = EdgesWeight::new(weights);
return edges_weight
}
/// Returns the central reactive process of the network.
pub fn process(mut self, central_signal: SPMCSignalSender<Arc<GlobalInfo>>,
pos_signal: MPSCSignalReceiver<(CarId, (Action, Speed)), (Vec<Action>, Vec<Speed>)>)
-> impl Process<Value=()> {
let mut weights = vec!();
for r in &self.roads {
weights.push(r.weight());
}
let mut step = 0;
let mut mean_moves = self.car_count as f32;
let beta = 0.99;
let cont = move | (mut actions, speeds): (Vec<Action>, Vec<Speed>) | {
// We count the steps.
step += 1;
// We enable some path.
self.enable_paths();
// We compute the road step and get back some weights.
let mut moves = (0..actions.len()).map(|_| { Move::NONE }).collect();
let weights = self.roads_step(&mut actions, &mut moves, &speeds);
// We spawn the cars that requested to be.
self.spawn_cars(actions, &mut moves);
// We count the number of cars that did something.
let nb_moves: i32 = moves.iter().map(| m | { match m {
&Move::NONE => 0,
_ => 1,
}}).sum();
// We keep some moving mean of this number. If it is too low, nothing is happening, so
// it panics.
mean_moves = beta * mean_moves + (1. - beta) * (nb_moves as f32);
if mean_moves < 1e-3 {
panic!("It looks like a stationary state: not enough moves.");
}
// Returns the updated information about the step.
Arc::new(GlobalInfo { weights, moves })
};
let p =
pos_signal.await_in() // Awaits the car actions
.map(cont) // Computes the resulting moves and weights.
.emit_consume(central_signal) // Emits this information.
.loop_inf(); // Loops.
return p;
}
/// Returns a String representing the network.
pub fn to_string(&self) -> String {
// We first build the corresponding char two-dimensional vector.
let (width, height) = (2 * self.width - 1, 2 * self.height - 1);
let mut char_map: Vec<Vec<char>> = (0..height).map(|_| { (0..width).map(|_| { ' ' }).collect()}).collect();
// Then we add the crossroads.
for c in &self.crossroads {
char_map[2 * c.y][2 * c.x] = 'C';
}
// Then we add the roads.
for r in &self.roads {
let start = r.info().start;
let (dx, dy, length) = start.join(r.info().end);
// Chooses the right symbol.
let c = if dx == 0 | else { '-' };
let (x, y) = (2*start.x, 2*start.y);
for k in 1..(2*length) {
char_map[(y as i32 + k * dy) as usize][(x as i32 + k * dx) as usize] = c;
}
}
// We collect the characters into a string.
char_map.into_iter().map(|line| { line.into_iter().collect::<String>().add("\n") }).collect()
}
/// Loads a network from a file located in trafficsim/maps/.
pub fn load_file(&mut self, filename: &str) {
let mut f = File::open(format!("./src/trafficsim/maps/{}", filename)).expect("File not found");
let mut contents = String::new();
f.read_to_string(&mut contents)
.expect("Something went wrong reading the file");
self.load_string(&contents);
}
/// Loads a network from a string.
pub fn load_string(&mut self, s: &str) {
// We remove ending blank lines.
let s = s.trim_right();
// We split lines and remove ending spaces and `\n`.
let mut char_map: Vec<Vec<char>> =
s.split("\n")
.map(| line | { line.trim_right().chars().collect() })
.collect();
// We compute the resulting width and height of the character array.
let width = char_map.iter().map(| line | { line.len() }).max().unwrap();
let height = char_map.len();
// We add missing spaces.
for line in char_map.iter_mut() {
for _ in 0..(width - line.len()) {
line.push(' ');
}
}
// We change the network size.
*self = Network::new((width + 1) / 2, (height + 1) / 2);
// Then, we add all the crossroads.
for (j, line) in char_map.iter().enumerate() {
for (i, c) in line.iter().enumerate() {
if *c == 'C' {
self.add_crossroad(i / 2, j / 2);
}
}
}
// Then we add the horizontal roads.
for (j, line) in char_map.iter().enumerate() {
let mut last_crossroad = None;
let mut road_length = 0;
for (i, c) in line.iter().enumerate() {
if *c == 'C' {
if last_crossroad.is_some() && road_length > 0 {
self.add_all_roads(last_crossroad.unwrap(), (i / 2, j / 2));
}
last_crossroad = Some((i / 2, j / 2));
road_length = 0;
}
else if *c == '-' {
if last_crossroad.is_none() {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
else {
road_length += 1;
}
}
else {
if road_length > 0 {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
last_crossroad = None;
}
}
}
// Then we add the vertical roads.
for i in 0..width {
let mut last_crossroad = None;
let mut road_length = 0;
for j in 0..height {
let c = char_map[j][i];
if c == 'C' {
if last_crossroad.is_some() && road_length > 0 {
self.add_all_roads(last_crossroad.unwrap(), (i / 2, j / 2));
}
last_crossroad = Some((i / 2, j / 2));
road_length = 0;
}
else if c == '|' {
if last_crossroad.is_none() {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
else {
road_length += 1;
}
}
else {
if road_length > 0 {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
last_crossroad = None;
}
}
}
}
/// Returns the cloned graph.
pub fn clone_graph(&self) -> Graph {
self.graph.clone()
}
/// Returns random crossroad coordinates (of an existing crossroad).
pub fn random_crossroad(&self) -> CrossroadId {
let i = rand::thread_rng().gen_range(0, self.crossroads.len());
self.crossroads[i]
}
/// Removes unused roads, i.e. dead ends.
pub fn simplify(&mut self) {
println!("The network has {} crossroads and {} roads.",
self.crossroads.len(), self.roads.len());
// First, we identify nodes that have no escape.
let dead_ends: Vec<bool> = self.graph.nodes.iter().map(| n | {
n.edges().is_empty()
}).collect();
// Then, we mark all the roads that do not end in a dead end.
let used_roads: Vec<bool> = self.roads.iter().map(|r| {
!dead_ends[r.info().destination]
}).collect();
// We create a fresh network.
let mut network = Network::new(self.width, self.height);
// Then, we add all the interesting crossroads, i.e. that don't have 4 dead end nodes.
for &c in &self.crossroads {
let c = self.crossroad(c);
if c.nodes.iter()
.map(|id| { !dead_ends[*id] })
.fold(false, |x, y| { x || y }) {
network.add_crossroad(c.id.x, c.id.y);
}
}
// Finally, we add only the used edges.
for r in &self.roads {
let r = r.info();
if used_roads[r.id] {
network.new_road(r.start, r.end, r.side);
}
}
// We change the initial network.
*self = network;
println!("After simplification, it only has {} crossroads and {} roads.",
self.crossroads.len(), self.roads.len());
}
}
impl fmt::Display for Network {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.to_string())
}
}
/// Returns an array of size `height` x `width`.
pub fn none_array<T>(height: usize, width: usize) -> Vec<Vec<Option<T>>> {
(0..height).map(|_| { (0..width).map(|_| { None }).collect()}).collect()
}
/// Computes the road direction and its node direction.
pub fn compute_directions(dx: i32, dy: i32, side: Side) -> (usize, usize) {
let d1 = match (dx, dy) {
(1, 0) => EAST,
(0, 1) => SOUTH,
(-1, 0) => WEST,
(0, -1) => NORTH,
_ => panic!("Invalid direction."),
};
let d2 = (d1 + (1-side) * 2) % 4;
(d1, d2)
}
/// Returns the previous (clockwise) direction.
pub fn previous_direction(d: usize) -> usize {
(d + 3) % 4
} | { '|' } | conditional_block |
network.rs | extern crate reactivers;
extern crate rand;
use reactivers::engine::process::*;
use reactivers::engine::signal::*;
use reactivers::engine::signal::spmc_signal::*;
use reactivers::engine::signal::mpsc_signal::*;
use std::sync::Arc;
use std::fs::File;
use std::io::prelude::*;
use self::rand::Rng;
use super::graph::*;
use super::car::*;
use super::road::*;
// These constant directions are used to index roads and nodes at a crossroad.
const NORTH: usize = 0;
const EAST: usize = 1;
const SOUTH: usize = 2;
const WEST: usize = 3;
pub type Side = usize;
const LEFT: usize = 0;
const RIGHT: usize = 1;
/// Global update network information.
pub struct GlobalInfo {
pub weights: EdgesWeight, // Last estimation of the edges weights.
pub moves: Vec<Move>, // Moves of all the cars.
}
/// Move of a car.
#[derive(Copy, Clone)]
pub enum Move {
NONE, // None happened.
SPAWN(RoadInfo, usize, CrossroadId), // The car has spawned at specified road, position,
// with specified destination crossroad.
STEP(i32), // The car performs a step of specified length.
VANISH, // The car vanished.
CROSS(RoadInfo), // The car crossed and is now on specified road.
}
/// Network structure containing all the information relative to crossroads and roads.
#[derive(Clone)]
pub struct Network {
pub width: usize, // Width of the network.
pub height: usize, // Height of the network.
pub car_count: usize, // Number of cars.
pub cars_per_unit: i32, // Number of cars between two centers of crossroads.
pub cars_per_crossroad: i32, // Number of cars fitting in a crossroad.
grid: Vec<Vec<Option<Crossroad>>>, // Grid containing the crossroads.
pub roads: Vec<Road>, // Vector containing the roads.
graph: Graph, // Corresponding abstract graph.
car_graph: Option<Arc<Graph>>, // Shared reference to the same graph.
pub crossroads: Vec<CrossroadId>, // Vector containing all the coordinates of existing
// crossroads.
}
/// Crossroad Coordinates.
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct | {
pub x: usize, // Abscissa
pub y: usize, // Ordinate
}
use std::ops::{ Index, IndexMut };
/// Allows indexing the grid by the crossroad coordinates.
impl Index<CrossroadId> for Vec<Vec<Option<Crossroad>>> {
type Output = Option<Crossroad>;
#[inline]
fn index(&self, index: CrossroadId) -> &Option<Crossroad> {
&self[index.y][index.x]
}
}
/// Allows mutable indexing of the grid by the crossroad coordinates.
impl IndexMut<CrossroadId> for Vec<Vec<Option<Crossroad>>> {
#[inline]
fn index_mut(&mut self, index: CrossroadId) -> &mut Option<Crossroad> {
&mut self[index.y][index.x]
}
}
use std::fmt;
impl fmt::Display for CrossroadId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
use std::ops::Add;
/// Allows to add some move to crossroad coordinates.
impl Add<(i32, i32)> for CrossroadId {
type Output = CrossroadId;
fn add(self, (x, y): (i32, i32)) -> CrossroadId {
CrossroadId {
x: (self.x as i32 + x) as usize,
y: (self.y as i32 + y) as usize,
}
}
}
impl CrossroadId {
/// Creates new crossroad identifier.
pub fn new(x: usize, y: usize) -> CrossroadId {
CrossroadId { x, y }
}
/// Computes the unit move (dx, dy) and the length to join the destination crossroad.
pub fn join(&self, dest: CrossroadId) -> (i32, i32, i32) {
if self.x == dest.x {
let dy = (dest.y as i32) - (self.y as i32);
let len = i32::abs(dy);
(0, dy / len, len)
}
else if self.y == dest.y {
let dx = (dest.x as i32) - (self.x as i32);
let len = i32::abs(dx);
(dx / len, 0, len)
}
else {
panic!("Crossroads {} and {} are not linkable.", self, dest);
}
}
}
/// A Crossroad.
#[derive(Clone)]
pub struct Crossroad {
id: CrossroadId, // Coordinates
pub nodes: Vec<NodeId>, // Vector of its 4 quarter nodes.
// They are indexed by direction.
roads: Vec<Vec<Option<RoadId>>>, // Roads leaving this crossroad.
// They are indexed by direction and side.
roads_arriving: Vec<Vec<Option<RoadId>>>, // Roads arriving at this crossroad.
// They are indexed by direction and side.
}
impl Crossroad {
/// Creates a new crossroad with four nodes without any roads.
pub fn new(id: CrossroadId, g: &mut Graph) -> Crossroad {
let mut c = Crossroad {
id,
nodes: vec!(),
roads: none_array(4, 2),
roads_arriving: none_array(4, 2),
};
for _ in 0..4 {
c.nodes.push(g.add_node(c.id));
}
c
}
/// Enables some roads. Only the cars from enabled roads are able to cross a crossroad.
fn enable_path(&self, roads: &mut Vec<Road>) {
// First policy: we enable the most loaded road with some guy waiting.
// let mut max = -1;
// let mut r_max = 0;
// for r in self.existing_roads_arriving() {
// if roads[r].is_waiting() && roads[r].get_car_count() > max {
// r_max = r;
// max = roads[r].get_car_count();
// }
// }
// roads[r_max].enable();
// Second policy: we enable the most loaded roads with guys waiting, but in pairs.
// We compute the pair of compatible roads with the maximum cumulated load.
let mut max_pair = ((NORTH, LEFT), (NORTH, LEFT));
let mut max_load = 0;
for d in 0..4 {
for s in 0..2 {
for x in 0..2 {
let (d2, s2) = {
if x == 0 {
(d, 1 - s)
}
else {
((d + 2) % 4, s)
}
};
let load = self.compute_load(d, s, roads) +
self.compute_load(d2, s2, roads);
if load > max_load {
max_load = load;
max_pair = ((d, s), (d2, s2));
}
}
}
}
let ((d1, s1), (d2, s2)) = max_pair;
if self.roads_arriving[d1][s1].is_some() {
roads[self.roads_arriving[d1][s1].unwrap()].enable();
}
if self.roads_arriving[d2][s2].is_some() {
roads[self.roads_arriving[d2][s2].unwrap()].enable();
}
}
/// Computes the load of a road, i.e. the numbers of cars on this road.
/// If there is no car ready to cross, returns 0.
fn compute_load(&self, direction: usize, side: usize, roads: &mut Vec<Road>) -> i32 {
let r = self.roads_arriving[direction][side];
if r.is_none() || !roads[r.unwrap()].is_waiting() {
return 0;
}
return roads[r.unwrap()].get_car_count();
}
}
impl Network {
/// Creates a new empty Network, with specified width and heights.
pub fn new(width: usize, height: usize) -> Network {
Network {
width,
height,
car_count: 0,
cars_per_unit: 10,
cars_per_crossroad: 4,
grid: none_array(height, width),
roads: vec!(),
graph: Graph::new(),
car_graph: None,
crossroads: vec!(),
}
}
/// Adds a crossroad to specified location.
pub fn add_crossroad(&mut self, x: usize, y: usize) {
let c = CrossroadId::new(x, y);
// We check the crossroad does not exist.
self.assert_crossroad_not_exists(c);
// We add it to the graph and update the network.
self.grid[c] = Some(Crossroad::new(c, &mut self.graph));
self.crossroads.push(c);
}
/// Adds a new specific road.
pub fn new_road(&mut self, src: CrossroadId, dest: CrossroadId, side: Side){
// We get the parameters of the road.
let (dx, dy, length) = src.join(dest);
let length = length * self.cars_per_unit - self.cars_per_crossroad;
let (d1, d2) = compute_directions(dx, dy, side);
let id = self.roads.len();
// First, it builds the road in the network.
let road_info = RoadInfo {
id,
start: src,
end: dest,
side,
destination: self.crossroad(dest).nodes[d2],
length: length as usize,
};
// Then, we add it to the crossroads and the roads.
let road = Road::new(road_info);
self.roads.push(road);
self.crossroad_mut(src).roads[d1][side] = Some(id);
self.crossroad_mut(dest).roads_arriving[d1][side] = Some(id);
// Then, it builds the two corresponding edges in the graph.
let (n1, n2) = {
let c = self.crossroad(src);
(c.nodes[d1], c.nodes[previous_direction(d1)])
};
let n3 = self.crossroad(dest).nodes[d2];
self.graph.add_edge(n1, n3, id);
self.graph.add_edge(n2, n3, id);
}
/// Add the two road linking the first crossroad to the second one.
pub fn add_road(&mut self, (src_x, src_y): (usize, usize), (dest_x, dest_y): (usize, usize)) {
let (src, dest) =
(CrossroadId::new(src_x, src_y), CrossroadId::new(dest_x, dest_y));
// Checks the source and destination crossroads exist.
self.assert_crossroad_exists(src);
self.assert_crossroad_exists(dest);
// Checks that they are aligned.
let (dx, dy, length) = src.join(dest);
// Checks that the road can be built between the two crossroads, i.e. that it does not
// generate any collision.
for k in 1..length {
self.assert_crossroad_not_exists(src + (k*dx, k*dy));
}
// Creates both roads.
self.new_road(src, dest, LEFT);
self.new_road(src, dest, RIGHT);
}
/// Adds all roads between the crossroads `c1` and `c2`.
pub fn add_all_roads(&mut self, c1: (usize, usize), c2: (usize, usize)) {
self.add_road(c1, c2);
self.add_road(c2, c1);
}
/// Panics if the crossroad exists.
pub fn assert_crossroad_exists(&self, c: CrossroadId) {
if self.grid[c].is_none() {
panic!("This crossroad {} does not exist.", c);
}
}
/// Panics if the crossroad does not exist.
pub fn assert_crossroad_not_exists(&self, c: CrossroadId) {
if self.grid[c].is_some() {
panic!("This crossroad {} already exists.", c);
}
}
/// Retrieves the specified crossroad. Panics if it does not exist.
pub fn crossroad(&self, c: CrossroadId) -> &Crossroad {
self.grid[c].as_ref().unwrap()
}
/// Retrieves a mutable reference to the specified crossroad. Panics if it does not exist.
pub fn crossroad_mut(&mut self, c: CrossroadId) -> &mut Crossroad {
self.grid[c].as_mut().unwrap()
}
/// Creates a new car. It transfers the current graph to the car, with a fresh identifier.
pub fn create_car(&mut self) -> Car {
if self.car_graph.is_none() {
// If needed, we generate this shared reference.
self.car_graph = Some(Arc::new(self.clone_graph()));
}
let id = self.car_count;
self.car_count += 1;
Car::new(id, 0, CrossroadId::new(0, 0), self.car_graph.clone().unwrap())
}
/// Spawns a car on a random road, and finds a random destination.
pub fn generate_request(&mut self, id: CarId) -> (RoadInfo, usize, CrossroadId) {
// First, it finds a road to spawn the car.
let mut rng = rand::thread_rng();
let mut road_id = rng.gen_range(0, self.roads.len());
let mut pos = self.roads[road_id].spawn_car(id);
while pos == -1 {
road_id = rng.gen_range(0, self.roads.len());
pos = self.roads[road_id].spawn_car(id);
}
// Then, it gets the crossroad at the end of this road.
let road_info = self.roads[road_id].info();
let source_c = road_info.end;
// It randomly chooses a crossroad different from the previous crossroad.
let mut destination = self.random_crossroad();
while destination == source_c {
destination = self.random_crossroad();
}
// Returns the final spawn position and destination.
(road_info, pos as usize, destination)
}
/// Spawns all the car that requested to be. Updates the move vector with the resulting spawns.
pub fn spawn_cars(&mut self, actions: Vec<Action>, moves: &mut Vec<Move>) {
for (i, a) in actions.iter().enumerate() {
if let Action::SPAWN = *a {
let (road_info, pos, destination) = self.generate_request(i);
moves[i] = Move::SPAWN(road_info, pos, destination);
}
}
}
/// Makes the crossroads enable some roads.
pub fn enable_paths(&mut self) {
for &c in &self.crossroads {
self.grid[c].as_ref().unwrap().enable_path(&mut self.roads);
}
}
/// Performs an update step on all roads, based on the Actions and Speeds vector.
/// Updates the resulting Moves vector, and returns the EdgesWeight estimation.
pub fn roads_step(&mut self, actions: &mut Vec<Action>, moves: &mut Vec<Move>, speeds: &Vec<Speed>)
-> EdgesWeight
{
let roads = &mut self.roads;
// All the possibles enabled paths are tried.
for i in 0..roads.len() {
// Each roads tries to make its first car cross, if enabled.
Road::deliver(i, actions, moves, roads);
}
// We make a step for all remaining cars, and get the weights estimations.
let mut weights = vec!();
for i in 0..roads.len() {
weights.push(roads[i].step_forward(moves, speeds));
}
let edges_weight = EdgesWeight::new(weights);
return edges_weight
}
/// Returns the central reactive process of the network.
pub fn process(mut self, central_signal: SPMCSignalSender<Arc<GlobalInfo>>,
pos_signal: MPSCSignalReceiver<(CarId, (Action, Speed)), (Vec<Action>, Vec<Speed>)>)
-> impl Process<Value=()> {
let mut weights = vec!();
for r in &self.roads {
weights.push(r.weight());
}
let mut step = 0;
let mut mean_moves = self.car_count as f32;
let beta = 0.99;
let cont = move | (mut actions, speeds): (Vec<Action>, Vec<Speed>) | {
// We count the steps.
step += 1;
// We enable some path.
self.enable_paths();
// We compute the road step and get back some weights.
let mut moves = (0..actions.len()).map(|_| { Move::NONE }).collect();
let weights = self.roads_step(&mut actions, &mut moves, &speeds);
// We spawn the cars that requested to be.
self.spawn_cars(actions, &mut moves);
// We count the number of cars that did something.
let nb_moves: i32 = moves.iter().map(| m | { match m {
&Move::NONE => 0,
_ => 1,
}}).sum();
// We keep some moving mean of this number. If it is too low, nothing is happening, so
// it panics.
mean_moves = beta * mean_moves + (1. - beta) * (nb_moves as f32);
if mean_moves < 1e-3 {
panic!("It looks like a stationary state: not enough moves.");
}
// Returns the updated information about the step.
Arc::new(GlobalInfo { weights, moves })
};
let p =
pos_signal.await_in() // Awaits the car actions
.map(cont) // Computes the resulting moves and weights.
.emit_consume(central_signal) // Emits this information.
.loop_inf(); // Loops.
return p;
}
/// Returns a String representing the network.
pub fn to_string(&self) -> String {
// We first build the corresponding char two-dimensional vector.
let (width, height) = (2 * self.width - 1, 2 * self.height - 1);
let mut char_map: Vec<Vec<char>> = (0..height).map(|_| { (0..width).map(|_| { ' ' }).collect()}).collect();
// Then we add the crossroads.
for c in &self.crossroads {
char_map[2 * c.y][2 * c.x] = 'C';
}
// Then we add the roads.
for r in &self.roads {
let start = r.info().start;
let (dx, dy, length) = start.join(r.info().end);
// Chooses the right symbol.
let c = if dx == 0 { '|' } else { '-' };
let (x, y) = (2*start.x, 2*start.y);
for k in 1..(2*length) {
char_map[(y as i32 + k * dy) as usize][(x as i32 + k * dx) as usize] = c;
}
}
// We collect the characters into a string.
char_map.into_iter().map(|line| { line.into_iter().collect::<String>().add("\n") }).collect()
}
/// Loads a network from a file located in trafficsim/maps/.
pub fn load_file(&mut self, filename: &str) {
let mut f = File::open(format!("./src/trafficsim/maps/{}", filename)).expect("File not found");
let mut contents = String::new();
f.read_to_string(&mut contents)
.expect("Something went wrong reading the file");
self.load_string(&contents);
}
/// Loads a network from a string.
pub fn load_string(&mut self, s: &str) {
// We remove ending blank lines.
let s = s.trim_right();
// We split lines and remove ending spaces and `\n`.
let mut char_map: Vec<Vec<char>> =
s.split("\n")
.map(| line | { line.trim_right().chars().collect() })
.collect();
// We compute the resulting width and height of the character array.
let width = char_map.iter().map(| line | { line.len() }).max().unwrap();
let height = char_map.len();
// We add missing spaces.
for line in char_map.iter_mut() {
for _ in 0..(width - line.len()) {
line.push(' ');
}
}
// We change the network size.
*self = Network::new((width + 1) / 2, (height + 1) / 2);
// Then, we add all the crossroads.
for (j, line) in char_map.iter().enumerate() {
for (i, c) in line.iter().enumerate() {
if *c == 'C' {
self.add_crossroad(i / 2, j / 2);
}
}
}
// Then we add the horizontal roads.
for (j, line) in char_map.iter().enumerate() {
let mut last_crossroad = None;
let mut road_length = 0;
for (i, c) in line.iter().enumerate() {
if *c == 'C' {
if last_crossroad.is_some() && road_length > 0 {
self.add_all_roads(last_crossroad.unwrap(), (i / 2, j / 2));
}
last_crossroad = Some((i / 2, j / 2));
road_length = 0;
}
else if *c == '-' {
if last_crossroad.is_none() {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
else {
road_length += 1;
}
}
else {
if road_length > 0 {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
last_crossroad = None;
}
}
}
// Then we add the vertical roads.
for i in 0..width {
let mut last_crossroad = None;
let mut road_length = 0;
for j in 0..height {
let c = char_map[j][i];
if c == 'C' {
if last_crossroad.is_some() && road_length > 0 {
self.add_all_roads(last_crossroad.unwrap(), (i / 2, j / 2));
}
last_crossroad = Some((i / 2, j / 2));
road_length = 0;
}
else if c == '|' {
if last_crossroad.is_none() {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
else {
road_length += 1;
}
}
else {
if road_length > 0 {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
last_crossroad = None;
}
}
}
}
/// Returns the cloned graph.
pub fn clone_graph(&self) -> Graph {
self.graph.clone()
}
/// Returns random crossroad coordinates (of an existing crossroad).
pub fn random_crossroad(&self) -> CrossroadId {
let i = rand::thread_rng().gen_range(0, self.crossroads.len());
self.crossroads[i]
}
/// Removes unused roads, i.e. dead ends.
pub fn simplify(&mut self) {
println!("The network has {} crossroads and {} roads.",
self.crossroads.len(), self.roads.len());
// First, we identify nodes that have no escape.
let dead_ends: Vec<bool> = self.graph.nodes.iter().map(| n | {
n.edges().is_empty()
}).collect();
// Then, we mark all the roads that do not end in a dead end.
let used_roads: Vec<bool> = self.roads.iter().map(|r| {
!dead_ends[r.info().destination]
}).collect();
// We create a fresh network.
let mut network = Network::new(self.width, self.height);
// Then, we add all the interesting crossroads, i.e. that don't have 4 dead end nodes.
for &c in &self.crossroads {
let c = self.crossroad(c);
if c.nodes.iter()
.map(|id| { !dead_ends[*id] })
.fold(false, |x, y| { x || y }) {
network.add_crossroad(c.id.x, c.id.y);
}
}
// Finally, we add only the used edges.
for r in &self.roads {
let r = r.info();
if used_roads[r.id] {
network.new_road(r.start, r.end, r.side);
}
}
// We change the initial network.
*self = network;
println!("After simplification, it only has {} crossroads and {} roads.",
self.crossroads.len(), self.roads.len());
}
}
impl fmt::Display for Network {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.to_string())
}
}
/// Returns an array of size `height` x `width`.
pub fn none_array<T>(height: usize, width: usize) -> Vec<Vec<Option<T>>> {
(0..height).map(|_| { (0..width).map(|_| { None }).collect()}).collect()
}
/// Computes the road direction and its node direction.
pub fn compute_directions(dx: i32, dy: i32, side: Side) -> (usize, usize) {
let d1 = match (dx, dy) {
(1, 0) => EAST,
(0, 1) => SOUTH,
(-1, 0) => WEST,
(0, -1) => NORTH,
_ => panic!("Invalid direction."),
};
let d2 = (d1 + (1-side) * 2) % 4;
(d1, d2)
}
/// Returns the previous (clockwise) direction.
pub fn previous_direction(d: usize) -> usize {
(d + 3) % 4
} | CrossroadId | identifier_name |
network.rs | extern crate reactivers;
extern crate rand;
use reactivers::engine::process::*;
use reactivers::engine::signal::*;
use reactivers::engine::signal::spmc_signal::*;
use reactivers::engine::signal::mpsc_signal::*;
use std::sync::Arc;
use std::fs::File;
use std::io::prelude::*;
use self::rand::Rng;
use super::graph::*;
use super::car::*;
use super::road::*;
// These constant directions are used to index roads and nodes at a crossroad.
const NORTH: usize = 0;
const EAST: usize = 1;
const SOUTH: usize = 2;
const WEST: usize = 3;
pub type Side = usize;
const LEFT: usize = 0;
const RIGHT: usize = 1;
/// Global update network information.
pub struct GlobalInfo {
pub weights: EdgesWeight, // Last estimation of the edges weights.
pub moves: Vec<Move>, // Moves of all the cars.
}
/// Move of a car.
#[derive(Copy, Clone)] | // with specified destination crossroad.
STEP(i32), // The car performs a step of specified length.
VANISH, // The car vanished.
CROSS(RoadInfo), // The car crossed and is now on specified road.
}
/// Network structure containing all the information relative to crossroads and roads.
#[derive(Clone)]
pub struct Network {
pub width: usize, // Width of the network.
pub height: usize, // Height of the network.
pub car_count: usize, // Number of cars.
pub cars_per_unit: i32, // Number of cars between two centers of crossroads.
pub cars_per_crossroad: i32, // Number of cars fitting in a crossroad.
grid: Vec<Vec<Option<Crossroad>>>, // Grid containing the crossroads.
pub roads: Vec<Road>, // Vector containing the roads.
graph: Graph, // Corresponding abstract graph.
car_graph: Option<Arc<Graph>>, // Shared reference to the same graph.
pub crossroads: Vec<CrossroadId>, // Vector containing all the coordinates of existing
// crossroads.
}
/// Crossroad Coordinates.
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct CrossroadId {
pub x: usize, // Abscissa
pub y: usize, // Ordinate
}
use std::ops::{ Index, IndexMut };
/// Allows indexing the grid by the crossroad coordinates.
impl Index<CrossroadId> for Vec<Vec<Option<Crossroad>>> {
type Output = Option<Crossroad>;
#[inline]
fn index(&self, index: CrossroadId) -> &Option<Crossroad> {
&self[index.y][index.x]
}
}
/// Allows mutable indexing of the grid by the crossroad coordinates.
impl IndexMut<CrossroadId> for Vec<Vec<Option<Crossroad>>> {
#[inline]
fn index_mut(&mut self, index: CrossroadId) -> &mut Option<Crossroad> {
&mut self[index.y][index.x]
}
}
use std::fmt;
impl fmt::Display for CrossroadId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
use std::ops::Add;
/// Allows to add some move to crossroad coordinates.
impl Add<(i32, i32)> for CrossroadId {
type Output = CrossroadId;
fn add(self, (x, y): (i32, i32)) -> CrossroadId {
CrossroadId {
x: (self.x as i32 + x) as usize,
y: (self.y as i32 + y) as usize,
}
}
}
impl CrossroadId {
/// Creates new crossroad identifier.
pub fn new(x: usize, y: usize) -> CrossroadId {
CrossroadId { x, y }
}
/// Computes the unit move (dx, dy) and the length to join the destination crossroad.
pub fn join(&self, dest: CrossroadId) -> (i32, i32, i32) {
if self.x == dest.x {
let dy = (dest.y as i32) - (self.y as i32);
let len = i32::abs(dy);
(0, dy / len, len)
}
else if self.y == dest.y {
let dx = (dest.x as i32) - (self.x as i32);
let len = i32::abs(dx);
(dx / len, 0, len)
}
else {
panic!("Crossroads {} and {} are not linkable.", self, dest);
}
}
}
/// A Crossroad.
#[derive(Clone)]
pub struct Crossroad {
id: CrossroadId, // Coordinates
pub nodes: Vec<NodeId>, // Vector of its 4 quarter nodes.
// They are indexed by direction.
roads: Vec<Vec<Option<RoadId>>>, // Roads leaving this crossroad.
// They are indexed by direction and side.
roads_arriving: Vec<Vec<Option<RoadId>>>, // Roads arriving at this crossroad.
// They are indexed by direction and side.
}
impl Crossroad {
/// Creates a new crossroad with four nodes without any roads.
pub fn new(id: CrossroadId, g: &mut Graph) -> Crossroad {
let mut c = Crossroad {
id,
nodes: vec!(),
roads: none_array(4, 2),
roads_arriving: none_array(4, 2),
};
for _ in 0..4 {
c.nodes.push(g.add_node(c.id));
}
c
}
/// Enables some roads. Only the cars from enabled roads are able to cross a crossroad.
fn enable_path(&self, roads: &mut Vec<Road>) {
// First policy: we enable the most loaded road with some guy waiting.
// let mut max = -1;
// let mut r_max = 0;
// for r in self.existing_roads_arriving() {
// if roads[r].is_waiting() && roads[r].get_car_count() > max {
// r_max = r;
// max = roads[r].get_car_count();
// }
// }
// roads[r_max].enable();
// Second policy: we enable the most loaded roads with guys waiting, but in pairs.
// We compute the pair of compatible roads with the maximum cumulated load.
let mut max_pair = ((NORTH, LEFT), (NORTH, LEFT));
let mut max_load = 0;
for d in 0..4 {
for s in 0..2 {
for x in 0..2 {
let (d2, s2) = {
if x == 0 {
(d, 1 - s)
}
else {
((d + 2) % 4, s)
}
};
let load = self.compute_load(d, s, roads) +
self.compute_load(d2, s2, roads);
if load > max_load {
max_load = load;
max_pair = ((d, s), (d2, s2));
}
}
}
}
let ((d1, s1), (d2, s2)) = max_pair;
if self.roads_arriving[d1][s1].is_some() {
roads[self.roads_arriving[d1][s1].unwrap()].enable();
}
if self.roads_arriving[d2][s2].is_some() {
roads[self.roads_arriving[d2][s2].unwrap()].enable();
}
}
/// Computes the load of a road, i.e. the numbers of cars on this road.
/// If there is no car ready to cross, returns 0.
fn compute_load(&self, direction: usize, side: usize, roads: &mut Vec<Road>) -> i32 {
let r = self.roads_arriving[direction][side];
if r.is_none() || !roads[r.unwrap()].is_waiting() {
return 0;
}
return roads[r.unwrap()].get_car_count();
}
}
impl Network {
/// Creates a new empty Network, with specified width and heights.
pub fn new(width: usize, height: usize) -> Network {
Network {
width,
height,
car_count: 0,
cars_per_unit: 10,
cars_per_crossroad: 4,
grid: none_array(height, width),
roads: vec!(),
graph: Graph::new(),
car_graph: None,
crossroads: vec!(),
}
}
/// Adds a crossroad to specified location.
pub fn add_crossroad(&mut self, x: usize, y: usize) {
let c = CrossroadId::new(x, y);
// We check the crossroad does not exist.
self.assert_crossroad_not_exists(c);
// We add it to the graph and update the network.
self.grid[c] = Some(Crossroad::new(c, &mut self.graph));
self.crossroads.push(c);
}
/// Adds a new specific road.
pub fn new_road(&mut self, src: CrossroadId, dest: CrossroadId, side: Side){
// We get the parameters of the road.
let (dx, dy, length) = src.join(dest);
let length = length * self.cars_per_unit - self.cars_per_crossroad;
let (d1, d2) = compute_directions(dx, dy, side);
let id = self.roads.len();
// First, it builds the road in the network.
let road_info = RoadInfo {
id,
start: src,
end: dest,
side,
destination: self.crossroad(dest).nodes[d2],
length: length as usize,
};
// Then, we add it to the crossroads and the roads.
let road = Road::new(road_info);
self.roads.push(road);
self.crossroad_mut(src).roads[d1][side] = Some(id);
self.crossroad_mut(dest).roads_arriving[d1][side] = Some(id);
// Then, it builds the two corresponding edges in the graph.
let (n1, n2) = {
let c = self.crossroad(src);
(c.nodes[d1], c.nodes[previous_direction(d1)])
};
let n3 = self.crossroad(dest).nodes[d2];
self.graph.add_edge(n1, n3, id);
self.graph.add_edge(n2, n3, id);
}
/// Add the two road linking the first crossroad to the second one.
pub fn add_road(&mut self, (src_x, src_y): (usize, usize), (dest_x, dest_y): (usize, usize)) {
let (src, dest) =
(CrossroadId::new(src_x, src_y), CrossroadId::new(dest_x, dest_y));
// Checks the source and destination crossroads exist.
self.assert_crossroad_exists(src);
self.assert_crossroad_exists(dest);
// Checks that they are aligned.
let (dx, dy, length) = src.join(dest);
// Checks that the road can be built between the two crossroads, i.e. that it does not
// generate any collision.
for k in 1..length {
self.assert_crossroad_not_exists(src + (k*dx, k*dy));
}
// Creates both roads.
self.new_road(src, dest, LEFT);
self.new_road(src, dest, RIGHT);
}
/// Adds all roads between the crossroads `c1` and `c2`.
pub fn add_all_roads(&mut self, c1: (usize, usize), c2: (usize, usize)) {
self.add_road(c1, c2);
self.add_road(c2, c1);
}
/// Panics if the crossroad exists.
pub fn assert_crossroad_exists(&self, c: CrossroadId) {
if self.grid[c].is_none() {
panic!("This crossroad {} does not exist.", c);
}
}
/// Panics if the crossroad does not exist.
pub fn assert_crossroad_not_exists(&self, c: CrossroadId) {
if self.grid[c].is_some() {
panic!("This crossroad {} already exists.", c);
}
}
/// Retrieves the specified crossroad. Panics if it does not exist.
pub fn crossroad(&self, c: CrossroadId) -> &Crossroad {
self.grid[c].as_ref().unwrap()
}
/// Retrieves a mutable reference to the specified crossroad. Panics if it does not exist.
pub fn crossroad_mut(&mut self, c: CrossroadId) -> &mut Crossroad {
self.grid[c].as_mut().unwrap()
}
/// Creates a new car. It transfers the current graph to the car, with a fresh identifier.
pub fn create_car(&mut self) -> Car {
if self.car_graph.is_none() {
// If needed, we generate this shared reference.
self.car_graph = Some(Arc::new(self.clone_graph()));
}
let id = self.car_count;
self.car_count += 1;
Car::new(id, 0, CrossroadId::new(0, 0), self.car_graph.clone().unwrap())
}
/// Spawns a car on a random road, and finds a random destination.
pub fn generate_request(&mut self, id: CarId) -> (RoadInfo, usize, CrossroadId) {
// First, it finds a road to spawn the car.
let mut rng = rand::thread_rng();
let mut road_id = rng.gen_range(0, self.roads.len());
let mut pos = self.roads[road_id].spawn_car(id);
while pos == -1 {
road_id = rng.gen_range(0, self.roads.len());
pos = self.roads[road_id].spawn_car(id);
}
// Then, it gets the crossroad at the end of this road.
let road_info = self.roads[road_id].info();
let source_c = road_info.end;
// It randomly chooses a crossroad different from the previous crossroad.
let mut destination = self.random_crossroad();
while destination == source_c {
destination = self.random_crossroad();
}
// Returns the final spawn position and destination.
(road_info, pos as usize, destination)
}
/// Spawns all the car that requested to be. Updates the move vector with the resulting spawns.
pub fn spawn_cars(&mut self, actions: Vec<Action>, moves: &mut Vec<Move>) {
for (i, a) in actions.iter().enumerate() {
if let Action::SPAWN = *a {
let (road_info, pos, destination) = self.generate_request(i);
moves[i] = Move::SPAWN(road_info, pos, destination);
}
}
}
/// Makes the crossroads enable some roads.
pub fn enable_paths(&mut self) {
for &c in &self.crossroads {
self.grid[c].as_ref().unwrap().enable_path(&mut self.roads);
}
}
/// Performs an update step on all roads, based on the Actions and Speeds vector.
/// Updates the resulting Moves vector, and returns the EdgesWeight estimation.
pub fn roads_step(&mut self, actions: &mut Vec<Action>, moves: &mut Vec<Move>, speeds: &Vec<Speed>)
-> EdgesWeight
{
let roads = &mut self.roads;
// All the possibles enabled paths are tried.
for i in 0..roads.len() {
// Each roads tries to make its first car cross, if enabled.
Road::deliver(i, actions, moves, roads);
}
// We make a step for all remaining cars, and get the weights estimations.
let mut weights = vec!();
for i in 0..roads.len() {
weights.push(roads[i].step_forward(moves, speeds));
}
let edges_weight = EdgesWeight::new(weights);
return edges_weight
}
/// Returns the central reactive process of the network.
pub fn process(mut self, central_signal: SPMCSignalSender<Arc<GlobalInfo>>,
pos_signal: MPSCSignalReceiver<(CarId, (Action, Speed)), (Vec<Action>, Vec<Speed>)>)
-> impl Process<Value=()> {
let mut weights = vec!();
for r in &self.roads {
weights.push(r.weight());
}
let mut step = 0;
let mut mean_moves = self.car_count as f32;
let beta = 0.99;
let cont = move | (mut actions, speeds): (Vec<Action>, Vec<Speed>) | {
// We count the steps.
step += 1;
// We enable some path.
self.enable_paths();
// We compute the road step and get back some weights.
let mut moves = (0..actions.len()).map(|_| { Move::NONE }).collect();
let weights = self.roads_step(&mut actions, &mut moves, &speeds);
// We spawn the cars that requested to be.
self.spawn_cars(actions, &mut moves);
// We count the number of cars that did something.
let nb_moves: i32 = moves.iter().map(| m | { match m {
&Move::NONE => 0,
_ => 1,
}}).sum();
// We keep some moving mean of this number. If it is too low, nothing is happening, so
// it panics.
mean_moves = beta * mean_moves + (1. - beta) * (nb_moves as f32);
if mean_moves < 1e-3 {
panic!("It looks like a stationary state: not enough moves.");
}
// Returns the updated information about the step.
Arc::new(GlobalInfo { weights, moves })
};
let p =
pos_signal.await_in() // Awaits the car actions
.map(cont) // Computes the resulting moves and weights.
.emit_consume(central_signal) // Emits this information.
.loop_inf(); // Loops.
return p;
}
/// Returns a String representing the network.
pub fn to_string(&self) -> String {
// We first build the corresponding char two-dimensional vector.
let (width, height) = (2 * self.width - 1, 2 * self.height - 1);
let mut char_map: Vec<Vec<char>> = (0..height).map(|_| { (0..width).map(|_| { ' ' }).collect()}).collect();
// Then we add the crossroads.
for c in &self.crossroads {
char_map[2 * c.y][2 * c.x] = 'C';
}
// Then we add the roads.
for r in &self.roads {
let start = r.info().start;
let (dx, dy, length) = start.join(r.info().end);
// Chooses the right symbol.
let c = if dx == 0 { '|' } else { '-' };
let (x, y) = (2*start.x, 2*start.y);
for k in 1..(2*length) {
char_map[(y as i32 + k * dy) as usize][(x as i32 + k * dx) as usize] = c;
}
}
// We collect the characters into a string.
char_map.into_iter().map(|line| { line.into_iter().collect::<String>().add("\n") }).collect()
}
/// Loads a network from a file located in trafficsim/maps/.
pub fn load_file(&mut self, filename: &str) {
let mut f = File::open(format!("./src/trafficsim/maps/{}", filename)).expect("File not found");
let mut contents = String::new();
f.read_to_string(&mut contents)
.expect("Something went wrong reading the file");
self.load_string(&contents);
}
/// Loads a network from a string.
pub fn load_string(&mut self, s: &str) {
// We remove ending blank lines.
let s = s.trim_right();
// We split lines and remove ending spaces and `\n`.
let mut char_map: Vec<Vec<char>> =
s.split("\n")
.map(| line | { line.trim_right().chars().collect() })
.collect();
// We compute the resulting width and height of the character array.
let width = char_map.iter().map(| line | { line.len() }).max().unwrap();
let height = char_map.len();
// We add missing spaces.
for line in char_map.iter_mut() {
for _ in 0..(width - line.len()) {
line.push(' ');
}
}
// We change the network size.
*self = Network::new((width + 1) / 2, (height + 1) / 2);
// Then, we add all the crossroads.
for (j, line) in char_map.iter().enumerate() {
for (i, c) in line.iter().enumerate() {
if *c == 'C' {
self.add_crossroad(i / 2, j / 2);
}
}
}
// Then we add the horizontal roads.
for (j, line) in char_map.iter().enumerate() {
let mut last_crossroad = None;
let mut road_length = 0;
for (i, c) in line.iter().enumerate() {
if *c == 'C' {
if last_crossroad.is_some() && road_length > 0 {
self.add_all_roads(last_crossroad.unwrap(), (i / 2, j / 2));
}
last_crossroad = Some((i / 2, j / 2));
road_length = 0;
}
else if *c == '-' {
if last_crossroad.is_none() {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
else {
road_length += 1;
}
}
else {
if road_length > 0 {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
last_crossroad = None;
}
}
}
// Then we add the vertical roads.
for i in 0..width {
let mut last_crossroad = None;
let mut road_length = 0;
for j in 0..height {
let c = char_map[j][i];
if c == 'C' {
if last_crossroad.is_some() && road_length > 0 {
self.add_all_roads(last_crossroad.unwrap(), (i / 2, j / 2));
}
last_crossroad = Some((i / 2, j / 2));
road_length = 0;
}
else if c == '|' {
if last_crossroad.is_none() {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
else {
road_length += 1;
}
}
else {
if road_length > 0 {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
last_crossroad = None;
}
}
}
}
/// Returns the cloned graph.
pub fn clone_graph(&self) -> Graph {
self.graph.clone()
}
/// Returns random crossroad coordinates (of an existing crossroad).
pub fn random_crossroad(&self) -> CrossroadId {
let i = rand::thread_rng().gen_range(0, self.crossroads.len());
self.crossroads[i]
}
/// Removes unused roads, i.e. dead ends.
pub fn simplify(&mut self) {
println!("The network has {} crossroads and {} roads.",
self.crossroads.len(), self.roads.len());
// First, we identify nodes that have no escape.
let dead_ends: Vec<bool> = self.graph.nodes.iter().map(| n | {
n.edges().is_empty()
}).collect();
// Then, we mark all the roads that do not end in a dead end.
let used_roads: Vec<bool> = self.roads.iter().map(|r| {
!dead_ends[r.info().destination]
}).collect();
// We create a fresh network.
let mut network = Network::new(self.width, self.height);
// Then, we add all the interesting crossroads, i.e. that don't have 4 dead end nodes.
for &c in &self.crossroads {
let c = self.crossroad(c);
if c.nodes.iter()
.map(|id| { !dead_ends[*id] })
.fold(false, |x, y| { x || y }) {
network.add_crossroad(c.id.x, c.id.y);
}
}
// Finally, we add only the used edges.
for r in &self.roads {
let r = r.info();
if used_roads[r.id] {
network.new_road(r.start, r.end, r.side);
}
}
// We change the initial network.
*self = network;
println!("After simplification, it only has {} crossroads and {} roads.",
self.crossroads.len(), self.roads.len());
}
}
impl fmt::Display for Network {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.to_string())
}
}
/// Returns an array of size `height` x `width`.
pub fn none_array<T>(height: usize, width: usize) -> Vec<Vec<Option<T>>> {
(0..height).map(|_| { (0..width).map(|_| { None }).collect()}).collect()
}
/// Computes the road direction and its node direction.
pub fn compute_directions(dx: i32, dy: i32, side: Side) -> (usize, usize) {
let d1 = match (dx, dy) {
(1, 0) => EAST,
(0, 1) => SOUTH,
(-1, 0) => WEST,
(0, -1) => NORTH,
_ => panic!("Invalid direction."),
};
let d2 = (d1 + (1-side) * 2) % 4;
(d1, d2)
}
/// Returns the previous (clockwise) direction.
pub fn previous_direction(d: usize) -> usize {
(d + 3) % 4
} | pub enum Move {
NONE, // None happened.
SPAWN(RoadInfo, usize, CrossroadId), // The car has spawned at specified road, position, | random_line_split |
network.rs | extern crate reactivers;
extern crate rand;
use reactivers::engine::process::*;
use reactivers::engine::signal::*;
use reactivers::engine::signal::spmc_signal::*;
use reactivers::engine::signal::mpsc_signal::*;
use std::sync::Arc;
use std::fs::File;
use std::io::prelude::*;
use self::rand::Rng;
use super::graph::*;
use super::car::*;
use super::road::*;
// These constant directions are used to index roads and nodes at a crossroad.
const NORTH: usize = 0;
const EAST: usize = 1;
const SOUTH: usize = 2;
const WEST: usize = 3;
pub type Side = usize;
const LEFT: usize = 0;
const RIGHT: usize = 1;
/// Global update network information.
pub struct GlobalInfo {
pub weights: EdgesWeight, // Last estimation of the edges weights.
pub moves: Vec<Move>, // Moves of all the cars.
}
/// Move of a car.
#[derive(Copy, Clone)]
pub enum Move {
NONE, // None happened.
SPAWN(RoadInfo, usize, CrossroadId), // The car has spawned at specified road, position,
// with specified destination crossroad.
STEP(i32), // The car performs a step of specified length.
VANISH, // The car vanished.
CROSS(RoadInfo), // The car crossed and is now on specified road.
}
/// Network structure containing all the information relative to crossroads and roads.
#[derive(Clone)]
pub struct Network {
pub width: usize, // Width of the network.
pub height: usize, // Height of the network.
pub car_count: usize, // Number of cars.
pub cars_per_unit: i32, // Number of cars between two centers of crossroads.
pub cars_per_crossroad: i32, // Number of cars fitting in a crossroad.
grid: Vec<Vec<Option<Crossroad>>>, // Grid containing the crossroads.
pub roads: Vec<Road>, // Vector containing the roads.
graph: Graph, // Corresponding abstract graph.
car_graph: Option<Arc<Graph>>, // Shared reference to the same graph.
pub crossroads: Vec<CrossroadId>, // Vector containing all the coordinates of existing
// crossroads.
}
/// Crossroad Coordinates.
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct CrossroadId {
pub x: usize, // Abscissa
pub y: usize, // Ordinate
}
use std::ops::{ Index, IndexMut };
/// Allows indexing the grid by the crossroad coordinates.
impl Index<CrossroadId> for Vec<Vec<Option<Crossroad>>> {
type Output = Option<Crossroad>;
#[inline]
fn index(&self, index: CrossroadId) -> &Option<Crossroad> {
&self[index.y][index.x]
}
}
/// Allows mutable indexing of the grid by the crossroad coordinates.
impl IndexMut<CrossroadId> for Vec<Vec<Option<Crossroad>>> {
#[inline]
fn index_mut(&mut self, index: CrossroadId) -> &mut Option<Crossroad> {
&mut self[index.y][index.x]
}
}
use std::fmt;
impl fmt::Display for CrossroadId {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {})", self.x, self.y)
}
}
use std::ops::Add;
/// Allows to add some move to crossroad coordinates.
impl Add<(i32, i32)> for CrossroadId {
type Output = CrossroadId;
fn add(self, (x, y): (i32, i32)) -> CrossroadId {
CrossroadId {
x: (self.x as i32 + x) as usize,
y: (self.y as i32 + y) as usize,
}
}
}
impl CrossroadId {
/// Creates new crossroad identifier.
pub fn new(x: usize, y: usize) -> CrossroadId {
CrossroadId { x, y }
}
/// Computes the unit move (dx, dy) and the length to join the destination crossroad.
pub fn join(&self, dest: CrossroadId) -> (i32, i32, i32) {
if self.x == dest.x {
let dy = (dest.y as i32) - (self.y as i32);
let len = i32::abs(dy);
(0, dy / len, len)
}
else if self.y == dest.y {
let dx = (dest.x as i32) - (self.x as i32);
let len = i32::abs(dx);
(dx / len, 0, len)
}
else {
panic!("Crossroads {} and {} are not linkable.", self, dest);
}
}
}
/// A Crossroad.
#[derive(Clone)]
pub struct Crossroad {
id: CrossroadId, // Coordinates
pub nodes: Vec<NodeId>, // Vector of its 4 quarter nodes.
// They are indexed by direction.
roads: Vec<Vec<Option<RoadId>>>, // Roads leaving this crossroad.
// They are indexed by direction and side.
roads_arriving: Vec<Vec<Option<RoadId>>>, // Roads arriving at this crossroad.
// They are indexed by direction and side.
}
impl Crossroad {
/// Creates a new crossroad with four nodes without any roads.
pub fn new(id: CrossroadId, g: &mut Graph) -> Crossroad {
let mut c = Crossroad {
id,
nodes: vec!(),
roads: none_array(4, 2),
roads_arriving: none_array(4, 2),
};
for _ in 0..4 {
c.nodes.push(g.add_node(c.id));
}
c
}
/// Enables some roads. Only the cars from enabled roads are able to cross a crossroad.
fn enable_path(&self, roads: &mut Vec<Road>) {
// First policy: we enable the most loaded road with some guy waiting.
// let mut max = -1;
// let mut r_max = 0;
// for r in self.existing_roads_arriving() {
// if roads[r].is_waiting() && roads[r].get_car_count() > max {
// r_max = r;
// max = roads[r].get_car_count();
// }
// }
// roads[r_max].enable();
// Second policy: we enable the most loaded roads with guys waiting, but in pairs.
// We compute the pair of compatible roads with the maximum cumulated load.
let mut max_pair = ((NORTH, LEFT), (NORTH, LEFT));
let mut max_load = 0;
for d in 0..4 {
for s in 0..2 {
for x in 0..2 {
let (d2, s2) = {
if x == 0 {
(d, 1 - s)
}
else {
((d + 2) % 4, s)
}
};
let load = self.compute_load(d, s, roads) +
self.compute_load(d2, s2, roads);
if load > max_load {
max_load = load;
max_pair = ((d, s), (d2, s2));
}
}
}
}
let ((d1, s1), (d2, s2)) = max_pair;
if self.roads_arriving[d1][s1].is_some() {
roads[self.roads_arriving[d1][s1].unwrap()].enable();
}
if self.roads_arriving[d2][s2].is_some() {
roads[self.roads_arriving[d2][s2].unwrap()].enable();
}
}
/// Computes the load of a road, i.e. the numbers of cars on this road.
/// If there is no car ready to cross, returns 0.
fn compute_load(&self, direction: usize, side: usize, roads: &mut Vec<Road>) -> i32 {
let r = self.roads_arriving[direction][side];
if r.is_none() || !roads[r.unwrap()].is_waiting() {
return 0;
}
return roads[r.unwrap()].get_car_count();
}
}
impl Network {
/// Creates a new empty Network, with specified width and heights.
pub fn new(width: usize, height: usize) -> Network {
Network {
width,
height,
car_count: 0,
cars_per_unit: 10,
cars_per_crossroad: 4,
grid: none_array(height, width),
roads: vec!(),
graph: Graph::new(),
car_graph: None,
crossroads: vec!(),
}
}
/// Adds a crossroad to specified location.
pub fn add_crossroad(&mut self, x: usize, y: usize) {
let c = CrossroadId::new(x, y);
// We check the crossroad does not exist.
self.assert_crossroad_not_exists(c);
// We add it to the graph and update the network.
self.grid[c] = Some(Crossroad::new(c, &mut self.graph));
self.crossroads.push(c);
}
/// Adds a new specific road.
pub fn new_road(&mut self, src: CrossroadId, dest: CrossroadId, side: Side) |
/// Add the two road linking the first crossroad to the second one.
pub fn add_road(&mut self, (src_x, src_y): (usize, usize), (dest_x, dest_y): (usize, usize)) {
let (src, dest) =
(CrossroadId::new(src_x, src_y), CrossroadId::new(dest_x, dest_y));
// Checks the source and destination crossroads exist.
self.assert_crossroad_exists(src);
self.assert_crossroad_exists(dest);
// Checks that they are aligned.
let (dx, dy, length) = src.join(dest);
// Checks that the road can be built between the two crossroads, i.e. that it does not
// generate any collision.
for k in 1..length {
self.assert_crossroad_not_exists(src + (k*dx, k*dy));
}
// Creates both roads.
self.new_road(src, dest, LEFT);
self.new_road(src, dest, RIGHT);
}
/// Adds all roads between the crossroads `c1` and `c2`.
pub fn add_all_roads(&mut self, c1: (usize, usize), c2: (usize, usize)) {
self.add_road(c1, c2);
self.add_road(c2, c1);
}
/// Panics if the crossroad exists.
pub fn assert_crossroad_exists(&self, c: CrossroadId) {
if self.grid[c].is_none() {
panic!("This crossroad {} does not exist.", c);
}
}
/// Panics if the crossroad does not exist.
pub fn assert_crossroad_not_exists(&self, c: CrossroadId) {
if self.grid[c].is_some() {
panic!("This crossroad {} already exists.", c);
}
}
/// Retrieves the specified crossroad. Panics if it does not exist.
pub fn crossroad(&self, c: CrossroadId) -> &Crossroad {
self.grid[c].as_ref().unwrap()
}
/// Retrieves a mutable reference to the specified crossroad. Panics if it does not exist.
pub fn crossroad_mut(&mut self, c: CrossroadId) -> &mut Crossroad {
self.grid[c].as_mut().unwrap()
}
/// Creates a new car. It transfers the current graph to the car, with a fresh identifier.
pub fn create_car(&mut self) -> Car {
if self.car_graph.is_none() {
// If needed, we generate this shared reference.
self.car_graph = Some(Arc::new(self.clone_graph()));
}
let id = self.car_count;
self.car_count += 1;
Car::new(id, 0, CrossroadId::new(0, 0), self.car_graph.clone().unwrap())
}
/// Spawns a car on a random road, and finds a random destination.
pub fn generate_request(&mut self, id: CarId) -> (RoadInfo, usize, CrossroadId) {
// First, it finds a road to spawn the car.
let mut rng = rand::thread_rng();
let mut road_id = rng.gen_range(0, self.roads.len());
let mut pos = self.roads[road_id].spawn_car(id);
while pos == -1 {
road_id = rng.gen_range(0, self.roads.len());
pos = self.roads[road_id].spawn_car(id);
}
// Then, it gets the crossroad at the end of this road.
let road_info = self.roads[road_id].info();
let source_c = road_info.end;
// It randomly chooses a crossroad different from the previous crossroad.
let mut destination = self.random_crossroad();
while destination == source_c {
destination = self.random_crossroad();
}
// Returns the final spawn position and destination.
(road_info, pos as usize, destination)
}
/// Spawns all the car that requested to be. Updates the move vector with the resulting spawns.
pub fn spawn_cars(&mut self, actions: Vec<Action>, moves: &mut Vec<Move>) {
for (i, a) in actions.iter().enumerate() {
if let Action::SPAWN = *a {
let (road_info, pos, destination) = self.generate_request(i);
moves[i] = Move::SPAWN(road_info, pos, destination);
}
}
}
/// Makes the crossroads enable some roads.
pub fn enable_paths(&mut self) {
for &c in &self.crossroads {
self.grid[c].as_ref().unwrap().enable_path(&mut self.roads);
}
}
/// Performs an update step on all roads, based on the Actions and Speeds vector.
/// Updates the resulting Moves vector, and returns the EdgesWeight estimation.
pub fn roads_step(&mut self, actions: &mut Vec<Action>, moves: &mut Vec<Move>, speeds: &Vec<Speed>)
-> EdgesWeight
{
let roads = &mut self.roads;
// All the possibles enabled paths are tried.
for i in 0..roads.len() {
// Each roads tries to make its first car cross, if enabled.
Road::deliver(i, actions, moves, roads);
}
// We make a step for all remaining cars, and get the weights estimations.
let mut weights = vec!();
for i in 0..roads.len() {
weights.push(roads[i].step_forward(moves, speeds));
}
let edges_weight = EdgesWeight::new(weights);
return edges_weight
}
/// Returns the central reactive process of the network.
pub fn process(mut self, central_signal: SPMCSignalSender<Arc<GlobalInfo>>,
pos_signal: MPSCSignalReceiver<(CarId, (Action, Speed)), (Vec<Action>, Vec<Speed>)>)
-> impl Process<Value=()> {
let mut weights = vec!();
for r in &self.roads {
weights.push(r.weight());
}
let mut step = 0;
let mut mean_moves = self.car_count as f32;
let beta = 0.99;
let cont = move | (mut actions, speeds): (Vec<Action>, Vec<Speed>) | {
// We count the steps.
step += 1;
// We enable some path.
self.enable_paths();
// We compute the road step and get back some weights.
let mut moves = (0..actions.len()).map(|_| { Move::NONE }).collect();
let weights = self.roads_step(&mut actions, &mut moves, &speeds);
// We spawn the cars that requested to be.
self.spawn_cars(actions, &mut moves);
// We count the number of cars that did something.
let nb_moves: i32 = moves.iter().map(| m | { match m {
&Move::NONE => 0,
_ => 1,
}}).sum();
// We keep some moving mean of this number. If it is too low, nothing is happening, so
// it panics.
mean_moves = beta * mean_moves + (1. - beta) * (nb_moves as f32);
if mean_moves < 1e-3 {
panic!("It looks like a stationary state: not enough moves.");
}
// Returns the updated information about the step.
Arc::new(GlobalInfo { weights, moves })
};
let p =
pos_signal.await_in() // Awaits the car actions
.map(cont) // Computes the resulting moves and weights.
.emit_consume(central_signal) // Emits this information.
.loop_inf(); // Loops.
return p;
}
/// Returns a String representing the network.
pub fn to_string(&self) -> String {
// We first build the corresponding char two-dimensional vector.
let (width, height) = (2 * self.width - 1, 2 * self.height - 1);
let mut char_map: Vec<Vec<char>> = (0..height).map(|_| { (0..width).map(|_| { ' ' }).collect()}).collect();
// Then we add the crossroads.
for c in &self.crossroads {
char_map[2 * c.y][2 * c.x] = 'C';
}
// Then we add the roads.
for r in &self.roads {
let start = r.info().start;
let (dx, dy, length) = start.join(r.info().end);
// Chooses the right symbol.
let c = if dx == 0 { '|' } else { '-' };
let (x, y) = (2*start.x, 2*start.y);
for k in 1..(2*length) {
char_map[(y as i32 + k * dy) as usize][(x as i32 + k * dx) as usize] = c;
}
}
// We collect the characters into a string.
char_map.into_iter().map(|line| { line.into_iter().collect::<String>().add("\n") }).collect()
}
/// Loads a network from a file located in trafficsim/maps/.
pub fn load_file(&mut self, filename: &str) {
let mut f = File::open(format!("./src/trafficsim/maps/{}", filename)).expect("File not found");
let mut contents = String::new();
f.read_to_string(&mut contents)
.expect("Something went wrong reading the file");
self.load_string(&contents);
}
/// Loads a network from a string.
pub fn load_string(&mut self, s: &str) {
// We remove ending blank lines.
let s = s.trim_right();
// We split lines and remove ending spaces and `\n`.
let mut char_map: Vec<Vec<char>> =
s.split("\n")
.map(| line | { line.trim_right().chars().collect() })
.collect();
// We compute the resulting width and height of the character array.
let width = char_map.iter().map(| line | { line.len() }).max().unwrap();
let height = char_map.len();
// We add missing spaces.
for line in char_map.iter_mut() {
for _ in 0..(width - line.len()) {
line.push(' ');
}
}
// We change the network size.
*self = Network::new((width + 1) / 2, (height + 1) / 2);
// Then, we add all the crossroads.
for (j, line) in char_map.iter().enumerate() {
for (i, c) in line.iter().enumerate() {
if *c == 'C' {
self.add_crossroad(i / 2, j / 2);
}
}
}
// Then we add the horizontal roads.
for (j, line) in char_map.iter().enumerate() {
let mut last_crossroad = None;
let mut road_length = 0;
for (i, c) in line.iter().enumerate() {
if *c == 'C' {
if last_crossroad.is_some() && road_length > 0 {
self.add_all_roads(last_crossroad.unwrap(), (i / 2, j / 2));
}
last_crossroad = Some((i / 2, j / 2));
road_length = 0;
}
else if *c == '-' {
if last_crossroad.is_none() {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
else {
road_length += 1;
}
}
else {
if road_length > 0 {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
last_crossroad = None;
}
}
}
// Then we add the vertical roads.
for i in 0..width {
let mut last_crossroad = None;
let mut road_length = 0;
for j in 0..height {
let c = char_map[j][i];
if c == 'C' {
if last_crossroad.is_some() && road_length > 0 {
self.add_all_roads(last_crossroad.unwrap(), (i / 2, j / 2));
}
last_crossroad = Some((i / 2, j / 2));
road_length = 0;
}
else if c == '|' {
if last_crossroad.is_none() {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
else {
road_length += 1;
}
}
else {
if road_length > 0 {
panic!("Invalid road at position ({}, {}): no crossroad to join.", i, j);
}
last_crossroad = None;
}
}
}
}
/// Returns the cloned graph.
pub fn clone_graph(&self) -> Graph {
self.graph.clone()
}
/// Returns random crossroad coordinates (of an existing crossroad).
pub fn random_crossroad(&self) -> CrossroadId {
let i = rand::thread_rng().gen_range(0, self.crossroads.len());
self.crossroads[i]
}
/// Removes unused roads, i.e. dead ends.
pub fn simplify(&mut self) {
println!("The network has {} crossroads and {} roads.",
self.crossroads.len(), self.roads.len());
// First, we identify nodes that have no escape.
let dead_ends: Vec<bool> = self.graph.nodes.iter().map(| n | {
n.edges().is_empty()
}).collect();
// Then, we mark all the roads that do not end in a dead end.
let used_roads: Vec<bool> = self.roads.iter().map(|r| {
!dead_ends[r.info().destination]
}).collect();
// We create a fresh network.
let mut network = Network::new(self.width, self.height);
// Then, we add all the interesting crossroads, i.e. that don't have 4 dead end nodes.
for &c in &self.crossroads {
let c = self.crossroad(c);
if c.nodes.iter()
.map(|id| { !dead_ends[*id] })
.fold(false, |x, y| { x || y }) {
network.add_crossroad(c.id.x, c.id.y);
}
}
// Finally, we add only the used edges.
for r in &self.roads {
let r = r.info();
if used_roads[r.id] {
network.new_road(r.start, r.end, r.side);
}
}
// We change the initial network.
*self = network;
println!("After simplification, it only has {} crossroads and {} roads.",
self.crossroads.len(), self.roads.len());
}
}
impl fmt::Display for Network {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.to_string())
}
}
/// Returns an array of size `height` x `width`.
pub fn none_array<T>(height: usize, width: usize) -> Vec<Vec<Option<T>>> {
(0..height).map(|_| { (0..width).map(|_| { None }).collect()}).collect()
}
/// Computes the road direction and its node direction.
pub fn compute_directions(dx: i32, dy: i32, side: Side) -> (usize, usize) {
let d1 = match (dx, dy) {
(1, 0) => EAST,
(0, 1) => SOUTH,
(-1, 0) => WEST,
(0, -1) => NORTH,
_ => panic!("Invalid direction."),
};
let d2 = (d1 + (1-side) * 2) % 4;
(d1, d2)
}
/// Returns the previous (clockwise) direction.
pub fn previous_direction(d: usize) -> usize {
(d + 3) % 4
} | {
// We get the parameters of the road.
let (dx, dy, length) = src.join(dest);
let length = length * self.cars_per_unit - self.cars_per_crossroad;
let (d1, d2) = compute_directions(dx, dy, side);
let id = self.roads.len();
// First, it builds the road in the network.
let road_info = RoadInfo {
id,
start: src,
end: dest,
side,
destination: self.crossroad(dest).nodes[d2],
length: length as usize,
};
// Then, we add it to the crossroads and the roads.
let road = Road::new(road_info);
self.roads.push(road);
self.crossroad_mut(src).roads[d1][side] = Some(id);
self.crossroad_mut(dest).roads_arriving[d1][side] = Some(id);
// Then, it builds the two corresponding edges in the graph.
let (n1, n2) = {
let c = self.crossroad(src);
(c.nodes[d1], c.nodes[previous_direction(d1)])
};
let n3 = self.crossroad(dest).nodes[d2];
self.graph.add_edge(n1, n3, id);
self.graph.add_edge(n2, n3, id);
} | identifier_body |
reflect_test.go | package base
import (
"fmt"
"log"
"reflect"
"runtime"
"testing"
)
// Go语言提供了一种机制在运行时更新和检查变量的值、调用变量的方法和变量支持的内在操作,但是在编译时并不知道这些变量的具体类型,这种机制被称为反射。
// go语言提供了一种机制,在编译时不知道类型的情况下,可更新变量,在运行时查看值,调用方法以及直接对他们的布局进行操作。这种机制称为反射(reflection)
// 在计算机科学领域,反射是指一类应用,它们能够自描述和自控制。也就是说,这类应用通过采用某种机制来实现对自己行为的描述(self-representation)和监测(examination),并能根据自身行为的状态和结果,调整或修改应用所描述行为的状态和相关的语义。
// Golang关于类型设计的一些原则
//
//变量包括(type, value)两部分
//理解这一点就知道为什么nil != nil了
//type 包括 static type和concrete type. 简单来说 static type是你在编码是看见的类型(如int、string),concrete type是runtime系统看见的类型
//类型断言能否成功,取决于变量的concrete type,而不是static type. 因此,一个 reader变量如果它的concrete type也实现了write方法的话,它也可以被类型断言为writer.
//
// 反射,就是建立在类型之上的,Golang的指定类型的变量的类型是静态的(也就是指定int、string这些的变量,它的type是static type),
// 在创建变量的时候就已经确定,反射主要与Golang的interface类型相关(它的type是concrete type),只有interface类型才有反射一说。
// 在Golang的实现中,每个interface变量都有一个对应pair,pair中记录了实际变量的值和类型:
//(value, type),value是实际变量值,type是实际变量的类型
// 一个interface{}类型的变量包含了2个指针,一个指针指向值的类型【对应concrete type】,另外一个指针指向实际的值【对应value】
// 例如,创建类型为*os.File的变量,然后将其赋给一个接口变量r:
//tty, err := os.OpenFile("/dev/tty", os.O_RDWR, 0)
//var r io.Reader
//r = tty
//接口变量r的pair中将记录如下信息:(tty, *os.File),这个pair在接口变量的连续赋值过程中是不变的,将接口变量r赋给另一个接口变量w:
//var w io.Writer
//w = r.(io.Writer)
//接口变量w的pair与r的pair相同,都是:(tty, *os.File),即使w是空接口类型,pair也是不变的。
//interface及其pair的存在,是Golang中实现反射的前提,理解了pair,就更容易理解反射。反射就是用来检测存储在接口变量内部(值value;类型concrete type) pair对的一种机制。
// reflect.TypeOf()是获取pair中的type,reflect.ValueOf()获取pair中的value
// Golang reflect慢主要有两个原因
//涉及到内存分配以及后续的GC;
//reflect实现里面有大量的枚举,也就是for循环,比如类型之类的。
// Type:Type类型用来表示一个go类型。
// Value为go值提供了反射接口
//反射法则:
//反射从接口到反射对象中(Reflection goes from interface value to reflection object.)
//反射从反射对象到接口中(Reflection goes from reflection object to interface value.)
//要修改反射对象,值必须是“可设置”的(To modify a reflection object, the value must be settable.)
// 反射是审查接口变量中(type, value)组合的机制。
// Type和Value可以访问接口变量的内容。reflect.TypeOf和reflect.ValueOf返回的reflect.Type和reflect.Value可以拼凑一个接口值
func TestReflectBase1(t *testing.T) {
var x float64 = 3.4
// reflect.TypeOf的声明中包含了一个空接口:
// 调用reflect.TypeOf(x)时,作为参数传入的x在此之前已被存进了一个空接口。而reflect.TypeOf解包了空接口,恢复了它所含的类型信息
fmt.Println("type:", reflect.TypeOf(x))
fmt.Println("type,kind:", reflect.TypeOf(x).Kind())
// reflect.ValueOf函数则是恢复了值
valueOf := reflect.ValueOf(x)
fmt.Println("value:", valueOf)
// Value有一个Type方法返回reflect.Value的Type
fmt.Println("value,type:", valueOf.Type())
fmt.Println("kind is float64:", valueOf.Kind() == reflect.Float64)
fmt.Println("value:", valueOf.Float()) // 取值
}
func TestReflectAttetion(t *testing.T) {
// 为了保持API的简洁,Value的Getter和Setter方法是用最大的类型去操作数据:
// 例如让所有的整型都使用int64表示。Value的Int方法返回一个int64的值,SetInt需要传入int64参数;
var x uint8 = 'x'
v := reflect.ValueOf(x)
fmt.Println("type:", v.Type()) // uint8.
fmt.Println("kind is uint8: ", v.Kind() == reflect.Uint8) // true.
// 将数值转换成它的实际类型在某些时候是有必要的:
x = uint8(v.Uint()) // v.Uint returns a uint64.
// 反射对象的Kind方法描述的是基础类型,而不是静态类型。如果一个反射对象包含了用户定义类型的值,如下:
type MyInt int
var q MyInt = 7
// 虽然x的静态类型是MyInt而非int,但v的Kind依然是reflect.Int。Type可以区分开int和MyInt,但Kind无法做到。
ofV2 := reflect.ValueOf(q)
fmt.Println("v2,type:", ofV2.Type())
fmt.Println("v2,kind:", ofV2.Kind())
}
// 从反射对象到接口
//通过一个reflect.Value我们可以使用Interface方法恢复一个接口;这个方法将类型和值信息打包成一个接口并将其返回:
func TestReflectReturnInterface(t *testing.T) {
var x float64 = 3.4
v := reflect.ValueOf(x)
y := v.Interface().(float64) // y will have type float64.
fmt.Println(y)
}
//Interface方法就是ValueOf函数的逆,除非ValueOf所得结果的类型是interface{}
func TestPrintInter(t *testing.T) {
var x float64 = 3.4
v := reflect.ValueOf(x)
// fmt.Println和fmt.Printf的参数都是interface{},传入之后由fmt的私有方法解包
//正是因为fmt把Interface方法的返回结果传递给了格式化打印事务(formatted print routine),所以程序才能正确打印出reflect.Value的内容:
fmt.Println(v)
// 由于值的类型是float64,我们可以用浮点格式化打印它:
// 无需对v.Interface()做类型断言,这个空接口值包含了具体的值的类型信息,Printf会恢复它
fmt.Printf("value is %7.1e\n", v.Interface())
f := v.Float()
fmt.Println(f)
}
type User struct {
ID int
Name string
}
// 定义一个Enum类型
type Enum int
const (
Zero Enum = 0
)
type cat struct {
}
// 使用 reflect.TypeOf() 函数可以获得任意值的类型对象(reflect.Type),通过类型对象可以访问任意值的类型信息
// Go 程序中的类型(Type)指的是系统原生数据类型,如 int、string、bool、float32 等类型,以及使用 type 关键字定义的类型,这些类型的名称就是其类型本身的名称
// TypeOf returns the reflection Type that represents the dynamic type of i.
// If i is a nil interface value, TypeOf returns nil.
// func TypeOf(i interface{}) Type {...}
func TestReflectBase2(t *testing.T) {
typeOfCat := reflect.TypeOf(cat{})
// 显示反射类型对象的名称和种类
fmt.Println(typeOfCat.String(), typeOfCat.Name(), typeOfCat.Kind())
// 获取Zero常量的反射类型对象
typeOfA := reflect.TypeOf(Zero)
// 显示反射类型对象的名称和种类
fmt.Println(typeOfA.Name(), typeOfA.Kind())
inf := new(Skills)
// Array, Chan, Map, Ptr, or Slice.
inf_type := reflect.TypeOf(inf).Elem() // 引用类型需要用Elem()获取指针所指的对象类型
stu1 := Student{Name: "wd", Age: 22}
stu_type := reflect.TypeOf(stu1)
fmt.Println(stu_type.String()) //main.Student
fmt.Println(stu_type.Name()) //Student
fmt.Println(stu_type.PkgPath()) //main
fmt.Println(stu_type.Kind()) //struct
fmt.Println(stu_type.Size()) //24
fmt.Println(inf_type.NumMethod()) //2
fmt.Println(inf_type.Method(0), inf_type.Method(0).Name) // {reading main func() <invalid Value> 0} reading
fmt.Println(inf_type.MethodByName("reading")) //{reading main func() <invalid Value> 0} true
}
// 对指针获取反射对象时,可以通过 reflect.Elem() 方法获取这个指针指向的元素类型,这个获取过程被称为取元素,等效于对指针类型变量做了一个*操作
func TestReflectPoint(t *testing.T) {
// 创建cat的实例并返回指针
ins := &cat{}
// 获取结构体实例的反射类型对象
typeOfCat := reflect.TypeOf(ins)
// 显示反射类型对象的名称和种类(指针变量的类型名称和种类),go对所有指针变量的种类都是 Ptr
fmt.Printf("name:'%v' kind:'%v'\n", typeOfCat.Name(), typeOfCat.Kind())
// 注意对于:数组、切片、映射、通道、指针、接口 ,取指针类型的元素类型
typeOfCat = typeOfCat.Elem()
// 显示反射类型对象的名称和种类(指针变量指向元素的类型名称和种类)
fmt.Printf("element name: '%v', element kind: '%v'\n", typeOfCat.Name(), typeOfCat.Kind())
// 通过elem,但必须传递的是指针,修改值
var a int64 = 100
reflectSetValue2(&a)
fmt.Println("a:", a)
}
// 若要修改反射对象,值必须可设置
func TestReflectSetValueErr(t *testing.T) {
var x float64 = 3.4
// 传递了一份x的拷贝到reflect.ValueOf中,所以传到reflect.ValueOf的接口值不是由x,而是由x的拷贝创建的。
v := reflect.ValueOf(x)
// 问题在于7.1是不可寻址的,这意味着v就会变得不可设置。“可设置”(settability)是reflect.Value的特性之一,但并非所有的Value都是可设置的
// 对一个不可设置的Value调用的Set方法是错误的
// v.SetFloat(7.1) // Error: will panic.
// 反射对象的“可设置性”由它是否拥有原项目(orginal item)所决定。
fmt.Println("settability of v:", v.CanSet())
}
func TestReflectSetValueRight(t *testing.T) {
var x float64 = 3.4
// 如果我们想用反射修改x,我们必须把值的指针传给反射库
p := reflect.ValueOf(&x) // Note: take the address of x.
fmt.Println("type of p:", p.Type())
// 反射对象p不是可设置的
fmt.Println("settability of p:", p.CanSet())
// 想要设置的不是它,而是*p。 为了知道p指向了哪,我们调用Value的Elem方法,它通过指针定向并把结果保存在了一个Value中,命名为v:
v2 := p.Elem()
fmt.Println("settability of v2:", v2.CanSet())
v2.SetFloat(7.1)
fmt.Println(v2.Interface())
fmt.Println(x)
}
func TestReflectSetStructValueRight(t *testing.T) {
// 结构体中只有可导出的的字段是“可设置”的
type T struct {
A int
B string
}
q := T{23, "skidoo"}
s := reflect.ValueOf(&q).Elem()
typeOfT := s.Type()
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
fmt.Printf("%d: %s %s = %v\n", i,
typeOfT.Field(i).Name, f.Type(), f.Interface())
}
s.Field(0).SetInt(77)
s.Field(1).SetString("Sunset Strip")
fmt.Println("t is now", t)
}
func TestReflectSetValue(t *testing.T) {
var num float64 = 1.2345
fmt.Println("old value of pointer:", num)
// 通过reflect.ValueOf获取num中的reflect.Value,注意,参数必须是指针才能修改其值
pointer := reflect.ValueOf(&num)
newValue := pointer.Elem()
fmt.Println("type of pointer:", newValue.Type())
fmt.Println("settability of pointer:", newValue.CanSet())
// 重新赋值
newValue.SetFloat(77)
fmt.Println("new value of pointer:", num)
// 如果reflect.ValueOf的参数不是指针,会如何?
pointer = reflect.ValueOf(num)
fmt.Println("settability of pointer2:", pointer.CanSet())
//newValue = pointer.Elem() // 如果非指针,这里直接panic
}
func reflectSetValue2(x interface{}) {
v := reflect.ValueOf(x)
// 反射中使用 Elem()方法获取指针对应的值
if v.Elem().Kind() == reflect.Int64 {
v.Elem().SetInt(200)
}
}
func TestReflectStruct(t *testing.T) {
type cat struct {
Name string
// 以 ` 开始和结尾的字符串。这个字符串在Go语言中被称为 Tag(标签)。一般用于给字段添加自定义信息,方便其他模块根据信息进行不同功能的处理。
Type int `json:"type" id:"100"`
}
ins := cat{Name: "mimi", Type: 1}
// 获取结构体实例的反射类型对象
typeOfCat := reflect.TypeOf(ins)
// 获得一个结构体类型共有多少个字段。如果类型不是结构体,将会触发panic。
for i := 0; i < typeOfCat.NumField(); i++ {
// 获取每个成员的结构体字段类型,返回 StructField 结构,这个结构描述结构体的成员信息,通过这个信息可以获取成员与结构体的关系,如偏移、索引、是否为匿名字段、结构体标签(StructTag)等
fieldType := typeOfCat.Field(i)
fmt.Printf("name: %v tag: '%v'\n", fieldType.Name, fieldType.Tag)
}
if catType, ok := typeOfCat.FieldByName("Type"); ok {
fmt.Println(catType.Tag.Get("json"), catType.Tag.Get("id"))
}
}
func TestReflectType(t *testing.T) {
type myInt int64
var a *float32 // 指针
var b myInt // 自定义类型
var c rune // 类型别名//代表int32
reflectType(a) // type: kind:ptr
reflectType(b) // type:myInt kind:int64
reflectType(c) // type:int32 kind:int32
type person struct {
name string
age int
}
var d = person{
name: "wang",
age: 18,
}
reflectType(d) // type:person kind:struct
}
func reflectType(x interface{}) {
t := reflect.TypeOf(x)
fmt.Printf("type:%v kind:%v\n", t.Name(), t.Kind())
}
// ValueOf returns a new Value initialized to the concrete value
// stored in the interface i. ValueOf(nil) returns the zero
// func ValueOf(i interface{}) Value {...}
// reflect.ValueOf的返回值也是具体值,不过reflect.Value也可以包含一个接口值
func TestReflectValueBase(t *testing.T) {
var num float64 = 1.2345
//得到了一个类型为”relfect.Value”变量
pointer := reflect.ValueOf(&num)
value := reflect.ValueOf(num)
fmt.Println("value: ", value)
// 获得接口变量的真实内容
value.Interface()
// 注意的时候,转换的时候,如果转换的类型不完全符合,则直接panic
convertPointer := pointer.Interface().(*float64)
convertValue := value.Interface().(float64)
fmt.Println(convertPointer)
fmt.Println(convertValue)
}
func TestReflectGetAll(t *testing.T) {
user := User1{1, "Allen.Wu", 25}
DoFiledAndMethod(user)
}
type User1 struct {
Id int
Name string
Age int
}
func (u User1) ReflectCallFunc() {
fmt.Println("Allen.Wu ReflectCallFunc")
}
// 反射字段需要对外
// 通过接口来获取任意参数,然后一一揭晓
func DoFiledAndMethod(input interface{}) {
getType := reflect.TypeOf(input)
fmt.Println("get Type is :", getType.Name())
getValue := reflect.ValueOf(input)
fmt.Println("get all Fields is:", getValue)
// 获取方法字段
// 1. 先获取interface的reflect.Type,然后通过NumField进行遍历
// 2. 再通过reflect.Type的Field获取其Field
for i := 0; i < getType.NumField(); i++ {
field := getType.Field(i)
// 取值
value := getValue.Field(i).Interface()
fmt.Printf("%s: %v = %v\n", field.Name, field.Type, value)
}
// 获取方法
// 1. 先获取interface的reflect.Type,然后通过.NumMethod进行遍历
for i := 0; i < getType.NumMethod(); i++ {
m := getType.Method(i)
fmt.Printf("%s: %v\n", m.Name, m.Type)
}
// 输出:ReflectCallFuncHasArgs: func(base.User1, string, int)
// 结构体的方法其实也是通过函数实现,把base.User1当参数了
}
func TestReflectValue(t *testing.T) {
var a float32 = 3.14
var b int64 = 100
reflectValue(a) // type is float32, value is 3.140000
reflectValue(b) // type is int64, value is 100
// 将int类型的原始值转换为reflect.Value类型
c := reflect.ValueOf(10)
fmt.Printf("type c :%T\n", c) // type c :reflect.Value
}
func reflectValue(x interface{}) {
v := reflect.ValueOf(x)
k := v.Kind()
switch k {
case reflect.Int64:
// v.Int()从反射中获取整型的原始值,然后通过int64()强制类型转换
fmt.Printf("type is int64, value is %d\n", int64(v.Int()))
case reflect.Float32:
// v.Float()从反射中获取浮点型的原始值,然后通过float32()强制类型转换
fmt.Printf("type is float32, value is %f\n", float32(v.Float()))
case reflect.Float64:
// v.Float()从反射中获取浮点型的原始值,然后通过float64()强制类型转换
fmt.Printf("type is float64, value is %f\n", float64(v.Float()))
}
}
// 反射可以将“接口类型变量”转换为“反射类型对象”,反射类型指的是reflect.Type和reflect.Value这两种
func TestReflectTypeAndValue(t *testing.T) {
var num float64 = 1.2345
fmt.Println("type: ", reflect.TypeOf(num))
fmt.Println("value: ", reflect.ValueOf(num))
}
func TestReflectMethodInfo(t *testing.T) {
of := reflect.TypeOf(TestIsNull)
in := of.In(0)
fmt.Println(in)
if in.Kind() == reflect.Ptr {
elem := in.Elem()
fmt.Println(elem.Name(), elem.Kind())
}
}
func TestReflectOptValue(t *testing.T) {
var a = 2
vof := reflect.ValueOf(a)
i := vof.Int()
fmt.Println("i:", i)
tof := reflect.TypeOf(a)
i2 := vof.Type()
// 一样
fmt.Println("type:", i2, tof)
// panic: reflect: reflect.flag.mustBeAssignable using unaddressable value [recovered]
// 报错,因为我们的a是一个值类型,而值类型的传递是拷贝了一个副本,当 vof := reflect.ValueOf(a) 函数通过传递一个 a 拷贝创建了 vof,
// 那么 vof 的改变并不能更改原始的 a。要想 vof 的更改能作用到 a,那就必须传递 a 的地址 v = reflect.ValueOf(&a)
// vof.SetInt(333)
vof2 := reflect.ValueOf(&a)
// 通过 Elem() 方法进行取地址
vof2.Elem().SetInt(333)
fmt.Println("i:", a)
}
func TestBaseReflect(t *testing.T) {
user := User{}
a := reflect.TypeOf(user)
if _, ok := a.FieldByName("Name"); ok {
println("存在")
} else {
println("不存在")
}
fmt.Println(a.Name(), a.Kind(), a.PkgPath())
field, b := a.FieldByName("ID")
fmt.Println(field, b)
user2 := &User{}
var user3 interface{}
user3 = user2
i := reflect.TypeOf(user3)
elem := i.Elem()
fmt.Println("elem:", elem.Name())
typeOf := reflect.TypeOf(test)
fmt.Println(typeOf)
name := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()
fmt.Println("name:", name)
}
type Handler func(obj interface{}) error
func test(qqqq interface{}) error {
fmt.Println("1111")
return nil
}
func TestIsNull(t *testing.T) {
// 声明一个 *int 类型的指针,初始值为 nil。
var a *int
fmt.Println("var a *int:", reflect.ValueOf(a).IsNil())
// nil值
fmt.Println("nil:", reflect.ValueOf(nil).IsValid())
// (*int)(nil) 的含义是将 nil 转换为 *int,也就是*int 类型的空指针
fmt.Println("(*int)(nil):", reflect.ValueOf((*int)(nil)).Elem().IsValid())
// 实例化一个结构体
s := struct{}{}
// 尝试从结构体中查找一个不存在的字段
fmt.Println("不存在的结构体成员:", reflect.ValueOf(s).FieldByName("").IsValid())
// 尝试从结构体中查找一个不存在的方法
fmt.Println("不存在的结构体方法:", reflect.ValueOf(s).MethodByName("").IsValid())
// 实例化一个map,与 make 方式创建的 map 等效
m := map[int]int{} | // 尝试从map中查找一个不存在的键
fmt.Println("不存在的键:", reflect.ValueOf(m).MapIndex(reflect.ValueOf(3)).IsValid())
}
func TestReflectInvokeFun(t *testing.T) {
of := reflect.ValueOf(testFunc)
of.Call(getValues())
}
// 注意,又不符合则panic
// 如何通过反射来进行方法的调用?
// 本来可以用u.ReflectCallFuncXXX直接调用的,但是如果要通过反射,那么首先要将方法注册,也就是MethodByName,然后通过反射调动mv.Call
func TestReflectInvokeMethod(t *testing.T) {
user := User1{1, "Allen.Wu", 25}
// 带有参数的调用方法
args := getValues("wudebao", 30)
invokeMethod(user, "ReflectCallFuncHasArgs", args)
// 无参数的调用方法
args = getValues()
invokeMethod(user, "ReflectCallFuncNoArgs", args)
// 调用指针类型方法
invokeMethod(&user, "ReflectCallFuncNoArgs2", args)
}
func testFunc() {
fmt.Println("testFunc..")
}
//根据参数获取对应的Values
func getValues(param ...interface{}) []reflect.Value {
vals := make([]reflect.Value, 0, len(param))
for i := range param {
vals = append(vals, reflect.ValueOf(param[i]))
}
return vals
}
func invokeMethod(obj interface{}, funcInter string, paramsValue []reflect.Value) {
getValue := reflect.ValueOf(obj)
method := getValue.MethodByName(funcInter)
if method.Kind() != reflect.Func {
log.Fatal("funcInter is not func")
return
}
if method.Type().NumIn() > 0 {
in := method.Type().In(0)
fmt.Println("in:", in)
}
if method.Type().NumOut() > 0 {
out := method.Type().Out(0)
fmt.Println("out:", out)
}
values := method.Call(paramsValue) //方法调用并返回值
for i := range values {
fmt.Println(values[i])
}
}
func (u User1) ReflectCallFuncHasArgs(name string, age int) {
fmt.Println("ReflectCallFuncHasArgs name: ", name, ", age:", age, "and origal User.Name:", u.Name)
}
func (u User1) ReflectCallFuncNoArgs() {
fmt.Println("ReflectCallFuncNoArgs")
}
func (u *User1) ReflectCallFuncNoArgs2() {
fmt.Println("ReflectCallFuncNoArgs2")
}
// 空接口相当于一个容器,能接受任何东西。.ValueOf(
// reflect.Value.Elem(),返回一个 interface 或者 pointer 的值
// Elem returns the value that the interface v contains or that the pointer v points to. It panics if v's Kind is not Interface or Ptr. It returns the zero Value if v is nil.
// reflect.Type.Elem(),返回一个类型(如:Array,Map,Chan等)的元素的类型
// Elem returns a type's element type. It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
func TestReflectNew(t *testing.T) {
var a string
of := reflect.TypeOf(a)
// New returns a Value representing a pointer to a new zero value for the specified type. That is, the returned Value's Type is PtrTo(typ).
sptr := reflect.New(of)
fmt.Println("sptr:", sptr)
// 返回值类型:reflect.Value
sval := sptr.Elem()
ss := sval.Interface().(string)
fmt.Println("ss:", ss) // 空字符串
} | random_line_split | |
reflect_test.go | package base
import (
"fmt"
"log"
"reflect"
"runtime"
"testing"
)
// Go语言提供了一种机制在运行时更新和检查变量的值、调用变量的方法和变量支持的内在操作,但是在编译时并不知道这些变量的具体类型,这种机制被称为反射。
// go语言提供了一种机制,在编译时不知道类型的情况下,可更新变量,在运行时查看值,调用方法以及直接对他们的布局进行操作。这种机制称为反射(reflection)
// 在计算机科学领域,反射是指一类应用,它们能够自描述和自控制。也就是说,这类应用通过采用某种机制来实现对自己行为的描述(self-representation)和监测(examination),并能根据自身行为的状态和结果,调整或修改应用所描述行为的状态和相关的语义。
// Golang关于类型设计的一些原则
//
//变量包括(type, value)两部分
//理解这一点就知道为什么nil != nil了
//type 包括 static type和concrete type. 简单来说 static type是你在编码是看见的类型(如int、string),concrete type是runtime系统看见的类型
//类型断言能否成功,取决于变量的concrete type,而不是static type. 因此,一个 reader变量如果它的concrete type也实现了write方法的话,它也可以被类型断言为writer.
//
// 反射,就是建立在类型之上的,Golang的指定类型的变量的类型是静态的(也就是指定int、string这些的变量,它的type是static type),
// 在创建变量的时候就已经确定,反射主要与Golang的interface类型相关(它的type是concrete type),只有interface类型才有反射一说。
// 在Golang的实现中,每个interface变量都有一个对应pair,pair中记录了实际变量的值和类型:
//(value, type),value是实际变量值,type是实际变量的类型
// 一个interface{}类型的变量包含了2个指针,一个指针指向值的类型【对应concrete type】,另外一个指针指向实际的值【对应value】
// 例如,创建类型为*os.File的变量,然后将其赋给一个接口变量r:
//tty, err := os.OpenFile("/dev/tty", os.O_RDWR, 0)
//var r io.Reader
//r = tty
//接口变量r的pair中将记录如下信息:(tty, *os.File),这个pair在接口变量的连续赋值过程中是不变的,将接口变量r赋给另一个接口变量w:
//var w io.Writer
//w = r.(io.Writer)
//接口变量w的pair与r的pair相同,都是:(tty, *os.File),即使w是空接口类型,pair也是不变的。
//interface及其pair的存在,是Golang中实现反射的前提,理解了pair,就更容易理解反射。反射就是用来检测存储在接口变量内部(值value;类型concrete type) pair对的一种机制。
// reflect.TypeOf()是获取pair中的type,reflect.ValueOf()获取pair中的value
// Golang reflect慢主要有两个原因
//涉及到内存分配以及后续的GC;
//reflect实现里面有大量的枚举,也就是for循环,比如类型之类的。
// Type:Type类型用来表示一个go类型。
// Value为go值提供了反射接口
//反射法则:
//反射从接口到反射对象中(Reflection goes from interface value to reflection object.)
//反射从反射对象到接口中(Reflection goes from reflection object to interface value.)
//要修改反射对象,值必须是“可设置”的(To modify a reflection object, the value must be settable.)
// 反射是审查接口变量中(type, value)组合的机制。
// Type和Value可以访问接口变量的内容。reflect.TypeOf和reflect.ValueOf返回的reflect.Type和reflect.Value可以拼凑一个接口值
func TestReflectBase1(t *testing.T) {
var x float64 = 3.4
// reflect.TypeOf的声明中包含了一个空接口:
// 调用reflect.TypeOf(x)时,作为参数传入的x在此之前已被存进了一个空接口。而reflect.TypeOf解包了空接口,恢复了它所含的类型信息
fmt.Println("type:", reflect.TypeOf(x))
fmt.Println("type,kind:", reflect.TypeOf(x).Kind())
// reflect.ValueOf函数则是恢复了值
valueOf := reflect.ValueOf(x)
fmt.Println("value:", valueOf)
// Value有一个Type方法返回reflect.Value的Type
fmt.Println("value,type:", valueOf.Type())
fmt.Println("kind is float64:", valueOf.Kind() == reflect.Float64)
fmt.Println("value:", valueOf.Float()) // 取值
}
func TestReflectAttetion(t *testing.T) {
// 为了保持API的简洁,Value的Getter和Setter方法是用最大的类型去操作数据:
// 例如让所有的整型都使用int64表示。Value的Int方法返回一个int64的值,SetInt需要传入int64参数;
var x uint8 = 'x'
v := reflect.ValueOf(x)
fmt.Println("type:", v.Type()) // uint8.
fmt.Println("kind is uint8: ", v.Kind() == reflect.Uint8) // true.
// 将数值转换成它的实际类型在某些时候是有必要的:
x = uint8(v.Uint()) // v.Uint returns a uint64.
// 反射对象的Kind方法描述的是基础类型,而不是静态类型。如果一个反射对象包含了用户定义类型的值,如下:
type MyInt int
var q MyInt = 7
// 虽然x的静态类型是MyInt而非int,但v的Kind依然是reflect.Int。Type可以区分开int和MyInt,但Kind无法做到。
ofV2 := reflect.ValueOf(q)
fmt.Println("v2,type:", ofV2.Type())
fmt.Println("v2,kind:", ofV2.Kind())
}
// 从反射对象到接口
//通过一个reflect.Value我们可以使用Interface方法恢复一个接口;这个方法将类型和值信息打包成一个接口并将其返回:
func TestReflectReturnInterface(t *testing.T) {
var x float64 = 3.4
v := reflect.ValueOf(x)
y := v.Interface().(float64) // y will have type float64.
fmt.Println(y)
}
//Interface方法就是ValueOf函数的逆,除非ValueOf所得结果的类型是interface{}
func TestPrintInter(t *testing.T) {
var x float64 = 3.4
v := reflect.ValueOf(x)
// fmt.Println和fmt.Printf的参数都是interface{},传入之后由fmt的私有方法解包
//正是因为fmt把Interface方法的返回结果传递给了格式化打印事务(formatted print routine),所以程序才能正确打印出reflect.Value的内容:
fmt.Println(v)
// 由于值的类型是float64,我们可以用浮点格式化打印它:
// 无需对v.Interface()做类型断言,这个空接口值包含了具体的值的类型信息,Printf会恢复它
fmt.Printf("value is %7.1e\n", v.Interface())
f := v.Float()
fmt.Println(f)
}
type User struct {
ID int
Name string
}
// 定义一个Enum类型
type Enum int
const (
Zero Enum = 0
)
type cat struct {
}
// 使用 reflect.TypeOf() 函数可以获得任意值的类型对象(reflect.Type),通过类型对象可以访问任意值的类型信息
// Go 程序中的类型(Type)指的是系统原生数据类型,如 int、string、bool、float32 等类型,以及使用 type 关键字定义的类型,这些类型的名称就是其类型本身的名称
// TypeOf returns the reflection Type that represents the dynamic type of i.
// If i is a nil interface value, TypeOf returns nil.
// func TypeOf(i interface{}) Type {...}
func TestReflectBase2(t *testing.T) {
typeOfCat := reflect.TypeOf(cat{})
// 显示反射类型对象的名称和种类
fmt.Println(typeOfCat.String(), typeOfCat.Name(), typeOfCat.Kind())
// 获取Zero常量的反射类型对象
typeOfA := reflect.TypeOf(Zero)
// 显示反射类型对象的名称和种类
fmt.Println(typeOfA.Name(), typeOfA.Kind())
inf := new(Skills)
// Array, Chan, Map, Ptr, or Slice.
inf_type := reflect.TypeOf(inf).Elem() // 引用类型需要用Elem()获取指针所指的对象类型
stu1 := Student{Name: "wd", Age: 22}
stu_type := reflect.TypeOf(stu1)
fmt.Println(stu_type.String()) //main.Student
fmt.Println(stu_type.Name()) //Student
fmt.Println(stu_type.PkgPath()) //main
fmt.Println(stu_type.Kind()) //struct
fmt.Println(stu_type.Size()) //24
fmt.Println(inf_type.NumMethod()) //2
fmt.Println(inf_type.Method(0), inf_type.Method(0).Name) // {reading main func() <invalid Value> 0} reading
fmt.Println(inf_type.MethodByName("reading")) //{reading main func() <invalid Value> 0} true
}
// 对指针获取反射对象时,可以通过 reflect.Elem() 方法获取这个指针指向的元素类型,这个获取过程被称为取元素,等效于对指针类型变量做了一个*操作
func TestReflectPoint(t *testing.T) {
// 创建cat的实例并返回指针
ins := &cat{}
// 获取结构体实例的反射类型对象
typeOfCat := reflect.TypeOf(ins)
// 显示反射类型对象的名称和种类(指针变量的类型名称和种类),go对所有指针变量的种类都是 Ptr
fmt.Printf("name:'%v' kind:'%v'\n", typeOfCat.Name(), typeOfCat.Kind())
// 注意对于:数组、切片、映射、通道、指针、接口 ,取指针类型的元素类型
typeOfCat = typeOfCat.Elem()
// 显示反射类型对象的名称和种类(指针变量指向元素的类型名称和种类)
fmt.Printf("element name: '%v', element kind: '%v'\n", typeOfCat.Name(), typeOfCat.Kind())
// 通过elem,但必须传递的是指针,修改值
var a int64 = 100
reflectSetValue2(&a)
fmt.Println("a:", a)
}
// 若要修改反射对象,值必须可设置
func TestReflectSetValueErr(t *testing.T) {
var x float64 = 3.4
// 传递了一份x的拷贝到reflect.ValueOf中,所以传到reflect.ValueOf的接口值不是由x,而是由x的拷贝创建的。
v := reflect.ValueOf(x)
// 问题在于7.1是不可寻址的,这意味着v就会变得不可设置。“可设置”(settability)是reflect.Value的特性之一,但并非所有的Value都是可设置的
// 对一个不可设置的Value调用的Set方法是错误的
// v.SetFloat(7.1) // Error: will panic.
// 反射对象的“可设置性”由它是否拥有原项目(orginal item)所决定。
fmt.Println("settability of v:", v.CanSet())
}
func TestReflectSetValueRight(t *testing.T) {
var x float64 = 3.4
// 如果我们想用反射修改x,我们必须把值的指针传给反射库
p := reflect.ValueOf(&x) // Note: take the address of x.
fmt.Println("type of p:", p.Type())
// 反射对象p不是可设置的
fmt.Println("settability of p:", p.CanSet())
// 想要设置的不是它,而是*p。 为了知道p指向了哪,我们调用Value的Elem方法,它通过指针定向并把结果保存在了一个Value中,命名为v:
v2 := p.Elem()
fmt.Println("settability of v2:", v2.CanSet())
v2.SetFloat(7.1)
fmt.Println(v2.Interface())
fmt.Println(x)
}
func TestReflectSetStructValueRight(t *testing.T) {
// 结构体中只有可导出的的字段是“可设置”的
type T struct {
A int
B string
}
q := T{23, "skidoo"}
s := reflect.ValueOf(&q).Elem()
typeOfT := s.Type()
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
fmt.Printf("%d: %s %s = %v\n", i,
typeOfT.Field(i).Name, f.Type(), f.Interface())
}
s.Field(0).SetInt(77)
s.Field(1).SetString("Sunset Strip")
fmt.Println("t is now", t)
}
func TestReflectSetValue(t *testing.T) {
var num float64 = 1.2345
fmt.Println("old value of pointer:", num)
// 通过reflect.ValueOf获取num中的reflect.Value,注意,参数必须是指针才能修改其值
pointer := reflect.ValueOf(&num)
newValue := pointer.Elem()
fmt.Println("type of pointer:", newValue.Type())
fmt.Println("settability of pointer:", newValue.CanSet())
// 重新赋值
newValue.SetFloat(77)
fmt.Println("new value of pointer:", num)
// 如果reflect.ValueOf的参数不是指针,会如何?
pointer = reflect.ValueOf(num)
fmt.Println("settability of pointer2:", pointer.CanSet())
//newValue = pointer.Elem() // 如果非指针,这里直接panic
}
func reflectSetValue2(x interface{}) {
v := reflect.ValueOf(x)
// 反射中使用 Elem()方法获取指针对应的值
if v.Elem().Kind() == reflect.Int64 {
v.Elem().SetInt(200)
}
}
func TestReflectStruct(t *testing.T) {
type cat struct {
Name string
// 以 ` 开始和结尾的字符串。这个字符串在Go语言中被称为 Tag(标签)。一般用于给字段添加自定义信息,方便其他模块根据信息进行不同功能的处理。
Type int `json:"type" id:"100"`
}
ins := cat{Name: "mimi", Type: 1}
// 获取结构体实例的反射类型对象
typeOfCat := reflect.TypeOf(ins)
// 获得一个结构体类型共有多少个字段。如果类型不是结构体,将会触发panic。
for i := 0; i < typeOfCat.NumField(); i++ {
// 获取每个成员的结构体字段类型,返回 StructField 结构,这个结构描述结构体的成员信息,通过这个信息可以获取成员与结构体的关系,如偏移、索引、是否为匿名字段、结构体标签(StructTag)等
fieldType := typeOfCat.Field(i)
fmt.Printf("name: %v tag: '%v'\n", fieldType.Name, fieldType.Tag)
}
if catType, ok := typeOfCat.FieldByName("Type"); ok {
fmt.Println(catType.Tag.Get("json"), catType.Tag.Get("id"))
}
}
func TestReflectType(t *testing.T) {
type myInt int64
var a *float32 // 指针
var b myInt // 自定义类型
var c rune // 类型别名//代表int32
reflectType(a) // type: kind:ptr
reflectType(b) // type:myInt kind:int64
reflectType(c) // type:int32 kind:int32
type person struct {
name string
age int
}
var d = person{
name: "wang",
age: 18,
}
reflectType(d) // type:person kind:struct
}
func reflectType(x interface{}) {
t := reflect.TypeOf(x)
fmt.Printf("type:%v kind:%v\n", t.Name(), t.Kind())
}
// ValueOf returns a new Value initialized to the concrete value
// stored in the interface i. ValueOf(nil) returns the zero
// func ValueOf(i interface{}) Value {...}
// reflect.ValueOf的返回值也是具体值,不过reflect.Value也可以包含一个接口值
func TestReflectValueBase(t *testing.T) {
var num float64 = 1.2345
//得到了一个类型为”relfect.Value”变量
pointer := reflect.ValueOf(&num)
value := reflect.ValueOf(num)
fmt.Println("value: ", value)
// 获得接口变量的真实内容
value.Interface()
// 注意的时候,转换的时候,如果转换的类型不完全符合,则直接panic
convertPointer := pointer.Interface().(*float64)
convertValue := value.Interface().(float64)
fmt.Println(convertPointer)
fmt.Println(convertValue)
}
func TestReflectGetAll(t *testing.T) {
user := User1{1, "Allen.Wu", 25}
DoFiledAndMethod(user)
}
type User1 struct {
Id int
Name string
Age int
}
func (u User1) ReflectCallFunc() {
fmt.Println("Allen.Wu ReflectCallFunc")
}
// 反射字段需要对外
// 通过接口来获取任意参数,然后一一揭晓
func DoFiledAndMethod(input interface{}) {
getType := reflect.TypeOf(input)
fmt.Println("get Type is :", getType.Name())
getValue := reflect.ValueOf(input)
fmt.Println("get all Fields is:", getValue)
// 获取方法字段
// 1. 先获取interface的reflect.Type,然后通过NumField进行遍历
// 2. 再通过reflect.Type的Field获取其Field
for i := 0; i < getType.NumField(); i++ {
field := getType.Field(i)
// 取值
value := getValue.Field(i).Interface()
fmt.Printf("%s: %v = %v\n", field.Name, field.Type, value)
}
// 获取方法
// 1. 先获取interface的reflect.Type,然后通过.NumMethod进行遍历
for i := 0; i < getType.NumMethod(); i++ {
m := getType.Method(i)
fmt.Printf("%s: %v\n", m.Name, m.Type)
}
// 输出:ReflectCallFuncHasArgs: func(base.User1, string, int)
// 结构体的方法其实也是通过函数实现,把base.User1当参数了
}
func TestReflectValue(t *testing.T) {
var a float32 = 3.14
var b int64 = 100
reflectValue(a) // type is float32, value is 3.140000
reflectValue(b) // type is int64, value is 100
// 将int类型的原始值转换为reflect.Value类型
c := reflect.ValueOf(10)
fmt.Printf("type c :%T\n", c) // type c :reflect.Value
}
func reflectValue(x interface{}) {
v := reflect.ValueOf(x)
k := v.Kind()
switch k {
case reflect.Int64:
// v.Int()从反射中获取整型的原始值,然后通过int64()强制类型转换
fmt.Printf("type is int64, value is %d\n", int64(v.Int()))
case reflect.Float32:
// v.Float()从反射中获取浮点型的原始值,然后通过float32()强制类型转换
fmt.Printf("type is float32, value is %f\n", float32(v.Float()))
case reflect.Float64:
// v.Float()从反射中获取浮点型的原始值,然后通过float64()强制类型转换
fmt.Printf("type is float64, value is %f\n", float64(v.Float()))
}
}
// 反射可以将“接口类型变量”转换为“反射类型对象”,反射类型指的是reflect.Type和reflect.Value这两种
func TestReflectTypeAndValue(t *testing.T) {
var num float64 = 1.2345
fmt.Println("type: ", reflect.TypeOf(num))
fmt.Println("value: ", reflect.ValueOf(num))
}
func TestReflectMethodInfo(t *testing.T) {
of := reflect.TypeOf(TestIsNull)
in := of.In(0)
fmt.Println(in)
if in.Kind() == reflect.Ptr {
elem := in.Elem()
fmt.Println(elem.Name(), elem.Kind())
}
}
func TestReflectOptValue(t *testing.T) {
var a = 2
vof := reflect.ValueOf(a)
i := vof.Int()
fmt.Println("i:", i)
tof := reflect.TypeOf(a)
i2 := vof.Type()
// 一样
fmt.Println("type:", i2, tof)
// panic: reflect: reflect.flag.mustBeAssignable using unaddressable value [recovered]
// 报错,因为我们的a是一个值类型,而值类型的传递是拷贝了一个副本,当 vof := reflect.ValueOf(a) 函数通过传递一个 a 拷贝创建了 vof,
// 那么 vof 的改变并不能更改原始的 a。要想 vof 的更改能作用到 a,那就必须传递 a 的地址 v = reflect.ValueOf(&a)
// vof.SetInt(333)
vof2 := reflect.ValueOf(&a)
// 通过 Elem() 方法进行取地址
vof2.Elem().SetInt(333)
fmt.Println("i:", a)
}
func TestBaseReflect(t *testing.T) {
user := User{}
a := reflect.TypeOf(user)
if _, ok := a.FieldByName("Name"); ok {
println("存在")
} else {
println("不存在")
}
fmt.Println(a.Name(), a.Kind(), a.PkgPath())
field, b := a.FieldByName("ID")
fmt.Println(field, b)
user2 := &User{}
var user3 interface{}
user3 = user2
i := reflect.TypeOf(user3)
elem := i.Elem()
fmt.Println("elem:", elem.Name())
typeOf := reflect.TypeOf(test)
fmt.Println(typeOf)
name := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()
fmt.Println("name:", name)
}
type Handler func(obj interface{}) error
func test(qqqq interface{}) error {
fmt.Println("1111")
return nil
}
func TestIsNull(t *testing.T) {
// 声明一个 *int 类型的指针,初始值为 nil。
var a *int
fmt.Println("var a *int:", reflect.ValueOf(a).IsNil())
// nil值
fmt.Println("nil:", reflect.ValueOf(nil).IsValid())
// (*int)(nil) 的含义是将 nil 转换为 *int,也就是*int 类型的空指针
fmt.Println("(*int)(nil):", reflect.ValueOf((*int)(nil)).Elem().IsValid())
// 实例化一个结构体
s := struct{}{}
// 尝试从结构体中查找一个不存在的字段
fmt.Println("不存在的结构体成员:", reflect.ValueOf(s).FieldByName("").IsValid())
// 尝试从结构体中查找一个不存在的方法
fmt.Println("不存在的结构体方法:", reflect.ValueOf(s).MethodByName("").IsValid())
// 实例化一个map,与 make 方式创建的 map 等效
m := map[int]int{}
// 尝试从map中查找一个不存在的键
fmt.Println("不存在的键:", reflect.ValueOf(m).MapIndex(reflect.ValueOf(3)).IsValid())
}
func TestReflectInvokeFun(t *testing.T) {
of := reflect.ValueOf(testFunc)
of.Call(getValues())
}
// 注意,又不符合则panic
// 如何通过反射来进行方法的调用?
// 本来可以用u.ReflectCallFuncXXX直接调用的,但是如果要通过反射,那么首先要将方法注册,也就是MethodByName,然后通过反射调动mv.Call
func TestReflectInvokeMethod(t *testing.T) {
user := User1{1, "Allen.Wu", 25}
// 带有参数的调用方法
args := getValues("wudebao", 30)
invokeMethod(user, "ReflectCallFuncHasArgs", args)
// 无参数的调用方法
args = getValues()
invokeMethod(user, "ReflectCallFuncNoArgs", args)
// 调用指针类型方法
invokeMethod(&user, "ReflectCallFuncNoArgs2", args)
}
func testFunc() {
fmt.Println("testFunc..")
}
//根据参数获取对应的Values
func getValues(param ...interface{}) []reflect.Value {
vals := make([]reflect.Value, 0, len(param))
for i := range param {
vals = append(vals, reflect.ValueOf(param[i]))
}
return vals
}
func invokeMethod(obj interface{}, funcInter string, paramsValue []reflect.Value) {
getValue := reflect.ValueOf(obj)
method := getValue.MethodByName(funcInter)
if method.Kind() != reflect.Func {
log.Fatal("funcInter is not func")
return
}
if method.Type().NumIn() > 0 {
in := method.Type().In(0)
fmt.Println("in:", in)
}
if method.Type().NumOut() > 0 {
out := method.Type().Out(0)
fmt.Println("out:", out)
}
values := method.Call(paramsValue) //方法调用并返回值
for i := range values {
fmt.Println(values[i])
}
}
func (u User1) ReflectCallFuncHasArgs(name string, age int) {
fmt.Println("ReflectCallFuncHasArgs name: ", name, ", age:", age, "and origal User.Name:", u.Name)
}
func (u User1) ReflectCallFuncNoArgs() {
fmt.Println("ReflectCallFuncNoArgs")
}
func (u *User1) ReflectCallFuncNoArgs2() {
fmt.Println("ReflectCallFuncNoArgs2")
}
// 空接口相当于一个容器,能接受任何东西。.ValueOf(
// reflect.Value.Elem(),返回一个 interface 或者 pointer 的值
// Elem returns the value that the interface v contains or that the pointer v points to. It panics if v's Kind is not Interface or Ptr. It returns the zero Value if v is nil.
// reflect.Type.Elem(),返回一个类型(如:Array,Map,Chan等)的元素的类型
// Elem returns a type's element type. It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
func TestReflectNew(t *testing.T) {
var a string
of := reflect.TypeOf(a)
// New returns a Value representing a pointer to a new zero value for the specified type. That is, the returned Value's Type is PtrTo(typ).
sptr := reflect.New(of)
fmt.Println("sptr:", sptr)
// 返回值类型:reflect.Value
sval := sptr.Elem()
ss := sval.Interface().(string)
fmt.Println("ss:", ss) // 空字符串
}
| identifier_name | ||
reflect_test.go | package base
import (
"fmt"
"log"
"reflect"
"runtime"
"testing"
)
// Go语言提供了一种机制在运行时更新和检查变量的值、调用变量的方法和变量支持的内在操作,但是在编译时并不知道这些变量的具体类型,这种机制被称为反射。
// go语言提供了一种机制,在编译时不知道类型的情况下,可更新变量,在运行时查看值,调用方法以及直接对他们的布局进行操作。这种机制称为反射(reflection)
// 在计算机科学领域,反射是指一类应用,它们能够自描述和自控制。也就是说,这类应用通过采用某种机制来实现对自己行为的描述(self-representation)和监测(examination),并能根据自身行为的状态和结果,调整或修改应用所描述行为的状态和相关的语义。
// Golang关于类型设计的一些原则
//
//变量包括(type, value)两部分
//理解这一点就知道为什么nil != nil了
//type 包括 static type和concrete type. 简单来说 static type是你在编码是看见的类型(如int、string),concrete type是runtime系统看见的类型
//类型断言能否成功,取决于变量的concrete type,而不是static type. 因此,一个 reader变量如果它的concrete type也实现了write方法的话,它也可以被类型断言为writer.
//
// 反射,就是建立在类型之上的,Golang的指定类型的变量的类型是静态的(也就是指定int、string这些的变量,它的type是static type),
// 在创建变量的时候就已经确定,反射主要与Golang的interface类型相关(它的type是concrete type),只有interface类型才有反射一说。
// 在Golang的实现中,每个interface变量都有一个对应pair,pair中记录了实际变量的值和类型:
//(value, type),value是实际变量值,type是实际变量的类型
// 一个interface{}类型的变量包含了2个指针,一个指针指向值的类型【对应concrete type】,另外一个指针指向实际的值【对应value】
// 例如,创建类型为*os.File的变量,然后将其赋给一个接口变量r:
//tty, err := os.OpenFile("/dev/tty", os.O_RDWR, 0)
//var r io.Reader
//r = tty
//接口变量r的pair中将记录如下信息:(tty, *os.File),这个pair在接口变量的连续赋值过程中是不变的,将接口变量r赋给另一个接口变量w:
//var w io.Writer
//w = r.(io.Writer)
//接口变量w的pair与r的pair相同,都是:(tty, *os.File),即使w是空接口类型,pair也是不变的。
//interface及其pair的存在,是Golang中实现反射的前提,理解了pair,就更容易理解反射。反射就是用来检测存储在接口变量内部(值value;类型concrete type) pair对的一种机制。
// reflect.TypeOf()是获取pair中的type,reflect.ValueOf()获取pair中的value
// Golang reflect慢主要有两个原因
//涉及到内存分配以及后续的GC;
//reflect实现里面有大量的枚举,也就是for循环,比如类型之类的。
// Type:Type类型用来表示一个go类型。
// Value为go值提供了反射接口
//反射法则:
//反射从接口到反射对象中(Reflection goes from interface value to reflection object.)
//反射从反射对象到接口中(Reflection goes from reflection object to interface value.)
//要修改反射对象,值必须是“可设置”的(To modify a reflection object, the value must be settable.)
// 反射是审查接口变量中(type, value)组合的机制。
// Type和Value可以访问接口变量的内容。reflect.TypeOf和reflect.ValueOf返回的reflect.Type和reflect.Value可以拼凑一个接口值
func TestReflectBase1(t *testing.T) {
var x float64 = 3.4
// reflect.TypeOf的声明中包含了一个空接口:
// 调用reflect.TypeOf(x)时,作为参数传入的x在此之前已被存进了一个空接口。而reflect.TypeOf解包了空接口,恢复了它所含的类型信息
fmt.Println("type:", reflect.TypeOf(x))
fmt.Println("type,kind:", reflect.TypeOf(x).Kind())
// reflect.ValueOf函数则是恢复了值
valueOf := reflect.ValueOf(x)
fmt.Println("value:", valueOf)
// Value有一个Type方法返回reflect.Value的Type
fmt.Println("value,type:", valueOf.Type())
fmt.Println("kind is float64:", valueOf.Kind() == reflect.Float64)
fmt.Println("value:", valueOf.Float()) // 取值
}
func TestReflectAttetion(t *testing.T) {
// 为了保持API的简洁,Value的Getter和Setter方法是用最大的类型去操作数据:
// 例如让所有的整型都使用int64表示。Value的Int方法返回一个int64的值,SetInt需要传入int64参数;
var x uint8 = 'x'
v := reflect.ValueOf(x)
fmt.Println("type:", v.Type()) // uint8.
fmt.Println("kind is uint8: ", v.Kind() == reflect.Uint8) // true.
// 将数值转换成它的实际类型在某些时候是有必要的:
x = uint8(v.Uint()) // v.Uint returns a uint64.
// 反射对象的Kind方法描述的是基础类型,而不是静态类型。如果一个反射对象包含了用户定义类型的值,如下:
type MyInt int
var q MyInt = 7
// 虽然x的静态类型是MyInt而非int,但v的Kind依然是reflect.Int。Type可以区分开int和MyInt,但Kind无法做到。
ofV2 := reflect.ValueOf(q)
fmt.Println("v2,type:", ofV2.Type())
fmt.Println("v2,kind:", ofV2.Kind())
}
// 从反射对象到接口
//通过一个reflect.Value我们可以使用Interface方法恢复一个接口;这个方法将类型和值信息打包成一个接口并将其返回:
func TestReflectReturnInterface(t *testing.T) {
var x float64 = 3.4
v := reflect.ValueOf(x)
y := v.Interface().(float64) // y will have type float64.
fmt.Println(y)
}
//Interface方法就是ValueOf函数的逆,除非ValueOf所得结果的类型是interface{}
func TestPrintInter(t *testing.T) {
var x float64 = 3.4
v := reflect.ValueOf(x)
// fmt.Println和fmt.Printf的参数都是interface{},传入之后由fmt的私有方法解包
//正是因为fmt把Interface方法的返回结果传递给了格式化打印事务(formatted print routine),所以程序才能正确打印出reflect.Value的内容:
fmt.Println(v)
// 由于值的类型是float64,我们可以用浮点格式化打印它:
// 无需对v.Interface()做类型断言,这个空接口值包含了具体的值的类型信息,Printf会恢复它
fmt.Printf("value is %7.1e\n", v.Interface())
f := v.Float()
fmt.Println(f)
}
type User struct {
ID int
Name string
}
// 定义一个Enum类型
type Enum int
const (
Zero Enum = 0
)
type cat struct {
}
// 使用 reflect.TypeOf() 函数可以获得任意值的类型对象(reflect.Type),通过类型对象可以访问任意值的类型信息
// Go 程序中的类型(Type)指的是系统原生数据类型,如 int、string、bool、float32 等类型,以及使用 type 关键字定义的类型,这些类型的名称就是其类型本身的名称
// TypeOf returns the reflection Type that represents the dynamic type of i.
// If i is a nil interface value, TypeOf returns nil.
// func TypeOf(i interface{}) Type {...}
func TestReflectBase2(t *testing.T) {
typeOfCat := reflect.TypeOf(cat{})
// 显示反射类型对象的名称和种类
fmt.Println(typeOfCat.String(), typeOfCat.Name(), typeOfCat.Kind())
// 获取Zero常量的反射类型对象
typeOfA := reflect.TypeOf(Zero)
// 显示反射类型对象的名称和种类
fmt.Println(typeOfA.Name(), typeOfA.Kind())
inf := new(Skills)
// Array, Chan, Map, Ptr, or Slice.
inf_type := reflect.TypeOf(inf).Elem() // 引用类型需要用Elem()获取指针所指的对象类型
stu1 := Student{Name: "wd", Age: 22}
stu_type := reflect.TypeOf(stu1)
fmt.Println(stu_type.String()) //main.Student
fmt.Println(stu_type.Name()) //Student
fmt.Println(stu_type.PkgPath()) //main
fmt.Println(stu_type.Kind()) //struct
fmt.Println(stu_type.Size()) //24
fmt.Println(inf_type.NumMethod()) //2
fmt.Println(inf_type.Method(0), inf_type.Method(0).Name) // {reading main func() <invalid Value> 0} reading
fmt.Println(inf_type.MethodByName("reading")) //{reading main func() <invalid Value> 0} true
}
// 对指针获取反射对象时,可以通过 reflect.Elem() 方法获取这个指针指向的元素类型,这个获取过程被称为取元素,等效于对指针类型变量做了一个*操作
func TestReflectPoint(t *testing.T) {
// 创建cat的实例并返回指针
ins := &cat{}
// 获取结构体实例的反射类型对象
typeOfCat := reflect.TypeOf(ins)
// 显示反射类型对象的名称和种类(指针变量的类型名称和种类),go对所有指针变量的种类都是 Ptr
fmt.Printf("name:'%v' kind:'%v'\n", typeOfCat.Name(), typeOfCat.Kind())
// 注意对于:数组、切片、映射、通道、指针、接口 ,取指针类型的元素类型
typeOfCat = typeOfCat.Elem()
// 显示反射类型对象的名称和种类(指针变量指向元素的类型名称和种类)
fmt.Printf("element name: '%v', element kind: '%v'\n", typeOfCat.Name(), typeOfCat.Kind())
// 通过elem,但必须传递的是指针,修改值
var a int64 = 100
reflectSetValue2(&a)
fmt.Println("a:", a)
}
// 若要修改反射对象,值必须可设置
func TestReflectSetValueErr(t *testing.T) {
var x float64 = 3.4
// 传递了一份x的拷贝到reflect.ValueOf中,所以传到reflect.ValueOf的接口值不是由x,而是由x的拷贝创建的。
v := reflect.ValueOf(x)
// 问题在于7.1是不可寻址的,这意味着v就会变得不可设置。“可设置”(settability)是reflect.Value的特性之一,但并非所有的Value都是可设置的
// 对一个不可设置的Value调用的Set方法是错误的
// v.SetFloat(7.1) // Error: will panic.
// 反射对象的“可设置性”由它是否拥有原项目(orginal item)所决定。
fmt.Println("settability of v:", v.CanSet())
}
func TestReflectSetValueRight(t *testing.T) {
var x float64 = 3.4
// 如果我们想用反射修改x,我们必须把值的指针传给反射库
p := reflect.ValueOf(&x) // Note: take the address of x.
fmt.Println("type of p:", p.Type())
// 反射对象p不是可设置的
fmt.Println("settability of p:", p.CanSet())
// 想要设置的不是它,而是*p。 为了知道p指向了哪,我们调用Value的Elem方法,它通过指针定向并把结果保存在了一个Value中,命名为v:
v2 := p.Elem()
fmt.Println("settability of v2:", v2.CanSet())
v2.SetFloat(7.1)
fmt.Println(v2.Interface())
fmt.Println(x)
}
func TestReflectSetStructValueRight(t *testing.T) {
// 结构体中只有可导出的的字段是“可设置”的
type T struct {
A int
B string
}
q := T{23, "skidoo"}
s := reflect.ValueOf(& | 。
Type int `json:"type" id:"100"`
}
ins := cat{Name: "mimi", Type: 1}
// 获取结构体实例的反射类型对象
typeOfCat := reflect.TypeOf(ins)
// 获得一个结构体类型共有多少个字段。如果类型不是结构体,将会触发panic。
for i := 0; i < typeOfCat.NumField(); i++ {
// 获取每个成员的结构体字段类型,返回 StructField 结构,这个结构描述结构体的成员信息,通过这个信息可以获取成员与结构体的关系,如偏移、索引、是否为匿名字段、结构体标签(StructTag)等
fieldType := typeOfCat.Field(i)
fmt.Printf("name: %v tag: '%v'\n", fieldType.Name, fieldType.Tag)
}
if catType, ok := typeOfCat.FieldByName("Type"); ok {
fmt.Println(catType.Tag.Get("json"), catType.Tag.Get("id"))
}
}
func TestReflectType(t *testing.T) {
type myInt int64
var a *float32 // 指针
var b myInt // 自定义类型
var c rune // 类型别名//代表int32
reflectType(a) // type: kind:ptr
reflectType(b) // type:myInt kind:int64
reflectType(c) // type:int32 kind:int32
type person struct {
name string
age int
}
var d = person{
name: "wang",
age: 18,
}
reflectType(d) // type:person kind:struct
}
func reflectType(x interface{}) {
t := reflect.TypeOf(x)
fmt.Printf("type:%v kind:%v\n", t.Name(), t.Kind())
}
// ValueOf returns a new Value initialized to the concrete value
// stored in the interface i. ValueOf(nil) returns the zero
// func ValueOf(i interface{}) Value {...}
// reflect.ValueOf的返回值也是具体值,不过reflect.Value也可以包含一个接口值
func TestReflectValueBase(t *testing.T) {
var num float64 = 1.2345
//得到了一个类型为”relfect.Value”变量
pointer := reflect.ValueOf(&num)
value := reflect.ValueOf(num)
fmt.Println("value: ", value)
// 获得接口变量的真实内容
value.Interface()
// 注意的时候,转换的时候,如果转换的类型不完全符合,则直接panic
convertPointer := pointer.Interface().(*float64)
convertValue := value.Interface().(float64)
fmt.Println(convertPointer)
fmt.Println(convertValue)
}
func TestReflectGetAll(t *testing.T) {
user := User1{1, "Allen.Wu", 25}
DoFiledAndMethod(user)
}
type User1 struct {
Id int
Name string
Age int
}
func (u User1) ReflectCallFunc() {
fmt.Println("Allen.Wu ReflectCallFunc")
}
// 反射字段需要对外
// 通过接口来获取任意参数,然后一一揭晓
func DoFiledAndMethod(input interface{}) {
getType := reflect.TypeOf(input)
fmt.Println("get Type is :", getType.Name())
getValue := reflect.ValueOf(input)
fmt.Println("get all Fields is:", getValue)
// 获取方法字段
// 1. 先获取interface的reflect.Type,然后通过NumField进行遍历
// 2. 再通过reflect.Type的Field获取其Field
for i := 0; i < getType.NumField(); i++ {
field := getType.Field(i)
// 取值
value := getValue.Field(i).Interface()
fmt.Printf("%s: %v = %v\n", field.Name, field.Type, value)
}
// 获取方法
// 1. 先获取interface的reflect.Type,然后通过.NumMethod进行遍历
for i := 0; i < getType.NumMethod(); i++ {
m := getType.Method(i)
fmt.Printf("%s: %v\n", m.Name, m.Type)
}
// 输出:ReflectCallFuncHasArgs: func(base.User1, string, int)
// 结构体的方法其实也是通过函数实现,把base.User1当参数了
}
func TestReflectValue(t *testing.T) {
var a float32 = 3.14
var b int64 = 100
reflectValue(a) // type is float32, value is 3.140000
reflectValue(b) // type is int64, value is 100
// 将int类型的原始值转换为reflect.Value类型
c := reflect.ValueOf(10)
fmt.Printf("type c :%T\n", c) // type c :reflect.Value
}
func reflectValue(x interface{}) {
v := reflect.ValueOf(x)
k := v.Kind()
switch k {
case reflect.Int64:
// v.Int()从反射中获取整型的原始值,然后通过int64()强制类型转换
fmt.Printf("type is int64, value is %d\n", int64(v.Int()))
case reflect.Float32:
// v.Float()从反射中获取浮点型的原始值,然后通过float32()强制类型转换
fmt.Printf("type is float32, value is %f\n", float32(v.Float()))
case reflect.Float64:
// v.Float()从反射中获取浮点型的原始值,然后通过float64()强制类型转换
fmt.Printf("type is float64, value is %f\n", float64(v.Float()))
}
}
// 反射可以将“接口类型变量”转换为“反射类型对象”,反射类型指的是reflect.Type和reflect.Value这两种
func TestReflectTypeAndValue(t *testing.T) {
var num float64 = 1.2345
fmt.Println("type: ", reflect.TypeOf(num))
fmt.Println("value: ", reflect.ValueOf(num))
}
func TestReflectMethodInfo(t *testing.T) {
of := reflect.TypeOf(TestIsNull)
in := of.In(0)
fmt.Println(in)
if in.Kind() == reflect.Ptr {
elem := in.Elem()
fmt.Println(elem.Name(), elem.Kind())
}
}
func TestReflectOptValue(t *testing.T) {
var a = 2
vof := reflect.ValueOf(a)
i := vof.Int()
fmt.Println("i:", i)
tof := reflect.TypeOf(a)
i2 := vof.Type()
// 一样
fmt.Println("type:", i2, tof)
// panic: reflect: reflect.flag.mustBeAssignable using unaddressable value [recovered]
// 报错,因为我们的a是一个值类型,而值类型的传递是拷贝了一个副本,当 vof := reflect.ValueOf(a) 函数通过传递一个 a 拷贝创建了 vof,
// 那么 vof 的改变并不能更改原始的 a。要想 vof 的更改能作用到 a,那就必须传递 a 的地址 v = reflect.ValueOf(&a)
// vof.SetInt(333)
vof2 := reflect.ValueOf(&a)
// 通过 Elem() 方法进行取地址
vof2.Elem().SetInt(333)
fmt.Println("i:", a)
}
func TestBaseReflect(t *testing.T) {
user := User{}
a := reflect.TypeOf(user)
if _, ok := a.FieldByName("Name"); ok {
println("存在")
} else {
println("不存在")
}
fmt.Println(a.Name(), a.Kind(), a.PkgPath())
field, b := a.FieldByName("ID")
fmt.Println(field, b)
user2 := &User{}
var user3 interface{}
user3 = user2
i := reflect.TypeOf(user3)
elem := i.Elem()
fmt.Println("elem:", elem.Name())
typeOf := reflect.TypeOf(test)
fmt.Println(typeOf)
name := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()
fmt.Println("name:", name)
}
type Handler func(obj interface{}) error
func test(qqqq interface{}) error {
fmt.Println("1111")
return nil
}
func TestIsNull(t *testing.T) {
// 声明一个 *int 类型的指针,初始值为 nil。
var a *int
fmt.Println("var a *int:", reflect.ValueOf(a).IsNil())
// nil值
fmt.Println("nil:", reflect.ValueOf(nil).IsValid())
// (*int)(nil) 的含义是将 nil 转换为 *int,也就是*int 类型的空指针
fmt.Println("(*int)(nil):", reflect.ValueOf((*int)(nil)).Elem().IsValid())
// 实例化一个结构体
s := struct{}{}
// 尝试从结构体中查找一个不存在的字段
fmt.Println("不存在的结构体成员:", reflect.ValueOf(s).FieldByName("").IsValid())
// 尝试从结构体中查找一个不存在的方法
fmt.Println("不存在的结构体方法:", reflect.ValueOf(s).MethodByName("").IsValid())
// 实例化一个map,与 make 方式创建的 map 等效
m := map[int]int{}
// 尝试从map中查找一个不存在的键
fmt.Println("不存在的键:", reflect.ValueOf(m).MapIndex(reflect.ValueOf(3)).IsValid())
}
func TestReflectInvokeFun(t *testing.T) {
of := reflect.ValueOf(testFunc)
of.Call(getValues())
}
// 注意,又不符合则panic
// 如何通过反射来进行方法的调用?
// 本来可以用u.ReflectCallFuncXXX直接调用的,但是如果要通过反射,那么首先要将方法注册,也就是MethodByName,然后通过反射调动mv.Call
func TestReflectInvokeMethod(t *testing.T) {
user := User1{1, "Allen.Wu", 25}
// 带有参数的调用方法
args := getValues("wudebao", 30)
invokeMethod(user, "ReflectCallFuncHasArgs", args)
// 无参数的调用方法
args = getValues()
invokeMethod(user, "ReflectCallFuncNoArgs", args)
// 调用指针类型方法
invokeMethod(&user, "ReflectCallFuncNoArgs2", args)
}
func testFunc() {
fmt.Println("testFunc..")
}
//根据参数获取对应的Values
func getValues(param ...interface{}) []reflect.Value {
vals := make([]reflect.Value, 0, len(param))
for i := range param {
vals = append(vals, reflect.ValueOf(param[i]))
}
return vals
}
func invokeMethod(obj interface{}, funcInter string, paramsValue []reflect.Value) {
getValue := reflect.ValueOf(obj)
method := getValue.MethodByName(funcInter)
if method.Kind() != reflect.Func {
log.Fatal("funcInter is not func")
return
}
if method.Type().NumIn() > 0 {
in := method.Type().In(0)
fmt.Println("in:", in)
}
if method.Type().NumOut() > 0 {
out := method.Type().Out(0)
fmt.Println("out:", out)
}
values := method.Call(paramsValue) //方法调用并返回值
for i := range values {
fmt.Println(values[i])
}
}
func (u User1) ReflectCallFuncHasArgs(name string, age int) {
fmt.Println("ReflectCallFuncHasArgs name: ", name, ", age:", age, "and origal User.Name:", u.Name)
}
func (u User1) ReflectCallFuncNoArgs() {
fmt.Println("ReflectCallFuncNoArgs")
}
func (u *User1) ReflectCallFuncNoArgs2() {
fmt.Println("ReflectCallFuncNoArgs2")
}
// 空接口相当于一个容器,能接受任何东西。.ValueOf(
// reflect.Value.Elem(),返回一个 interface 或者 pointer 的值
// Elem returns the value that the interface v contains or that the pointer v points to. It panics if v's Kind is not Interface or Ptr. It returns the zero Value if v is nil.
// reflect.Type.Elem(),返回一个类型(如:Array,Map,Chan等)的元素的类型
// Elem returns a type's element type. It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
func TestReflectNew(t *testing.T) {
var a string
of := reflect.TypeOf(a)
// New returns a Value representing a pointer to a new zero value for the specified type. That is, the returned Value's Type is PtrTo(typ).
sptr := reflect.New(of)
fmt.Println("sptr:", sptr)
// 返回值类型:reflect.Value
sval := sptr.Elem()
ss := sval.Interface().(string)
fmt.Println("ss:", ss) // 空字符串
}
| q).Elem()
typeOfT := s.Type()
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
fmt.Printf("%d: %s %s = %v\n", i,
typeOfT.Field(i).Name, f.Type(), f.Interface())
}
s.Field(0).SetInt(77)
s.Field(1).SetString("Sunset Strip")
fmt.Println("t is now", t)
}
func TestReflectSetValue(t *testing.T) {
var num float64 = 1.2345
fmt.Println("old value of pointer:", num)
// 通过reflect.ValueOf获取num中的reflect.Value,注意,参数必须是指针才能修改其值
pointer := reflect.ValueOf(&num)
newValue := pointer.Elem()
fmt.Println("type of pointer:", newValue.Type())
fmt.Println("settability of pointer:", newValue.CanSet())
// 重新赋值
newValue.SetFloat(77)
fmt.Println("new value of pointer:", num)
// 如果reflect.ValueOf的参数不是指针,会如何?
pointer = reflect.ValueOf(num)
fmt.Println("settability of pointer2:", pointer.CanSet())
//newValue = pointer.Elem() // 如果非指针,这里直接panic
}
func reflectSetValue2(x interface{}) {
v := reflect.ValueOf(x)
// 反射中使用 Elem()方法获取指针对应的值
if v.Elem().Kind() == reflect.Int64 {
v.Elem().SetInt(200)
}
}
func TestReflectStruct(t *testing.T) {
type cat struct {
Name string
// 以 ` 开始和结尾的字符串。这个字符串在Go语言中被称为 Tag(标签)。一般用于给字段添加自定义信息,方便其他模块根据信息进行不同功能的处理 | identifier_body |
reflect_test.go | package base
import (
"fmt"
"log"
"reflect"
"runtime"
"testing"
)
// Go语言提供了一种机制在运行时更新和检查变量的值、调用变量的方法和变量支持的内在操作,但是在编译时并不知道这些变量的具体类型,这种机制被称为反射。
// go语言提供了一种机制,在编译时不知道类型的情况下,可更新变量,在运行时查看值,调用方法以及直接对他们的布局进行操作。这种机制称为反射(reflection)
// 在计算机科学领域,反射是指一类应用,它们能够自描述和自控制。也就是说,这类应用通过采用某种机制来实现对自己行为的描述(self-representation)和监测(examination),并能根据自身行为的状态和结果,调整或修改应用所描述行为的状态和相关的语义。
// Golang关于类型设计的一些原则
//
//变量包括(type, value)两部分
//理解这一点就知道为什么nil != nil了
//type 包括 static type和concrete type. 简单来说 static type是你在编码是看见的类型(如int、string),concrete type是runtime系统看见的类型
//类型断言能否成功,取决于变量的concrete type,而不是static type. 因此,一个 reader变量如果它的concrete type也实现了write方法的话,它也可以被类型断言为writer.
//
// 反射,就是建立在类型之上的,Golang的指定类型的变量的类型是静态的(也就是指定int、string这些的变量,它的type是static type),
// 在创建变量的时候就已经确定,反射主要与Golang的interface类型相关(它的type是concrete type),只有interface类型才有反射一说。
// 在Golang的实现中,每个interface变量都有一个对应pair,pair中记录了实际变量的值和类型:
//(value, type),value是实际变量值,type是实际变量的类型
// 一个interface{}类型的变量包含了2个指针,一个指针指向值的类型【对应concrete type】,另外一个指针指向实际的值【对应value】
// 例如,创建类型为*os.File的变量,然后将其赋给一个接口变量r:
//tty, err := os.OpenFile("/dev/tty", os.O_RDWR, 0)
//var r io.Reader
//r = tty
//接口变量r的pair中将记录如下信息:(tty, *os.File),这个pair在接口变量的连续赋值过程中是不变的,将接口变量r赋给另一个接口变量w:
//var w io.Writer
//w = r.(io.Writer)
//接口变量w的pair与r的pair相同,都是:(tty, *os.File),即使w是空接口类型,pair也是不变的。
//interface及其pair的存在,是Golang中实现反射的前提,理解了pair,就更容易理解反射。反射就是用来检测存储在接口变量内部(值value;类型concrete type) pair对的一种机制。
// reflect.TypeOf()是获取pair中的type,reflect.ValueOf()获取pair中的value
// Golang reflect慢主要有两个原因
//涉及到内存分配以及后续的GC;
//reflect实现里面有大量的枚举,也就是for循环,比如类型之类的。
// Type:Type类型用来表示一个go类型。
// Value为go值提供了反射接口
//反射法则:
//反射从接口到反射对象中(Reflection goes from interface value to reflection object.)
//反射从反射对象到接口中(Reflection goes from reflection object to interface value.)
//要修改反射对象,值必须是“可设置”的(To modify a reflection object, the value must be settable.)
// 反射是审查接口变量中(type, value)组合的机制。
// Type和Value可以访问接口变量的内容。reflect.TypeOf和reflect.ValueOf返回的reflect.Type和reflect.Value可以拼凑一个接口值
func TestReflectBase1(t *testing.T) {
var x float64 = 3.4
// reflect.TypeOf的声明中包含了一个空接口:
// 调用reflect.TypeOf(x)时,作为参数传入的x在此之前已被存进了一个空接口。而reflect.TypeOf解包了空接口,恢复了它所含的类型信息
fmt.Println("type:", reflect.TypeOf(x))
fmt.Println("type,kind:", reflect.TypeOf(x).Kind())
// reflect.ValueOf函数则是恢复了值
valueOf := reflect.ValueOf(x)
fmt.Println("value:", valueOf)
// Value有一个Type方法返回reflect.Value的Type
fmt.Println("value,type:", valueOf.Type())
fmt.Println("kind is float64:", valueOf.Kind() == reflect.Float64)
fmt.Println("value:", valueOf.Float()) // 取值
}
func TestReflectAttetion(t *testing.T) {
// 为了保持API的简洁,Value的Getter和Setter方法是用最大的类型去操作数据:
// 例如让所有的整型都使用int64表示。Value的Int方法返回一个int64的值,SetInt需要传入int64参数;
var x uint8 = 'x'
v := reflect.ValueOf(x)
fmt.Println("type:", v.Type()) // uint8.
fmt.Println("kind is uint8: ", v.Kind() == reflect.Uint8) // true.
// 将数值转换成它的实际类型在某些时候是有必要的:
x = uint8(v.Uint()) // v.Uint returns a uint64.
// 反射对象的Kind方法描述的是基础类型,而不是静态类型。如果一个反射对象包含了用户定义类型的值,如下:
type MyInt int
var q MyInt = 7
// 虽然x的静态类型是MyInt而非int,但v的Kind依然是reflect.Int。Type可以区分开int和MyInt,但Kind无法做到。
ofV2 := reflect.ValueOf(q)
fmt.Println("v2,type:", ofV2.Type())
fmt.Println("v2,kind:", ofV2.Kind())
}
// 从反射对象到接口
//通过一个reflect.Value我们可以使用Interface方法恢复一个接口;这个方法将类型和值信息打包成一个接口并将其返回:
func TestReflectReturnInterface(t *testing.T) {
var x float64 = 3.4
v := reflect.ValueOf(x)
y := v.Interface().(float64) // y will have type float64.
fmt.Println(y)
}
//Interface方法就是ValueOf函数的逆,除非ValueOf所得结果的类型是interface{}
func TestPrintInter(t *testing.T) {
var x float64 = 3.4
v := reflect.ValueOf(x)
// fmt.Println和fmt.Printf的参数都是interface{},传入之后由fmt的私有方法解包
//正是因为fmt把Interface方法的返回结果传递给了格式化打印事务(formatted print routine),所以程序才能正确打印出reflect.Value的内容:
fmt.Println(v)
// 由于值的类型是float64,我们可以用浮点格式化打印它:
// 无需对v.Interface()做类型断言,这个空接口值包含了具体的值的类型信息,Printf会恢复它
fmt.Printf("value is %7.1e\n", v.Interface())
f := v.Float()
fmt.Println(f)
}
type User struct {
ID int
Name string
}
// 定义一个Enum类型
type Enum int
const (
Zero Enum = 0
)
type cat struct {
}
// 使用 reflect.TypeOf() 函数可以获得任意值的类型对象(reflect.Type),通过类型对象可以访问任意值的类型信息
// Go 程序中的类型(Type)指的是系统原生数据类型,如 int、string、bool、float32 等类型,以及使用 type 关键字定义的类型,这些类型的名称就是其类型本身的名称
// TypeOf returns the reflection Type that represents the dynamic type of i.
// If i is a nil interface value, TypeOf returns nil.
// func TypeOf(i interface{}) Type {...}
func TestReflectBase2(t *testing.T) {
typeOfCat := reflect.TypeOf(cat{})
// 显示反射类型对象的名称和种类
fmt.Println(typeOfCat.String(), typeOfCat.Name(), typeOfCat.Kind())
// 获取Zero常量的反射类型对象
typeOfA := reflect.TypeOf(Zero)
// 显示反射类型对象的名称和种类
fmt.Println(typeOfA.Name(), typeOfA.Kind())
inf := new(Skills)
// Array, Chan, Map, Ptr, or Slice.
inf_type := reflect.TypeOf(inf).Elem() // 引用类型需要用Elem()获取指针所指的对象类型
stu1 := Student{Name: "wd", Age: 22}
stu_type := reflect.TypeOf(stu1)
fmt.Println(stu_type.String()) //main.Student
fmt.Println(stu_type.Name()) //Student
fmt.Println(stu_type.PkgPath()) //main
fmt.Println(stu_type.Kind()) //struct
fmt.Println(stu_type.Size()) //24
fmt.Println(inf_type.NumMethod()) //2
fmt.Println(inf_type.Method(0), inf_type.Method(0).Name) // {reading main func() <invalid Value> 0} reading
fmt.Println(inf_type.MethodByName("reading")) //{reading main func() <invalid Value> 0} true
}
// 对指针获取反射对象时,可以通过 reflect.Elem() 方法获取这个指针指向的元素类型,这个获取过程被称为取元素,等效于对指针类型变量做了一个*操作
func TestReflectPoint(t *testing.T) {
// 创建cat的实例并返回指针
ins := &cat{}
// 获取结构体实例的反射类型对象
typeOfCat := reflect.TypeOf(ins)
// 显示反射类型对象的名称和种类(指针变量的类型名称和种类),go对所有指针变量的种类都是 Ptr
fmt.Printf("name:'%v' kind:'%v'\n", typeOfCat.Name(), typeOfCat.Kind())
// 注意对于:数组、切片、映射、通道、指针、接口 ,取指针类型的元素类型
typeOfCat = typeOfCat.Elem()
// 显示反射类型对象的名称和种类(指针变量指向元素的类型名称和种类)
fmt.Printf("element name: '%v', element kind: '%v'\n", typeOfCat.Name(), typeOfCat.Kind())
// 通过elem,但必须传递的是指针,修改值
var a int64 = 100
reflectSetValue2(&a)
fmt.Println("a:", a)
}
// 若要修改反射对象,值必须可设置
func TestReflectSetValueErr(t *testing.T) {
var x float64 = 3.4
// 传递了一份x的拷贝到reflect.ValueOf中,所以传到reflect.ValueOf的接口值不是由x,而是由x的拷贝创建的。
v := reflect.ValueOf(x)
// 问题在于7.1是不可寻址的,这意味着v就会变得不可设置。“可设置”(settability)是reflect.Value的特性之一,但并非所有的Value都是可设置的
// 对一个不可设置的Value调用的Set方法是错误的
// v.SetFloat(7.1) // Error: will panic.
// 反射对象的“可设置性”由它是否拥有原项目(orginal item)所决定。
fmt.Println("settability of v:", v.CanSet())
}
func TestReflectSetValueRight(t *testing.T) {
var x float64 = 3.4
// 如果我们想用反射修改x,我们必须把值的指针传给反射库
p := reflect.ValueOf(&x) // Note: take the address of x.
fmt.Println("type of p:", p.Type())
// 反射对象p不是可设置的
fmt.Println("settability of p:", p.CanSet())
// 想要设置的不是它,而是*p。 为了知道p指向了哪,我们调用Value的Elem方法,它通过指针定向并把结果保存在了一个Value中,命名为v:
v2 := p.Elem()
fmt.Println("settability of v2:", v2.CanSet())
v2.SetFloat(7.1)
fmt.Println(v2.Interface())
fmt.Println(x)
}
func TestReflectSetStructValueRight(t *testing.T) {
// 结构体中只有可导出的的字段是“可设置”的
type T struct {
A int
B string
}
q := T{23, "skidoo"}
s := reflect.ValueOf(&q).Elem()
typeOfT := s.Type()
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
fmt.Printf("%d: %s %s = %v\n", i,
typeOfT.Field(i).Name, f.Type(), f.Interface())
}
s.Field(0).SetInt(77)
s.Field(1).SetString("Sunset Strip")
fmt.Println("t is now", t)
}
func TestReflectSetValue(t *testing.T) {
var num float64 = 1.2345
fmt.Println("old value of pointer:", num)
// 通过reflect.ValueOf获取num中的reflect.Value,注意,参数必须是指针才能修改其值
pointer := reflect.ValueOf(&num)
newValue := pointer.Elem()
fmt.Println("type of pointer:", newValue.Type())
fmt.Println("settability of pointer:", newValue.CanSet())
// 重新赋值
newValue.SetFloat(77)
fmt.Println("new value of pointer:", num)
// 如果reflect.ValueOf的参数不是指针,会如何?
pointer = reflect.ValueOf(num)
fmt.Println("settability of pointer2:", pointer.CanSet())
//newValue = pointer.Elem() // 如果非指针,这里直接panic
}
func reflectSetValue2(x interface{}) {
v := reflect.ValueOf(x)
// 反射中使用 Elem()方法获取指针对应的值
if v.Elem().Kind() == reflect.Int64 {
v.Elem().SetInt(200)
}
}
func TestReflectStruct(t *testing.T) {
type cat struct {
Name string
// 以 ` 开始和结尾的字符串。这个字符串在Go语言中被称为 Tag(标签)。一般用于给字段添加自定义信息,方便其他模块根据信息进行不同功能的处理。
Type int `json:"type" id:"100"`
}
ins := cat{Name: "mimi", Type: 1}
// 获取结构体实例的反射类型对象
typeOfCat := reflect.TypeOf(ins)
// 获得一个结构体类型共有多少个字段。如果类型不是结构体,将会触发panic。
for i := 0; i < typeOfCat.NumField(); i++ {
// 获取每个成员的结构体字段类型,返回 StructField 结构,这个结构描述结构体的成员信息,通过这个信息可以获取成员与结构体的关系,如偏移、索引、是否为匿名字段、结构体标签(StructTag)等
fieldType := typeOfCat.Field(i)
fmt.Printf("name: %v tag: '%v'\n", fieldType.Name, fieldType.Tag)
}
if catType, ok := typeOfCat.FieldByName("Type"); ok {
fmt.Println(catType.Tag.Get("json"), catType.Tag.Get("id"))
}
}
func TestReflectType(t *testing.T) {
type myInt int64
var a *float32 // 指针
var b myInt // 自定义类型
var c rune // 类型别名//代表int32
reflectType(a) // type: kind:ptr
reflectType(b) // type:myInt kind:int64
reflectType(c) // type:int32 kind:int32
type person struct {
name string
age int
}
var d = person{
name: "wang",
age: 18,
}
reflectType(d) // type:person kind:struct
}
func reflectType(x interface{}) {
t := reflect.TypeOf(x)
fmt.Printf("type:%v kind:%v\n", t.Name(), t.Kind())
}
// ValueOf returns a new Value initialized to the concrete value
// stored in the interface i. ValueOf(nil) returns the zero
// func ValueOf(i interface{}) Value {...}
// reflect.ValueOf的返回值也是具体值,不过reflect.Value也可以包含一个接口值
func TestReflectValueBase(t *testing.T) {
var num float64 = 1.2345
//得到了一个类型为”relfect.Value”变量
pointer := reflect.ValueOf(&num)
value := reflect.ValueOf(num)
fmt.Println("value: ", value)
// 获得接口变量的真实内容
value.Interface()
// 注意的时候,转换的时候,如果转换的类型不完全符合,则直接panic
convertPointer := pointer.Interface().(*float64)
convertValue := value.Interface().(float64)
fmt.Println(convertPointer)
fmt.Println(convertValue)
}
func TestReflectGetAll(t *testing.T) {
user := User1{1, "Allen.Wu", 25}
DoFiledAndMethod(user)
}
type User1 struct {
Id int
Name string
Age int
}
func (u User1) ReflectCallFunc() {
fmt.Println("Allen.Wu ReflectCallFunc")
}
// 反射字段需要对外
// 通过接口来获取任意参数,然后一一揭晓
func DoFiledAndMethod(input interface{}) {
getType := reflect.TypeOf(input)
fmt.Println("get Type is :", getType.Name())
getValue := reflect.ValueOf(input)
fmt.Println("get all Fields is:", getValue)
// 获取方法字段
// 1. 先获取interface的reflect.Type,然后通过NumField进行遍历
// 2. 再通过reflect.Type的Field获取其Field
for i := 0; i < getType.NumField(); i++ {
field := getType.Field(i)
// 取值
value := getValue.Field(i).Interface()
fmt.Printf("%s: %v = %v\n", field.Name, field.Type, value)
}
// 获取方法
// 1. 先获取interface的reflect.Type,然后通过.NumMethod进行遍历
for i := 0; i < getType.NumMethod(); i++ {
m := getType.Method(i)
fmt.Printf("%s: %v\n", m.Name, m.Type)
}
// 输出:ReflectCallFuncHasArgs: func(base.User1, string, int)
// 结构体的方法其实也是通过函数实现,把base.User1当参数了
}
func TestReflectValue(t *testing.T) {
var a float32 = 3.14
var b int64 = 100
reflectValue(a) // type is float32, value is 3.140000
reflectValue(b) // type is int64, value is 100
// 将int类型的原始值转换为reflect.Value类型
c := reflect.ValueOf(10)
fmt.Printf("type c :%T\n", c) // type c :reflect.Value
}
func reflectValue(x interface{}) {
v := reflect.ValueOf(x)
k := v.Kind()
switch k {
case reflect.Int64:
// v.Int()从反射中获取整型的原始值,然后通过int64()强制类型转换
fmt.Printf("type is int64, value is %d\n", int64(v.Int()))
case reflect.Float32:
// v.Float()从反射中获取浮点型的原始值,然后通过float32()强制类型转换
fmt.Printf("type is float32, value is %f\n", float32(v.Float()))
case reflect.Float64:
// v.Float()从反射中获取浮点型的原始值,然后通过float64()强制类型转换
fmt.Printf("type is float64, value is %f\n", float64(v.Float()))
}
}
// 反射可以将“接口类型变量”转换为“反射类型对象”,反射类型指的是reflect.Type和reflect.Value这两种
func TestReflectTypeAndValue(t *testing.T) {
var num float64 = 1.2345
fmt.Println("type: ", reflect.TypeOf(num))
fmt.Println("value: ", reflect.ValueOf(num))
}
func TestReflectMethodInfo(t *testing.T) {
of := reflect.TypeOf(TestIsNull)
in := of.In(0)
fmt.Println(in)
if in.Kind() == reflect.Ptr {
elem := in.Elem()
fmt.Println(elem.Name(), elem.Kind())
}
}
func TestReflectOptValue(t *testing.T) {
var a = 2
vof := reflect.ValueOf(a)
i := vof.Int()
fmt.Println("i:", i)
tof := reflect.TypeOf(a)
i2 := vof.Type()
// 一样
fmt.Println("type:", i2, tof)
// panic: reflect: reflect.flag.mustBeAssignable using unaddressable value [recovered]
// 报错,因为我们的a是一个值类型,而值类型的传递是拷贝了一个副本,当 vof := reflect.ValueOf(a) 函数通过传递一个 a 拷贝创建了 vof,
// 那么 vof 的改变并不能更改原始的 a。要想 vof 的更改能作用到 a,那就必须传递 a 的地址 v = reflect.ValueOf(&a)
// vof.SetInt(333)
vof2 := reflect.ValueOf(&a)
// 通过 Elem() 方法进行取地址
vof2.Elem().SetInt(333)
fmt.Println("i:", a)
}
func TestBaseReflect(t *testing.T) {
user := User{}
a := reflect.TypeOf(user)
if _, ok := a.FieldByName("Name"); ok {
println("存在")
} else {
println("不存在")
}
fmt. | e("ID")
fmt.Println(field, b)
user2 := &User{}
var user3 interface{}
user3 = user2
i := reflect.TypeOf(user3)
elem := i.Elem()
fmt.Println("elem:", elem.Name())
typeOf := reflect.TypeOf(test)
fmt.Println(typeOf)
name := runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()
fmt.Println("name:", name)
}
type Handler func(obj interface{}) error
func test(qqqq interface{}) error {
fmt.Println("1111")
return nil
}
func TestIsNull(t *testing.T) {
// 声明一个 *int 类型的指针,初始值为 nil。
var a *int
fmt.Println("var a *int:", reflect.ValueOf(a).IsNil())
// nil值
fmt.Println("nil:", reflect.ValueOf(nil).IsValid())
// (*int)(nil) 的含义是将 nil 转换为 *int,也就是*int 类型的空指针
fmt.Println("(*int)(nil):", reflect.ValueOf((*int)(nil)).Elem().IsValid())
// 实例化一个结构体
s := struct{}{}
// 尝试从结构体中查找一个不存在的字段
fmt.Println("不存在的结构体成员:", reflect.ValueOf(s).FieldByName("").IsValid())
// 尝试从结构体中查找一个不存在的方法
fmt.Println("不存在的结构体方法:", reflect.ValueOf(s).MethodByName("").IsValid())
// 实例化一个map,与 make 方式创建的 map 等效
m := map[int]int{}
// 尝试从map中查找一个不存在的键
fmt.Println("不存在的键:", reflect.ValueOf(m).MapIndex(reflect.ValueOf(3)).IsValid())
}
func TestReflectInvokeFun(t *testing.T) {
of := reflect.ValueOf(testFunc)
of.Call(getValues())
}
// 注意,又不符合则panic
// 如何通过反射来进行方法的调用?
// 本来可以用u.ReflectCallFuncXXX直接调用的,但是如果要通过反射,那么首先要将方法注册,也就是MethodByName,然后通过反射调动mv.Call
func TestReflectInvokeMethod(t *testing.T) {
user := User1{1, "Allen.Wu", 25}
// 带有参数的调用方法
args := getValues("wudebao", 30)
invokeMethod(user, "ReflectCallFuncHasArgs", args)
// 无参数的调用方法
args = getValues()
invokeMethod(user, "ReflectCallFuncNoArgs", args)
// 调用指针类型方法
invokeMethod(&user, "ReflectCallFuncNoArgs2", args)
}
func testFunc() {
fmt.Println("testFunc..")
}
//根据参数获取对应的Values
func getValues(param ...interface{}) []reflect.Value {
vals := make([]reflect.Value, 0, len(param))
for i := range param {
vals = append(vals, reflect.ValueOf(param[i]))
}
return vals
}
func invokeMethod(obj interface{}, funcInter string, paramsValue []reflect.Value) {
getValue := reflect.ValueOf(obj)
method := getValue.MethodByName(funcInter)
if method.Kind() != reflect.Func {
log.Fatal("funcInter is not func")
return
}
if method.Type().NumIn() > 0 {
in := method.Type().In(0)
fmt.Println("in:", in)
}
if method.Type().NumOut() > 0 {
out := method.Type().Out(0)
fmt.Println("out:", out)
}
values := method.Call(paramsValue) //方法调用并返回值
for i := range values {
fmt.Println(values[i])
}
}
func (u User1) ReflectCallFuncHasArgs(name string, age int) {
fmt.Println("ReflectCallFuncHasArgs name: ", name, ", age:", age, "and origal User.Name:", u.Name)
}
func (u User1) ReflectCallFuncNoArgs() {
fmt.Println("ReflectCallFuncNoArgs")
}
func (u *User1) ReflectCallFuncNoArgs2() {
fmt.Println("ReflectCallFuncNoArgs2")
}
// 空接口相当于一个容器,能接受任何东西。.ValueOf(
// reflect.Value.Elem(),返回一个 interface 或者 pointer 的值
// Elem returns the value that the interface v contains or that the pointer v points to. It panics if v's Kind is not Interface or Ptr. It returns the zero Value if v is nil.
// reflect.Type.Elem(),返回一个类型(如:Array,Map,Chan等)的元素的类型
// Elem returns a type's element type. It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
func TestReflectNew(t *testing.T) {
var a string
of := reflect.TypeOf(a)
// New returns a Value representing a pointer to a new zero value for the specified type. That is, the returned Value's Type is PtrTo(typ).
sptr := reflect.New(of)
fmt.Println("sptr:", sptr)
// 返回值类型:reflect.Value
sval := sptr.Elem()
ss := sval.Interface().(string)
fmt.Println("ss:", ss) // 空字符串
}
| Println(a.Name(), a.Kind(), a.PkgPath())
field, b := a.FieldByNam | conditional_block |
conn.go | package smtp
import (
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/textproto"
"regexp"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
)
// Number of errors we'll tolerate per connection before closing. Defaults to 3.
const errThreshold = 3
type Conn struct {
conn net.Conn
text *textproto.Conn
server *Server
helo string
// Number of errors witnessed on this connection
errCount int
session Session
locker sync.Mutex
binarymime bool
lineLimitReader *lineLimitReader
bdatPipe *io.PipeWriter
bdatStatus *statusCollector // used for BDAT on LMTP
dataResult chan error
bytesReceived int64 // counts total size of chunks when BDAT is used
fromReceived bool
recipients []string
didAuth bool
}
func newConn(c net.Conn, s *Server) *Conn {
sc := &Conn{
server: s,
conn: c,
}
sc.init()
return sc
}
func (c *Conn) init() {
c.lineLimitReader = &lineLimitReader{
R: c.conn,
LineLimit: c.server.MaxLineLength,
}
rwc := struct {
io.Reader
io.Writer
io.Closer
}{
Reader: c.lineLimitReader,
Writer: c.conn,
Closer: c.conn,
}
if c.server.Debug != nil {
rwc = struct {
io.Reader
io.Writer
io.Closer
}{
io.TeeReader(rwc.Reader, c.server.Debug),
io.MultiWriter(rwc.Writer, c.server.Debug),
rwc.Closer,
}
}
c.text = textproto.NewConn(rwc)
}
// Commands are dispatched to the appropriate handler functions.
func (c *Conn) handle(cmd string, arg string) {
// If panic happens during command handling - send 421 response
// and close connection.
defer func() {
if err := recover(); err != nil {
c.writeResponse(421, EnhancedCode{4, 0, 0}, "Internal server error")
c.Close()
stack := debug.Stack()
c.server.ErrorLog.Printf("panic serving %v: %v\n%s", c.conn.RemoteAddr(), err, stack)
}
}()
if cmd == "" {
c.protocolError(500, EnhancedCode{5, 5, 2}, "Error: bad syntax")
return
}
cmd = strings.ToUpper(cmd)
switch cmd {
case "SEND", "SOML", "SAML", "EXPN", "HELP", "TURN":
// These commands are not implemented in any state
c.writeResponse(502, EnhancedCode{5, 5, 1}, fmt.Sprintf("%v command not implemented", cmd))
case "HELO", "EHLO", "LHLO":
lmtp := cmd == "LHLO"
enhanced := lmtp || cmd == "EHLO"
if c.server.LMTP && !lmtp {
c.writeResponse(500, EnhancedCode{5, 5, 1}, "This is a LMTP server, use LHLO")
return
}
if !c.server.LMTP && lmtp {
c.writeResponse(500, EnhancedCode{5, 5, 1}, "This is not a LMTP server")
return
}
c.handleGreet(enhanced, arg)
case "MAIL":
c.handleMail(arg)
case "RCPT":
c.handleRcpt(arg)
case "VRFY":
c.writeResponse(252, EnhancedCode{2, 5, 0}, "Cannot VRFY user, but will accept message")
case "NOOP":
c.writeResponse(250, EnhancedCode{2, 0, 0}, "I have sucessfully done nothing")
case "RSET": // Reset session
c.reset()
c.writeResponse(250, EnhancedCode{2, 0, 0}, "Session reset")
case "BDAT":
c.handleBdat(arg)
case "DATA":
c.handleData(arg)
case "QUIT":
c.writeResponse(221, EnhancedCode{2, 0, 0}, "Bye")
c.Close()
case "AUTH":
if c.server.AuthDisabled {
c.protocolError(500, EnhancedCode{5, 5, 2}, "Syntax error, AUTH command unrecognized")
} else {
c.handleAuth(arg)
}
case "STARTTLS":
c.handleStartTLS()
default:
msg := fmt.Sprintf("Syntax errors, %v command unrecognized", cmd)
c.protocolError(500, EnhancedCode{5, 5, 2}, msg)
}
}
func (c *Conn) Server() *Server {
return c.server
}
func (c *Conn) Session() Session {
c.locker.Lock()
defer c.locker.Unlock()
return c.session
}
func (c *Conn) setSession(session Session) {
c.locker.Lock()
defer c.locker.Unlock()
c.session = session
}
func (c *Conn) Close() error {
c.locker.Lock()
defer c.locker.Unlock()
if c.bdatPipe != nil {
c.bdatPipe.CloseWithError(ErrDataReset)
c.bdatPipe = nil
}
if c.session != nil {
c.session.Logout()
c.session = nil
}
return c.conn.Close()
}
// TLSConnectionState returns the connection's TLS connection state.
// Zero values are returned if the connection doesn't use TLS.
func (c *Conn) TLSConnectionState() (state tls.ConnectionState, ok bool) {
tc, ok := c.conn.(*tls.Conn)
if !ok {
return
}
return tc.ConnectionState(), true
}
func (c *Conn) Hostname() string {
return c.helo
}
func (c *Conn) Conn() net.Conn {
return c.conn
}
func (c *Conn) | () bool {
_, isTLS := c.TLSConnectionState()
return !c.server.AuthDisabled && (isTLS || c.server.AllowInsecureAuth)
}
// protocolError writes errors responses and closes the connection once too many
// have occurred.
func (c *Conn) protocolError(code int, ec EnhancedCode, msg string) {
c.writeResponse(code, ec, msg)
c.errCount++
if c.errCount > errThreshold {
c.writeResponse(500, EnhancedCode{5, 5, 1}, "Too many errors. Quiting now")
c.Close()
}
}
// GREET state -> waiting for HELO
func (c *Conn) handleGreet(enhanced bool, arg string) {
domain, err := parseHelloArgument(arg)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Domain/address argument required for HELO")
return
}
c.helo = domain
sess, err := c.server.Backend.NewSession(c)
if err != nil {
if smtpErr, ok := err.(*SMTPError); ok {
c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
return
}
c.writeResponse(451, EnhancedCode{4, 0, 0}, err.Error())
return
}
c.setSession(sess)
if !enhanced {
c.writeResponse(250, EnhancedCode{2, 0, 0}, fmt.Sprintf("Hello %s", domain))
return
}
caps := []string{}
caps = append(caps, c.server.caps...)
if _, isTLS := c.TLSConnectionState(); c.server.TLSConfig != nil && !isTLS {
caps = append(caps, "STARTTLS")
}
if c.authAllowed() {
authCap := "AUTH"
for name := range c.server.auths {
authCap += " " + name
}
caps = append(caps, authCap)
}
if c.server.EnableSMTPUTF8 {
caps = append(caps, "SMTPUTF8")
}
if _, isTLS := c.TLSConnectionState(); isTLS && c.server.EnableREQUIRETLS {
caps = append(caps, "REQUIRETLS")
}
if c.server.EnableBINARYMIME {
caps = append(caps, "BINARYMIME")
}
if c.server.MaxMessageBytes > 0 {
caps = append(caps, fmt.Sprintf("SIZE %v", c.server.MaxMessageBytes))
} else {
caps = append(caps, "SIZE")
}
args := []string{"Hello " + domain}
args = append(args, caps...)
c.writeResponse(250, NoEnhancedCode, args...)
}
// READY state -> waiting for MAIL
func (c *Conn) handleMail(arg string) {
if c.helo == "" {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Please introduce yourself first.")
return
}
if c.bdatPipe != nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "MAIL not allowed during message transfer")
return
}
arg, ok := cutPrefixFold(arg, "FROM:")
if !ok {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting MAIL arg syntax of FROM:<address>")
return
}
p := parser{s: strings.TrimSpace(arg)}
from, err := p.parseReversePath()
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting MAIL arg syntax of FROM:<address>")
return
}
args, err := parseArgs(p.s)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Unable to parse MAIL ESMTP parameters")
return
}
opts := &MailOptions{}
c.binarymime = false
// This is where the Conn may put BODY=8BITMIME, but we already
// read the DATA as bytes, so it does not effect our processing.
for key, value := range args {
switch key {
case "SIZE":
size, err := strconv.ParseUint(value, 10, 32)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Unable to parse SIZE as an integer")
return
}
if c.server.MaxMessageBytes > 0 && int64(size) > c.server.MaxMessageBytes {
c.writeResponse(552, EnhancedCode{5, 3, 4}, "Max message size exceeded")
return
}
opts.Size = int64(size)
case "SMTPUTF8":
if !c.server.EnableSMTPUTF8 {
c.writeResponse(504, EnhancedCode{5, 5, 4}, "SMTPUTF8 is not implemented")
return
}
opts.UTF8 = true
case "REQUIRETLS":
if !c.server.EnableREQUIRETLS {
c.writeResponse(504, EnhancedCode{5, 5, 4}, "REQUIRETLS is not implemented")
return
}
opts.RequireTLS = true
case "BODY":
switch value {
case "BINARYMIME":
if !c.server.EnableBINARYMIME {
c.writeResponse(504, EnhancedCode{5, 5, 4}, "BINARYMIME is not implemented")
return
}
c.binarymime = true
case "7BIT", "8BITMIME":
default:
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Unknown BODY value")
return
}
opts.Body = BodyType(value)
case "AUTH":
value, err := decodeXtext(value)
if err != nil || value == "" {
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Malformed AUTH parameter value")
return
}
if value == "<>" {
value = ""
} else {
p := parser{s: value}
value, err = p.parseMailbox()
if err != nil || p.s != "" {
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Malformed AUTH parameter mailbox")
return
}
}
opts.Auth = &value
default:
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Unknown MAIL FROM argument")
return
}
}
if err := c.Session().Mail(from, opts); err != nil {
if smtpErr, ok := err.(*SMTPError); ok {
c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
return
}
c.writeResponse(451, EnhancedCode{4, 0, 0}, err.Error())
return
}
c.writeResponse(250, EnhancedCode{2, 0, 0}, fmt.Sprintf("Roger, accepting mail from <%v>", from))
c.fromReceived = true
}
// This regexp matches 'hexchar' token defined in
// https://tools.ietf.org/html/rfc4954#section-8 however it is intentionally
// relaxed by requiring only '+' to be present. It allows us to detect
// malformed values such as +A or +HH and report them appropriately.
var hexcharRe = regexp.MustCompile(`\+[0-9A-F]?[0-9A-F]?`)
// decodeXtext decodes an RFC 3461 "xtext" encoded string: each '+'
// followed by two uppercase hex digits is replaced by the character it
// encodes. A '+' not followed by exactly two hex digits is reported as
// an error rather than passed through.
func decodeXtext(val string) (string, error) {
	if !strings.Contains(val, "+") {
		// Fast path: no escape sequences present at all.
		return val, nil
	}
	var replaceErr error
	decoded := hexcharRe.ReplaceAllStringFunc(val, func(match string) string {
		if len(match) != 3 {
			// The regexp deliberately makes the hex digits optional so that
			// malformed sequences like "+" or "+A" land here and are rejected.
			replaceErr = errors.New("incomplete hexchar")
			return ""
		}
		// ParseInt treats the leading '+' as a sign, so "+2B" parses as 0x2B.
		// NOTE(review): bitSize 8 rejects values above 0x7F (e.g. "+FF")
		// with a range error — confirm this restriction is intended.
		char, err := strconv.ParseInt(match, 16, 8)
		if err != nil {
			replaceErr = err
			return ""
		}
		return string(rune(char))
	})
	if replaceErr != nil {
		// Any failure inside the replace callback poisons the whole decode.
		return "", replaceErr
	}
	return decoded, nil
}
// encodeXtext encodes raw as RFC 3461 "xtext": printable non-space
// US-ASCII characters are copied through verbatim, while '+', '=' and
// everything outside the printable range are emitted as '+' followed by
// the uppercase hexadecimal code of the character.
//
// Bug fix: the previous version was missing the `continue` statements,
// so a printable character was emitted twice (raw and hex-escaped) and
// '+'/'=' were emitted three times.
func encodeXtext(raw string) string {
	var out strings.Builder
	out.Grow(len(raw))
	for _, ch := range raw {
		if ch == '+' || ch == '=' {
			// '+' and '=' are reserved by xtext and must be escaped.
			out.WriteRune('+')
			out.WriteString(strings.ToUpper(strconv.FormatInt(int64(ch), 16)))
			continue
		}
		if ch > '!' && ch < '~' { // printable non-space US-ASCII
			out.WriteRune(ch)
			continue
		}
		// Non-printable or non-ASCII: hex-escape.
		out.WriteRune('+')
		out.WriteString(strings.ToUpper(strconv.FormatInt(int64(ch), 16)))
	}
	return out.String()
}
// MAIL state -> waiting for RCPTs followed by DATA
func (c *Conn) handleRcpt(arg string) {
if !c.fromReceived {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Missing MAIL FROM command.")
return
}
if c.bdatPipe != nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "RCPT not allowed during message transfer")
return
}
arg, ok := cutPrefixFold(arg, "TO:")
if !ok {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting RCPT arg syntax of TO:<address>")
return
}
p := parser{s: strings.TrimSpace(arg)}
recipient, err := p.parsePath()
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting RCPT arg syntax of TO:<address>")
return
}
if len(strings.Fields(p.s)) > 0 {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "RCPT parameters are not supported")
return
}
if c.server.MaxRecipients > 0 && len(c.recipients) >= c.server.MaxRecipients {
c.writeResponse(452, EnhancedCode{4, 5, 3}, fmt.Sprintf("Maximum limit of %v recipients reached", c.server.MaxRecipients))
return
}
opts := &RcptOptions{}
if err := c.Session().Rcpt(recipient, opts); err != nil {
if smtpErr, ok := err.(*SMTPError); ok {
c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
return
}
c.writeResponse(451, EnhancedCode{4, 0, 0}, err.Error())
return
}
c.recipients = append(c.recipients, recipient)
c.writeResponse(250, EnhancedCode{2, 0, 0}, fmt.Sprintf("I'll make sure <%v> gets this", recipient))
}
// handleAuth processes the AUTH command (RFC 4954): it validates the
// connection state, enforces the TLS policy, then drives the SASL
// challenge/response loop until the mechanism reports completion.
func (c *Conn) handleAuth(arg string) {
	if c.helo == "" {
		c.writeResponse(502, EnhancedCode{5, 5, 1}, "Please introduce yourself first.")
		return
	}
	if c.didAuth {
		c.writeResponse(503, EnhancedCode{5, 5, 1}, "Already authenticated")
		return
	}
	parts := strings.Fields(arg)
	if len(parts) == 0 {
		c.writeResponse(502, EnhancedCode{5, 5, 4}, "Missing parameter")
		return
	}
	if _, isTLS := c.TLSConnectionState(); !isTLS && !c.server.AllowInsecureAuth {
		c.writeResponse(523, EnhancedCode{5, 7, 10}, "TLS is required")
		return
	}
	mechanism := strings.ToUpper(parts[0])
	// Parse client initial response if there is one
	var ir []byte
	if len(parts) > 1 {
		var err error
		ir, err = base64.StdEncoding.DecodeString(parts[1])
		if err != nil {
			// Bug fix: previously this returned without writing any reply,
			// leaving the client waiting indefinitely. Report the malformed
			// data like the in-loop decode failure below does.
			c.writeResponse(454, EnhancedCode{4, 7, 0}, "Invalid base64 data")
			return
		}
	}
	newSasl, ok := c.server.auths[mechanism]
	if !ok {
		c.writeResponse(504, EnhancedCode{5, 7, 4}, "Unsupported authentication mechanism")
		return
	}
	sasl := newSasl(c)
	// Feed the initial response (possibly nil) into the first SASL round.
	response := ir
	for {
		challenge, done, err := sasl.Next(response)
		if err != nil {
			if smtpErr, ok := err.(*SMTPError); ok {
				c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
				return
			}
			c.writeResponse(454, EnhancedCode{4, 7, 0}, err.Error())
			return
		}
		if done {
			break
		}
		encoded := ""
		if len(challenge) > 0 {
			encoded = base64.StdEncoding.EncodeToString(challenge)
		}
		// 334 carries the base64 challenge; the client answers in kind.
		c.writeResponse(334, NoEnhancedCode, encoded)
		encoded, err = c.readLine()
		if err != nil {
			return // TODO: error handling
		}
		if encoded == "*" {
			// https://tools.ietf.org/html/rfc4954#page-4
			c.writeResponse(501, EnhancedCode{5, 0, 0}, "Negotiation cancelled")
			return
		}
		response, err = base64.StdEncoding.DecodeString(encoded)
		if err != nil {
			c.writeResponse(454, EnhancedCode{4, 7, 0}, "Invalid base64 data")
			return
		}
	}
	c.writeResponse(235, EnhancedCode{2, 0, 0}, "Authentication succeeded")
	c.didAuth = true
}
func (c *Conn) handleStartTLS() {
if _, isTLS := c.TLSConnectionState(); isTLS {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Already running in TLS")
return
}
if c.server.TLSConfig == nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "TLS not supported")
return
}
c.writeResponse(220, EnhancedCode{2, 0, 0}, "Ready to start TLS")
// Upgrade to TLS
tlsConn := tls.Server(c.conn, c.server.TLSConfig)
if err := tlsConn.Handshake(); err != nil {
c.writeResponse(550, EnhancedCode{5, 0, 0}, "Handshake error")
return
}
c.conn = tlsConn
c.init()
// Reset all state and close the previous Session.
// This is different from just calling reset() since we want the Backend to
// be able to see the information about TLS connection in the
// ConnectionState object passed to it.
if session := c.Session(); session != nil {
session.Logout()
c.setSession(nil)
}
c.helo = ""
c.didAuth = false
c.reset()
}
// DATA
func (c *Conn) handleData(arg string) {
if arg != "" {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "DATA command should not have any arguments")
return
}
if c.bdatPipe != nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "DATA not allowed during message transfer")
return
}
if c.binarymime {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "DATA not allowed for BINARYMIME messages")
return
}
if !c.fromReceived || len(c.recipients) == 0 {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Missing RCPT TO command.")
return
}
// We have recipients, go to accept data
c.writeResponse(354, NoEnhancedCode, "Go ahead. End your data with <CR><LF>.<CR><LF>")
defer c.reset()
if c.server.LMTP {
c.handleDataLMTP()
return
}
r := newDataReader(c)
code, enhancedCode, msg := toSMTPStatus(c.Session().Data(r))
r.limited = false
io.Copy(ioutil.Discard, r) // Make sure all the data has been consumed
c.writeResponse(code, enhancedCode, msg)
}
func (c *Conn) handleBdat(arg string) {
args := strings.Fields(arg)
if len(args) == 0 {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Missing chunk size argument")
return
}
if len(args) > 2 {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Too many arguments")
return
}
if !c.fromReceived || len(c.recipients) == 0 {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Missing RCPT TO command.")
return
}
last := false
if len(args) == 2 {
if !strings.EqualFold(args[1], "LAST") {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Unknown BDAT argument")
return
}
last = true
}
// ParseUint instead of Atoi so we will not accept negative values.
size, err := strconv.ParseUint(args[0], 10, 32)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Malformed size argument")
return
}
if c.server.MaxMessageBytes != 0 && c.bytesReceived+int64(size) > c.server.MaxMessageBytes {
c.writeResponse(552, EnhancedCode{5, 3, 4}, "Max message size exceeded")
// Discard chunk itself without passing it to backend.
io.Copy(ioutil.Discard, io.LimitReader(c.text.R, int64(size)))
c.reset()
return
}
if c.bdatStatus == nil && c.server.LMTP {
c.bdatStatus = c.createStatusCollector()
}
if c.bdatPipe == nil {
var r *io.PipeReader
r, c.bdatPipe = io.Pipe()
c.dataResult = make(chan error, 1)
go func() {
defer func() {
if err := recover(); err != nil {
c.handlePanic(err, c.bdatStatus)
c.dataResult <- errPanic
r.CloseWithError(errPanic)
}
}()
var err error
if !c.server.LMTP {
err = c.Session().Data(r)
} else {
lmtpSession, ok := c.Session().(LMTPSession)
if !ok {
err = c.Session().Data(r)
for _, rcpt := range c.recipients {
c.bdatStatus.SetStatus(rcpt, err)
}
} else {
err = lmtpSession.LMTPData(r, c.bdatStatus)
}
}
c.dataResult <- err
r.CloseWithError(err)
}()
}
c.lineLimitReader.LineLimit = 0
chunk := io.LimitReader(c.text.R, int64(size))
_, err = io.Copy(c.bdatPipe, chunk)
if err != nil {
// Backend might return an error early using CloseWithError without consuming
// the whole chunk.
io.Copy(ioutil.Discard, chunk)
c.writeResponse(toSMTPStatus(err))
if err == errPanic {
c.Close()
}
c.reset()
c.lineLimitReader.LineLimit = c.server.MaxLineLength
return
}
c.bytesReceived += int64(size)
if last {
c.lineLimitReader.LineLimit = c.server.MaxLineLength
c.bdatPipe.Close()
err := <-c.dataResult
if c.server.LMTP {
c.bdatStatus.fillRemaining(err)
for i, rcpt := range c.recipients {
code, enchCode, msg := toSMTPStatus(<-c.bdatStatus.status[i])
c.writeResponse(code, enchCode, "<"+rcpt+"> "+msg)
}
} else {
c.writeResponse(toSMTPStatus(err))
}
if err == errPanic {
c.Close()
return
}
c.reset()
} else {
c.writeResponse(250, EnhancedCode{2, 0, 0}, "Continue")
}
}
// ErrDataReset is returned by Reader pased to Data function if client does not
// send another BDAT command and instead closes connection or issues RSET command.
var ErrDataReset = errors.New("smtp: message transmission aborted")
var errPanic = &SMTPError{
Code: 421,
EnhancedCode: EnhancedCode{4, 0, 0},
Message: "Internal server error",
}
func (c *Conn) handlePanic(err interface{}, status *statusCollector) {
if status != nil {
status.fillRemaining(errPanic)
}
stack := debug.Stack()
c.server.ErrorLog.Printf("panic serving %v: %v\n%s", c.conn.RemoteAddr(), err, stack)
}
func (c *Conn) createStatusCollector() *statusCollector {
rcptCounts := make(map[string]int, len(c.recipients))
status := &statusCollector{
statusMap: make(map[string]chan error, len(c.recipients)),
status: make([]chan error, 0, len(c.recipients)),
}
for _, rcpt := range c.recipients {
rcptCounts[rcpt]++
}
// Create channels with buffer sizes necessary to fit all
// statuses for a single recipient to avoid deadlocks.
for rcpt, count := range rcptCounts {
status.statusMap[rcpt] = make(chan error, count)
}
for _, rcpt := range c.recipients {
status.status = append(status.status, status.statusMap[rcpt])
}
return status
}
type statusCollector struct {
// Contains map from recipient to list of channels that are used for that
// recipient.
statusMap map[string]chan error
// Contains channels from statusMap, in the same
// order as Conn.recipients.
status []chan error
}
// fillRemaining sets status for all recipients SetStatus was not called for before.
func (s *statusCollector) fillRemaining(err error) {
	// Amount of times certain recipient was specified is indicated by the channel
	// buffer size, so once we fill it, we can be confident that we sent
	// at least as much statuses as needed. Extra statuses will be ignored anyway.
chLoop:
	for _, ch := range s.statusMap {
		for {
			select {
			case ch <- err:
				// A buffer slot was still free — keep filling this channel.
			default:
				// Channel buffer is full: every pending status slot for this
				// recipient is now set; advance to the next recipient.
				continue chLoop
			}
		}
	}
}
// SetStatus records err as the final delivery status for rcptTo. The
// channel buffers were sized in createStatusCollector to hold every
// expected status, so a blocked send indicates a misbehaving backend.
func (s *statusCollector) SetStatus(rcptTo string, err error) {
	statusCh, known := s.statusMap[rcptTo]
	if !known || statusCh == nil {
		panic("SetStatus is called for recipient that was not specified before")
	}
	select {
	case statusCh <- err:
	default:
		// There enough buffer space to fit all statuses at once, if this is
		// not the case - backend is doing something wrong.
		panic("SetStatus is called more times than particular recipient was specified")
	}
}
func (c *Conn) handleDataLMTP() {
r := newDataReader(c)
status := c.createStatusCollector()
done := make(chan bool, 1)
lmtpSession, ok := c.Session().(LMTPSession)
if !ok {
// Fallback to using a single status for all recipients.
err := c.Session().Data(r)
io.Copy(ioutil.Discard, r) // Make sure all the data has been consumed
for _, rcpt := range c.recipients {
status.SetStatus(rcpt, err)
}
done <- true
} else {
go func() {
defer func() {
if err := recover(); err != nil {
status.fillRemaining(&SMTPError{
Code: 421,
EnhancedCode: EnhancedCode{4, 0, 0},
Message: "Internal server error",
})
stack := debug.Stack()
c.server.ErrorLog.Printf("panic serving %v: %v\n%s", c.conn.RemoteAddr(), err, stack)
done <- false
}
}()
status.fillRemaining(lmtpSession.LMTPData(r, status))
io.Copy(ioutil.Discard, r) // Make sure all the data has been consumed
done <- true
}()
}
for i, rcpt := range c.recipients {
code, enchCode, msg := toSMTPStatus(<-status.status[i])
c.writeResponse(code, enchCode, "<"+rcpt+"> "+msg)
}
// If done gets false, the panic occured in LMTPData and the connection
// should be closed.
if !<-done {
c.Close()
}
}
// toSMTPStatus translates a backend error into an SMTP reply triple.
// nil maps to 250, an *SMTPError passes through verbatim, and any other
// error becomes a generic 554 permanent failure.
func toSMTPStatus(err error) (code int, enchCode EnhancedCode, msg string) {
	switch e := err.(type) {
	case nil:
		return 250, EnhancedCode{2, 0, 0}, "OK: queued"
	case *SMTPError:
		return e.Code, e.EnhancedCode, e.Message
	default:
		return 554, EnhancedCode{5, 0, 0}, "Error: transaction failed, blame it on the weather: " + err.Error()
	}
}
// Reject sends a 421 "too busy" reply and immediately drops the
// connection; intended for backends refusing new clients under load.
func (c *Conn) Reject() {
	const busyMsg = "Too busy. Try again later."
	c.writeResponse(421, EnhancedCode{4, 4, 5}, busyMsg)
	c.Close()
}
// greet sends the initial 220 service-ready banner, advertising ESMTP
// or LMTP depending on the server's configured mode.
func (c *Conn) greet() {
	var protocol string
	if c.server.LMTP {
		protocol = "LMTP"
	} else {
		protocol = "ESMTP"
	}
	c.writeResponse(220, NoEnhancedCode, fmt.Sprintf("%v %s Service Ready", c.server.Domain, protocol))
}
// writeResponse sends a (possibly multi-line) SMTP reply with the given
// status code. All lines but the last use the "code-text" continuation
// form; the final line carries the enhanced status code unless it is
// suppressed with NoEnhancedCode.
func (c *Conn) writeResponse(code int, enhCode EnhancedCode, text ...string) {
	// TODO: error handling
	if c.server.WriteTimeout != 0 {
		c.conn.SetWriteDeadline(time.Now().Add(c.server.WriteTimeout))
	}
	// All responses must include an enhanced code, if it is missing - use
	// a generic code X.0.0.
	if enhCode == EnhancedCodeNotSet {
		cat := code / 100
		switch cat {
		case 2, 4, 5:
			enhCode = EnhancedCode{cat, 0, 0}
		default:
			// Other reply classes have no defined enhanced codes; omit.
			enhCode = NoEnhancedCode
		}
	}
	// Continuation lines use a '-' separator, e.g. "250-STARTTLS".
	for i := 0; i < len(text)-1; i++ {
		c.text.PrintfLine("%d-%v", code, text[i])
	}
	if enhCode == NoEnhancedCode {
		c.text.PrintfLine("%d %v", code, text[len(text)-1])
	} else {
		c.text.PrintfLine("%d %v.%v.%v %v", code, enhCode[0], enhCode[1], enhCode[2], text[len(text)-1])
	}
}
// readLine reads one line of client input, first arming the server's
// configured read deadline on the underlying connection (if any).
func (c *Conn) readLine() (string, error) {
	if timeout := c.server.ReadTimeout; timeout != 0 {
		if err := c.conn.SetReadDeadline(time.Now().Add(timeout)); err != nil {
			return "", err
		}
	}
	return c.text.ReadLine()
}
// reset returns the connection to the post-EHLO state: it aborts any
// in-progress BDAT transfer, clears chunked-transfer bookkeeping,
// notifies the session, and forgets the sender and recipients.
func (c *Conn) reset() {
	c.locker.Lock()
	defer c.locker.Unlock()
	if c.bdatPipe != nil {
		// Unblocks a backend reading from the pipe; it observes ErrDataReset.
		c.bdatPipe.CloseWithError(ErrDataReset)
		c.bdatPipe = nil
	}
	c.bdatStatus = nil
	c.bytesReceived = 0
	if c.session != nil {
		c.session.Reset()
	}
	c.fromReceived = false
	c.recipients = nil
}
package smtp
import (
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/textproto"
"regexp"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
)
// Number of errors we'll tolerate per connection before closing. Defaults to 3.
const errThreshold = 3
type Conn struct {
conn net.Conn
text *textproto.Conn
server *Server
helo string
// Number of errors witnessed on this connection
errCount int
session Session
locker sync.Mutex
binarymime bool
lineLimitReader *lineLimitReader
bdatPipe *io.PipeWriter
bdatStatus *statusCollector // used for BDAT on LMTP
dataResult chan error
bytesReceived int64 // counts total size of chunks when BDAT is used
fromReceived bool
recipients []string
didAuth bool
}
func newConn(c net.Conn, s *Server) *Conn {
sc := &Conn{
server: s,
conn: c,
}
sc.init()
return sc
}
func (c *Conn) init() {
c.lineLimitReader = &lineLimitReader{
R: c.conn,
LineLimit: c.server.MaxLineLength,
}
rwc := struct {
io.Reader
io.Writer
io.Closer
}{
Reader: c.lineLimitReader,
Writer: c.conn,
Closer: c.conn,
}
if c.server.Debug != nil {
rwc = struct {
io.Reader
io.Writer
io.Closer
}{
io.TeeReader(rwc.Reader, c.server.Debug),
io.MultiWriter(rwc.Writer, c.server.Debug),
rwc.Closer,
}
}
c.text = textproto.NewConn(rwc)
}
// Commands are dispatched to the appropriate handler functions.
func (c *Conn) handle(cmd string, arg string) {
// If panic happens during command handling - send 421 response
// and close connection.
defer func() {
if err := recover(); err != nil {
c.writeResponse(421, EnhancedCode{4, 0, 0}, "Internal server error")
c.Close()
stack := debug.Stack()
c.server.ErrorLog.Printf("panic serving %v: %v\n%s", c.conn.RemoteAddr(), err, stack)
}
}()
if cmd == "" {
c.protocolError(500, EnhancedCode{5, 5, 2}, "Error: bad syntax")
return
}
cmd = strings.ToUpper(cmd)
switch cmd {
case "SEND", "SOML", "SAML", "EXPN", "HELP", "TURN":
// These commands are not implemented in any state
c.writeResponse(502, EnhancedCode{5, 5, 1}, fmt.Sprintf("%v command not implemented", cmd))
case "HELO", "EHLO", "LHLO":
lmtp := cmd == "LHLO"
enhanced := lmtp || cmd == "EHLO"
if c.server.LMTP && !lmtp {
c.writeResponse(500, EnhancedCode{5, 5, 1}, "This is a LMTP server, use LHLO")
return
}
if !c.server.LMTP && lmtp {
c.writeResponse(500, EnhancedCode{5, 5, 1}, "This is not a LMTP server")
return
}
c.handleGreet(enhanced, arg)
case "MAIL":
c.handleMail(arg)
case "RCPT":
c.handleRcpt(arg)
case "VRFY":
c.writeResponse(252, EnhancedCode{2, 5, 0}, "Cannot VRFY user, but will accept message")
case "NOOP":
c.writeResponse(250, EnhancedCode{2, 0, 0}, "I have sucessfully done nothing")
case "RSET": // Reset session
c.reset()
c.writeResponse(250, EnhancedCode{2, 0, 0}, "Session reset")
case "BDAT":
c.handleBdat(arg)
case "DATA":
c.handleData(arg)
case "QUIT":
c.writeResponse(221, EnhancedCode{2, 0, 0}, "Bye")
c.Close()
case "AUTH":
if c.server.AuthDisabled {
c.protocolError(500, EnhancedCode{5, 5, 2}, "Syntax error, AUTH command unrecognized")
} else {
c.handleAuth(arg)
}
case "STARTTLS":
c.handleStartTLS()
default:
msg := fmt.Sprintf("Syntax errors, %v command unrecognized", cmd)
c.protocolError(500, EnhancedCode{5, 5, 2}, msg)
}
}
func (c *Conn) Server() *Server {
return c.server
}
func (c *Conn) Session() Session {
c.locker.Lock()
defer c.locker.Unlock()
return c.session
}
func (c *Conn) setSession(session Session) {
c.locker.Lock()
defer c.locker.Unlock()
c.session = session
}
func (c *Conn) Close() error {
c.locker.Lock()
defer c.locker.Unlock()
| c.bdatPipe.CloseWithError(ErrDataReset)
c.bdatPipe = nil
}
if c.session != nil {
c.session.Logout()
c.session = nil
}
return c.conn.Close()
}
// TLSConnectionState returns the connection's TLS connection state.
// Zero values are returned if the connection doesn't use TLS.
func (c *Conn) TLSConnectionState() (state tls.ConnectionState, ok bool) {
tc, ok := c.conn.(*tls.Conn)
if !ok {
return
}
return tc.ConnectionState(), true
}
func (c *Conn) Hostname() string {
return c.helo
}
func (c *Conn) Conn() net.Conn {
return c.conn
}
// authAllowed reports whether the AUTH extension may be offered on this
// connection: authentication must be enabled server-side, and the link
// must either be TLS-protected or explicitly allowed to be insecure.
func (c *Conn) authAllowed() bool {
	if c.server.AuthDisabled {
		return false
	}
	_, isTLS := c.TLSConnectionState()
	return isTLS || c.server.AllowInsecureAuth
}
// protocolError writes errors responses and closes the connection once too many
// have occurred.
func (c *Conn) protocolError(code int, ec EnhancedCode, msg string) {
	c.writeResponse(code, ec, msg)
	c.errCount++
	if c.errCount > errThreshold {
		// Send a final explanatory 500 before dropping the client.
		c.writeResponse(500, EnhancedCode{5, 5, 1}, "Too many errors. Quiting now")
		c.Close()
	}
}
// GREET state -> waiting for HELO
func (c *Conn) handleGreet(enhanced bool, arg string) {
domain, err := parseHelloArgument(arg)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Domain/address argument required for HELO")
return
}
c.helo = domain
sess, err := c.server.Backend.NewSession(c)
if err != nil {
if smtpErr, ok := err.(*SMTPError); ok {
c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
return
}
c.writeResponse(451, EnhancedCode{4, 0, 0}, err.Error())
return
}
c.setSession(sess)
if !enhanced {
c.writeResponse(250, EnhancedCode{2, 0, 0}, fmt.Sprintf("Hello %s", domain))
return
}
caps := []string{}
caps = append(caps, c.server.caps...)
if _, isTLS := c.TLSConnectionState(); c.server.TLSConfig != nil && !isTLS {
caps = append(caps, "STARTTLS")
}
if c.authAllowed() {
authCap := "AUTH"
for name := range c.server.auths {
authCap += " " + name
}
caps = append(caps, authCap)
}
if c.server.EnableSMTPUTF8 {
caps = append(caps, "SMTPUTF8")
}
if _, isTLS := c.TLSConnectionState(); isTLS && c.server.EnableREQUIRETLS {
caps = append(caps, "REQUIRETLS")
}
if c.server.EnableBINARYMIME {
caps = append(caps, "BINARYMIME")
}
if c.server.MaxMessageBytes > 0 {
caps = append(caps, fmt.Sprintf("SIZE %v", c.server.MaxMessageBytes))
} else {
caps = append(caps, "SIZE")
}
args := []string{"Hello " + domain}
args = append(args, caps...)
c.writeResponse(250, NoEnhancedCode, args...)
}
// READY state -> waiting for MAIL
func (c *Conn) handleMail(arg string) {
if c.helo == "" {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Please introduce yourself first.")
return
}
if c.bdatPipe != nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "MAIL not allowed during message transfer")
return
}
arg, ok := cutPrefixFold(arg, "FROM:")
if !ok {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting MAIL arg syntax of FROM:<address>")
return
}
p := parser{s: strings.TrimSpace(arg)}
from, err := p.parseReversePath()
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting MAIL arg syntax of FROM:<address>")
return
}
args, err := parseArgs(p.s)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Unable to parse MAIL ESMTP parameters")
return
}
opts := &MailOptions{}
c.binarymime = false
// This is where the Conn may put BODY=8BITMIME, but we already
// read the DATA as bytes, so it does not effect our processing.
for key, value := range args {
switch key {
case "SIZE":
size, err := strconv.ParseUint(value, 10, 32)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Unable to parse SIZE as an integer")
return
}
if c.server.MaxMessageBytes > 0 && int64(size) > c.server.MaxMessageBytes {
c.writeResponse(552, EnhancedCode{5, 3, 4}, "Max message size exceeded")
return
}
opts.Size = int64(size)
case "SMTPUTF8":
if !c.server.EnableSMTPUTF8 {
c.writeResponse(504, EnhancedCode{5, 5, 4}, "SMTPUTF8 is not implemented")
return
}
opts.UTF8 = true
case "REQUIRETLS":
if !c.server.EnableREQUIRETLS {
c.writeResponse(504, EnhancedCode{5, 5, 4}, "REQUIRETLS is not implemented")
return
}
opts.RequireTLS = true
case "BODY":
switch value {
case "BINARYMIME":
if !c.server.EnableBINARYMIME {
c.writeResponse(504, EnhancedCode{5, 5, 4}, "BINARYMIME is not implemented")
return
}
c.binarymime = true
case "7BIT", "8BITMIME":
default:
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Unknown BODY value")
return
}
opts.Body = BodyType(value)
case "AUTH":
value, err := decodeXtext(value)
if err != nil || value == "" {
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Malformed AUTH parameter value")
return
}
if value == "<>" {
value = ""
} else {
p := parser{s: value}
value, err = p.parseMailbox()
if err != nil || p.s != "" {
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Malformed AUTH parameter mailbox")
return
}
}
opts.Auth = &value
default:
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Unknown MAIL FROM argument")
return
}
}
if err := c.Session().Mail(from, opts); err != nil {
if smtpErr, ok := err.(*SMTPError); ok {
c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
return
}
c.writeResponse(451, EnhancedCode{4, 0, 0}, err.Error())
return
}
c.writeResponse(250, EnhancedCode{2, 0, 0}, fmt.Sprintf("Roger, accepting mail from <%v>", from))
c.fromReceived = true
}
// This regexp matches 'hexchar' token defined in
// https://tools.ietf.org/html/rfc4954#section-8 however it is intentionally
// relaxed by requiring only '+' to be present. It allows us to detect
// malformed values such as +A or +HH and report them appropriately.
var hexcharRe = regexp.MustCompile(`\+[0-9A-F]?[0-9A-F]?`)
func decodeXtext(val string) (string, error) {
if !strings.Contains(val, "+") {
return val, nil
}
var replaceErr error
decoded := hexcharRe.ReplaceAllStringFunc(val, func(match string) string {
if len(match) != 3 {
replaceErr = errors.New("incomplete hexchar")
return ""
}
char, err := strconv.ParseInt(match, 16, 8)
if err != nil {
replaceErr = err
return ""
}
return string(rune(char))
})
if replaceErr != nil {
return "", replaceErr
}
return decoded, nil
}
// encodeXtext encodes raw as RFC 3461 "xtext": printable non-space
// US-ASCII characters are copied through verbatim, while '+', '=' and
// everything outside the printable range are emitted as '+' followed by
// the uppercase hexadecimal code of the character.
//
// Bug fix: the previous version was missing the `continue` statements,
// so a printable character was emitted twice (raw and hex-escaped) and
// '+'/'=' were emitted three times.
func encodeXtext(raw string) string {
	var out strings.Builder
	out.Grow(len(raw))
	for _, ch := range raw {
		if ch == '+' || ch == '=' {
			// '+' and '=' are reserved by xtext and must be escaped.
			out.WriteRune('+')
			out.WriteString(strings.ToUpper(strconv.FormatInt(int64(ch), 16)))
			continue
		}
		if ch > '!' && ch < '~' { // printable non-space US-ASCII
			out.WriteRune(ch)
			continue
		}
		// Non-printable or non-ASCII: hex-escape.
		out.WriteRune('+')
		out.WriteString(strings.ToUpper(strconv.FormatInt(int64(ch), 16)))
	}
	return out.String()
}
// MAIL state -> waiting for RCPTs followed by DATA
func (c *Conn) handleRcpt(arg string) {
if !c.fromReceived {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Missing MAIL FROM command.")
return
}
if c.bdatPipe != nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "RCPT not allowed during message transfer")
return
}
arg, ok := cutPrefixFold(arg, "TO:")
if !ok {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting RCPT arg syntax of TO:<address>")
return
}
p := parser{s: strings.TrimSpace(arg)}
recipient, err := p.parsePath()
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting RCPT arg syntax of TO:<address>")
return
}
if len(strings.Fields(p.s)) > 0 {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "RCPT parameters are not supported")
return
}
if c.server.MaxRecipients > 0 && len(c.recipients) >= c.server.MaxRecipients {
c.writeResponse(452, EnhancedCode{4, 5, 3}, fmt.Sprintf("Maximum limit of %v recipients reached", c.server.MaxRecipients))
return
}
opts := &RcptOptions{}
if err := c.Session().Rcpt(recipient, opts); err != nil {
if smtpErr, ok := err.(*SMTPError); ok {
c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
return
}
c.writeResponse(451, EnhancedCode{4, 0, 0}, err.Error())
return
}
c.recipients = append(c.recipients, recipient)
c.writeResponse(250, EnhancedCode{2, 0, 0}, fmt.Sprintf("I'll make sure <%v> gets this", recipient))
}
// handleAuth processes the AUTH command (RFC 4954): it validates the
// connection state, enforces the TLS policy, then drives the SASL
// challenge/response loop until the mechanism reports completion.
func (c *Conn) handleAuth(arg string) {
	if c.helo == "" {
		c.writeResponse(502, EnhancedCode{5, 5, 1}, "Please introduce yourself first.")
		return
	}
	if c.didAuth {
		c.writeResponse(503, EnhancedCode{5, 5, 1}, "Already authenticated")
		return
	}
	parts := strings.Fields(arg)
	if len(parts) == 0 {
		c.writeResponse(502, EnhancedCode{5, 5, 4}, "Missing parameter")
		return
	}
	if _, isTLS := c.TLSConnectionState(); !isTLS && !c.server.AllowInsecureAuth {
		c.writeResponse(523, EnhancedCode{5, 7, 10}, "TLS is required")
		return
	}
	mechanism := strings.ToUpper(parts[0])
	// Parse client initial response if there is one
	var ir []byte
	if len(parts) > 1 {
		var err error
		ir, err = base64.StdEncoding.DecodeString(parts[1])
		if err != nil {
			// Bug fix: previously this returned without writing any reply,
			// leaving the client waiting indefinitely. Report the malformed
			// data like the in-loop decode failure below does.
			c.writeResponse(454, EnhancedCode{4, 7, 0}, "Invalid base64 data")
			return
		}
	}
	newSasl, ok := c.server.auths[mechanism]
	if !ok {
		c.writeResponse(504, EnhancedCode{5, 7, 4}, "Unsupported authentication mechanism")
		return
	}
	sasl := newSasl(c)
	// Feed the initial response (possibly nil) into the first SASL round.
	response := ir
	for {
		challenge, done, err := sasl.Next(response)
		if err != nil {
			if smtpErr, ok := err.(*SMTPError); ok {
				c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
				return
			}
			c.writeResponse(454, EnhancedCode{4, 7, 0}, err.Error())
			return
		}
		if done {
			break
		}
		encoded := ""
		if len(challenge) > 0 {
			encoded = base64.StdEncoding.EncodeToString(challenge)
		}
		// 334 carries the base64 challenge; the client answers in kind.
		c.writeResponse(334, NoEnhancedCode, encoded)
		encoded, err = c.readLine()
		if err != nil {
			return // TODO: error handling
		}
		if encoded == "*" {
			// https://tools.ietf.org/html/rfc4954#page-4
			c.writeResponse(501, EnhancedCode{5, 0, 0}, "Negotiation cancelled")
			return
		}
		response, err = base64.StdEncoding.DecodeString(encoded)
		if err != nil {
			c.writeResponse(454, EnhancedCode{4, 7, 0}, "Invalid base64 data")
			return
		}
	}
	c.writeResponse(235, EnhancedCode{2, 0, 0}, "Authentication succeeded")
	c.didAuth = true
}
func (c *Conn) handleStartTLS() {
if _, isTLS := c.TLSConnectionState(); isTLS {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Already running in TLS")
return
}
if c.server.TLSConfig == nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "TLS not supported")
return
}
c.writeResponse(220, EnhancedCode{2, 0, 0}, "Ready to start TLS")
// Upgrade to TLS
tlsConn := tls.Server(c.conn, c.server.TLSConfig)
if err := tlsConn.Handshake(); err != nil {
c.writeResponse(550, EnhancedCode{5, 0, 0}, "Handshake error")
return
}
c.conn = tlsConn
c.init()
// Reset all state and close the previous Session.
// This is different from just calling reset() since we want the Backend to
// be able to see the information about TLS connection in the
// ConnectionState object passed to it.
if session := c.Session(); session != nil {
session.Logout()
c.setSession(nil)
}
c.helo = ""
c.didAuth = false
c.reset()
}
// DATA
func (c *Conn) handleData(arg string) {
if arg != "" {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "DATA command should not have any arguments")
return
}
if c.bdatPipe != nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "DATA not allowed during message transfer")
return
}
if c.binarymime {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "DATA not allowed for BINARYMIME messages")
return
}
if !c.fromReceived || len(c.recipients) == 0 {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Missing RCPT TO command.")
return
}
// We have recipients, go to accept data
c.writeResponse(354, NoEnhancedCode, "Go ahead. End your data with <CR><LF>.<CR><LF>")
defer c.reset()
if c.server.LMTP {
c.handleDataLMTP()
return
}
r := newDataReader(c)
code, enhancedCode, msg := toSMTPStatus(c.Session().Data(r))
r.limited = false
io.Copy(ioutil.Discard, r) // Make sure all the data has been consumed
c.writeResponse(code, enhancedCode, msg)
}
func (c *Conn) handleBdat(arg string) {
args := strings.Fields(arg)
if len(args) == 0 {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Missing chunk size argument")
return
}
if len(args) > 2 {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Too many arguments")
return
}
if !c.fromReceived || len(c.recipients) == 0 {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Missing RCPT TO command.")
return
}
last := false
if len(args) == 2 {
if !strings.EqualFold(args[1], "LAST") {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Unknown BDAT argument")
return
}
last = true
}
// ParseUint instead of Atoi so we will not accept negative values.
size, err := strconv.ParseUint(args[0], 10, 32)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Malformed size argument")
return
}
if c.server.MaxMessageBytes != 0 && c.bytesReceived+int64(size) > c.server.MaxMessageBytes {
c.writeResponse(552, EnhancedCode{5, 3, 4}, "Max message size exceeded")
// Discard chunk itself without passing it to backend.
io.Copy(ioutil.Discard, io.LimitReader(c.text.R, int64(size)))
c.reset()
return
}
if c.bdatStatus == nil && c.server.LMTP {
c.bdatStatus = c.createStatusCollector()
}
if c.bdatPipe == nil {
var r *io.PipeReader
r, c.bdatPipe = io.Pipe()
c.dataResult = make(chan error, 1)
go func() {
defer func() {
if err := recover(); err != nil {
c.handlePanic(err, c.bdatStatus)
c.dataResult <- errPanic
r.CloseWithError(errPanic)
}
}()
var err error
if !c.server.LMTP {
err = c.Session().Data(r)
} else {
lmtpSession, ok := c.Session().(LMTPSession)
if !ok {
err = c.Session().Data(r)
for _, rcpt := range c.recipients {
c.bdatStatus.SetStatus(rcpt, err)
}
} else {
err = lmtpSession.LMTPData(r, c.bdatStatus)
}
}
c.dataResult <- err
r.CloseWithError(err)
}()
}
c.lineLimitReader.LineLimit = 0
chunk := io.LimitReader(c.text.R, int64(size))
_, err = io.Copy(c.bdatPipe, chunk)
if err != nil {
// Backend might return an error early using CloseWithError without consuming
// the whole chunk.
io.Copy(ioutil.Discard, chunk)
c.writeResponse(toSMTPStatus(err))
if err == errPanic {
c.Close()
}
c.reset()
c.lineLimitReader.LineLimit = c.server.MaxLineLength
return
}
c.bytesReceived += int64(size)
if last {
c.lineLimitReader.LineLimit = c.server.MaxLineLength
c.bdatPipe.Close()
err := <-c.dataResult
if c.server.LMTP {
c.bdatStatus.fillRemaining(err)
for i, rcpt := range c.recipients {
code, enchCode, msg := toSMTPStatus(<-c.bdatStatus.status[i])
c.writeResponse(code, enchCode, "<"+rcpt+"> "+msg)
}
} else {
c.writeResponse(toSMTPStatus(err))
}
if err == errPanic {
c.Close()
return
}
c.reset()
} else {
c.writeResponse(250, EnhancedCode{2, 0, 0}, "Continue")
}
}
// ErrDataReset is returned by Reader pased to Data function if client does not
// send another BDAT command and instead closes connection or issues RSET command.
var ErrDataReset = errors.New("smtp: message transmission aborted")
var errPanic = &SMTPError{
Code: 421,
EnhancedCode: EnhancedCode{4, 0, 0},
Message: "Internal server error",
}
func (c *Conn) handlePanic(err interface{}, status *statusCollector) {
if status != nil {
status.fillRemaining(errPanic)
}
stack := debug.Stack()
c.server.ErrorLog.Printf("panic serving %v: %v\n%s", c.conn.RemoteAddr(), err, stack)
}
func (c *Conn) createStatusCollector() *statusCollector {
rcptCounts := make(map[string]int, len(c.recipients))
status := &statusCollector{
statusMap: make(map[string]chan error, len(c.recipients)),
status: make([]chan error, 0, len(c.recipients)),
}
for _, rcpt := range c.recipients {
rcptCounts[rcpt]++
}
// Create channels with buffer sizes necessary to fit all
// statuses for a single recipient to avoid deadlocks.
for rcpt, count := range rcptCounts {
status.statusMap[rcpt] = make(chan error, count)
}
for _, rcpt := range c.recipients {
status.status = append(status.status, status.statusMap[rcpt])
}
return status
}
type statusCollector struct {
// Contains map from recipient to list of channels that are used for that
// recipient.
statusMap map[string]chan error
// Contains channels from statusMap, in the same
// order as Conn.recipients.
status []chan error
}
// fillRemaining sets status for all recipients SetStatus was not called for before.
func (s *statusCollector) fillRemaining(err error) {
// Amount of times certain recipient was specified is indicated by the channel
// buffer size, so once we fill it, we can be confident that we sent
// at least as much statuses as needed. Extra statuses will be ignored anyway.
chLoop:
for _, ch := range s.statusMap {
for {
select {
case ch <- err:
default:
continue chLoop
}
}
}
}
func (s *statusCollector) SetStatus(rcptTo string, err error) {
ch := s.statusMap[rcptTo]
if ch == nil {
panic("SetStatus is called for recipient that was not specified before")
}
select {
case ch <- err:
default:
// There enough buffer space to fit all statuses at once, if this is
// not the case - backend is doing something wrong.
panic("SetStatus is called more times than particular recipient was specified")
}
}
func (c *Conn) handleDataLMTP() {
r := newDataReader(c)
status := c.createStatusCollector()
done := make(chan bool, 1)
lmtpSession, ok := c.Session().(LMTPSession)
if !ok {
// Fallback to using a single status for all recipients.
err := c.Session().Data(r)
io.Copy(ioutil.Discard, r) // Make sure all the data has been consumed
for _, rcpt := range c.recipients {
status.SetStatus(rcpt, err)
}
done <- true
} else {
go func() {
defer func() {
if err := recover(); err != nil {
status.fillRemaining(&SMTPError{
Code: 421,
EnhancedCode: EnhancedCode{4, 0, 0},
Message: "Internal server error",
})
stack := debug.Stack()
c.server.ErrorLog.Printf("panic serving %v: %v\n%s", c.conn.RemoteAddr(), err, stack)
done <- false
}
}()
status.fillRemaining(lmtpSession.LMTPData(r, status))
io.Copy(ioutil.Discard, r) // Make sure all the data has been consumed
done <- true
}()
}
for i, rcpt := range c.recipients {
code, enchCode, msg := toSMTPStatus(<-status.status[i])
c.writeResponse(code, enchCode, "<"+rcpt+"> "+msg)
}
// If done gets false, the panic occured in LMTPData and the connection
// should be closed.
if !<-done {
c.Close()
}
}
func toSMTPStatus(err error) (code int, enchCode EnhancedCode, msg string) {
if err != nil {
if smtperr, ok := err.(*SMTPError); ok {
return smtperr.Code, smtperr.EnhancedCode, smtperr.Message
} else {
return 554, EnhancedCode{5, 0, 0}, "Error: transaction failed, blame it on the weather: " + err.Error()
}
}
return 250, EnhancedCode{2, 0, 0}, "OK: queued"
}
func (c *Conn) Reject() {
c.writeResponse(421, EnhancedCode{4, 4, 5}, "Too busy. Try again later.")
c.Close()
}
func (c *Conn) greet() {
protocol := "ESMTP"
if c.server.LMTP {
protocol = "LMTP"
}
c.writeResponse(220, NoEnhancedCode, fmt.Sprintf("%v %s Service Ready", c.server.Domain, protocol))
}
func (c *Conn) writeResponse(code int, enhCode EnhancedCode, text ...string) {
// TODO: error handling
if c.server.WriteTimeout != 0 {
c.conn.SetWriteDeadline(time.Now().Add(c.server.WriteTimeout))
}
// All responses must include an enhanced code, if it is missing - use
// a generic code X.0.0.
if enhCode == EnhancedCodeNotSet {
cat := code / 100
switch cat {
case 2, 4, 5:
enhCode = EnhancedCode{cat, 0, 0}
default:
enhCode = NoEnhancedCode
}
}
for i := 0; i < len(text)-1; i++ {
c.text.PrintfLine("%d-%v", code, text[i])
}
if enhCode == NoEnhancedCode {
c.text.PrintfLine("%d %v", code, text[len(text)-1])
} else {
c.text.PrintfLine("%d %v.%v.%v %v", code, enhCode[0], enhCode[1], enhCode[2], text[len(text)-1])
}
}
// Reads a line of input
func (c *Conn) readLine() (string, error) {
if c.server.ReadTimeout != 0 {
if err := c.conn.SetReadDeadline(time.Now().Add(c.server.ReadTimeout)); err != nil {
return "", err
}
}
return c.text.ReadLine()
}
func (c *Conn) reset() {
c.locker.Lock()
defer c.locker.Unlock()
if c.bdatPipe != nil {
c.bdatPipe.CloseWithError(ErrDataReset)
c.bdatPipe = nil
}
c.bdatStatus = nil
c.bytesReceived = 0
if c.session != nil {
c.session.Reset()
}
c.fromReceived = false
c.recipients = nil
} | if c.bdatPipe != nil { | random_line_split |
conn.go | package smtp
import (
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/textproto"
"regexp"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
)
// Number of errors we'll tolerate per connection before closing. Defaults to 3.
const errThreshold = 3
type Conn struct {
conn net.Conn
text *textproto.Conn
server *Server
helo string
// Number of errors witnessed on this connection
errCount int
session Session
locker sync.Mutex
binarymime bool
lineLimitReader *lineLimitReader
bdatPipe *io.PipeWriter
bdatStatus *statusCollector // used for BDAT on LMTP
dataResult chan error
bytesReceived int64 // counts total size of chunks when BDAT is used
fromReceived bool
recipients []string
didAuth bool
}
func newConn(c net.Conn, s *Server) *Conn {
sc := &Conn{
server: s,
conn: c,
}
sc.init()
return sc
}
func (c *Conn) init() {
c.lineLimitReader = &lineLimitReader{
R: c.conn,
LineLimit: c.server.MaxLineLength,
}
rwc := struct {
io.Reader
io.Writer
io.Closer
}{
Reader: c.lineLimitReader,
Writer: c.conn,
Closer: c.conn,
}
if c.server.Debug != nil {
rwc = struct {
io.Reader
io.Writer
io.Closer
}{
io.TeeReader(rwc.Reader, c.server.Debug),
io.MultiWriter(rwc.Writer, c.server.Debug),
rwc.Closer,
}
}
c.text = textproto.NewConn(rwc)
}
// Commands are dispatched to the appropriate handler functions.
func (c *Conn) handle(cmd string, arg string) {
// If panic happens during command handling - send 421 response
// and close connection.
defer func() {
if err := recover(); err != nil {
c.writeResponse(421, EnhancedCode{4, 0, 0}, "Internal server error")
c.Close()
stack := debug.Stack()
c.server.ErrorLog.Printf("panic serving %v: %v\n%s", c.conn.RemoteAddr(), err, stack)
}
}()
if cmd == "" {
c.protocolError(500, EnhancedCode{5, 5, 2}, "Error: bad syntax")
return
}
cmd = strings.ToUpper(cmd)
switch cmd {
case "SEND", "SOML", "SAML", "EXPN", "HELP", "TURN":
// These commands are not implemented in any state
c.writeResponse(502, EnhancedCode{5, 5, 1}, fmt.Sprintf("%v command not implemented", cmd))
case "HELO", "EHLO", "LHLO":
lmtp := cmd == "LHLO"
enhanced := lmtp || cmd == "EHLO"
if c.server.LMTP && !lmtp {
c.writeResponse(500, EnhancedCode{5, 5, 1}, "This is a LMTP server, use LHLO")
return
}
if !c.server.LMTP && lmtp {
c.writeResponse(500, EnhancedCode{5, 5, 1}, "This is not a LMTP server")
return
}
c.handleGreet(enhanced, arg)
case "MAIL":
c.handleMail(arg)
case "RCPT":
c.handleRcpt(arg)
case "VRFY":
c.writeResponse(252, EnhancedCode{2, 5, 0}, "Cannot VRFY user, but will accept message")
case "NOOP":
c.writeResponse(250, EnhancedCode{2, 0, 0}, "I have sucessfully done nothing")
case "RSET": // Reset session
c.reset()
c.writeResponse(250, EnhancedCode{2, 0, 0}, "Session reset")
case "BDAT":
c.handleBdat(arg)
case "DATA":
c.handleData(arg)
case "QUIT":
c.writeResponse(221, EnhancedCode{2, 0, 0}, "Bye")
c.Close()
case "AUTH":
if c.server.AuthDisabled {
c.protocolError(500, EnhancedCode{5, 5, 2}, "Syntax error, AUTH command unrecognized")
} else {
c.handleAuth(arg)
}
case "STARTTLS":
c.handleStartTLS()
default:
msg := fmt.Sprintf("Syntax errors, %v command unrecognized", cmd)
c.protocolError(500, EnhancedCode{5, 5, 2}, msg)
}
}
func (c *Conn) Server() *Server {
return c.server
}
func (c *Conn) Session() Session {
c.locker.Lock()
defer c.locker.Unlock()
return c.session
}
func (c *Conn) setSession(session Session) {
c.locker.Lock()
defer c.locker.Unlock()
c.session = session
}
func (c *Conn) Close() error {
c.locker.Lock()
defer c.locker.Unlock()
if c.bdatPipe != nil {
c.bdatPipe.CloseWithError(ErrDataReset)
c.bdatPipe = nil
}
if c.session != nil {
c.session.Logout()
c.session = nil
}
return c.conn.Close()
}
// TLSConnectionState returns the connection's TLS connection state.
// Zero values are returned if the connection doesn't use TLS.
func (c *Conn) TLSConnectionState() (state tls.ConnectionState, ok bool) {
tc, ok := c.conn.(*tls.Conn)
if !ok {
return
}
return tc.ConnectionState(), true
}
func (c *Conn) Hostname() string {
return c.helo
}
func (c *Conn) Conn() net.Conn {
return c.conn
}
func (c *Conn) authAllowed() bool {
_, isTLS := c.TLSConnectionState()
return !c.server.AuthDisabled && (isTLS || c.server.AllowInsecureAuth)
}
// protocolError writes errors responses and closes the connection once too many
// have occurred.
func (c *Conn) protocolError(code int, ec EnhancedCode, msg string) {
c.writeResponse(code, ec, msg)
c.errCount++
if c.errCount > errThreshold {
c.writeResponse(500, EnhancedCode{5, 5, 1}, "Too many errors. Quiting now")
c.Close()
}
}
// GREET state -> waiting for HELO
func (c *Conn) handleGreet(enhanced bool, arg string) {
domain, err := parseHelloArgument(arg)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Domain/address argument required for HELO")
return
}
c.helo = domain
sess, err := c.server.Backend.NewSession(c)
if err != nil {
if smtpErr, ok := err.(*SMTPError); ok {
c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
return
}
c.writeResponse(451, EnhancedCode{4, 0, 0}, err.Error())
return
}
c.setSession(sess)
if !enhanced {
c.writeResponse(250, EnhancedCode{2, 0, 0}, fmt.Sprintf("Hello %s", domain))
return
}
caps := []string{}
caps = append(caps, c.server.caps...)
if _, isTLS := c.TLSConnectionState(); c.server.TLSConfig != nil && !isTLS {
caps = append(caps, "STARTTLS")
}
if c.authAllowed() {
authCap := "AUTH"
for name := range c.server.auths {
authCap += " " + name
}
caps = append(caps, authCap)
}
if c.server.EnableSMTPUTF8 {
caps = append(caps, "SMTPUTF8")
}
if _, isTLS := c.TLSConnectionState(); isTLS && c.server.EnableREQUIRETLS {
caps = append(caps, "REQUIRETLS")
}
if c.server.EnableBINARYMIME {
caps = append(caps, "BINARYMIME")
}
if c.server.MaxMessageBytes > 0 {
caps = append(caps, fmt.Sprintf("SIZE %v", c.server.MaxMessageBytes))
} else {
caps = append(caps, "SIZE")
}
args := []string{"Hello " + domain}
args = append(args, caps...)
c.writeResponse(250, NoEnhancedCode, args...)
}
// READY state -> waiting for MAIL
func (c *Conn) handleMail(arg string) {
if c.helo == "" {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Please introduce yourself first.")
return
}
if c.bdatPipe != nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "MAIL not allowed during message transfer")
return
}
arg, ok := cutPrefixFold(arg, "FROM:")
if !ok {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting MAIL arg syntax of FROM:<address>")
return
}
p := parser{s: strings.TrimSpace(arg)}
from, err := p.parseReversePath()
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting MAIL arg syntax of FROM:<address>")
return
}
args, err := parseArgs(p.s)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Unable to parse MAIL ESMTP parameters")
return
}
opts := &MailOptions{}
c.binarymime = false
// This is where the Conn may put BODY=8BITMIME, but we already
// read the DATA as bytes, so it does not effect our processing.
for key, value := range args {
switch key {
case "SIZE":
size, err := strconv.ParseUint(value, 10, 32)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Unable to parse SIZE as an integer")
return
}
if c.server.MaxMessageBytes > 0 && int64(size) > c.server.MaxMessageBytes {
c.writeResponse(552, EnhancedCode{5, 3, 4}, "Max message size exceeded")
return
}
opts.Size = int64(size)
case "SMTPUTF8":
if !c.server.EnableSMTPUTF8 {
c.writeResponse(504, EnhancedCode{5, 5, 4}, "SMTPUTF8 is not implemented")
return
}
opts.UTF8 = true
case "REQUIRETLS":
if !c.server.EnableREQUIRETLS {
c.writeResponse(504, EnhancedCode{5, 5, 4}, "REQUIRETLS is not implemented")
return
}
opts.RequireTLS = true
case "BODY":
switch value {
case "BINARYMIME":
if !c.server.EnableBINARYMIME {
c.writeResponse(504, EnhancedCode{5, 5, 4}, "BINARYMIME is not implemented")
return
}
c.binarymime = true
case "7BIT", "8BITMIME":
default:
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Unknown BODY value")
return
}
opts.Body = BodyType(value)
case "AUTH":
value, err := decodeXtext(value)
if err != nil || value == "" {
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Malformed AUTH parameter value")
return
}
if value == "<>" {
value = ""
} else {
p := parser{s: value}
value, err = p.parseMailbox()
if err != nil || p.s != "" {
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Malformed AUTH parameter mailbox")
return
}
}
opts.Auth = &value
default:
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Unknown MAIL FROM argument")
return
}
}
if err := c.Session().Mail(from, opts); err != nil {
if smtpErr, ok := err.(*SMTPError); ok {
c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
return
}
c.writeResponse(451, EnhancedCode{4, 0, 0}, err.Error())
return
}
c.writeResponse(250, EnhancedCode{2, 0, 0}, fmt.Sprintf("Roger, accepting mail from <%v>", from))
c.fromReceived = true
}
// This regexp matches 'hexchar' token defined in
// https://tools.ietf.org/html/rfc4954#section-8 however it is intentionally
// relaxed by requiring only '+' to be present. It allows us to detect
// malformed values such as +A or +HH and report them appropriately.
var hexcharRe = regexp.MustCompile(`\+[0-9A-F]?[0-9A-F]?`)
func decodeXtext(val string) (string, error) {
if !strings.Contains(val, "+") {
return val, nil
}
var replaceErr error
decoded := hexcharRe.ReplaceAllStringFunc(val, func(match string) string {
if len(match) != 3 {
replaceErr = errors.New("incomplete hexchar")
return ""
}
char, err := strconv.ParseInt(match, 16, 8)
if err != nil {
replaceErr = err
return ""
}
return string(rune(char))
})
if replaceErr != nil {
return "", replaceErr
}
return decoded, nil
}
func encodeXtext(raw string) string {
var out strings.Builder
out.Grow(len(raw))
for _, ch := range raw {
if ch == '+' || ch == '=' {
out.WriteRune('+')
out.WriteString(strings.ToUpper(strconv.FormatInt(int64(ch), 16)))
}
if ch > '!' && ch < '~' { // printable non-space US-ASCII
out.WriteRune(ch)
}
// Non-ASCII.
out.WriteRune('+')
out.WriteString(strings.ToUpper(strconv.FormatInt(int64(ch), 16)))
}
return out.String()
}
// MAIL state -> waiting for RCPTs followed by DATA
func (c *Conn) handleRcpt(arg string) {
if !c.fromReceived {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Missing MAIL FROM command.")
return
}
if c.bdatPipe != nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "RCPT not allowed during message transfer")
return
}
arg, ok := cutPrefixFold(arg, "TO:")
if !ok {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting RCPT arg syntax of TO:<address>")
return
}
p := parser{s: strings.TrimSpace(arg)}
recipient, err := p.parsePath()
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting RCPT arg syntax of TO:<address>")
return
}
if len(strings.Fields(p.s)) > 0 {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "RCPT parameters are not supported")
return
}
if c.server.MaxRecipients > 0 && len(c.recipients) >= c.server.MaxRecipients {
c.writeResponse(452, EnhancedCode{4, 5, 3}, fmt.Sprintf("Maximum limit of %v recipients reached", c.server.MaxRecipients))
return
}
opts := &RcptOptions{}
if err := c.Session().Rcpt(recipient, opts); err != nil {
if smtpErr, ok := err.(*SMTPError); ok {
c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
return
}
c.writeResponse(451, EnhancedCode{4, 0, 0}, err.Error())
return
}
c.recipients = append(c.recipients, recipient)
c.writeResponse(250, EnhancedCode{2, 0, 0}, fmt.Sprintf("I'll make sure <%v> gets this", recipient))
}
func (c *Conn) handleAuth(arg string) {
if c.helo == "" {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Please introduce yourself first.")
return
}
if c.didAuth {
c.writeResponse(503, EnhancedCode{5, 5, 1}, "Already authenticated")
return
}
parts := strings.Fields(arg)
if len(parts) == 0 {
c.writeResponse(502, EnhancedCode{5, 5, 4}, "Missing parameter")
return
}
if _, isTLS := c.TLSConnectionState(); !isTLS && !c.server.AllowInsecureAuth {
c.writeResponse(523, EnhancedCode{5, 7, 10}, "TLS is required")
return
}
mechanism := strings.ToUpper(parts[0])
// Parse client initial response if there is one
var ir []byte
if len(parts) > 1 {
var err error
ir, err = base64.StdEncoding.DecodeString(parts[1])
if err != nil {
return
}
}
newSasl, ok := c.server.auths[mechanism]
if !ok {
c.writeResponse(504, EnhancedCode{5, 7, 4}, "Unsupported authentication mechanism")
return
}
sasl := newSasl(c)
response := ir
for {
challenge, done, err := sasl.Next(response)
if err != nil {
if smtpErr, ok := err.(*SMTPError); ok {
c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
return
}
c.writeResponse(454, EnhancedCode{4, 7, 0}, err.Error())
return
}
if done {
break
}
encoded := ""
if len(challenge) > 0 {
encoded = base64.StdEncoding.EncodeToString(challenge)
}
c.writeResponse(334, NoEnhancedCode, encoded)
encoded, err = c.readLine()
if err != nil {
return // TODO: error handling
}
if encoded == "*" {
// https://tools.ietf.org/html/rfc4954#page-4
c.writeResponse(501, EnhancedCode{5, 0, 0}, "Negotiation cancelled")
return
}
response, err = base64.StdEncoding.DecodeString(encoded)
if err != nil {
c.writeResponse(454, EnhancedCode{4, 7, 0}, "Invalid base64 data")
return
}
}
c.writeResponse(235, EnhancedCode{2, 0, 0}, "Authentication succeeded")
c.didAuth = true
}
func (c *Conn) handleStartTLS() {
if _, isTLS := c.TLSConnectionState(); isTLS {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Already running in TLS")
return
}
if c.server.TLSConfig == nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "TLS not supported")
return
}
c.writeResponse(220, EnhancedCode{2, 0, 0}, "Ready to start TLS")
// Upgrade to TLS
tlsConn := tls.Server(c.conn, c.server.TLSConfig)
if err := tlsConn.Handshake(); err != nil {
c.writeResponse(550, EnhancedCode{5, 0, 0}, "Handshake error")
return
}
c.conn = tlsConn
c.init()
// Reset all state and close the previous Session.
// This is different from just calling reset() since we want the Backend to
// be able to see the information about TLS connection in the
// ConnectionState object passed to it.
if session := c.Session(); session != nil {
session.Logout()
c.setSession(nil)
}
c.helo = ""
c.didAuth = false
c.reset()
}
// DATA
func (c *Conn) handleData(arg string) {
if arg != "" {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "DATA command should not have any arguments")
return
}
if c.bdatPipe != nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "DATA not allowed during message transfer")
return
}
if c.binarymime {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "DATA not allowed for BINARYMIME messages")
return
}
if !c.fromReceived || len(c.recipients) == 0 {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Missing RCPT TO command.")
return
}
// We have recipients, go to accept data
c.writeResponse(354, NoEnhancedCode, "Go ahead. End your data with <CR><LF>.<CR><LF>")
defer c.reset()
if c.server.LMTP {
c.handleDataLMTP()
return
}
r := newDataReader(c)
code, enhancedCode, msg := toSMTPStatus(c.Session().Data(r))
r.limited = false
io.Copy(ioutil.Discard, r) // Make sure all the data has been consumed
c.writeResponse(code, enhancedCode, msg)
}
func (c *Conn) handleBdat(arg string) |
// ErrDataReset is returned by Reader pased to Data function if client does not
// send another BDAT command and instead closes connection or issues RSET command.
var ErrDataReset = errors.New("smtp: message transmission aborted")
var errPanic = &SMTPError{
Code: 421,
EnhancedCode: EnhancedCode{4, 0, 0},
Message: "Internal server error",
}
func (c *Conn) handlePanic(err interface{}, status *statusCollector) {
if status != nil {
status.fillRemaining(errPanic)
}
stack := debug.Stack()
c.server.ErrorLog.Printf("panic serving %v: %v\n%s", c.conn.RemoteAddr(), err, stack)
}
func (c *Conn) createStatusCollector() *statusCollector {
rcptCounts := make(map[string]int, len(c.recipients))
status := &statusCollector{
statusMap: make(map[string]chan error, len(c.recipients)),
status: make([]chan error, 0, len(c.recipients)),
}
for _, rcpt := range c.recipients {
rcptCounts[rcpt]++
}
// Create channels with buffer sizes necessary to fit all
// statuses for a single recipient to avoid deadlocks.
for rcpt, count := range rcptCounts {
status.statusMap[rcpt] = make(chan error, count)
}
for _, rcpt := range c.recipients {
status.status = append(status.status, status.statusMap[rcpt])
}
return status
}
type statusCollector struct {
// Contains map from recipient to list of channels that are used for that
// recipient.
statusMap map[string]chan error
// Contains channels from statusMap, in the same
// order as Conn.recipients.
status []chan error
}
// fillRemaining sets status for all recipients SetStatus was not called for before.
func (s *statusCollector) fillRemaining(err error) {
// Amount of times certain recipient was specified is indicated by the channel
// buffer size, so once we fill it, we can be confident that we sent
// at least as much statuses as needed. Extra statuses will be ignored anyway.
chLoop:
for _, ch := range s.statusMap {
for {
select {
case ch <- err:
default:
continue chLoop
}
}
}
}
func (s *statusCollector) SetStatus(rcptTo string, err error) {
ch := s.statusMap[rcptTo]
if ch == nil {
panic("SetStatus is called for recipient that was not specified before")
}
select {
case ch <- err:
default:
// There enough buffer space to fit all statuses at once, if this is
// not the case - backend is doing something wrong.
panic("SetStatus is called more times than particular recipient was specified")
}
}
func (c *Conn) handleDataLMTP() {
r := newDataReader(c)
status := c.createStatusCollector()
done := make(chan bool, 1)
lmtpSession, ok := c.Session().(LMTPSession)
if !ok {
// Fallback to using a single status for all recipients.
err := c.Session().Data(r)
io.Copy(ioutil.Discard, r) // Make sure all the data has been consumed
for _, rcpt := range c.recipients {
status.SetStatus(rcpt, err)
}
done <- true
} else {
go func() {
defer func() {
if err := recover(); err != nil {
status.fillRemaining(&SMTPError{
Code: 421,
EnhancedCode: EnhancedCode{4, 0, 0},
Message: "Internal server error",
})
stack := debug.Stack()
c.server.ErrorLog.Printf("panic serving %v: %v\n%s", c.conn.RemoteAddr(), err, stack)
done <- false
}
}()
status.fillRemaining(lmtpSession.LMTPData(r, status))
io.Copy(ioutil.Discard, r) // Make sure all the data has been consumed
done <- true
}()
}
for i, rcpt := range c.recipients {
code, enchCode, msg := toSMTPStatus(<-status.status[i])
c.writeResponse(code, enchCode, "<"+rcpt+"> "+msg)
}
// If done gets false, the panic occured in LMTPData and the connection
// should be closed.
if !<-done {
c.Close()
}
}
func toSMTPStatus(err error) (code int, enchCode EnhancedCode, msg string) {
if err != nil {
if smtperr, ok := err.(*SMTPError); ok {
return smtperr.Code, smtperr.EnhancedCode, smtperr.Message
} else {
return 554, EnhancedCode{5, 0, 0}, "Error: transaction failed, blame it on the weather: " + err.Error()
}
}
return 250, EnhancedCode{2, 0, 0}, "OK: queued"
}
func (c *Conn) Reject() {
c.writeResponse(421, EnhancedCode{4, 4, 5}, "Too busy. Try again later.")
c.Close()
}
func (c *Conn) greet() {
protocol := "ESMTP"
if c.server.LMTP {
protocol = "LMTP"
}
c.writeResponse(220, NoEnhancedCode, fmt.Sprintf("%v %s Service Ready", c.server.Domain, protocol))
}
func (c *Conn) writeResponse(code int, enhCode EnhancedCode, text ...string) {
// TODO: error handling
if c.server.WriteTimeout != 0 {
c.conn.SetWriteDeadline(time.Now().Add(c.server.WriteTimeout))
}
// All responses must include an enhanced code, if it is missing - use
// a generic code X.0.0.
if enhCode == EnhancedCodeNotSet {
cat := code / 100
switch cat {
case 2, 4, 5:
enhCode = EnhancedCode{cat, 0, 0}
default:
enhCode = NoEnhancedCode
}
}
for i := 0; i < len(text)-1; i++ {
c.text.PrintfLine("%d-%v", code, text[i])
}
if enhCode == NoEnhancedCode {
c.text.PrintfLine("%d %v", code, text[len(text)-1])
} else {
c.text.PrintfLine("%d %v.%v.%v %v", code, enhCode[0], enhCode[1], enhCode[2], text[len(text)-1])
}
}
// Reads a line of input
func (c *Conn) readLine() (string, error) {
if c.server.ReadTimeout != 0 {
if err := c.conn.SetReadDeadline(time.Now().Add(c.server.ReadTimeout)); err != nil {
return "", err
}
}
return c.text.ReadLine()
}
func (c *Conn) reset() {
c.locker.Lock()
defer c.locker.Unlock()
if c.bdatPipe != nil {
c.bdatPipe.CloseWithError(ErrDataReset)
c.bdatPipe = nil
}
c.bdatStatus = nil
c.bytesReceived = 0
if c.session != nil {
c.session.Reset()
}
c.fromReceived = false
c.recipients = nil
}
| {
args := strings.Fields(arg)
if len(args) == 0 {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Missing chunk size argument")
return
}
if len(args) > 2 {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Too many arguments")
return
}
if !c.fromReceived || len(c.recipients) == 0 {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Missing RCPT TO command.")
return
}
last := false
if len(args) == 2 {
if !strings.EqualFold(args[1], "LAST") {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Unknown BDAT argument")
return
}
last = true
}
// ParseUint instead of Atoi so we will not accept negative values.
size, err := strconv.ParseUint(args[0], 10, 32)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Malformed size argument")
return
}
if c.server.MaxMessageBytes != 0 && c.bytesReceived+int64(size) > c.server.MaxMessageBytes {
c.writeResponse(552, EnhancedCode{5, 3, 4}, "Max message size exceeded")
// Discard chunk itself without passing it to backend.
io.Copy(ioutil.Discard, io.LimitReader(c.text.R, int64(size)))
c.reset()
return
}
if c.bdatStatus == nil && c.server.LMTP {
c.bdatStatus = c.createStatusCollector()
}
if c.bdatPipe == nil {
var r *io.PipeReader
r, c.bdatPipe = io.Pipe()
c.dataResult = make(chan error, 1)
go func() {
defer func() {
if err := recover(); err != nil {
c.handlePanic(err, c.bdatStatus)
c.dataResult <- errPanic
r.CloseWithError(errPanic)
}
}()
var err error
if !c.server.LMTP {
err = c.Session().Data(r)
} else {
lmtpSession, ok := c.Session().(LMTPSession)
if !ok {
err = c.Session().Data(r)
for _, rcpt := range c.recipients {
c.bdatStatus.SetStatus(rcpt, err)
}
} else {
err = lmtpSession.LMTPData(r, c.bdatStatus)
}
}
c.dataResult <- err
r.CloseWithError(err)
}()
}
c.lineLimitReader.LineLimit = 0
chunk := io.LimitReader(c.text.R, int64(size))
_, err = io.Copy(c.bdatPipe, chunk)
if err != nil {
// Backend might return an error early using CloseWithError without consuming
// the whole chunk.
io.Copy(ioutil.Discard, chunk)
c.writeResponse(toSMTPStatus(err))
if err == errPanic {
c.Close()
}
c.reset()
c.lineLimitReader.LineLimit = c.server.MaxLineLength
return
}
c.bytesReceived += int64(size)
if last {
c.lineLimitReader.LineLimit = c.server.MaxLineLength
c.bdatPipe.Close()
err := <-c.dataResult
if c.server.LMTP {
c.bdatStatus.fillRemaining(err)
for i, rcpt := range c.recipients {
code, enchCode, msg := toSMTPStatus(<-c.bdatStatus.status[i])
c.writeResponse(code, enchCode, "<"+rcpt+"> "+msg)
}
} else {
c.writeResponse(toSMTPStatus(err))
}
if err == errPanic {
c.Close()
return
}
c.reset()
} else {
c.writeResponse(250, EnhancedCode{2, 0, 0}, "Continue")
}
} | identifier_body |
conn.go | package smtp
import (
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/textproto"
"regexp"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
)
// Number of errors we'll tolerate per connection before closing. Defaults to 3.
const errThreshold = 3
type Conn struct {
conn net.Conn
text *textproto.Conn
server *Server
helo string
// Number of errors witnessed on this connection
errCount int
session Session
locker sync.Mutex
binarymime bool
lineLimitReader *lineLimitReader
bdatPipe *io.PipeWriter
bdatStatus *statusCollector // used for BDAT on LMTP
dataResult chan error
bytesReceived int64 // counts total size of chunks when BDAT is used
fromReceived bool
recipients []string
didAuth bool
}
func newConn(c net.Conn, s *Server) *Conn {
sc := &Conn{
server: s,
conn: c,
}
sc.init()
return sc
}
func (c *Conn) init() {
c.lineLimitReader = &lineLimitReader{
R: c.conn,
LineLimit: c.server.MaxLineLength,
}
rwc := struct {
io.Reader
io.Writer
io.Closer
}{
Reader: c.lineLimitReader,
Writer: c.conn,
Closer: c.conn,
}
if c.server.Debug != nil {
rwc = struct {
io.Reader
io.Writer
io.Closer
}{
io.TeeReader(rwc.Reader, c.server.Debug),
io.MultiWriter(rwc.Writer, c.server.Debug),
rwc.Closer,
}
}
c.text = textproto.NewConn(rwc)
}
// Commands are dispatched to the appropriate handler functions.
func (c *Conn) handle(cmd string, arg string) {
// If panic happens during command handling - send 421 response
// and close connection.
defer func() {
if err := recover(); err != nil {
c.writeResponse(421, EnhancedCode{4, 0, 0}, "Internal server error")
c.Close()
stack := debug.Stack()
c.server.ErrorLog.Printf("panic serving %v: %v\n%s", c.conn.RemoteAddr(), err, stack)
}
}()
if cmd == "" {
c.protocolError(500, EnhancedCode{5, 5, 2}, "Error: bad syntax")
return
}
cmd = strings.ToUpper(cmd)
switch cmd {
case "SEND", "SOML", "SAML", "EXPN", "HELP", "TURN":
// These commands are not implemented in any state
c.writeResponse(502, EnhancedCode{5, 5, 1}, fmt.Sprintf("%v command not implemented", cmd))
case "HELO", "EHLO", "LHLO":
lmtp := cmd == "LHLO"
enhanced := lmtp || cmd == "EHLO"
if c.server.LMTP && !lmtp {
c.writeResponse(500, EnhancedCode{5, 5, 1}, "This is a LMTP server, use LHLO")
return
}
if !c.server.LMTP && lmtp {
c.writeResponse(500, EnhancedCode{5, 5, 1}, "This is not a LMTP server")
return
}
c.handleGreet(enhanced, arg)
case "MAIL":
c.handleMail(arg)
case "RCPT":
c.handleRcpt(arg)
case "VRFY":
c.writeResponse(252, EnhancedCode{2, 5, 0}, "Cannot VRFY user, but will accept message")
case "NOOP":
c.writeResponse(250, EnhancedCode{2, 0, 0}, "I have sucessfully done nothing")
case "RSET": // Reset session
c.reset()
c.writeResponse(250, EnhancedCode{2, 0, 0}, "Session reset")
case "BDAT":
c.handleBdat(arg)
case "DATA":
c.handleData(arg)
case "QUIT":
c.writeResponse(221, EnhancedCode{2, 0, 0}, "Bye")
c.Close()
case "AUTH":
if c.server.AuthDisabled {
c.protocolError(500, EnhancedCode{5, 5, 2}, "Syntax error, AUTH command unrecognized")
} else {
c.handleAuth(arg)
}
case "STARTTLS":
c.handleStartTLS()
default:
msg := fmt.Sprintf("Syntax errors, %v command unrecognized", cmd)
c.protocolError(500, EnhancedCode{5, 5, 2}, msg)
}
}
func (c *Conn) Server() *Server {
return c.server
}
func (c *Conn) Session() Session {
c.locker.Lock()
defer c.locker.Unlock()
return c.session
}
func (c *Conn) setSession(session Session) {
c.locker.Lock()
defer c.locker.Unlock()
c.session = session
}
func (c *Conn) Close() error {
c.locker.Lock()
defer c.locker.Unlock()
if c.bdatPipe != nil {
c.bdatPipe.CloseWithError(ErrDataReset)
c.bdatPipe = nil
}
if c.session != nil {
c.session.Logout()
c.session = nil
}
return c.conn.Close()
}
// TLSConnectionState returns the connection's TLS connection state.
// Zero values are returned if the connection doesn't use TLS.
func (c *Conn) TLSConnectionState() (state tls.ConnectionState, ok bool) {
tc, ok := c.conn.(*tls.Conn)
if !ok {
return
}
return tc.ConnectionState(), true
}
func (c *Conn) Hostname() string {
return c.helo
}
func (c *Conn) Conn() net.Conn {
return c.conn
}
func (c *Conn) authAllowed() bool {
_, isTLS := c.TLSConnectionState()
return !c.server.AuthDisabled && (isTLS || c.server.AllowInsecureAuth)
}
// protocolError writes errors responses and closes the connection once too many
// have occurred.
func (c *Conn) protocolError(code int, ec EnhancedCode, msg string) {
c.writeResponse(code, ec, msg)
c.errCount++
if c.errCount > errThreshold {
c.writeResponse(500, EnhancedCode{5, 5, 1}, "Too many errors. Quiting now")
c.Close()
}
}
// GREET state -> waiting for HELO
func (c *Conn) handleGreet(enhanced bool, arg string) {
domain, err := parseHelloArgument(arg)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Domain/address argument required for HELO")
return
}
c.helo = domain
sess, err := c.server.Backend.NewSession(c)
if err != nil {
if smtpErr, ok := err.(*SMTPError); ok {
c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
return
}
c.writeResponse(451, EnhancedCode{4, 0, 0}, err.Error())
return
}
c.setSession(sess)
if !enhanced {
c.writeResponse(250, EnhancedCode{2, 0, 0}, fmt.Sprintf("Hello %s", domain))
return
}
caps := []string{}
caps = append(caps, c.server.caps...)
if _, isTLS := c.TLSConnectionState(); c.server.TLSConfig != nil && !isTLS {
caps = append(caps, "STARTTLS")
}
if c.authAllowed() |
if c.server.EnableSMTPUTF8 {
caps = append(caps, "SMTPUTF8")
}
if _, isTLS := c.TLSConnectionState(); isTLS && c.server.EnableREQUIRETLS {
caps = append(caps, "REQUIRETLS")
}
if c.server.EnableBINARYMIME {
caps = append(caps, "BINARYMIME")
}
if c.server.MaxMessageBytes > 0 {
caps = append(caps, fmt.Sprintf("SIZE %v", c.server.MaxMessageBytes))
} else {
caps = append(caps, "SIZE")
}
args := []string{"Hello " + domain}
args = append(args, caps...)
c.writeResponse(250, NoEnhancedCode, args...)
}
// READY state -> waiting for MAIL
func (c *Conn) handleMail(arg string) {
if c.helo == "" {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Please introduce yourself first.")
return
}
if c.bdatPipe != nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "MAIL not allowed during message transfer")
return
}
arg, ok := cutPrefixFold(arg, "FROM:")
if !ok {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting MAIL arg syntax of FROM:<address>")
return
}
p := parser{s: strings.TrimSpace(arg)}
from, err := p.parseReversePath()
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting MAIL arg syntax of FROM:<address>")
return
}
args, err := parseArgs(p.s)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Unable to parse MAIL ESMTP parameters")
return
}
opts := &MailOptions{}
c.binarymime = false
// This is where the Conn may put BODY=8BITMIME, but we already
// read the DATA as bytes, so it does not effect our processing.
for key, value := range args {
switch key {
case "SIZE":
size, err := strconv.ParseUint(value, 10, 32)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Unable to parse SIZE as an integer")
return
}
if c.server.MaxMessageBytes > 0 && int64(size) > c.server.MaxMessageBytes {
c.writeResponse(552, EnhancedCode{5, 3, 4}, "Max message size exceeded")
return
}
opts.Size = int64(size)
case "SMTPUTF8":
if !c.server.EnableSMTPUTF8 {
c.writeResponse(504, EnhancedCode{5, 5, 4}, "SMTPUTF8 is not implemented")
return
}
opts.UTF8 = true
case "REQUIRETLS":
if !c.server.EnableREQUIRETLS {
c.writeResponse(504, EnhancedCode{5, 5, 4}, "REQUIRETLS is not implemented")
return
}
opts.RequireTLS = true
case "BODY":
switch value {
case "BINARYMIME":
if !c.server.EnableBINARYMIME {
c.writeResponse(504, EnhancedCode{5, 5, 4}, "BINARYMIME is not implemented")
return
}
c.binarymime = true
case "7BIT", "8BITMIME":
default:
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Unknown BODY value")
return
}
opts.Body = BodyType(value)
case "AUTH":
value, err := decodeXtext(value)
if err != nil || value == "" {
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Malformed AUTH parameter value")
return
}
if value == "<>" {
value = ""
} else {
p := parser{s: value}
value, err = p.parseMailbox()
if err != nil || p.s != "" {
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Malformed AUTH parameter mailbox")
return
}
}
opts.Auth = &value
default:
c.writeResponse(500, EnhancedCode{5, 5, 4}, "Unknown MAIL FROM argument")
return
}
}
if err := c.Session().Mail(from, opts); err != nil {
if smtpErr, ok := err.(*SMTPError); ok {
c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
return
}
c.writeResponse(451, EnhancedCode{4, 0, 0}, err.Error())
return
}
c.writeResponse(250, EnhancedCode{2, 0, 0}, fmt.Sprintf("Roger, accepting mail from <%v>", from))
c.fromReceived = true
}
// This regexp matches 'hexchar' token defined in
// https://tools.ietf.org/html/rfc4954#section-8 however it is intentionally
// relaxed by requiring only '+' to be present. It allows us to detect
// malformed values such as +A or +HH and report them appropriately.
var hexcharRe = regexp.MustCompile(`\+[0-9A-F]?[0-9A-F]?`)
func decodeXtext(val string) (string, error) {
if !strings.Contains(val, "+") {
return val, nil
}
var replaceErr error
decoded := hexcharRe.ReplaceAllStringFunc(val, func(match string) string {
if len(match) != 3 {
replaceErr = errors.New("incomplete hexchar")
return ""
}
char, err := strconv.ParseInt(match, 16, 8)
if err != nil {
replaceErr = err
return ""
}
return string(rune(char))
})
if replaceErr != nil {
return "", replaceErr
}
return decoded, nil
}
func encodeXtext(raw string) string {
var out strings.Builder
out.Grow(len(raw))
for _, ch := range raw {
if ch == '+' || ch == '=' {
out.WriteRune('+')
out.WriteString(strings.ToUpper(strconv.FormatInt(int64(ch), 16)))
}
if ch > '!' && ch < '~' { // printable non-space US-ASCII
out.WriteRune(ch)
}
// Non-ASCII.
out.WriteRune('+')
out.WriteString(strings.ToUpper(strconv.FormatInt(int64(ch), 16)))
}
return out.String()
}
// MAIL state -> waiting for RCPTs followed by DATA
func (c *Conn) handleRcpt(arg string) {
if !c.fromReceived {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Missing MAIL FROM command.")
return
}
if c.bdatPipe != nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "RCPT not allowed during message transfer")
return
}
arg, ok := cutPrefixFold(arg, "TO:")
if !ok {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting RCPT arg syntax of TO:<address>")
return
}
p := parser{s: strings.TrimSpace(arg)}
recipient, err := p.parsePath()
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "Was expecting RCPT arg syntax of TO:<address>")
return
}
if len(strings.Fields(p.s)) > 0 {
c.writeResponse(501, EnhancedCode{5, 5, 2}, "RCPT parameters are not supported")
return
}
if c.server.MaxRecipients > 0 && len(c.recipients) >= c.server.MaxRecipients {
c.writeResponse(452, EnhancedCode{4, 5, 3}, fmt.Sprintf("Maximum limit of %v recipients reached", c.server.MaxRecipients))
return
}
opts := &RcptOptions{}
if err := c.Session().Rcpt(recipient, opts); err != nil {
if smtpErr, ok := err.(*SMTPError); ok {
c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
return
}
c.writeResponse(451, EnhancedCode{4, 0, 0}, err.Error())
return
}
c.recipients = append(c.recipients, recipient)
c.writeResponse(250, EnhancedCode{2, 0, 0}, fmt.Sprintf("I'll make sure <%v> gets this", recipient))
}
func (c *Conn) handleAuth(arg string) {
if c.helo == "" {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Please introduce yourself first.")
return
}
if c.didAuth {
c.writeResponse(503, EnhancedCode{5, 5, 1}, "Already authenticated")
return
}
parts := strings.Fields(arg)
if len(parts) == 0 {
c.writeResponse(502, EnhancedCode{5, 5, 4}, "Missing parameter")
return
}
if _, isTLS := c.TLSConnectionState(); !isTLS && !c.server.AllowInsecureAuth {
c.writeResponse(523, EnhancedCode{5, 7, 10}, "TLS is required")
return
}
mechanism := strings.ToUpper(parts[0])
// Parse client initial response if there is one
var ir []byte
if len(parts) > 1 {
var err error
ir, err = base64.StdEncoding.DecodeString(parts[1])
if err != nil {
return
}
}
newSasl, ok := c.server.auths[mechanism]
if !ok {
c.writeResponse(504, EnhancedCode{5, 7, 4}, "Unsupported authentication mechanism")
return
}
sasl := newSasl(c)
response := ir
for {
challenge, done, err := sasl.Next(response)
if err != nil {
if smtpErr, ok := err.(*SMTPError); ok {
c.writeResponse(smtpErr.Code, smtpErr.EnhancedCode, smtpErr.Message)
return
}
c.writeResponse(454, EnhancedCode{4, 7, 0}, err.Error())
return
}
if done {
break
}
encoded := ""
if len(challenge) > 0 {
encoded = base64.StdEncoding.EncodeToString(challenge)
}
c.writeResponse(334, NoEnhancedCode, encoded)
encoded, err = c.readLine()
if err != nil {
return // TODO: error handling
}
if encoded == "*" {
// https://tools.ietf.org/html/rfc4954#page-4
c.writeResponse(501, EnhancedCode{5, 0, 0}, "Negotiation cancelled")
return
}
response, err = base64.StdEncoding.DecodeString(encoded)
if err != nil {
c.writeResponse(454, EnhancedCode{4, 7, 0}, "Invalid base64 data")
return
}
}
c.writeResponse(235, EnhancedCode{2, 0, 0}, "Authentication succeeded")
c.didAuth = true
}
func (c *Conn) handleStartTLS() {
if _, isTLS := c.TLSConnectionState(); isTLS {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Already running in TLS")
return
}
if c.server.TLSConfig == nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "TLS not supported")
return
}
c.writeResponse(220, EnhancedCode{2, 0, 0}, "Ready to start TLS")
// Upgrade to TLS
tlsConn := tls.Server(c.conn, c.server.TLSConfig)
if err := tlsConn.Handshake(); err != nil {
c.writeResponse(550, EnhancedCode{5, 0, 0}, "Handshake error")
return
}
c.conn = tlsConn
c.init()
// Reset all state and close the previous Session.
// This is different from just calling reset() since we want the Backend to
// be able to see the information about TLS connection in the
// ConnectionState object passed to it.
if session := c.Session(); session != nil {
session.Logout()
c.setSession(nil)
}
c.helo = ""
c.didAuth = false
c.reset()
}
// DATA
func (c *Conn) handleData(arg string) {
if arg != "" {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "DATA command should not have any arguments")
return
}
if c.bdatPipe != nil {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "DATA not allowed during message transfer")
return
}
if c.binarymime {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "DATA not allowed for BINARYMIME messages")
return
}
if !c.fromReceived || len(c.recipients) == 0 {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Missing RCPT TO command.")
return
}
// We have recipients, go to accept data
c.writeResponse(354, NoEnhancedCode, "Go ahead. End your data with <CR><LF>.<CR><LF>")
defer c.reset()
if c.server.LMTP {
c.handleDataLMTP()
return
}
r := newDataReader(c)
code, enhancedCode, msg := toSMTPStatus(c.Session().Data(r))
r.limited = false
io.Copy(ioutil.Discard, r) // Make sure all the data has been consumed
c.writeResponse(code, enhancedCode, msg)
}
func (c *Conn) handleBdat(arg string) {
args := strings.Fields(arg)
if len(args) == 0 {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Missing chunk size argument")
return
}
if len(args) > 2 {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Too many arguments")
return
}
if !c.fromReceived || len(c.recipients) == 0 {
c.writeResponse(502, EnhancedCode{5, 5, 1}, "Missing RCPT TO command.")
return
}
last := false
if len(args) == 2 {
if !strings.EqualFold(args[1], "LAST") {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Unknown BDAT argument")
return
}
last = true
}
// ParseUint instead of Atoi so we will not accept negative values.
size, err := strconv.ParseUint(args[0], 10, 32)
if err != nil {
c.writeResponse(501, EnhancedCode{5, 5, 4}, "Malformed size argument")
return
}
if c.server.MaxMessageBytes != 0 && c.bytesReceived+int64(size) > c.server.MaxMessageBytes {
c.writeResponse(552, EnhancedCode{5, 3, 4}, "Max message size exceeded")
// Discard chunk itself without passing it to backend.
io.Copy(ioutil.Discard, io.LimitReader(c.text.R, int64(size)))
c.reset()
return
}
if c.bdatStatus == nil && c.server.LMTP {
c.bdatStatus = c.createStatusCollector()
}
if c.bdatPipe == nil {
var r *io.PipeReader
r, c.bdatPipe = io.Pipe()
c.dataResult = make(chan error, 1)
go func() {
defer func() {
if err := recover(); err != nil {
c.handlePanic(err, c.bdatStatus)
c.dataResult <- errPanic
r.CloseWithError(errPanic)
}
}()
var err error
if !c.server.LMTP {
err = c.Session().Data(r)
} else {
lmtpSession, ok := c.Session().(LMTPSession)
if !ok {
err = c.Session().Data(r)
for _, rcpt := range c.recipients {
c.bdatStatus.SetStatus(rcpt, err)
}
} else {
err = lmtpSession.LMTPData(r, c.bdatStatus)
}
}
c.dataResult <- err
r.CloseWithError(err)
}()
}
c.lineLimitReader.LineLimit = 0
chunk := io.LimitReader(c.text.R, int64(size))
_, err = io.Copy(c.bdatPipe, chunk)
if err != nil {
// Backend might return an error early using CloseWithError without consuming
// the whole chunk.
io.Copy(ioutil.Discard, chunk)
c.writeResponse(toSMTPStatus(err))
if err == errPanic {
c.Close()
}
c.reset()
c.lineLimitReader.LineLimit = c.server.MaxLineLength
return
}
c.bytesReceived += int64(size)
if last {
c.lineLimitReader.LineLimit = c.server.MaxLineLength
c.bdatPipe.Close()
err := <-c.dataResult
if c.server.LMTP {
c.bdatStatus.fillRemaining(err)
for i, rcpt := range c.recipients {
code, enchCode, msg := toSMTPStatus(<-c.bdatStatus.status[i])
c.writeResponse(code, enchCode, "<"+rcpt+"> "+msg)
}
} else {
c.writeResponse(toSMTPStatus(err))
}
if err == errPanic {
c.Close()
return
}
c.reset()
} else {
c.writeResponse(250, EnhancedCode{2, 0, 0}, "Continue")
}
}
// ErrDataReset is returned by Reader pased to Data function if client does not
// send another BDAT command and instead closes connection or issues RSET command.
var ErrDataReset = errors.New("smtp: message transmission aborted")
var errPanic = &SMTPError{
Code: 421,
EnhancedCode: EnhancedCode{4, 0, 0},
Message: "Internal server error",
}
func (c *Conn) handlePanic(err interface{}, status *statusCollector) {
if status != nil {
status.fillRemaining(errPanic)
}
stack := debug.Stack()
c.server.ErrorLog.Printf("panic serving %v: %v\n%s", c.conn.RemoteAddr(), err, stack)
}
func (c *Conn) createStatusCollector() *statusCollector {
rcptCounts := make(map[string]int, len(c.recipients))
status := &statusCollector{
statusMap: make(map[string]chan error, len(c.recipients)),
status: make([]chan error, 0, len(c.recipients)),
}
for _, rcpt := range c.recipients {
rcptCounts[rcpt]++
}
// Create channels with buffer sizes necessary to fit all
// statuses for a single recipient to avoid deadlocks.
for rcpt, count := range rcptCounts {
status.statusMap[rcpt] = make(chan error, count)
}
for _, rcpt := range c.recipients {
status.status = append(status.status, status.statusMap[rcpt])
}
return status
}
type statusCollector struct {
// Contains map from recipient to list of channels that are used for that
// recipient.
statusMap map[string]chan error
// Contains channels from statusMap, in the same
// order as Conn.recipients.
status []chan error
}
// fillRemaining sets status for all recipients SetStatus was not called for before.
func (s *statusCollector) fillRemaining(err error) {
// Amount of times certain recipient was specified is indicated by the channel
// buffer size, so once we fill it, we can be confident that we sent
// at least as much statuses as needed. Extra statuses will be ignored anyway.
chLoop:
for _, ch := range s.statusMap {
for {
select {
case ch <- err:
default:
continue chLoop
}
}
}
}
func (s *statusCollector) SetStatus(rcptTo string, err error) {
ch := s.statusMap[rcptTo]
if ch == nil {
panic("SetStatus is called for recipient that was not specified before")
}
select {
case ch <- err:
default:
// There enough buffer space to fit all statuses at once, if this is
// not the case - backend is doing something wrong.
panic("SetStatus is called more times than particular recipient was specified")
}
}
func (c *Conn) handleDataLMTP() {
r := newDataReader(c)
status := c.createStatusCollector()
done := make(chan bool, 1)
lmtpSession, ok := c.Session().(LMTPSession)
if !ok {
// Fallback to using a single status for all recipients.
err := c.Session().Data(r)
io.Copy(ioutil.Discard, r) // Make sure all the data has been consumed
for _, rcpt := range c.recipients {
status.SetStatus(rcpt, err)
}
done <- true
} else {
go func() {
defer func() {
if err := recover(); err != nil {
status.fillRemaining(&SMTPError{
Code: 421,
EnhancedCode: EnhancedCode{4, 0, 0},
Message: "Internal server error",
})
stack := debug.Stack()
c.server.ErrorLog.Printf("panic serving %v: %v\n%s", c.conn.RemoteAddr(), err, stack)
done <- false
}
}()
status.fillRemaining(lmtpSession.LMTPData(r, status))
io.Copy(ioutil.Discard, r) // Make sure all the data has been consumed
done <- true
}()
}
for i, rcpt := range c.recipients {
code, enchCode, msg := toSMTPStatus(<-status.status[i])
c.writeResponse(code, enchCode, "<"+rcpt+"> "+msg)
}
// If done gets false, the panic occured in LMTPData and the connection
// should be closed.
if !<-done {
c.Close()
}
}
func toSMTPStatus(err error) (code int, enchCode EnhancedCode, msg string) {
if err != nil {
if smtperr, ok := err.(*SMTPError); ok {
return smtperr.Code, smtperr.EnhancedCode, smtperr.Message
} else {
return 554, EnhancedCode{5, 0, 0}, "Error: transaction failed, blame it on the weather: " + err.Error()
}
}
return 250, EnhancedCode{2, 0, 0}, "OK: queued"
}
func (c *Conn) Reject() {
c.writeResponse(421, EnhancedCode{4, 4, 5}, "Too busy. Try again later.")
c.Close()
}
func (c *Conn) greet() {
protocol := "ESMTP"
if c.server.LMTP {
protocol = "LMTP"
}
c.writeResponse(220, NoEnhancedCode, fmt.Sprintf("%v %s Service Ready", c.server.Domain, protocol))
}
func (c *Conn) writeResponse(code int, enhCode EnhancedCode, text ...string) {
// TODO: error handling
if c.server.WriteTimeout != 0 {
c.conn.SetWriteDeadline(time.Now().Add(c.server.WriteTimeout))
}
// All responses must include an enhanced code, if it is missing - use
// a generic code X.0.0.
if enhCode == EnhancedCodeNotSet {
cat := code / 100
switch cat {
case 2, 4, 5:
enhCode = EnhancedCode{cat, 0, 0}
default:
enhCode = NoEnhancedCode
}
}
for i := 0; i < len(text)-1; i++ {
c.text.PrintfLine("%d-%v", code, text[i])
}
if enhCode == NoEnhancedCode {
c.text.PrintfLine("%d %v", code, text[len(text)-1])
} else {
c.text.PrintfLine("%d %v.%v.%v %v", code, enhCode[0], enhCode[1], enhCode[2], text[len(text)-1])
}
}
// Reads a line of input
func (c *Conn) readLine() (string, error) {
if c.server.ReadTimeout != 0 {
if err := c.conn.SetReadDeadline(time.Now().Add(c.server.ReadTimeout)); err != nil {
return "", err
}
}
return c.text.ReadLine()
}
func (c *Conn) reset() {
c.locker.Lock()
defer c.locker.Unlock()
if c.bdatPipe != nil {
c.bdatPipe.CloseWithError(ErrDataReset)
c.bdatPipe = nil
}
c.bdatStatus = nil
c.bytesReceived = 0
if c.session != nil {
c.session.Reset()
}
c.fromReceived = false
c.recipients = nil
}
| {
authCap := "AUTH"
for name := range c.server.auths {
authCap += " " + name
}
caps = append(caps, authCap)
} | conditional_block |
lib.rs | //! A high-level API for programmatically interacting with web pages
//! through WebDriver.
//!
//! [WebDriver protocol]: https://www.w3.org/TR/webdriver/
//! [CSS selectors]: https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors
//! [powerful]: https://developer.mozilla.org/en-US/docs/Web/CSS/Pseudo-classes
//! [operators]: https://developer.mozilla.org/en-US/docs/Web/CSS/Attribute_selectors
//! [WebDriver compatible]: https://github.com/Fyrd/caniuse/issues/2757#issuecomment-304529217
//! [`geckodriver`]: https://github.com/mozilla/geckodriver
#[macro_use]
extern crate error_chain;
pub mod error;
mod protocol;
use crate::error::*;
pub use hyper::Method;
use protocol::Client;
use serde_json::Value;
use std::time::Duration;
use tokio::time::sleep;
use webdriver::{
command::{SwitchToFrameParameters, SwitchToWindowParameters, WebDriverCommand},
common::{FrameId, WebElement, ELEMENT_KEY},
error::{ErrorStatus, WebDriverError},
};
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
pub enum Locator {
Css(String),
LinkText(String),
XPath(String),
}
impl Into<webdriver::command::LocatorParameters> for Locator {
fn into(self) -> webdriver::command::LocatorParameters {
match self {
Locator::Css(s) => webdriver::command::LocatorParameters {
using: webdriver::common::LocatorStrategy::CSSSelector,
value: s,
},
Locator::XPath(s) => webdriver::command::LocatorParameters {
using: webdriver::common::LocatorStrategy::XPath,
value: s,
},
Locator::LinkText(s) => webdriver::command::LocatorParameters {
using: webdriver::common::LocatorStrategy::LinkText,
value: s,
},
}
}
}
pub struct Driver(Client);
macro_rules! generate_wait_for_find {
($name:ident, $search_fn:ident, $return_typ:ty) => {
/// Wait for the specified element(s) to appear on the page
pub async fn $name(
&self,
search: Locator,
root: Option<WebElement>
) -> Result<$return_typ> {
loop {
match self.$search_fn(search.clone(), root.clone()).await {
Ok(e) => break Ok(e),
Err(Error(ErrorKind::WebDriver(
WebDriverError {error: ErrorStatus::NoSuchElement, ..}
), _)) => sleep(Duration::from_millis(100)).await,
Err(e) => break Err(e)
}
}
}
}
}
impl Driver {
/// Create a new webdriver session on the specified server
pub async fn new(webdriver_url: &str, user_agent: Option<String>) -> Result<Self> {
Ok(Driver(Client::new(webdriver_url, user_agent).await?))
}
/// Navigate directly to the given URL.
pub async fn goto<'a>(&'a self, url: &'a str) -> Result<()> {
let cmd = WebDriverCommand::Get(webdriver::command::GetParameters {
url: self.current_url().await?.join(url)?.into(),
});
self.0.issue_cmd(&cmd).await?;
Ok(())
}
/// Retrieve the currently active URL for this session.
pub async fn current_url(&self) -> Result<url::Url> {
match self.0.issue_cmd(&WebDriverCommand::GetCurrentUrl).await?.as_str() {
Some(url) => Ok(url.parse()?),
None => bail!(ErrorKind::NotW3C(Value::Null)),
}
}
/// Get the HTML source for the current page.
pub async fn source(&self) -> Result<String> {
match self.0.issue_cmd(&WebDriverCommand::GetPageSource).await?.as_str() {
Some(src) => Ok(src.to_string()),
None => bail!(ErrorKind::NotW3C(Value::Null)),
}
}
/// Go back to the previous page.
pub async fn back(&self) -> Result<()> {
self.0.issue_cmd(&WebDriverCommand::GoBack).await?;
Ok(())
}
/// Refresh the current previous page.
pub async fn refresh(&self) -> Result<()> {
self.0.issue_cmd(&WebDriverCommand::Refresh).await?;
Ok(())
}
/// Switch the focus to the frame contained in Element
pub async fn switch_to_frame(&self, frame: WebElement) -> Result<()> {
let p = SwitchToFrameParameters {
id: Some(FrameId::Element(frame)),
};
let cmd = WebDriverCommand::SwitchToFrame(p);
self.0.issue_cmd(&cmd).await?;
Ok(())
}
/// Switch the focus to this frame's parent frame
pub async fn switch_to_parent_frame(&self) -> Result<()> {
self.0.issue_cmd(&WebDriverCommand::SwitchToParentFrame).await?;
Ok(())
}
/// Switch the focus to the window identified by handle
pub async fn switch_to_window(&self, window: String) -> Result<()> {
let p = SwitchToWindowParameters { handle: window };
let cmd = WebDriverCommand::SwitchToWindow(p);
self.0.issue_cmd(&cmd).await?;
Ok(())
}
/// Execute the given JavaScript `script` in the current browser session.
///
/// `args` is available to the script inside the `arguments`
/// array. Since `Element` implements `ToJson`, you can also
/// provide serialized `Element`s as arguments, and they will
/// correctly serialize to DOM elements on the other side.
pub async fn execute(&self, script: String, mut args: Vec<Value>) -> Result<Value> {
self.fixup_elements(&mut args);
let cmd = webdriver::command::JavascriptCommandParameters {
script: script,
args: Some(args),
};
let cmd = WebDriverCommand::ExecuteScript(cmd);
self.0.issue_cmd(&cmd).await
}
/// Wait for the page to navigate to a new URL before proceeding.
///
/// If the `current` URL is not provided, `self.current_url()`
/// will be used. Note however that this introduces a race
/// condition: the browser could finish navigating *before* we
/// call `current_url()`, which would lead to an eternal wait.
pub async fn wait_for_navigation(&self, current: Option<url::Url>) -> Result<()> {
let current = match current {
Some(current) => current,
None => self.current_url().await?,
};
loop {
if self.current_url().await? != current {
break Ok(());
}
sleep(Duration::from_millis(100)).await
}
}
/// Starting from the document root, find the first element on the page that
/// matches the specified selector.
pub async fn | (
&self,
locator: Locator,
root: Option<WebElement>,
) -> Result<WebElement> {
let cmd = match root {
Option::None => WebDriverCommand::FindElement(locator.into()),
Option::Some(elt) => {
WebDriverCommand::FindElementElement(elt, locator.into())
}
};
let res = self.0.issue_cmd(&cmd).await?;
Ok(self.parse_lookup(res)?)
}
pub async fn find_all(
&self,
locator: Locator,
root: Option<WebElement>,
) -> Result<Vec<WebElement>> {
let cmd = match root {
Option::None => WebDriverCommand::FindElements(locator.into()),
Option::Some(elt) => {
WebDriverCommand::FindElementElements(elt, locator.into())
}
};
match self.0.issue_cmd(&cmd).await? {
Value::Array(a) => Ok(a
.into_iter()
.map(|e| self.parse_lookup(e))
.collect::<Result<Vec<WebElement>>>()?),
r => bail!(ErrorKind::NotW3C(r)),
}
}
generate_wait_for_find!(wait_for_find, find, WebElement);
generate_wait_for_find!(wait_for_find_all, find_all, Vec<WebElement>);
/// Extract the `WebElement` from a `FindElement` or `FindElementElement` command.
fn parse_lookup(&self, mut res: Value) -> Result<WebElement> {
let key = if self.0.legacy {
"ELEMENT"
} else {
ELEMENT_KEY
};
let o = {
if let Some(o) = res.as_object_mut() {
o
} else {
bail!(ErrorKind::NotW3C(res))
}
};
match o.remove(key) {
None => bail!(ErrorKind::NotW3C(res)),
Some(Value::String(wei)) => Ok(webdriver::common::WebElement(wei)),
Some(v) => {
o.insert(key.to_string(), v);
bail!(ErrorKind::NotW3C(res))
}
}
}
fn fixup_elements(&self, args: &mut [Value]) {
if self.0.legacy {
for arg in args {
// the serialization of WebElement uses the W3C index,
// but legacy implementations need us to use the "ELEMENT" index
if let Value::Object(ref mut o) = *arg {
if let Some(wei) = o.remove(ELEMENT_KEY) {
o.insert("ELEMENT".to_string(), wei);
}
}
}
}
}
/// Look up an attribute value for this element by name.
pub async fn attr(
&self,
eid: WebElement,
attribute: String,
) -> Result<Option<String>> {
let cmd = WebDriverCommand::GetElementAttribute(eid, attribute);
match self.0.issue_cmd(&cmd).await? {
Value::String(v) => Ok(Some(v)),
Value::Null => Ok(None),
v => bail!(ErrorKind::NotW3C(v)),
}
}
/// Look up a DOM property for this element by name.
pub async fn prop(&self, eid: WebElement, prop: String) -> Result<Option<String>> {
let cmd = WebDriverCommand::GetElementProperty(eid, prop);
match self.0.issue_cmd(&cmd).await? {
Value::String(v) => Ok(Some(v)),
Value::Null => Ok(None),
v => bail!(ErrorKind::NotW3C(v)),
}
}
/// Retrieve the text contents of this elment.
pub async fn text(&self, eid: WebElement) -> Result<String> {
let cmd = WebDriverCommand::GetElementText(eid);
match self.0.issue_cmd(&cmd).await? {
Value::String(v) => Ok(v),
v => bail!(ErrorKind::NotW3C(v)),
}
}
/// Retrieve the HTML contents of this element. if inner is true,
/// also return the wrapping nodes html. Note: this is the same as
/// calling `prop("innerHTML")` or `prop("outerHTML")`.
pub async fn html(&self, eid: WebElement, inner: bool) -> Result<String> {
let prop = if inner { "innerHTML" } else { "outerHTML" };
self.prop(eid, prop.to_owned()).await?
.ok_or_else(|| Error::from(ErrorKind::NotW3C(Value::Null)))
}
/// Click on this element
pub async fn click(&self, eid: WebElement) -> Result<()> {
let cmd = WebDriverCommand::ElementClick(eid);
let r = self.0.issue_cmd(&cmd).await?;
if r.is_null() || r.as_object().map(|o| o.is_empty()).unwrap_or(false) {
// geckodriver returns {} :(
Ok(())
} else {
bail!(ErrorKind::NotW3C(r))
}
}
/// Scroll this element into view
pub async fn scroll_into_view(&self, eid: WebElement) -> Result<()> {
let args = vec![serde_json::to_value(eid)?];
let js = "arguments[0].scrollIntoView(true)".to_string();
self.clone().execute(js, args).await?;
Ok(())
}
/// Follow the `href` target of the element matching the given CSS
/// selector *without* causing a click interaction.
pub async fn follow(&self, eid: WebElement) -> Result<()> {
match self.clone().attr(eid.clone(), String::from("href")).await? {
None => bail!("no href attribute"),
Some(href) => {
let current = self.current_url().await?.join(&href)?;
self.goto(current.as_str()).await
}
}
}
/// Set the `value` of the input element named `name` which is a child of `eid`
pub async fn set_by_name(
&self,
eid: WebElement,
name: String,
value: String,
) -> Result<()> {
let locator = Locator::Css(format!("input[name='{}']", name));
let elt = self.clone().find(locator.into(), Some(eid)).await?;
let args = {
let mut a = vec![serde_json::to_value(elt)?, Value::String(value)];
self.fixup_elements(&mut a);
a
};
let js = "arguments[0].value = arguments[1]".to_string();
let res = self.clone().execute(js, args).await?;
if res.is_null() {
Ok(())
} else {
bail!(ErrorKind::NotW3C(res))
}
}
/// Submit the form specified by `eid` with the first submit button
pub async fn submit(&self, eid: WebElement) -> Result<()> {
let l = Locator::Css("input[type=submit],button[type=submit]".into());
self.submit_with(eid, l).await
}
/// Submit the form `eid` using the button matched by the given selector.
pub async fn submit_with(&self, eid: WebElement, button: Locator) -> Result<()> {
let elt = self.clone().find(button.into(), Some(eid)).await?;
Ok(self.clone().click(elt).await?)
}
/// Submit this form using the form submit button with the given
/// label (case-insensitive).
pub async fn submit_using(&self, eid: WebElement, button_label: String) -> Result<()> {
let escaped = button_label.replace('\\', "\\\\").replace('"', "\\\"");
let btn = format!(
"input[type=submit][value=\"{}\" i],\
button[type=submit][value=\"{}\" i]",
escaped, escaped
);
Ok(self.submit_with(eid, Locator::Css(btn)).await?)
}
/// Submit this form directly, without clicking any buttons.
///
/// This can be useful to bypass forms that perform various magic
/// when the submit button is clicked, or that hijack click events
/// altogether.
///
/// Note that since no button is actually clicked, the
/// `name=value` pair for the submit button will not be
/// submitted. This can be circumvented by using `submit_sneaky`
/// instead.
pub async fn submit_direct(&self, eid: WebElement) -> Result<()> {
// some sites are silly, and name their submit button
// "submit". this ends up overwriting the "submit" function of
// the form with a reference to the submit button itself, so
// we can't call .submit(). we get around this by creating a
// *new* form, and using *its* submit() handler but with this
// pointed to the real form. solution from here:
// https://stackoverflow.com/q/833032/472927#comment23038712_834197
let js = "document.createElement('form').submit.call(arguments[0])".to_string();
let args = {
let mut a = vec![serde_json::to_value(eid)?];
self.fixup_elements(&mut a);
a
};
self.clone().execute(js, args).await?;
Ok(())
}
/// Submit this form directly, without clicking any buttons, and
/// with an extra field.
///
/// Like `submit_direct`, this method will submit this form
/// without clicking a submit button. However, it will *also*
/// inject a hidden input element on the page that carries the
/// given `field=value` mapping. This allows you to emulate the
/// form data as it would have been *if* the submit button was
/// indeed clicked.
pub async fn submit_sneaky(
&self,
eid: WebElement,
field: String,
value: String,
) -> Result<()> {
let js = r#"
var h = document.createElement('input');
h.setAttribute('type', 'hidden');
h.setAttribute('name', arguments[1]);
h.value = arguments[2];
arguments[0].appendChild(h);
"#
.to_string();
let args = {
let mut a = vec![
serde_json::to_value(eid)?,
Value::String(field),
Value::String(value),
];
self.fixup_elements(&mut a);
a
};
self.execute(js, args).await?;
Ok(())
}
}
| find | identifier_name |
lib.rs | //! A high-level API for programmatically interacting with web pages
//! through WebDriver.
//!
//! [WebDriver protocol]: https://www.w3.org/TR/webdriver/
//! [CSS selectors]: https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors
//! [powerful]: https://developer.mozilla.org/en-US/docs/Web/CSS/Pseudo-classes
//! [operators]: https://developer.mozilla.org/en-US/docs/Web/CSS/Attribute_selectors
//! [WebDriver compatible]: https://github.com/Fyrd/caniuse/issues/2757#issuecomment-304529217
//! [`geckodriver`]: https://github.com/mozilla/geckodriver
#[macro_use]
extern crate error_chain;
pub mod error;
mod protocol;
use crate::error::*;
pub use hyper::Method;
use protocol::Client;
use serde_json::Value;
use std::time::Duration;
use tokio::time::sleep;
use webdriver::{
command::{SwitchToFrameParameters, SwitchToWindowParameters, WebDriverCommand},
common::{FrameId, WebElement, ELEMENT_KEY},
error::{ErrorStatus, WebDriverError},
};
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
pub enum Locator {
Css(String),
LinkText(String),
XPath(String),
}
impl Into<webdriver::command::LocatorParameters> for Locator {
fn into(self) -> webdriver::command::LocatorParameters {
match self {
Locator::Css(s) => webdriver::command::LocatorParameters {
using: webdriver::common::LocatorStrategy::CSSSelector,
value: s,
},
Locator::XPath(s) => webdriver::command::LocatorParameters {
using: webdriver::common::LocatorStrategy::XPath,
value: s,
},
Locator::LinkText(s) => webdriver::command::LocatorParameters {
using: webdriver::common::LocatorStrategy::LinkText,
value: s,
},
}
}
}
pub struct Driver(Client);
macro_rules! generate_wait_for_find { | /// Wait for the specified element(s) to appear on the page
pub async fn $name(
&self,
search: Locator,
root: Option<WebElement>
) -> Result<$return_typ> {
loop {
match self.$search_fn(search.clone(), root.clone()).await {
Ok(e) => break Ok(e),
Err(Error(ErrorKind::WebDriver(
WebDriverError {error: ErrorStatus::NoSuchElement, ..}
), _)) => sleep(Duration::from_millis(100)).await,
Err(e) => break Err(e)
}
}
}
}
}
impl Driver {
/// Create a new webdriver session on the specified server
pub async fn new(webdriver_url: &str, user_agent: Option<String>) -> Result<Self> {
Ok(Driver(Client::new(webdriver_url, user_agent).await?))
}
/// Navigate directly to the given URL.
pub async fn goto<'a>(&'a self, url: &'a str) -> Result<()> {
let cmd = WebDriverCommand::Get(webdriver::command::GetParameters {
url: self.current_url().await?.join(url)?.into(),
});
self.0.issue_cmd(&cmd).await?;
Ok(())
}
/// Retrieve the currently active URL for this session.
pub async fn current_url(&self) -> Result<url::Url> {
match self.0.issue_cmd(&WebDriverCommand::GetCurrentUrl).await?.as_str() {
Some(url) => Ok(url.parse()?),
None => bail!(ErrorKind::NotW3C(Value::Null)),
}
}
/// Get the HTML source for the current page.
pub async fn source(&self) -> Result<String> {
match self.0.issue_cmd(&WebDriverCommand::GetPageSource).await?.as_str() {
Some(src) => Ok(src.to_string()),
None => bail!(ErrorKind::NotW3C(Value::Null)),
}
}
/// Go back to the previous page.
pub async fn back(&self) -> Result<()> {
self.0.issue_cmd(&WebDriverCommand::GoBack).await?;
Ok(())
}
/// Refresh the current previous page.
pub async fn refresh(&self) -> Result<()> {
self.0.issue_cmd(&WebDriverCommand::Refresh).await?;
Ok(())
}
/// Switch the focus to the frame contained in Element
pub async fn switch_to_frame(&self, frame: WebElement) -> Result<()> {
let p = SwitchToFrameParameters {
id: Some(FrameId::Element(frame)),
};
let cmd = WebDriverCommand::SwitchToFrame(p);
self.0.issue_cmd(&cmd).await?;
Ok(())
}
/// Switch the focus to this frame's parent frame
pub async fn switch_to_parent_frame(&self) -> Result<()> {
self.0.issue_cmd(&WebDriverCommand::SwitchToParentFrame).await?;
Ok(())
}
/// Switch the focus to the window identified by handle
pub async fn switch_to_window(&self, window: String) -> Result<()> {
let p = SwitchToWindowParameters { handle: window };
let cmd = WebDriverCommand::SwitchToWindow(p);
self.0.issue_cmd(&cmd).await?;
Ok(())
}
/// Execute the given JavaScript `script` in the current browser session.
///
/// `args` is available to the script inside the `arguments`
/// array. Since `Element` implements `ToJson`, you can also
/// provide serialized `Element`s as arguments, and they will
/// correctly serialize to DOM elements on the other side.
pub async fn execute(&self, script: String, mut args: Vec<Value>) -> Result<Value> {
self.fixup_elements(&mut args);
let cmd = webdriver::command::JavascriptCommandParameters {
script: script,
args: Some(args),
};
let cmd = WebDriverCommand::ExecuteScript(cmd);
self.0.issue_cmd(&cmd).await
}
/// Wait for the page to navigate to a new URL before proceeding.
///
/// If the `current` URL is not provided, `self.current_url()`
/// will be used. Note however that this introduces a race
/// condition: the browser could finish navigating *before* we
/// call `current_url()`, which would lead to an eternal wait.
pub async fn wait_for_navigation(&self, current: Option<url::Url>) -> Result<()> {
let current = match current {
Some(current) => current,
None => self.current_url().await?,
};
loop {
if self.current_url().await? != current {
break Ok(());
}
sleep(Duration::from_millis(100)).await
}
}
/// Starting from the document root, find the first element on the page that
/// matches the specified selector.
pub async fn find(
&self,
locator: Locator,
root: Option<WebElement>,
) -> Result<WebElement> {
let cmd = match root {
Option::None => WebDriverCommand::FindElement(locator.into()),
Option::Some(elt) => {
WebDriverCommand::FindElementElement(elt, locator.into())
}
};
let res = self.0.issue_cmd(&cmd).await?;
Ok(self.parse_lookup(res)?)
}
pub async fn find_all(
&self,
locator: Locator,
root: Option<WebElement>,
) -> Result<Vec<WebElement>> {
let cmd = match root {
Option::None => WebDriverCommand::FindElements(locator.into()),
Option::Some(elt) => {
WebDriverCommand::FindElementElements(elt, locator.into())
}
};
match self.0.issue_cmd(&cmd).await? {
Value::Array(a) => Ok(a
.into_iter()
.map(|e| self.parse_lookup(e))
.collect::<Result<Vec<WebElement>>>()?),
r => bail!(ErrorKind::NotW3C(r)),
}
}
generate_wait_for_find!(wait_for_find, find, WebElement);
generate_wait_for_find!(wait_for_find_all, find_all, Vec<WebElement>);
/// Extract the `WebElement` from a `FindElement` or `FindElementElement` command.
fn parse_lookup(&self, mut res: Value) -> Result<WebElement> {
let key = if self.0.legacy {
"ELEMENT"
} else {
ELEMENT_KEY
};
let o = {
if let Some(o) = res.as_object_mut() {
o
} else {
bail!(ErrorKind::NotW3C(res))
}
};
match o.remove(key) {
None => bail!(ErrorKind::NotW3C(res)),
Some(Value::String(wei)) => Ok(webdriver::common::WebElement(wei)),
Some(v) => {
o.insert(key.to_string(), v);
bail!(ErrorKind::NotW3C(res))
}
}
}
fn fixup_elements(&self, args: &mut [Value]) {
if self.0.legacy {
for arg in args {
// the serialization of WebElement uses the W3C index,
// but legacy implementations need us to use the "ELEMENT" index
if let Value::Object(ref mut o) = *arg {
if let Some(wei) = o.remove(ELEMENT_KEY) {
o.insert("ELEMENT".to_string(), wei);
}
}
}
}
}
/// Look up an attribute value for this element by name.
pub async fn attr(
&self,
eid: WebElement,
attribute: String,
) -> Result<Option<String>> {
let cmd = WebDriverCommand::GetElementAttribute(eid, attribute);
match self.0.issue_cmd(&cmd).await? {
Value::String(v) => Ok(Some(v)),
Value::Null => Ok(None),
v => bail!(ErrorKind::NotW3C(v)),
}
}
/// Look up a DOM property for this element by name.
pub async fn prop(&self, eid: WebElement, prop: String) -> Result<Option<String>> {
let cmd = WebDriverCommand::GetElementProperty(eid, prop);
match self.0.issue_cmd(&cmd).await? {
Value::String(v) => Ok(Some(v)),
Value::Null => Ok(None),
v => bail!(ErrorKind::NotW3C(v)),
}
}
/// Retrieve the text contents of this elment.
pub async fn text(&self, eid: WebElement) -> Result<String> {
let cmd = WebDriverCommand::GetElementText(eid);
match self.0.issue_cmd(&cmd).await? {
Value::String(v) => Ok(v),
v => bail!(ErrorKind::NotW3C(v)),
}
}
/// Retrieve the HTML contents of this element. if inner is true,
/// also return the wrapping nodes html. Note: this is the same as
/// calling `prop("innerHTML")` or `prop("outerHTML")`.
pub async fn html(&self, eid: WebElement, inner: bool) -> Result<String> {
let prop = if inner { "innerHTML" } else { "outerHTML" };
self.prop(eid, prop.to_owned()).await?
.ok_or_else(|| Error::from(ErrorKind::NotW3C(Value::Null)))
}
/// Click on this element
pub async fn click(&self, eid: WebElement) -> Result<()> {
let cmd = WebDriverCommand::ElementClick(eid);
let r = self.0.issue_cmd(&cmd).await?;
if r.is_null() || r.as_object().map(|o| o.is_empty()).unwrap_or(false) {
// geckodriver returns {} :(
Ok(())
} else {
bail!(ErrorKind::NotW3C(r))
}
}
/// Scroll this element into view
pub async fn scroll_into_view(&self, eid: WebElement) -> Result<()> {
let args = vec![serde_json::to_value(eid)?];
let js = "arguments[0].scrollIntoView(true)".to_string();
self.clone().execute(js, args).await?;
Ok(())
}
/// Follow the `href` target of the element matching the given CSS
/// selector *without* causing a click interaction.
pub async fn follow(&self, eid: WebElement) -> Result<()> {
match self.clone().attr(eid.clone(), String::from("href")).await? {
None => bail!("no href attribute"),
Some(href) => {
let current = self.current_url().await?.join(&href)?;
self.goto(current.as_str()).await
}
}
}
/// Set the `value` of the input element named `name` which is a child of `eid`
pub async fn set_by_name(
&self,
eid: WebElement,
name: String,
value: String,
) -> Result<()> {
let locator = Locator::Css(format!("input[name='{}']", name));
let elt = self.clone().find(locator.into(), Some(eid)).await?;
let args = {
let mut a = vec![serde_json::to_value(elt)?, Value::String(value)];
self.fixup_elements(&mut a);
a
};
let js = "arguments[0].value = arguments[1]".to_string();
let res = self.clone().execute(js, args).await?;
if res.is_null() {
Ok(())
} else {
bail!(ErrorKind::NotW3C(res))
}
}
/// Submit the form specified by `eid` with the first submit button
pub async fn submit(&self, eid: WebElement) -> Result<()> {
let l = Locator::Css("input[type=submit],button[type=submit]".into());
self.submit_with(eid, l).await
}
/// Submit the form `eid` using the button matched by the given selector.
pub async fn submit_with(&self, eid: WebElement, button: Locator) -> Result<()> {
let elt = self.clone().find(button.into(), Some(eid)).await?;
Ok(self.clone().click(elt).await?)
}
/// Submit this form using the form submit button with the given
/// label (case-insensitive).
pub async fn submit_using(&self, eid: WebElement, button_label: String) -> Result<()> {
let escaped = button_label.replace('\\', "\\\\").replace('"', "\\\"");
let btn = format!(
"input[type=submit][value=\"{}\" i],\
button[type=submit][value=\"{}\" i]",
escaped, escaped
);
Ok(self.submit_with(eid, Locator::Css(btn)).await?)
}
/// Submit this form directly, without clicking any buttons.
///
/// This can be useful to bypass forms that perform various magic
/// when the submit button is clicked, or that hijack click events
/// altogether.
///
/// Note that since no button is actually clicked, the
/// `name=value` pair for the submit button will not be
/// submitted. This can be circumvented by using `submit_sneaky`
/// instead.
pub async fn submit_direct(&self, eid: WebElement) -> Result<()> {
// some sites are silly, and name their submit button
// "submit". this ends up overwriting the "submit" function of
// the form with a reference to the submit button itself, so
// we can't call .submit(). we get around this by creating a
// *new* form, and using *its* submit() handler but with this
// pointed to the real form. solution from here:
// https://stackoverflow.com/q/833032/472927#comment23038712_834197
let js = "document.createElement('form').submit.call(arguments[0])".to_string();
let args = {
let mut a = vec![serde_json::to_value(eid)?];
self.fixup_elements(&mut a);
a
};
self.clone().execute(js, args).await?;
Ok(())
}
/// Submit this form directly, without clicking any buttons, and
/// with an extra field.
///
/// Like `submit_direct`, this method will submit this form
/// without clicking a submit button. However, it will *also*
/// inject a hidden input element on the page that carries the
/// given `field=value` mapping. This allows you to emulate the
/// form data as it would have been *if* the submit button was
/// indeed clicked.
pub async fn submit_sneaky(
&self,
eid: WebElement,
field: String,
value: String,
) -> Result<()> {
let js = r#"
var h = document.createElement('input');
h.setAttribute('type', 'hidden');
h.setAttribute('name', arguments[1]);
h.value = arguments[2];
arguments[0].appendChild(h);
"#
.to_string();
let args = {
let mut a = vec![
serde_json::to_value(eid)?,
Value::String(field),
Value::String(value),
];
self.fixup_elements(&mut a);
a
};
self.execute(js, args).await?;
Ok(())
}
} | ($name:ident, $search_fn:ident, $return_typ:ty) => { | random_line_split |
lib.rs | //! A high-level API for programmatically interacting with web pages
//! through WebDriver.
//!
//! [WebDriver protocol]: https://www.w3.org/TR/webdriver/
//! [CSS selectors]: https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors
//! [powerful]: https://developer.mozilla.org/en-US/docs/Web/CSS/Pseudo-classes
//! [operators]: https://developer.mozilla.org/en-US/docs/Web/CSS/Attribute_selectors
//! [WebDriver compatible]: https://github.com/Fyrd/caniuse/issues/2757#issuecomment-304529217
//! [`geckodriver`]: https://github.com/mozilla/geckodriver
#[macro_use]
extern crate error_chain;
pub mod error;
mod protocol;
use crate::error::*;
pub use hyper::Method;
use protocol::Client;
use serde_json::Value;
use std::time::Duration;
use tokio::time::sleep;
use webdriver::{
command::{SwitchToFrameParameters, SwitchToWindowParameters, WebDriverCommand},
common::{FrameId, WebElement, ELEMENT_KEY},
error::{ErrorStatus, WebDriverError},
};
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Hash)]
pub enum Locator {
Css(String),
LinkText(String),
XPath(String),
}
impl Into<webdriver::command::LocatorParameters> for Locator {
fn into(self) -> webdriver::command::LocatorParameters {
match self {
Locator::Css(s) => webdriver::command::LocatorParameters {
using: webdriver::common::LocatorStrategy::CSSSelector,
value: s,
},
Locator::XPath(s) => webdriver::command::LocatorParameters {
using: webdriver::common::LocatorStrategy::XPath,
value: s,
},
Locator::LinkText(s) => webdriver::command::LocatorParameters {
using: webdriver::common::LocatorStrategy::LinkText,
value: s,
},
}
}
}
pub struct Driver(Client);
macro_rules! generate_wait_for_find {
($name:ident, $search_fn:ident, $return_typ:ty) => {
/// Wait for the specified element(s) to appear on the page
pub async fn $name(
&self,
search: Locator,
root: Option<WebElement>
) -> Result<$return_typ> {
loop {
match self.$search_fn(search.clone(), root.clone()).await {
Ok(e) => break Ok(e),
Err(Error(ErrorKind::WebDriver(
WebDriverError {error: ErrorStatus::NoSuchElement, ..}
), _)) => sleep(Duration::from_millis(100)).await,
Err(e) => break Err(e)
}
}
}
}
}
impl Driver {
/// Create a new webdriver session on the specified server
pub async fn new(webdriver_url: &str, user_agent: Option<String>) -> Result<Self> {
Ok(Driver(Client::new(webdriver_url, user_agent).await?))
}
/// Navigate directly to the given URL.
pub async fn goto<'a>(&'a self, url: &'a str) -> Result<()> {
let cmd = WebDriverCommand::Get(webdriver::command::GetParameters {
url: self.current_url().await?.join(url)?.into(),
});
self.0.issue_cmd(&cmd).await?;
Ok(())
}
/// Retrieve the currently active URL for this session.
pub async fn current_url(&self) -> Result<url::Url> {
match self.0.issue_cmd(&WebDriverCommand::GetCurrentUrl).await?.as_str() {
Some(url) => Ok(url.parse()?),
None => bail!(ErrorKind::NotW3C(Value::Null)),
}
}
/// Get the HTML source for the current page.
pub async fn source(&self) -> Result<String> {
match self.0.issue_cmd(&WebDriverCommand::GetPageSource).await?.as_str() {
Some(src) => Ok(src.to_string()),
None => bail!(ErrorKind::NotW3C(Value::Null)),
}
}
/// Go back to the previous page.
pub async fn back(&self) -> Result<()> {
self.0.issue_cmd(&WebDriverCommand::GoBack).await?;
Ok(())
}
/// Refresh the current previous page.
pub async fn refresh(&self) -> Result<()> {
self.0.issue_cmd(&WebDriverCommand::Refresh).await?;
Ok(())
}
/// Switch the focus to the frame contained in Element
pub async fn switch_to_frame(&self, frame: WebElement) -> Result<()> {
let p = SwitchToFrameParameters {
id: Some(FrameId::Element(frame)),
};
let cmd = WebDriverCommand::SwitchToFrame(p);
self.0.issue_cmd(&cmd).await?;
Ok(())
}
/// Switch the focus to this frame's parent frame
pub async fn switch_to_parent_frame(&self) -> Result<()> {
self.0.issue_cmd(&WebDriverCommand::SwitchToParentFrame).await?;
Ok(())
}
/// Switch the focus to the window identified by handle
pub async fn switch_to_window(&self, window: String) -> Result<()> {
let p = SwitchToWindowParameters { handle: window };
let cmd = WebDriverCommand::SwitchToWindow(p);
self.0.issue_cmd(&cmd).await?;
Ok(())
}
/// Execute the given JavaScript `script` in the current browser session.
///
/// `args` is available to the script inside the `arguments`
/// array. Since `Element` implements `ToJson`, you can also
/// provide serialized `Element`s as arguments, and they will
/// correctly serialize to DOM elements on the other side.
pub async fn execute(&self, script: String, mut args: Vec<Value>) -> Result<Value> {
self.fixup_elements(&mut args);
let cmd = webdriver::command::JavascriptCommandParameters {
script: script,
args: Some(args),
};
let cmd = WebDriverCommand::ExecuteScript(cmd);
self.0.issue_cmd(&cmd).await
}
/// Wait for the page to navigate to a new URL before proceeding.
///
/// If the `current` URL is not provided, `self.current_url()`
/// will be used. Note however that this introduces a race
/// condition: the browser could finish navigating *before* we
/// call `current_url()`, which would lead to an eternal wait.
pub async fn wait_for_navigation(&self, current: Option<url::Url>) -> Result<()> {
let current = match current {
Some(current) => current,
None => self.current_url().await?,
};
loop {
if self.current_url().await? != current {
break Ok(());
}
sleep(Duration::from_millis(100)).await
}
}
/// Starting from the document root, find the first element on the page that
/// matches the specified selector.
pub async fn find(
&self,
locator: Locator,
root: Option<WebElement>,
) -> Result<WebElement> {
let cmd = match root {
Option::None => WebDriverCommand::FindElement(locator.into()),
Option::Some(elt) => {
WebDriverCommand::FindElementElement(elt, locator.into())
}
};
let res = self.0.issue_cmd(&cmd).await?;
Ok(self.parse_lookup(res)?)
}
pub async fn find_all(
&self,
locator: Locator,
root: Option<WebElement>,
) -> Result<Vec<WebElement>> {
let cmd = match root {
Option::None => WebDriverCommand::FindElements(locator.into()),
Option::Some(elt) => {
WebDriverCommand::FindElementElements(elt, locator.into())
}
};
match self.0.issue_cmd(&cmd).await? {
Value::Array(a) => Ok(a
.into_iter()
.map(|e| self.parse_lookup(e))
.collect::<Result<Vec<WebElement>>>()?),
r => bail!(ErrorKind::NotW3C(r)),
}
}
generate_wait_for_find!(wait_for_find, find, WebElement);
generate_wait_for_find!(wait_for_find_all, find_all, Vec<WebElement>);
/// Extract the `WebElement` from a `FindElement` or `FindElementElement` command.
fn parse_lookup(&self, mut res: Value) -> Result<WebElement> {
let key = if self.0.legacy {
"ELEMENT"
} else {
ELEMENT_KEY
};
let o = {
if let Some(o) = res.as_object_mut() {
o
} else {
bail!(ErrorKind::NotW3C(res))
}
};
match o.remove(key) {
None => bail!(ErrorKind::NotW3C(res)),
Some(Value::String(wei)) => Ok(webdriver::common::WebElement(wei)),
Some(v) => {
o.insert(key.to_string(), v);
bail!(ErrorKind::NotW3C(res))
}
}
}
fn fixup_elements(&self, args: &mut [Value]) {
if self.0.legacy {
for arg in args {
// the serialization of WebElement uses the W3C index,
// but legacy implementations need us to use the "ELEMENT" index
if let Value::Object(ref mut o) = *arg {
if let Some(wei) = o.remove(ELEMENT_KEY) {
o.insert("ELEMENT".to_string(), wei);
}
}
}
}
}
/// Look up an attribute value for this element by name.
pub async fn attr(
&self,
eid: WebElement,
attribute: String,
) -> Result<Option<String>> {
let cmd = WebDriverCommand::GetElementAttribute(eid, attribute);
match self.0.issue_cmd(&cmd).await? {
Value::String(v) => Ok(Some(v)),
Value::Null => Ok(None),
v => bail!(ErrorKind::NotW3C(v)),
}
}
/// Look up a DOM property for this element by name.
pub async fn prop(&self, eid: WebElement, prop: String) -> Result<Option<String>> {
let cmd = WebDriverCommand::GetElementProperty(eid, prop);
match self.0.issue_cmd(&cmd).await? {
Value::String(v) => Ok(Some(v)),
Value::Null => Ok(None),
v => bail!(ErrorKind::NotW3C(v)),
}
}
/// Retrieve the text contents of this elment.
pub async fn text(&self, eid: WebElement) -> Result<String> {
let cmd = WebDriverCommand::GetElementText(eid);
match self.0.issue_cmd(&cmd).await? {
Value::String(v) => Ok(v),
v => bail!(ErrorKind::NotW3C(v)),
}
}
/// Retrieve the HTML contents of this element. if inner is true,
/// also return the wrapping nodes html. Note: this is the same as
/// calling `prop("innerHTML")` or `prop("outerHTML")`.
pub async fn html(&self, eid: WebElement, inner: bool) -> Result<String> {
let prop = if inner { "innerHTML" } else { "outerHTML" };
self.prop(eid, prop.to_owned()).await?
.ok_or_else(|| Error::from(ErrorKind::NotW3C(Value::Null)))
}
/// Click on this element
pub async fn click(&self, eid: WebElement) -> Result<()> {
let cmd = WebDriverCommand::ElementClick(eid);
let r = self.0.issue_cmd(&cmd).await?;
if r.is_null() || r.as_object().map(|o| o.is_empty()).unwrap_or(false) {
// geckodriver returns {} :(
Ok(())
} else {
bail!(ErrorKind::NotW3C(r))
}
}
/// Scroll this element into view
pub async fn scroll_into_view(&self, eid: WebElement) -> Result<()> {
let args = vec![serde_json::to_value(eid)?];
let js = "arguments[0].scrollIntoView(true)".to_string();
self.clone().execute(js, args).await?;
Ok(())
}
/// Follow the `href` target of the element matching the given CSS
/// selector *without* causing a click interaction.
pub async fn follow(&self, eid: WebElement) -> Result<()> |
/// Set the `value` of the input element named `name` which is a child of `eid`
pub async fn set_by_name(
&self,
eid: WebElement,
name: String,
value: String,
) -> Result<()> {
let locator = Locator::Css(format!("input[name='{}']", name));
let elt = self.clone().find(locator.into(), Some(eid)).await?;
let args = {
let mut a = vec![serde_json::to_value(elt)?, Value::String(value)];
self.fixup_elements(&mut a);
a
};
let js = "arguments[0].value = arguments[1]".to_string();
let res = self.clone().execute(js, args).await?;
if res.is_null() {
Ok(())
} else {
bail!(ErrorKind::NotW3C(res))
}
}
/// Submit the form specified by `eid` with the first submit button
pub async fn submit(&self, eid: WebElement) -> Result<()> {
let l = Locator::Css("input[type=submit],button[type=submit]".into());
self.submit_with(eid, l).await
}
/// Submit the form `eid` using the button matched by the given selector.
pub async fn submit_with(&self, eid: WebElement, button: Locator) -> Result<()> {
let elt = self.clone().find(button.into(), Some(eid)).await?;
Ok(self.clone().click(elt).await?)
}
/// Submit this form using the form submit button with the given
/// label (case-insensitive).
pub async fn submit_using(&self, eid: WebElement, button_label: String) -> Result<()> {
let escaped = button_label.replace('\\', "\\\\").replace('"', "\\\"");
let btn = format!(
"input[type=submit][value=\"{}\" i],\
button[type=submit][value=\"{}\" i]",
escaped, escaped
);
Ok(self.submit_with(eid, Locator::Css(btn)).await?)
}
/// Submit this form directly, without clicking any buttons.
///
/// This can be useful to bypass forms that perform various magic
/// when the submit button is clicked, or that hijack click events
/// altogether.
///
/// Note that since no button is actually clicked, the
/// `name=value` pair for the submit button will not be
/// submitted. This can be circumvented by using `submit_sneaky`
/// instead.
pub async fn submit_direct(&self, eid: WebElement) -> Result<()> {
// some sites are silly, and name their submit button
// "submit". this ends up overwriting the "submit" function of
// the form with a reference to the submit button itself, so
// we can't call .submit(). we get around this by creating a
// *new* form, and using *its* submit() handler but with this
// pointed to the real form. solution from here:
// https://stackoverflow.com/q/833032/472927#comment23038712_834197
let js = "document.createElement('form').submit.call(arguments[0])".to_string();
let args = {
let mut a = vec![serde_json::to_value(eid)?];
self.fixup_elements(&mut a);
a
};
self.clone().execute(js, args).await?;
Ok(())
}
/// Submit this form directly, without clicking any buttons, and
/// with an extra field.
///
/// Like `submit_direct`, this method will submit this form
/// without clicking a submit button. However, it will *also*
/// inject a hidden input element on the page that carries the
/// given `field=value` mapping. This allows you to emulate the
/// form data as it would have been *if* the submit button was
/// indeed clicked.
pub async fn submit_sneaky(
&self,
eid: WebElement,
field: String,
value: String,
) -> Result<()> {
let js = r#"
var h = document.createElement('input');
h.setAttribute('type', 'hidden');
h.setAttribute('name', arguments[1]);
h.value = arguments[2];
arguments[0].appendChild(h);
"#
.to_string();
let args = {
let mut a = vec![
serde_json::to_value(eid)?,
Value::String(field),
Value::String(value),
];
self.fixup_elements(&mut a);
a
};
self.execute(js, args).await?;
Ok(())
}
}
| {
match self.clone().attr(eid.clone(), String::from("href")).await? {
None => bail!("no href attribute"),
Some(href) => {
let current = self.current_url().await?.join(&href)?;
self.goto(current.as_str()).await
}
}
} | identifier_body |
lib.rs | //! Brainfuck interpreter types
//!
//! This crate contains all the data types necessary for the Brainfuck
//! interpreter project.
#![deny(missing_docs)]
use std::fmt;
use std::io;
use std::path::{Path, PathBuf};
use thiserror::Error;
/// Represents a Brainfuck Types Error.
#[derive(Error, fmt::Debug)]
pub enum BrainfuckTypesError {
/// When an unmatched left or right bracket is found
#[error("unmatched bracket, {0:?}")]
UnmatchedBracket(BrainfuckInstr),
}
/// Represents the eight raw Brainfuck instructions.
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum BrainfuckInstrRaw {
/// Increment (increase by one) the byte at the data pointer
Increment,
/// Decrement (decrease by one) the byte at the data pointer
Decrement,
/// Increment the data pointer (to point to the next cell to the right)
MoveHeadLeft,
/// Decrement the data pointer (to point to the next cell to the left)
MoveHeadRight,
/// If the byte at the data pointer is zero, then instead of moving the
/// instruction pointer forward to the next command, jump it forward to the
/// command after the matching ] command.
WhileStart,
/// If the byte at the data pointer is nonzero, then instead of moving the
/// instruction pointer forward to the next command, jump it back to the
/// command after the matching [ command.
WhileEnd,
/// Accept one byte of input, storing its value in the byte at the data pointer
CellRead,
/// Output the byte at the data pointer
CellWrite,
}
impl BrainfuckInstrRaw {
/// Returns a BrainfuckInstrRaw from the given character.
fn from_byte(c: u8) -> Option<BrainfuckInstrRaw> {
match c {
b'+' => Some(BrainfuckInstrRaw::Increment),
b'-' => Some(BrainfuckInstrRaw::Decrement),
b'<' => Some(BrainfuckInstrRaw::MoveHeadLeft),
b'>' => Some(BrainfuckInstrRaw::MoveHeadRight),
b'[' => Some(BrainfuckInstrRaw::WhileStart),
b']' => Some(BrainfuckInstrRaw::WhileEnd),
b',' => Some(BrainfuckInstrRaw::CellRead),
b'.' => Some(BrainfuckInstrRaw::CellWrite),
_ => None,
}
}
}
/// Represents the raw Brainfuck instruction and where it is in the file.
#[derive(Debug, Copy, Clone)]
pub struct BrainfuckInstr {
/// The raw brainfuck instruction
instr: BrainfuckInstrRaw,
/// The line number, starting from 1 for humans
line: usize,
/// The column number, starting from 1 for humans
column: usize,
}
impl BrainfuckInstr {
/// Returns a vector of BrainfuckInstr's, parsed from the given string slice.
///
/// # Example
/// ```
/// # use bft_types::{BrainfuckInstr, BrainfuckInstrRaw};
/// let bf = BrainfuckInstr::instrs_from_str("<>");
///
/// assert_eq!(bf[0].line(), 1);
/// assert_eq!(bf[0].column(), 1);
///
/// assert_eq!(bf[1].line(), 1);
/// assert_eq!(bf[1].column(), 2);
/// ```
pub fn | (s: &str) -> Vec<Self> {
let mut instrs: Vec<BrainfuckInstr> = Vec::new();
for (l, pline) in s.lines().enumerate() {
for (c, pbyte) in pline.bytes().enumerate() {
if let Some(iraw) = BrainfuckInstrRaw::from_byte(pbyte) {
instrs.push(BrainfuckInstr {
instr: iraw,
line: l + 1,
column: c + 1,
});
}
}
}
instrs
}
/// Returns the Brainfuck instruction line number
pub fn line(&self) -> usize {
self.line
}
/// Returns the Brainfuck instruction column
pub fn column(&self) -> usize {
self.column
}
/// Returns a borrow of the raw Brainfuck instruction.
pub fn instr(&self) -> &BrainfuckInstrRaw {
&self.instr
}
}
impl fmt::Display for BrainfuckInstr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let text = match self.instr {
BrainfuckInstrRaw::Increment => "Increment byte at data pointer",
BrainfuckInstrRaw::Decrement => "Decrement byte at data pointer",
BrainfuckInstrRaw::MoveHeadLeft => "Decrement data pointer",
BrainfuckInstrRaw::MoveHeadRight => "Increment data pointer",
BrainfuckInstrRaw::WhileStart => "Start looping",
BrainfuckInstrRaw::WhileEnd => "End looping",
BrainfuckInstrRaw::CellRead => "Input byte at the data pointer",
BrainfuckInstrRaw::CellWrite => "Output byte at data pointer",
};
write!(f, "{}", text)
}
}
/// Represents an entire Brainfuck program, which is a Path and a series of
/// instructions.
#[derive(Debug)]
pub struct BrainfuckProg {
/// The path to the Brainfuck program.
path: PathBuf,
/// A series of BrainfuckInstr.
instrs: Vec<BrainfuckInstr>,
}
impl BrainfuckProg {
/// Instantiate a new BrainfuckProg with the given content and associate it
/// with the given path.
///
/// It is implemented like this so that we don't have to re-open a file if
/// it is already open. See also from_file.
///
/// # Example
/// ```
/// # use bft_types::BrainfuckProg;
/// # use std::path::Path;
/// let bf = BrainfuckProg::new(Path::new("path/to/prog.bf"), "<>[]");
/// ```
pub fn new<P: AsRef<Path>>(path: P, content: &str) -> Self {
Self {
path: path.as_ref().to_path_buf(),
instrs: BrainfuckInstr::instrs_from_str(content),
}
}
/// Returns a new instance of BrainfuckProg, parsed from the file located at
/// the given Path-like reference.
///
/// # Example
/// ```no_run
/// # use bft_types::BrainfuckProg;
/// # use std::path::Path;
/// let bf = BrainfuckProg::from_file(Path::new("path/to/prog.bf"));
/// ```
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let content = std::fs::read_to_string(&path)?;
Ok(Self::new(path, content.as_str()))
}
/// Returns a reference to the BrainfuckProg's path.
pub fn path(&self) -> &Path {
self.path.as_path()
}
/// Returns a reference to the BrainfuckProg's instructions.
pub fn instrs(&self) -> &[BrainfuckInstr] {
&self.instrs[..]
}
/// Checks the program and returns the Result.
pub fn check(&self) -> Result<(), BrainfuckTypesError> {
self.check_brackets()
}
/// Checks the left and right brackets and returns the Result.
fn check_brackets(&self) -> Result<(), BrainfuckTypesError> {
let mut left_brackets: Vec<&BrainfuckInstr> = Vec::new();
// Collect left brackets and pop when we find matching right brackets.
for bf_instr in &self.instrs {
if bf_instr.instr == BrainfuckInstrRaw::WhileStart {
left_brackets.push(&bf_instr);
} else if bf_instr.instr == BrainfuckInstrRaw::WhileEnd {
match left_brackets.pop() {
Some(_) => (),
None => return Err(BrainfuckTypesError::UnmatchedBracket(*bf_instr)),
};
}
}
// Error if there are remaining unmatched left_brackets
match left_brackets.iter().last() {
Some(&b) => Err(BrainfuckTypesError::UnmatchedBracket(*b)),
None => Ok(()),
}
}
}
#[cfg(test)]
mod tests {
use super::{BrainfuckInstrRaw, BrainfuckProg};
use std::path::Path;
// Store the line and column
struct Position {
line: usize,
column: usize,
}
// Some default sequence, which we can test against.
const CORRECT_INSTRS: [BrainfuckInstrRaw; 8] = [
BrainfuckInstrRaw::MoveHeadLeft,
BrainfuckInstrRaw::MoveHeadRight,
BrainfuckInstrRaw::WhileStart,
BrainfuckInstrRaw::WhileEnd,
BrainfuckInstrRaw::Decrement,
BrainfuckInstrRaw::Increment,
BrainfuckInstrRaw::CellRead,
BrainfuckInstrRaw::CellWrite,
];
#[test]
fn test_program() {
let fake_path = "path/to/file.bf";
let another_path = "path/to/somewhere/else.bf";
// Construct
let b = BrainfuckProg::new(fake_path, "<>[]-+,.");
// Check the path is stored correctly
assert_eq!(Path::new(fake_path), b.path.as_path());
assert_ne!(Path::new(another_path), b.path.as_path());
// Check the program
let p = b.instrs();
for (i, cinstr) in CORRECT_INSTRS.iter().enumerate() {
assert_eq!(p[i].instr, *cinstr);
assert_eq!(p[i].line(), 1);
assert_eq!(p[i].column(), i + 1);
}
// Check the program backwards to verify BrainfuckInstrRaw PartialEq
// actually fails when comparing two BrainfuckInstrRaw which are
// different.
// Note: This is pointless because we derrive PartialEq, if the standard
// implementation is broken then something is very wrong...
for (i, cinstr) in CORRECT_INSTRS.iter().rev().enumerate() {
assert_ne!(p[i].instr, *cinstr);
}
}
#[test]
fn test_program_with_comments() {
let prog_str = "this < is > a [ valid ]\n\
brainfuck - program +\n\
these , are . comments";
let correct_pos = [
Position { line: 1, column: 6 },
Position {
line: 1,
column: 11,
},
Position {
line: 1,
column: 15,
},
Position {
line: 1,
column: 23,
},
Position {
line: 2,
column: 11,
},
Position {
line: 2,
column: 21,
},
Position { line: 3, column: 7 },
Position {
line: 3,
column: 13,
},
];
let b = BrainfuckProg::new("path/to/file.bf", prog_str);
// Check the program
let p = b.instrs();
for (i, cinstr) in CORRECT_INSTRS.iter().enumerate() {
assert_eq!(p[i].instr, *cinstr);
assert_eq!(p[i].line(), correct_pos[i].line);
assert_eq!(p[i].column(), correct_pos[i].column);
}
}
#[test]
fn test_program_with_matched_brackets() {
let fake_path = "path/to/file.bf";
let b = BrainfuckProg::new(fake_path, "<>[[[]-]+],.");
assert!(b.check().is_ok());
}
#[test]
fn test_program_with_unmatched_brackets() {
let fake_path = "path/to/file.bf";
let b1 = BrainfuckProg::new(fake_path, "<>[[]-+,.");
assert!(b1.check().is_err());
let b2 = BrainfuckProg::new(fake_path, "<>[[]]]-+,.");
assert!(b2.check().is_err());
}
#[test]
fn test_bad_path() {
assert!(BrainfuckProg::from_file("/path/to/file.bf").is_err());
}
}
| instrs_from_str | identifier_name |
lib.rs | //! Brainfuck interpreter types
//!
//! This crate contains all the data types necessary for the Brainfuck
//! interpreter project.
#![deny(missing_docs)]
|
use thiserror::Error;
/// Represents a Brainfuck Types Error.
#[derive(Error, fmt::Debug)]
pub enum BrainfuckTypesError {
/// When an unmatched left or right bracket is found
#[error("unmatched bracket, {0:?}")]
UnmatchedBracket(BrainfuckInstr),
}
/// Represents the eight raw Brainfuck instructions.
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum BrainfuckInstrRaw {
/// Increment (increase by one) the byte at the data pointer
Increment,
/// Decrement (decrease by one) the byte at the data pointer
Decrement,
/// Increment the data pointer (to point to the next cell to the right)
MoveHeadLeft,
/// Decrement the data pointer (to point to the next cell to the left)
MoveHeadRight,
/// If the byte at the data pointer is zero, then instead of moving the
/// instruction pointer forward to the next command, jump it forward to the
/// command after the matching ] command.
WhileStart,
/// If the byte at the data pointer is nonzero, then instead of moving the
/// instruction pointer forward to the next command, jump it back to the
/// command after the matching [ command.
WhileEnd,
/// Accept one byte of input, storing its value in the byte at the data pointer
CellRead,
/// Output the byte at the data pointer
CellWrite,
}
impl BrainfuckInstrRaw {
/// Returns a BrainfuckInstrRaw from the given character.
fn from_byte(c: u8) -> Option<BrainfuckInstrRaw> {
match c {
b'+' => Some(BrainfuckInstrRaw::Increment),
b'-' => Some(BrainfuckInstrRaw::Decrement),
b'<' => Some(BrainfuckInstrRaw::MoveHeadLeft),
b'>' => Some(BrainfuckInstrRaw::MoveHeadRight),
b'[' => Some(BrainfuckInstrRaw::WhileStart),
b']' => Some(BrainfuckInstrRaw::WhileEnd),
b',' => Some(BrainfuckInstrRaw::CellRead),
b'.' => Some(BrainfuckInstrRaw::CellWrite),
_ => None,
}
}
}
/// Represents the raw Brainfuck instruction and where it is in the file.
#[derive(Debug, Copy, Clone)]
pub struct BrainfuckInstr {
/// The raw brainfuck instruction
instr: BrainfuckInstrRaw,
/// The line number, starting from 1 for humans
line: usize,
/// The column number, starting from 1 for humans
column: usize,
}
impl BrainfuckInstr {
/// Returns a vector of BrainfuckInstr's, parsed from the given string slice.
///
/// # Example
/// ```
/// # use bft_types::{BrainfuckInstr, BrainfuckInstrRaw};
/// let bf = BrainfuckInstr::instrs_from_str("<>");
///
/// assert_eq!(bf[0].line(), 1);
/// assert_eq!(bf[0].column(), 1);
///
/// assert_eq!(bf[1].line(), 1);
/// assert_eq!(bf[1].column(), 2);
/// ```
pub fn instrs_from_str(s: &str) -> Vec<Self> {
let mut instrs: Vec<BrainfuckInstr> = Vec::new();
for (l, pline) in s.lines().enumerate() {
for (c, pbyte) in pline.bytes().enumerate() {
if let Some(iraw) = BrainfuckInstrRaw::from_byte(pbyte) {
instrs.push(BrainfuckInstr {
instr: iraw,
line: l + 1,
column: c + 1,
});
}
}
}
instrs
}
/// Returns the Brainfuck instruction line number
pub fn line(&self) -> usize {
self.line
}
/// Returns the Brainfuck instruction column
pub fn column(&self) -> usize {
self.column
}
/// Returns a borrow of the raw Brainfuck instruction.
pub fn instr(&self) -> &BrainfuckInstrRaw {
&self.instr
}
}
impl fmt::Display for BrainfuckInstr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let text = match self.instr {
BrainfuckInstrRaw::Increment => "Increment byte at data pointer",
BrainfuckInstrRaw::Decrement => "Decrement byte at data pointer",
BrainfuckInstrRaw::MoveHeadLeft => "Decrement data pointer",
BrainfuckInstrRaw::MoveHeadRight => "Increment data pointer",
BrainfuckInstrRaw::WhileStart => "Start looping",
BrainfuckInstrRaw::WhileEnd => "End looping",
BrainfuckInstrRaw::CellRead => "Input byte at the data pointer",
BrainfuckInstrRaw::CellWrite => "Output byte at data pointer",
};
write!(f, "{}", text)
}
}
/// Represents an entire Brainfuck program, which is a Path and a series of
/// instructions.
#[derive(Debug)]
pub struct BrainfuckProg {
/// The path to the Brainfuck program.
path: PathBuf,
/// A series of BrainfuckInstr.
instrs: Vec<BrainfuckInstr>,
}
impl BrainfuckProg {
/// Instantiate a new BrainfuckProg with the given content and associate it
/// with the given path.
///
/// It is implemented like this so that we don't have to re-open a file if
/// it is already open. See also from_file.
///
/// # Example
/// ```
/// # use bft_types::BrainfuckProg;
/// # use std::path::Path;
/// let bf = BrainfuckProg::new(Path::new("path/to/prog.bf"), "<>[]");
/// ```
pub fn new<P: AsRef<Path>>(path: P, content: &str) -> Self {
Self {
path: path.as_ref().to_path_buf(),
instrs: BrainfuckInstr::instrs_from_str(content),
}
}
/// Returns a new instance of BrainfuckProg, parsed from the file located at
/// the given Path-like reference.
///
/// # Example
/// ```no_run
/// # use bft_types::BrainfuckProg;
/// # use std::path::Path;
/// let bf = BrainfuckProg::from_file(Path::new("path/to/prog.bf"));
/// ```
pub fn from_file<P: AsRef<Path>>(path: P) -> io::Result<Self> {
let content = std::fs::read_to_string(&path)?;
Ok(Self::new(path, content.as_str()))
}
/// Returns a reference to the BrainfuckProg's path.
pub fn path(&self) -> &Path {
self.path.as_path()
}
/// Returns a reference to the BrainfuckProg's instructions.
pub fn instrs(&self) -> &[BrainfuckInstr] {
&self.instrs[..]
}
/// Checks the program and returns the Result.
pub fn check(&self) -> Result<(), BrainfuckTypesError> {
self.check_brackets()
}
/// Checks the left and right brackets and returns the Result.
fn check_brackets(&self) -> Result<(), BrainfuckTypesError> {
let mut left_brackets: Vec<&BrainfuckInstr> = Vec::new();
// Collect left brackets and pop when we find matching right brackets.
for bf_instr in &self.instrs {
if bf_instr.instr == BrainfuckInstrRaw::WhileStart {
left_brackets.push(&bf_instr);
} else if bf_instr.instr == BrainfuckInstrRaw::WhileEnd {
match left_brackets.pop() {
Some(_) => (),
None => return Err(BrainfuckTypesError::UnmatchedBracket(*bf_instr)),
};
}
}
// Error if there are remaining unmatched left_brackets
match left_brackets.iter().last() {
Some(&b) => Err(BrainfuckTypesError::UnmatchedBracket(*b)),
None => Ok(()),
}
}
}
#[cfg(test)]
mod tests {
use super::{BrainfuckInstrRaw, BrainfuckProg};
use std::path::Path;
// Store the line and column
struct Position {
line: usize,
column: usize,
}
// Some default sequence, which we can test against.
const CORRECT_INSTRS: [BrainfuckInstrRaw; 8] = [
BrainfuckInstrRaw::MoveHeadLeft,
BrainfuckInstrRaw::MoveHeadRight,
BrainfuckInstrRaw::WhileStart,
BrainfuckInstrRaw::WhileEnd,
BrainfuckInstrRaw::Decrement,
BrainfuckInstrRaw::Increment,
BrainfuckInstrRaw::CellRead,
BrainfuckInstrRaw::CellWrite,
];
#[test]
fn test_program() {
let fake_path = "path/to/file.bf";
let another_path = "path/to/somewhere/else.bf";
// Construct
let b = BrainfuckProg::new(fake_path, "<>[]-+,.");
// Check the path is stored correctly
assert_eq!(Path::new(fake_path), b.path.as_path());
assert_ne!(Path::new(another_path), b.path.as_path());
// Check the program
let p = b.instrs();
for (i, cinstr) in CORRECT_INSTRS.iter().enumerate() {
assert_eq!(p[i].instr, *cinstr);
assert_eq!(p[i].line(), 1);
assert_eq!(p[i].column(), i + 1);
}
// Check the program backwards to verify BrainfuckInstrRaw PartialEq
// actually fails when comparing two BrainfuckInstrRaw which are
// different.
// Note: This is pointless because we derrive PartialEq, if the standard
// implementation is broken then something is very wrong...
for (i, cinstr) in CORRECT_INSTRS.iter().rev().enumerate() {
assert_ne!(p[i].instr, *cinstr);
}
}
#[test]
fn test_program_with_comments() {
let prog_str = "this < is > a [ valid ]\n\
brainfuck - program +\n\
these , are . comments";
let correct_pos = [
Position { line: 1, column: 6 },
Position {
line: 1,
column: 11,
},
Position {
line: 1,
column: 15,
},
Position {
line: 1,
column: 23,
},
Position {
line: 2,
column: 11,
},
Position {
line: 2,
column: 21,
},
Position { line: 3, column: 7 },
Position {
line: 3,
column: 13,
},
];
let b = BrainfuckProg::new("path/to/file.bf", prog_str);
// Check the program
let p = b.instrs();
for (i, cinstr) in CORRECT_INSTRS.iter().enumerate() {
assert_eq!(p[i].instr, *cinstr);
assert_eq!(p[i].line(), correct_pos[i].line);
assert_eq!(p[i].column(), correct_pos[i].column);
}
}
#[test]
fn test_program_with_matched_brackets() {
let fake_path = "path/to/file.bf";
let b = BrainfuckProg::new(fake_path, "<>[[[]-]+],.");
assert!(b.check().is_ok());
}
#[test]
fn test_program_with_unmatched_brackets() {
let fake_path = "path/to/file.bf";
let b1 = BrainfuckProg::new(fake_path, "<>[[]-+,.");
assert!(b1.check().is_err());
let b2 = BrainfuckProg::new(fake_path, "<>[[]]]-+,.");
assert!(b2.check().is_err());
}
#[test]
fn test_bad_path() {
assert!(BrainfuckProg::from_file("/path/to/file.bf").is_err());
}
} | use std::fmt;
use std::io;
use std::path::{Path, PathBuf}; | random_line_split |
sse_server.rs | //! Server-sent-event server for the note viewer feature.
//! This module contains also the web browser Javascript client code.
use crate::config::CFG;
use crate::config::VIEWER_SERVED_MIME_TYPES_MAP;
use crate::viewer::error::ViewerError;
use crate::viewer::http_response::HttpResponse;
use crate::viewer::init::LOCALHOST;
use parking_lot::RwLock;
use percent_encoding::percent_decode_str;
use std::collections::HashSet;
use std::io::{ErrorKind, Read, Write};
use std::net::Ipv4Addr;
use std::net::SocketAddr;
use std::net::SocketAddrV4;
use std::net::{TcpListener, TcpStream};
use std::path::PathBuf;
use std::str;
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::SystemTime;
use tpnote_lib::config::TMPL_HTML_VAR_NOTE_JS;
use tpnote_lib::context::Context;
/// The TCP stream is read in chunks. This is the read buffer size.
const TCP_READ_BUFFER_SIZE: usize = 0x400;
/// Javascript client code, part 1
/// Refresh on WTFiles events.
pub const SSE_CLIENT_CODE1: &str = r#"
var evtSource = new EventSource("http://"#;
/// Javascript client code, part 2
/// Save last scroll position into local storage.
/// Jump to the last saved scroll position.
pub const SSE_CLIENT_CODE2: &str = r#"/events");
evtSource.addEventListener("update", function(e) {
localStorage.setItem('scrollPosition', window.scrollY);
window.location.reload(true);
});
window.addEventListener('load', function() {
if(localStorage.getItem('scrollPosition') !== null)
window.scrollTo(0, localStorage.getItem('scrollPosition'));
});
"#;
/// URL path for Server-Sent-Events.
const SSE_EVENT_PATH: &str = "/events";
/// Server-Sent-Event tokens our HTTP client has registered to receive.
#[derive(Debug, Clone, Copy)]
pub enum SseToken {
/// Server-Sent-Event token to request nothing but check if the client is still
/// there.
Ping,
/// Server-Sent-Event token to request a page update.
Update,
}
pub fn manage_connections(
event_tx_list: Arc<Mutex<Vec<SyncSender<SseToken>>>>,
listener: TcpListener,
doc_path: PathBuf,
) {
// A list of referenced local links to images or other documents as
// they appeared in the displayed documents.
// Every thread gets an (ARC) reference to it.
let allowed_urls = Arc::new(RwLock::new(HashSet::new()));
// Subset of the above list containing only displayed Tp-Note documents.
let delivered_tpnote_docs = Arc::new(RwLock::new(HashSet::new()));
// We use an ARC to count the number of running threads.
let conn_counter = Arc::new(());
// Store `doc_path` in the `context.path` and
// in the Tera variable `TMPL_VAR_PATH`.
let context = Context::from(&doc_path);
log::info!(
"Viewer notice:\n\
only files under the directory: {}\n\
with the following extensions:\n\
{}\n\
are served!",
context.root_path.display(),
&VIEWER_SERVED_MIME_TYPES_MAP
.keys()
.map(|s| {
let mut s = s.to_string();
s.push_str(", ");
s
})
.collect::<String>()
);
for stream in listener.incoming() {
match stream {
Ok(stream) => {
let (event_tx, event_rx) = sync_channel(0);
event_tx_list.lock().unwrap().push(event_tx);
let allowed_urls = allowed_urls.clone();
let delivered_tpnote_docs = delivered_tpnote_docs.clone();
let conn_counter = conn_counter.clone();
let context = context.clone();
thread::spawn(move || {
let mut st = ServerThread::new(
event_rx,
stream,
allowed_urls,
delivered_tpnote_docs,
conn_counter,
context,
);
st.serve_connection()
});
}
Err(e) => log::warn!("TCP connection failed: {}", e),
}
}
}
/// Server thread state.
pub(crate) struct | {
/// Receiver side of the channel where `update` events are sent.
rx: Receiver<SseToken>,
/// Byte stream coming from a TCP connection.
pub(crate) stream: TcpStream,
/// A list of referenced relative URLs to images or other
/// documents as they appear in the delivered Tp-Note documents.
/// This list contains local links that may or may not have been displayed.
/// The local links in this list are relative to `self.context.root_path`
pub(crate) allowed_urls: Arc<RwLock<HashSet<PathBuf>>>,
/// Subset of `allowed_urls` containing only URLs that
/// have been actually delivered. The list only contains URLs to Tp-Note
/// documents.
/// The local links in this list are absolute.
pub(crate) delivered_tpnote_docs: Arc<RwLock<HashSet<PathBuf>>>,
/// We do not store anything here, instead we use the ARC pointing to
/// `conn_counter` to count the number of instances of `ServerThread`.
pub(crate) conn_counter: Arc<()>,
/// The constructor stores the path of the note document in `context.path`
/// and in the Tera variable `TMPL_VAR_PATH`.
/// Both are needed for rendering to HTML.
pub(crate) context: Context,
}
impl ServerThread {
/// Constructor.
fn new(
rx: Receiver<SseToken>,
stream: TcpStream,
allowed_urls: Arc<RwLock<HashSet<PathBuf>>>,
delivered_tpnote_docs: Arc<RwLock<HashSet<PathBuf>>>,
conn_counter: Arc<()>,
mut context: Context,
) -> Self {
let local_addr = stream.local_addr();
// Compose JavaScript code.
let note_js = match local_addr {
Ok(addr) => format!(
"{}{}:{}{}",
SSE_CLIENT_CODE1,
LOCALHOST,
addr.port(),
SSE_CLIENT_CODE2
),
Err(_) => {
panic!("No TCP connection: socket address of local half is missing.")
}
};
// Save JavaScript code.
context.insert(TMPL_HTML_VAR_NOTE_JS, ¬e_js);
Self {
rx,
stream,
allowed_urls,
delivered_tpnote_docs,
conn_counter,
context,
}
}
/// Wrapper for `serve_connection2()` that logs
/// errors as log message warnings.
fn serve_connection(&mut self) {
match Self::serve_connection2(self) {
Ok(_) => (),
Err(e) => {
log::debug!(
"TCP port local {} to peer {}: Closed connection because of error: {}",
self.stream
.local_addr()
.unwrap_or_else(|_| SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(0, 0, 0, 0),
0
)))
.port(),
self.stream
.peer_addr()
.unwrap_or_else(|_| SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(0, 0, 0, 0),
0
)))
.port(),
e
);
}
}
}
/// HTTP server: serves content and events via the specified subscriber stream.
#[inline]
#[allow(clippy::needless_return)]
fn serve_connection2(&mut self) -> Result<(), ViewerError> {
// One reference is hold by the `manage_connections` thread and does not count.
// This is why we subtract 1.
let open_connections = Arc::<()>::strong_count(&self.conn_counter) - 1;
log::trace!(
"TCP port local {} to peer {}: New incoming TCP connection ({} open).",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
open_connections
);
// Check if we exceed our connection limit.
if open_connections > CFG.viewer.tcp_connections_max {
self.respond_service_unavailable()?;
// This ends this thread and closes the connection.
return Err(ViewerError::TcpConnectionsExceeded {
max_conn: CFG.viewer.tcp_connections_max,
});
}
'tcp_connection: loop {
// This is inspired by the Spook crate.
// Read the request.
let mut read_buffer = [0u8; TCP_READ_BUFFER_SIZE];
let mut buffer = Vec::new();
let (method, path) = 'assemble_tcp_chunks: loop {
// Read the request, or part thereof.
match self.stream.read(&mut read_buffer) {
Ok(0) => {
log::trace!(
"TCP port local {} to peer {}: Connection closed by peer.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port()
);
// Connection by peer.
break 'tcp_connection;
}
Err(e) => {
// Connection closed or error.
return Err(ViewerError::StreamRead { error: e });
}
Ok(n) => {
// Successful read.
buffer.extend_from_slice(&read_buffer[..n]);
log::trace!(
"TCP port local {} to peer {}: chunk: {:?} ...",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
std::str::from_utf8(&read_buffer)
.unwrap_or_default()
.chars()
.take(60)
.collect::<String>()
);
}
}
// Try to parse the request.
let mut headers = [httparse::EMPTY_HEADER; 16];
let mut req = httparse::Request::new(&mut headers);
let res = req.parse(&buffer)?;
if res.is_partial() {
continue 'assemble_tcp_chunks;
}
// Check if the HTTP header is complete and valid.
if res.is_complete() {
if let (Some(method), Some(path)) = (req.method, req.path) {
// This is the only regular exit.
break 'assemble_tcp_chunks (method, path);
}
};
// We quit with error. There is nothing more we can do here.
return Err(ViewerError::StreamParse {
source_str: std::str::from_utf8(&buffer)
.unwrap_or_default()
.chars()
.take(60)
.collect::<String>(),
});
};
// End of input chunk loop.
// The only supported request method for SSE is GET.
if method != "GET" {
self.respond_method_not_allowed(method)?;
continue 'tcp_connection;
}
// Decode the percent encoding in the URL path.
let path = percent_decode_str(path).decode_utf8()?;
// Check the path.
// Serve note rendition.
match &*path {
// This is a connection for Server-Sent-Events.
SSE_EVENT_PATH => {
// Serve event response, but keep the connection.
self.respond_event_ok()?;
// Make the stream non-blocking to be able to detect whether the
// connection was closed by the client.
self.stream.set_nonblocking(true)?;
// Serve events until the connection is closed.
// Keep in mind that the client will often close
// the request after the first event if the event
// is used to trigger a page refresh, so try to eagerly
// detect closed connections.
'_event: loop {
// Wait for the next update.
let msg = self.rx.recv()?;
// Detect whether the connection was closed.
match self.stream.read(&mut read_buffer) {
// Connection closed.
Ok(0) => {
log::trace!(
"TCP port local {} to peer {}: Event connection closed by peer.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port()
);
// Our peer closed this connection, we finish also then.
break 'tcp_connection;
}
// Connection alive.
Ok(_) => {}
// `WouldBlock` is OK, all others not.
Err(e) => {
if e.kind() != ErrorKind::WouldBlock {
// Something bad happened.
return Err(ViewerError::StreamRead { error: e });
}
}
}
// Send event.
let event = match msg {
SseToken::Update => "event: update\r\ndata:\r\n\r\n".to_string(),
SseToken::Ping => ": ping\r\n\r\n".to_string(),
};
self.stream.write_all(event.as_bytes())?;
log::debug!(
"TCP port local {} to peer {} ({} open TCP conn.): pushed '{:?}' in event connection to web browser.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
Arc::<()>::strong_count(&self.conn_counter) - 1,
msg,
);
}
}
// Serve all other documents.
_ => self.respond(&path)?,
}; // end of match path
} // Go to 'tcp_connection loop start
log::trace!(
"TCP port local {} to peer {}: ({} open). Closing this TCP connection.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
// We subtract 1 for the `manage connection()` thread, and
// 1 for the thread we will close in a moment.
Arc::<()>::strong_count(&self.conn_counter) - 2,
);
// We came here because the client closed this connection.
Ok(())
}
/// Write HTTP event response.
fn respond_event_ok(&mut self) -> Result<(), ViewerError> {
// Declare SSE capability and allow cross-origin access.
let response = format!(
"\
HTTP/1.1 200 OK\r\n\
Date: {}\r\n\
Access-Control-Allow-Origin: *\r\n\
Cache-Control: no-cache\r\n\
Content-Type: text/event-stream\r\n\
\r\n",
httpdate::fmt_http_date(SystemTime::now()),
);
self.stream.write_all(response.as_bytes())?;
log::debug!(
"TCP port local {} to peer {}: 200 OK, served event header, \
keeping event connection open ...",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
);
Ok(())
}
}
| ServerThread | identifier_name |
sse_server.rs | //! Server-sent-event server for the note viewer feature.
//! This module contains also the web browser Javascript client code.
use crate::config::CFG;
use crate::config::VIEWER_SERVED_MIME_TYPES_MAP;
use crate::viewer::error::ViewerError;
use crate::viewer::http_response::HttpResponse;
use crate::viewer::init::LOCALHOST;
use parking_lot::RwLock;
use percent_encoding::percent_decode_str;
use std::collections::HashSet;
use std::io::{ErrorKind, Read, Write};
use std::net::Ipv4Addr;
use std::net::SocketAddr;
use std::net::SocketAddrV4;
use std::net::{TcpListener, TcpStream};
use std::path::PathBuf;
use std::str;
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::SystemTime;
use tpnote_lib::config::TMPL_HTML_VAR_NOTE_JS;
use tpnote_lib::context::Context;
/// The TCP stream is read in chunks. This is the read buffer size.
const TCP_READ_BUFFER_SIZE: usize = 0x400;
/// Javascript client code, part 1
/// Refresh on WTFiles events.
pub const SSE_CLIENT_CODE1: &str = r#"
var evtSource = new EventSource("http://"#;
/// Javascript client code, part 2
/// Save last scroll position into local storage.
/// Jump to the last saved scroll position.
pub const SSE_CLIENT_CODE2: &str = r#"/events");
evtSource.addEventListener("update", function(e) {
localStorage.setItem('scrollPosition', window.scrollY);
window.location.reload(true);
});
window.addEventListener('load', function() {
if(localStorage.getItem('scrollPosition') !== null)
window.scrollTo(0, localStorage.getItem('scrollPosition'));
});
"#;
/// URL path for Server-Sent-Events.
const SSE_EVENT_PATH: &str = "/events";
/// Server-Sent-Event tokens our HTTP client has registered to receive.
#[derive(Debug, Clone, Copy)]
pub enum SseToken {
/// Server-Sent-Event token to request nothing but check if the client is still
/// there.
Ping,
/// Server-Sent-Event token to request a page update.
Update,
}
pub fn manage_connections(
event_tx_list: Arc<Mutex<Vec<SyncSender<SseToken>>>>,
listener: TcpListener,
doc_path: PathBuf,
) {
// A list of referenced local links to images or other documents as
// they appeared in the displayed documents.
// Every thread gets an (ARC) reference to it.
let allowed_urls = Arc::new(RwLock::new(HashSet::new()));
// Subset of the above list containing only displayed Tp-Note documents.
let delivered_tpnote_docs = Arc::new(RwLock::new(HashSet::new()));
// We use an ARC to count the number of running threads.
let conn_counter = Arc::new(());
// Store `doc_path` in the `context.path` and
// in the Tera variable `TMPL_VAR_PATH`.
let context = Context::from(&doc_path);
log::info!(
"Viewer notice:\n\
only files under the directory: {}\n\
with the following extensions:\n\
{}\n\
are served!",
context.root_path.display(),
&VIEWER_SERVED_MIME_TYPES_MAP
.keys()
.map(|s| {
let mut s = s.to_string();
s.push_str(", ");
s
})
.collect::<String>()
);
for stream in listener.incoming() {
match stream {
Ok(stream) => {
let (event_tx, event_rx) = sync_channel(0);
event_tx_list.lock().unwrap().push(event_tx);
let allowed_urls = allowed_urls.clone();
let delivered_tpnote_docs = delivered_tpnote_docs.clone();
let conn_counter = conn_counter.clone();
let context = context.clone();
thread::spawn(move || {
let mut st = ServerThread::new(
event_rx,
stream,
allowed_urls,
delivered_tpnote_docs,
conn_counter,
context,
);
st.serve_connection()
});
}
Err(e) => log::warn!("TCP connection failed: {}", e),
}
}
}
/// Server thread state.
pub(crate) struct ServerThread {
/// Receiver side of the channel where `update` events are sent.
rx: Receiver<SseToken>,
/// Byte stream coming from a TCP connection.
pub(crate) stream: TcpStream,
/// A list of referenced relative URLs to images or other
/// documents as they appear in the delivered Tp-Note documents.
/// This list contains local links that may or may not have been displayed.
/// The local links in this list are relative to `self.context.root_path`
pub(crate) allowed_urls: Arc<RwLock<HashSet<PathBuf>>>,
/// Subset of `allowed_urls` containing only URLs that
/// have been actually delivered. The list only contains URLs to Tp-Note
/// documents.
/// The local links in this list are absolute.
pub(crate) delivered_tpnote_docs: Arc<RwLock<HashSet<PathBuf>>>,
/// We do not store anything here, instead we use the ARC pointing to
/// `conn_counter` to count the number of instances of `ServerThread`.
pub(crate) conn_counter: Arc<()>,
/// The constructor stores the path of the note document in `context.path`
/// and in the Tera variable `TMPL_VAR_PATH`.
/// Both are needed for rendering to HTML.
pub(crate) context: Context,
}
impl ServerThread {
/// Constructor.
fn new(
rx: Receiver<SseToken>,
stream: TcpStream,
allowed_urls: Arc<RwLock<HashSet<PathBuf>>>,
delivered_tpnote_docs: Arc<RwLock<HashSet<PathBuf>>>,
conn_counter: Arc<()>,
mut context: Context,
) -> Self {
let local_addr = stream.local_addr();
// Compose JavaScript code.
let note_js = match local_addr {
Ok(addr) => format!(
"{}{}:{}{}",
SSE_CLIENT_CODE1,
LOCALHOST,
addr.port(),
SSE_CLIENT_CODE2
),
Err(_) => {
panic!("No TCP connection: socket address of local half is missing.")
}
};
// Save JavaScript code.
context.insert(TMPL_HTML_VAR_NOTE_JS, ¬e_js);
Self {
rx,
stream,
allowed_urls,
delivered_tpnote_docs,
conn_counter,
context,
}
}
/// Wrapper for `serve_connection2()` that logs
/// errors as log message warnings.
fn serve_connection(&mut self) {
match Self::serve_connection2(self) {
Ok(_) => (),
Err(e) => {
log::debug!(
"TCP port local {} to peer {}: Closed connection because of error: {}",
self.stream
.local_addr()
.unwrap_or_else(|_| SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(0, 0, 0, 0),
0
)))
.port(),
self.stream
.peer_addr()
.unwrap_or_else(|_| SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(0, 0, 0, 0),
0
)))
.port(),
e
);
}
}
}
/// HTTP server: serves content and events via the specified subscriber stream.
#[inline]
#[allow(clippy::needless_return)]
fn serve_connection2(&mut self) -> Result<(), ViewerError> |
/// Write HTTP event response.
fn respond_event_ok(&mut self) -> Result<(), ViewerError> {
// Declare SSE capability and allow cross-origin access.
let response = format!(
"\
HTTP/1.1 200 OK\r\n\
Date: {}\r\n\
Access-Control-Allow-Origin: *\r\n\
Cache-Control: no-cache\r\n\
Content-Type: text/event-stream\r\n\
\r\n",
httpdate::fmt_http_date(SystemTime::now()),
);
self.stream.write_all(response.as_bytes())?;
log::debug!(
"TCP port local {} to peer {}: 200 OK, served event header, \
keeping event connection open ...",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
);
Ok(())
}
}
| {
// One reference is hold by the `manage_connections` thread and does not count.
// This is why we subtract 1.
let open_connections = Arc::<()>::strong_count(&self.conn_counter) - 1;
log::trace!(
"TCP port local {} to peer {}: New incoming TCP connection ({} open).",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
open_connections
);
// Check if we exceed our connection limit.
if open_connections > CFG.viewer.tcp_connections_max {
self.respond_service_unavailable()?;
// This ends this thread and closes the connection.
return Err(ViewerError::TcpConnectionsExceeded {
max_conn: CFG.viewer.tcp_connections_max,
});
}
'tcp_connection: loop {
// This is inspired by the Spook crate.
// Read the request.
let mut read_buffer = [0u8; TCP_READ_BUFFER_SIZE];
let mut buffer = Vec::new();
let (method, path) = 'assemble_tcp_chunks: loop {
// Read the request, or part thereof.
match self.stream.read(&mut read_buffer) {
Ok(0) => {
log::trace!(
"TCP port local {} to peer {}: Connection closed by peer.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port()
);
// Connection by peer.
break 'tcp_connection;
}
Err(e) => {
// Connection closed or error.
return Err(ViewerError::StreamRead { error: e });
}
Ok(n) => {
// Successful read.
buffer.extend_from_slice(&read_buffer[..n]);
log::trace!(
"TCP port local {} to peer {}: chunk: {:?} ...",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
std::str::from_utf8(&read_buffer)
.unwrap_or_default()
.chars()
.take(60)
.collect::<String>()
);
}
}
// Try to parse the request.
let mut headers = [httparse::EMPTY_HEADER; 16];
let mut req = httparse::Request::new(&mut headers);
let res = req.parse(&buffer)?;
if res.is_partial() {
continue 'assemble_tcp_chunks;
}
// Check if the HTTP header is complete and valid.
if res.is_complete() {
if let (Some(method), Some(path)) = (req.method, req.path) {
// This is the only regular exit.
break 'assemble_tcp_chunks (method, path);
}
};
// We quit with error. There is nothing more we can do here.
return Err(ViewerError::StreamParse {
source_str: std::str::from_utf8(&buffer)
.unwrap_or_default()
.chars()
.take(60)
.collect::<String>(),
});
};
// End of input chunk loop.
// The only supported request method for SSE is GET.
if method != "GET" {
self.respond_method_not_allowed(method)?;
continue 'tcp_connection;
}
// Decode the percent encoding in the URL path.
let path = percent_decode_str(path).decode_utf8()?;
// Check the path.
// Serve note rendition.
match &*path {
// This is a connection for Server-Sent-Events.
SSE_EVENT_PATH => {
// Serve event response, but keep the connection.
self.respond_event_ok()?;
// Make the stream non-blocking to be able to detect whether the
// connection was closed by the client.
self.stream.set_nonblocking(true)?;
// Serve events until the connection is closed.
// Keep in mind that the client will often close
// the request after the first event if the event
// is used to trigger a page refresh, so try to eagerly
// detect closed connections.
'_event: loop {
// Wait for the next update.
let msg = self.rx.recv()?;
// Detect whether the connection was closed.
match self.stream.read(&mut read_buffer) {
// Connection closed.
Ok(0) => {
log::trace!(
"TCP port local {} to peer {}: Event connection closed by peer.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port()
);
// Our peer closed this connection, we finish also then.
break 'tcp_connection;
}
// Connection alive.
Ok(_) => {}
// `WouldBlock` is OK, all others not.
Err(e) => {
if e.kind() != ErrorKind::WouldBlock {
// Something bad happened.
return Err(ViewerError::StreamRead { error: e });
}
}
}
// Send event.
let event = match msg {
SseToken::Update => "event: update\r\ndata:\r\n\r\n".to_string(),
SseToken::Ping => ": ping\r\n\r\n".to_string(),
};
self.stream.write_all(event.as_bytes())?;
log::debug!(
"TCP port local {} to peer {} ({} open TCP conn.): pushed '{:?}' in event connection to web browser.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
Arc::<()>::strong_count(&self.conn_counter) - 1,
msg,
);
}
}
// Serve all other documents.
_ => self.respond(&path)?,
}; // end of match path
} // Go to 'tcp_connection loop start
log::trace!(
"TCP port local {} to peer {}: ({} open). Closing this TCP connection.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
// We subtract 1 for the `manage connection()` thread, and
// 1 for the thread we will close in a moment.
Arc::<()>::strong_count(&self.conn_counter) - 2,
);
// We came here because the client closed this connection.
Ok(())
} | identifier_body |
sse_server.rs | //! Server-sent-event server for the note viewer feature.
//! This module contains also the web browser Javascript client code.
use crate::config::CFG;
use crate::config::VIEWER_SERVED_MIME_TYPES_MAP;
use crate::viewer::error::ViewerError;
use crate::viewer::http_response::HttpResponse;
use crate::viewer::init::LOCALHOST;
use parking_lot::RwLock;
use percent_encoding::percent_decode_str;
use std::collections::HashSet;
use std::io::{ErrorKind, Read, Write};
use std::net::Ipv4Addr;
use std::net::SocketAddr;
use std::net::SocketAddrV4;
use std::net::{TcpListener, TcpStream};
use std::path::PathBuf;
use std::str;
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::SystemTime;
use tpnote_lib::config::TMPL_HTML_VAR_NOTE_JS;
use tpnote_lib::context::Context;
/// The TCP stream is read in chunks. This is the read buffer size.
const TCP_READ_BUFFER_SIZE: usize = 0x400;
/// Javascript client code, part 1
/// Refresh on WTFiles events.
pub const SSE_CLIENT_CODE1: &str = r#"
var evtSource = new EventSource("http://"#;
/// Javascript client code, part 2
/// Save last scroll position into local storage.
/// Jump to the last saved scroll position.
pub const SSE_CLIENT_CODE2: &str = r#"/events");
evtSource.addEventListener("update", function(e) {
localStorage.setItem('scrollPosition', window.scrollY);
window.location.reload(true);
});
window.addEventListener('load', function() {
if(localStorage.getItem('scrollPosition') !== null)
window.scrollTo(0, localStorage.getItem('scrollPosition'));
});
"#;
/// URL path for Server-Sent-Events.
const SSE_EVENT_PATH: &str = "/events";
| pub enum SseToken {
/// Server-Sent-Event token to request nothing but check if the client is still
/// there.
Ping,
/// Server-Sent-Event token to request a page update.
Update,
}
pub fn manage_connections(
event_tx_list: Arc<Mutex<Vec<SyncSender<SseToken>>>>,
listener: TcpListener,
doc_path: PathBuf,
) {
// A list of referenced local links to images or other documents as
// they appeared in the displayed documents.
// Every thread gets an (ARC) reference to it.
let allowed_urls = Arc::new(RwLock::new(HashSet::new()));
// Subset of the above list containing only displayed Tp-Note documents.
let delivered_tpnote_docs = Arc::new(RwLock::new(HashSet::new()));
// We use an ARC to count the number of running threads.
let conn_counter = Arc::new(());
// Store `doc_path` in the `context.path` and
// in the Tera variable `TMPL_VAR_PATH`.
let context = Context::from(&doc_path);
log::info!(
"Viewer notice:\n\
only files under the directory: {}\n\
with the following extensions:\n\
{}\n\
are served!",
context.root_path.display(),
&VIEWER_SERVED_MIME_TYPES_MAP
.keys()
.map(|s| {
let mut s = s.to_string();
s.push_str(", ");
s
})
.collect::<String>()
);
for stream in listener.incoming() {
match stream {
Ok(stream) => {
let (event_tx, event_rx) = sync_channel(0);
event_tx_list.lock().unwrap().push(event_tx);
let allowed_urls = allowed_urls.clone();
let delivered_tpnote_docs = delivered_tpnote_docs.clone();
let conn_counter = conn_counter.clone();
let context = context.clone();
thread::spawn(move || {
let mut st = ServerThread::new(
event_rx,
stream,
allowed_urls,
delivered_tpnote_docs,
conn_counter,
context,
);
st.serve_connection()
});
}
Err(e) => log::warn!("TCP connection failed: {}", e),
}
}
}
/// Server thread state.
pub(crate) struct ServerThread {
/// Receiver side of the channel where `update` events are sent.
rx: Receiver<SseToken>,
/// Byte stream coming from a TCP connection.
pub(crate) stream: TcpStream,
/// A list of referenced relative URLs to images or other
/// documents as they appear in the delivered Tp-Note documents.
/// This list contains local links that may or may not have been displayed.
/// The local links in this list are relative to `self.context.root_path`
pub(crate) allowed_urls: Arc<RwLock<HashSet<PathBuf>>>,
/// Subset of `allowed_urls` containing only URLs that
/// have been actually delivered. The list only contains URLs to Tp-Note
/// documents.
/// The local links in this list are absolute.
pub(crate) delivered_tpnote_docs: Arc<RwLock<HashSet<PathBuf>>>,
/// We do not store anything here, instead we use the ARC pointing to
/// `conn_counter` to count the number of instances of `ServerThread`.
pub(crate) conn_counter: Arc<()>,
/// The constructor stores the path of the note document in `context.path`
/// and in the Tera variable `TMPL_VAR_PATH`.
/// Both are needed for rendering to HTML.
pub(crate) context: Context,
}
impl ServerThread {
/// Constructor.
fn new(
rx: Receiver<SseToken>,
stream: TcpStream,
allowed_urls: Arc<RwLock<HashSet<PathBuf>>>,
delivered_tpnote_docs: Arc<RwLock<HashSet<PathBuf>>>,
conn_counter: Arc<()>,
mut context: Context,
) -> Self {
let local_addr = stream.local_addr();
// Compose JavaScript code.
let note_js = match local_addr {
Ok(addr) => format!(
"{}{}:{}{}",
SSE_CLIENT_CODE1,
LOCALHOST,
addr.port(),
SSE_CLIENT_CODE2
),
Err(_) => {
panic!("No TCP connection: socket address of local half is missing.")
}
};
// Save JavaScript code.
context.insert(TMPL_HTML_VAR_NOTE_JS, ¬e_js);
Self {
rx,
stream,
allowed_urls,
delivered_tpnote_docs,
conn_counter,
context,
}
}
/// Wrapper for `serve_connection2()` that logs
/// errors as log message warnings.
fn serve_connection(&mut self) {
match Self::serve_connection2(self) {
Ok(_) => (),
Err(e) => {
log::debug!(
"TCP port local {} to peer {}: Closed connection because of error: {}",
self.stream
.local_addr()
.unwrap_or_else(|_| SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(0, 0, 0, 0),
0
)))
.port(),
self.stream
.peer_addr()
.unwrap_or_else(|_| SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(0, 0, 0, 0),
0
)))
.port(),
e
);
}
}
}
/// HTTP server: serves content and events via the specified subscriber stream.
#[inline]
#[allow(clippy::needless_return)]
fn serve_connection2(&mut self) -> Result<(), ViewerError> {
// One reference is hold by the `manage_connections` thread and does not count.
// This is why we subtract 1.
let open_connections = Arc::<()>::strong_count(&self.conn_counter) - 1;
log::trace!(
"TCP port local {} to peer {}: New incoming TCP connection ({} open).",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
open_connections
);
// Check if we exceed our connection limit.
if open_connections > CFG.viewer.tcp_connections_max {
self.respond_service_unavailable()?;
// This ends this thread and closes the connection.
return Err(ViewerError::TcpConnectionsExceeded {
max_conn: CFG.viewer.tcp_connections_max,
});
}
'tcp_connection: loop {
// This is inspired by the Spook crate.
// Read the request.
let mut read_buffer = [0u8; TCP_READ_BUFFER_SIZE];
let mut buffer = Vec::new();
let (method, path) = 'assemble_tcp_chunks: loop {
// Read the request, or part thereof.
match self.stream.read(&mut read_buffer) {
Ok(0) => {
log::trace!(
"TCP port local {} to peer {}: Connection closed by peer.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port()
);
// Connection by peer.
break 'tcp_connection;
}
Err(e) => {
// Connection closed or error.
return Err(ViewerError::StreamRead { error: e });
}
Ok(n) => {
// Successful read.
buffer.extend_from_slice(&read_buffer[..n]);
log::trace!(
"TCP port local {} to peer {}: chunk: {:?} ...",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
std::str::from_utf8(&read_buffer)
.unwrap_or_default()
.chars()
.take(60)
.collect::<String>()
);
}
}
// Try to parse the request.
let mut headers = [httparse::EMPTY_HEADER; 16];
let mut req = httparse::Request::new(&mut headers);
let res = req.parse(&buffer)?;
if res.is_partial() {
continue 'assemble_tcp_chunks;
}
// Check if the HTTP header is complete and valid.
if res.is_complete() {
if let (Some(method), Some(path)) = (req.method, req.path) {
// This is the only regular exit.
break 'assemble_tcp_chunks (method, path);
}
};
// We quit with error. There is nothing more we can do here.
return Err(ViewerError::StreamParse {
source_str: std::str::from_utf8(&buffer)
.unwrap_or_default()
.chars()
.take(60)
.collect::<String>(),
});
};
// End of input chunk loop.
// The only supported request method for SSE is GET.
if method != "GET" {
self.respond_method_not_allowed(method)?;
continue 'tcp_connection;
}
// Decode the percent encoding in the URL path.
let path = percent_decode_str(path).decode_utf8()?;
// Check the path.
// Serve note rendition.
match &*path {
// This is a connection for Server-Sent-Events.
SSE_EVENT_PATH => {
// Serve event response, but keep the connection.
self.respond_event_ok()?;
// Make the stream non-blocking to be able to detect whether the
// connection was closed by the client.
self.stream.set_nonblocking(true)?;
// Serve events until the connection is closed.
// Keep in mind that the client will often close
// the request after the first event if the event
// is used to trigger a page refresh, so try to eagerly
// detect closed connections.
'_event: loop {
// Wait for the next update.
let msg = self.rx.recv()?;
// Detect whether the connection was closed.
match self.stream.read(&mut read_buffer) {
// Connection closed.
Ok(0) => {
log::trace!(
"TCP port local {} to peer {}: Event connection closed by peer.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port()
);
// Our peer closed this connection, we finish also then.
break 'tcp_connection;
}
// Connection alive.
Ok(_) => {}
// `WouldBlock` is OK, all others not.
Err(e) => {
if e.kind() != ErrorKind::WouldBlock {
// Something bad happened.
return Err(ViewerError::StreamRead { error: e });
}
}
}
// Send event.
let event = match msg {
SseToken::Update => "event: update\r\ndata:\r\n\r\n".to_string(),
SseToken::Ping => ": ping\r\n\r\n".to_string(),
};
self.stream.write_all(event.as_bytes())?;
log::debug!(
"TCP port local {} to peer {} ({} open TCP conn.): pushed '{:?}' in event connection to web browser.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
Arc::<()>::strong_count(&self.conn_counter) - 1,
msg,
);
}
}
// Serve all other documents.
_ => self.respond(&path)?,
}; // end of match path
} // Go to 'tcp_connection loop start
log::trace!(
"TCP port local {} to peer {}: ({} open). Closing this TCP connection.",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
// We subtract 1 for the `manage connection()` thread, and
// 1 for the thread we will close in a moment.
Arc::<()>::strong_count(&self.conn_counter) - 2,
);
// We came here because the client closed this connection.
Ok(())
}
/// Write HTTP event response.
fn respond_event_ok(&mut self) -> Result<(), ViewerError> {
// Declare SSE capability and allow cross-origin access.
let response = format!(
"\
HTTP/1.1 200 OK\r\n\
Date: {}\r\n\
Access-Control-Allow-Origin: *\r\n\
Cache-Control: no-cache\r\n\
Content-Type: text/event-stream\r\n\
\r\n",
httpdate::fmt_http_date(SystemTime::now()),
);
self.stream.write_all(response.as_bytes())?;
log::debug!(
"TCP port local {} to peer {}: 200 OK, served event header, \
keeping event connection open ...",
self.stream.local_addr()?.port(),
self.stream.peer_addr()?.port(),
);
Ok(())
}
} | /// Server-Sent-Event tokens our HTTP client has registered to receive.
#[derive(Debug, Clone, Copy)] | random_line_split |
inner_product_proof.rs | #![allow(non_snake_case)]
#![doc(include = "../docs/inner-product-protocol.md")]
use std::borrow::Borrow;
use std::iter;
use curve25519_dalek::ristretto::RistrettoPoint;
use curve25519_dalek::scalar::Scalar;
use curve25519_dalek::traits::VartimeMultiscalarMul;
use proof_transcript::ProofTranscript;
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct InnerProductProof {
pub(crate) L_vec: Vec<RistrettoPoint>,
pub(crate) R_vec: Vec<RistrettoPoint>,
pub(crate) a: Scalar,
pub(crate) b: Scalar,
}
impl InnerProductProof {
/// Create an inner-product proof.
///
/// The proof is created with respect to the bases \\(G\\), \\(H'\\),
/// where \\(H'\_i = H\_i \cdot \texttt{Hprime\\_factors}\_i\\).
///
/// The `verifier` is passed in as a parameter so that the
/// challenges depend on the *entire* transcript (including parent
/// protocols).
pub fn create<I>(
verifier: &mut ProofTranscript,
Q: &RistrettoPoint,
Hprime_factors: I,
mut G_vec: Vec<RistrettoPoint>,
mut H_vec: Vec<RistrettoPoint>,
mut a_vec: Vec<Scalar>,
mut b_vec: Vec<Scalar>,
) -> InnerProductProof
where
I: IntoIterator,
I::Item: Borrow<Scalar>,
{
// Create slices G, H, a, b backed by their respective
// vectors. This lets us reslice as we compress the lengths
// of the vectors in the main loop below.
let mut G = &mut G_vec[..];
let mut H = &mut H_vec[..];
let mut a = &mut a_vec[..];
let mut b = &mut b_vec[..];
let mut n = G.len();
// All of the input vectors must have the same length.
assert_eq!(G.len(), n);
assert_eq!(H.len(), n);
assert_eq!(a.len(), n);
assert_eq!(b.len(), n);
// XXX save these scalar mults by unrolling them into the
// first iteration of the loop below
for (H_i, h_i) in H.iter_mut().zip(Hprime_factors.into_iter()) {
*H_i = (&*H_i) * h_i.borrow();
}
let lg_n = n.next_power_of_two().trailing_zeros() as usize;
let mut L_vec = Vec::with_capacity(lg_n);
let mut R_vec = Vec::with_capacity(lg_n);
while n != 1 {
n = n / 2;
let (a_L, a_R) = a.split_at_mut(n);
let (b_L, b_R) = b.split_at_mut(n);
let (G_L, G_R) = G.split_at_mut(n);
let (H_L, H_R) = H.split_at_mut(n);
let c_L = inner_product(&a_L, &b_R);
let c_R = inner_product(&a_R, &b_L);
let L = RistrettoPoint::vartime_multiscalar_mul(
a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)),
G_R.iter().chain(H_L.iter()).chain(iter::once(Q)),
);
let R = RistrettoPoint::vartime_multiscalar_mul(
a_R.iter().chain(b_L.iter()).chain(iter::once(&c_R)),
G_L.iter().chain(H_R.iter()).chain(iter::once(Q)),
);
L_vec.push(L);
R_vec.push(R);
verifier.commit(L.compress().as_bytes());
verifier.commit(R.compress().as_bytes());
let u = verifier.challenge_scalar();
let u_inv = u.invert();
for i in 0..n {
a_L[i] = a_L[i] * u + u_inv * a_R[i];
b_L[i] = b_L[i] * u_inv + u * b_R[i];
G_L[i] = RistrettoPoint::vartime_multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]);
H_L[i] = RistrettoPoint::vartime_multiscalar_mul(&[u, u_inv], &[H_L[i], H_R[i]]);
}
a = a_L;
b = b_L;
G = G_L;
H = H_L;
}
return InnerProductProof {
L_vec: L_vec,
R_vec: R_vec,
a: a[0],
b: b[0],
};
}
/// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication
/// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details.
pub(crate) fn verification_scalars(
&self,
transcript: &mut ProofTranscript,
) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
let lg_n = self.L_vec.len();
let n = 1 << lg_n;
// 1. Recompute x_k,...,x_1 based on the proof transcript
let mut challenges = Vec::with_capacity(lg_n);
for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) {
// XXX maybe avoid this compression when proof ser/de is sorted out
transcript.commit(L.compress().as_bytes());
transcript.commit(R.compress().as_bytes());
challenges.push(transcript.challenge_scalar());
}
// 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
let mut challenges_inv = challenges.clone();
let allinv = Scalar::batch_invert(&mut challenges_inv);
// 3. Compute u_i^2 and (1/u_i)^2
for i in 0..lg_n {
// XXX missing square fn upstream
challenges[i] = challenges[i] * challenges[i];
challenges_inv[i] = challenges_inv[i] * challenges_inv[i];
}
let challenges_sq = challenges;
let challenges_inv_sq = challenges_inv;
// 4. Compute s values inductively.
let mut s = Vec::with_capacity(n);
s.push(allinv);
for i in 1..n {
let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize;
let k = 1 << lg_i;
// The challenges are stored in "creation order" as [u_k,...,u_1],
// so u_{lg(i)+1} = is indexed by (lg_n-1) - lg_i
let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i];
s.push(s[i - k] * u_lg_i_sq);
}
(challenges_sq, challenges_inv_sq, s)
}
/// This method is for testing that proof generation work,
/// but for efficiency the actual protocols would use `verification_scalars`
/// method to combine inner product verification with other checks
/// in a single multiscalar multiplication.
#[allow(dead_code)]
pub fn verify<I>(
&self,
transcript: &mut ProofTranscript,
Hprime_factors: I,
P: &RistrettoPoint,
Q: &RistrettoPoint,
G: &[RistrettoPoint],
H: &[RistrettoPoint],
) -> Result<(), ()>
where
I: IntoIterator,
I::Item: Borrow<Scalar>,
{
let (u_sq, u_inv_sq, s) = self.verification_scalars(transcript);
let a_times_s = s.iter().map(|s_i| self.a * s_i);
// 1/s[i] is s[!i], and !i runs from n-1 to 0 as i runs from 0 to n-1
let inv_s = s.iter().rev();
let h_times_b_div_s = Hprime_factors
.into_iter()
.zip(inv_s)
.map(|(h_i, s_i_inv)| (self.b * s_i_inv) * h_i.borrow());
let neg_u_sq = u_sq.iter().map(|ui| -ui);
let neg_u_inv_sq = u_inv_sq.iter().map(|ui| -ui);
let expect_P = RistrettoPoint::vartime_multiscalar_mul(
iter::once(self.a * self.b)
.chain(a_times_s)
.chain(h_times_b_div_s)
.chain(neg_u_sq)
.chain(neg_u_inv_sq),
iter::once(Q)
.chain(G.iter())
.chain(H.iter())
.chain(self.L_vec.iter())
.chain(self.R_vec.iter()),
);
if expect_P == *P {
Ok(())
} else |
}
}
/// Computes an inner product of two vectors
/// \\[
/// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i.
/// \\]
/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal.
pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar {
let mut out = Scalar::zero();
if a.len() != b.len() {
panic!("inner_product(a,b): lengths of vectors do not match");
}
for i in 0..a.len() {
out += a[i] * b[i];
}
out
}
#[cfg(test)]
mod tests {
use super::*;
use rand::OsRng;
use sha2::Sha512;
use util;
fn test_helper_create(n: usize) {
let mut rng = OsRng::new().unwrap();
use generators::{Generators, PedersenGenerators};
let gens = Generators::new(PedersenGenerators::default(), n, 1);
let G = gens.share(0).G.to_vec();
let H = gens.share(0).H.to_vec();
// Q would be determined upstream in the protocol, so we pick a random one.
let Q = RistrettoPoint::hash_from_bytes::<Sha512>(b"test point");
// a and b are the vectors for which we want to prove c = <a,b>
let a: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect();
let b: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect();
let c = inner_product(&a, &b);
// y_inv is (the inverse of) a random challenge
let y_inv = Scalar::random(&mut rng);
// P would be determined upstream, but we need a correct P to check the proof.
//
// To generate P = <a,G> + <b,H'> + <a,b> Q, compute
// P = <a,G> + <b',H> + <a,b> Q,
// where b' = b \circ y^(-n)
let b_prime = b.iter().zip(util::exp_iter(y_inv)).map(|(bi, yi)| bi * yi);
// a.iter() has Item=&Scalar, need Item=Scalar to chain with b_prime
let a_prime = a.iter().cloned();
let P = RistrettoPoint::vartime_multiscalar_mul(
a_prime.chain(b_prime).chain(iter::once(c)),
G.iter().chain(H.iter()).chain(iter::once(&Q)),
);
let mut verifier = ProofTranscript::new(b"innerproducttest");
let proof = InnerProductProof::create(
&mut verifier,
&Q,
util::exp_iter(y_inv),
G.clone(),
H.clone(),
a.clone(),
b.clone(),
);
let mut verifier = ProofTranscript::new(b"innerproducttest");
assert!(
proof
.verify(&mut verifier, util::exp_iter(y_inv), &P, &Q, &G, &H)
.is_ok()
);
}
#[test]
fn make_ipp_1() {
test_helper_create(1);
}
#[test]
fn make_ipp_2() {
test_helper_create(2);
}
#[test]
fn make_ipp_4() {
test_helper_create(4);
}
#[test]
fn make_ipp_32() {
test_helper_create(32);
}
#[test]
fn make_ipp_64() {
test_helper_create(64);
}
#[test]
fn test_inner_product() {
let a = vec![
Scalar::from_u64(1),
Scalar::from_u64(2),
Scalar::from_u64(3),
Scalar::from_u64(4),
];
let b = vec![
Scalar::from_u64(2),
Scalar::from_u64(3),
Scalar::from_u64(4),
Scalar::from_u64(5),
];
assert_eq!(Scalar::from_u64(40), inner_product(&a, &b));
}
}
| {
Err(())
} | conditional_block |
inner_product_proof.rs | #![allow(non_snake_case)]
#![doc(include = "../docs/inner-product-protocol.md")]
use std::borrow::Borrow;
use std::iter;
use curve25519_dalek::ristretto::RistrettoPoint;
use curve25519_dalek::scalar::Scalar;
use curve25519_dalek::traits::VartimeMultiscalarMul;
use proof_transcript::ProofTranscript;
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct InnerProductProof {
pub(crate) L_vec: Vec<RistrettoPoint>,
pub(crate) R_vec: Vec<RistrettoPoint>,
pub(crate) a: Scalar,
pub(crate) b: Scalar,
}
impl InnerProductProof {
/// Create an inner-product proof.
///
/// The proof is created with respect to the bases \\(G\\), \\(H'\\),
/// where \\(H'\_i = H\_i \cdot \texttt{Hprime\\_factors}\_i\\).
///
/// The `verifier` is passed in as a parameter so that the
/// challenges depend on the *entire* transcript (including parent
/// protocols).
pub fn create<I>(
verifier: &mut ProofTranscript,
Q: &RistrettoPoint,
Hprime_factors: I,
mut G_vec: Vec<RistrettoPoint>,
mut H_vec: Vec<RistrettoPoint>,
mut a_vec: Vec<Scalar>,
mut b_vec: Vec<Scalar>,
) -> InnerProductProof
where
I: IntoIterator,
I::Item: Borrow<Scalar>,
{
// Create slices G, H, a, b backed by their respective
// vectors. This lets us reslice as we compress the lengths
// of the vectors in the main loop below.
let mut G = &mut G_vec[..];
let mut H = &mut H_vec[..];
let mut a = &mut a_vec[..];
let mut b = &mut b_vec[..];
let mut n = G.len();
// All of the input vectors must have the same length.
assert_eq!(G.len(), n);
assert_eq!(H.len(), n);
assert_eq!(a.len(), n);
assert_eq!(b.len(), n);
// XXX save these scalar mults by unrolling them into the
// first iteration of the loop below
for (H_i, h_i) in H.iter_mut().zip(Hprime_factors.into_iter()) {
*H_i = (&*H_i) * h_i.borrow();
}
let lg_n = n.next_power_of_two().trailing_zeros() as usize;
let mut L_vec = Vec::with_capacity(lg_n);
let mut R_vec = Vec::with_capacity(lg_n);
while n != 1 {
n = n / 2;
let (a_L, a_R) = a.split_at_mut(n);
let (b_L, b_R) = b.split_at_mut(n);
let (G_L, G_R) = G.split_at_mut(n);
let (H_L, H_R) = H.split_at_mut(n);
let c_L = inner_product(&a_L, &b_R);
let c_R = inner_product(&a_R, &b_L);
let L = RistrettoPoint::vartime_multiscalar_mul(
a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)),
G_R.iter().chain(H_L.iter()).chain(iter::once(Q)),
);
let R = RistrettoPoint::vartime_multiscalar_mul(
a_R.iter().chain(b_L.iter()).chain(iter::once(&c_R)),
G_L.iter().chain(H_R.iter()).chain(iter::once(Q)),
);
L_vec.push(L);
R_vec.push(R);
verifier.commit(L.compress().as_bytes());
verifier.commit(R.compress().as_bytes());
let u = verifier.challenge_scalar();
let u_inv = u.invert();
for i in 0..n {
a_L[i] = a_L[i] * u + u_inv * a_R[i];
b_L[i] = b_L[i] * u_inv + u * b_R[i];
G_L[i] = RistrettoPoint::vartime_multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]);
H_L[i] = RistrettoPoint::vartime_multiscalar_mul(&[u, u_inv], &[H_L[i], H_R[i]]);
}
a = a_L;
b = b_L;
G = G_L;
H = H_L;
}
return InnerProductProof {
L_vec: L_vec,
R_vec: R_vec,
a: a[0],
b: b[0],
};
}
/// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication
/// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details.
pub(crate) fn | (
&self,
transcript: &mut ProofTranscript,
) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
let lg_n = self.L_vec.len();
let n = 1 << lg_n;
// 1. Recompute x_k,...,x_1 based on the proof transcript
let mut challenges = Vec::with_capacity(lg_n);
for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) {
// XXX maybe avoid this compression when proof ser/de is sorted out
transcript.commit(L.compress().as_bytes());
transcript.commit(R.compress().as_bytes());
challenges.push(transcript.challenge_scalar());
}
// 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
let mut challenges_inv = challenges.clone();
let allinv = Scalar::batch_invert(&mut challenges_inv);
// 3. Compute u_i^2 and (1/u_i)^2
for i in 0..lg_n {
// XXX missing square fn upstream
challenges[i] = challenges[i] * challenges[i];
challenges_inv[i] = challenges_inv[i] * challenges_inv[i];
}
let challenges_sq = challenges;
let challenges_inv_sq = challenges_inv;
// 4. Compute s values inductively.
let mut s = Vec::with_capacity(n);
s.push(allinv);
for i in 1..n {
let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize;
let k = 1 << lg_i;
// The challenges are stored in "creation order" as [u_k,...,u_1],
// so u_{lg(i)+1} = is indexed by (lg_n-1) - lg_i
let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i];
s.push(s[i - k] * u_lg_i_sq);
}
(challenges_sq, challenges_inv_sq, s)
}
/// This method is for testing that proof generation work,
/// but for efficiency the actual protocols would use `verification_scalars`
/// method to combine inner product verification with other checks
/// in a single multiscalar multiplication.
#[allow(dead_code)]
pub fn verify<I>(
&self,
transcript: &mut ProofTranscript,
Hprime_factors: I,
P: &RistrettoPoint,
Q: &RistrettoPoint,
G: &[RistrettoPoint],
H: &[RistrettoPoint],
) -> Result<(), ()>
where
I: IntoIterator,
I::Item: Borrow<Scalar>,
{
let (u_sq, u_inv_sq, s) = self.verification_scalars(transcript);
let a_times_s = s.iter().map(|s_i| self.a * s_i);
// 1/s[i] is s[!i], and !i runs from n-1 to 0 as i runs from 0 to n-1
let inv_s = s.iter().rev();
let h_times_b_div_s = Hprime_factors
.into_iter()
.zip(inv_s)
.map(|(h_i, s_i_inv)| (self.b * s_i_inv) * h_i.borrow());
let neg_u_sq = u_sq.iter().map(|ui| -ui);
let neg_u_inv_sq = u_inv_sq.iter().map(|ui| -ui);
let expect_P = RistrettoPoint::vartime_multiscalar_mul(
iter::once(self.a * self.b)
.chain(a_times_s)
.chain(h_times_b_div_s)
.chain(neg_u_sq)
.chain(neg_u_inv_sq),
iter::once(Q)
.chain(G.iter())
.chain(H.iter())
.chain(self.L_vec.iter())
.chain(self.R_vec.iter()),
);
if expect_P == *P {
Ok(())
} else {
Err(())
}
}
}
/// Computes an inner product of two vectors
/// \\[
/// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i.
/// \\]
/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal.
pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar {
let mut out = Scalar::zero();
if a.len() != b.len() {
panic!("inner_product(a,b): lengths of vectors do not match");
}
for i in 0..a.len() {
out += a[i] * b[i];
}
out
}
#[cfg(test)]
mod tests {
    use super::*;
    use rand::OsRng;
    use sha2::Sha512;
    use util;
    /// Round-trip test: build a proof for random a, b of length n,
    /// then verify it against an independently constructed P.
    fn test_helper_create(n: usize) {
        let mut rng = OsRng::new().unwrap();
        use generators::{Generators, PedersenGenerators};
        let gens = Generators::new(PedersenGenerators::default(), n, 1);
        let G = gens.share(0).G.to_vec();
        let H = gens.share(0).H.to_vec();
        // Q would be determined upstream in the protocol, so we pick a random one.
        let Q = RistrettoPoint::hash_from_bytes::<Sha512>(b"test point");
        // a and b are the vectors for which we want to prove c = <a,b>
        let a: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect();
        let b: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect();
        let c = inner_product(&a, &b);
        // y_inv is (the inverse of) a random challenge
        let y_inv = Scalar::random(&mut rng);
        // P would be determined upstream, but we need a correct P to check the proof.
        //
        // To generate P = <a,G> + <b,H'> + <a,b> Q, compute
        // P = <a,G> + <b',H> + <a,b> Q,
        // where b' = b \circ y^(-n)
        let b_prime = b.iter().zip(util::exp_iter(y_inv)).map(|(bi, yi)| bi * yi);
        // a.iter() has Item=&Scalar, need Item=Scalar to chain with b_prime
        let a_prime = a.iter().cloned();
        let P = RistrettoPoint::vartime_multiscalar_mul(
            a_prime.chain(b_prime).chain(iter::once(c)),
            G.iter().chain(H.iter()).chain(iter::once(&Q)),
        );
        let mut verifier = ProofTranscript::new(b"innerproducttest");
        let proof = InnerProductProof::create(
            &mut verifier,
            &Q,
            util::exp_iter(y_inv),
            G.clone(),
            H.clone(),
            a.clone(),
            b.clone(),
        );
        // A fresh transcript with the same label must replay the same challenges.
        let mut verifier = ProofTranscript::new(b"innerproducttest");
        assert!(
            proof
                .verify(&mut verifier, util::exp_iter(y_inv), &P, &Q, &G, &H)
                .is_ok()
        );
    }
    #[test]
    fn make_ipp_1() {
        test_helper_create(1);
    }
    #[test]
    fn make_ipp_2() {
        test_helper_create(2);
    }
    #[test]
    fn make_ipp_4() {
        test_helper_create(4);
    }
    #[test]
    fn make_ipp_32() {
        test_helper_create(32);
    }
    #[test]
    fn make_ipp_64() {
        test_helper_create(64);
    }
    #[test]
    fn test_inner_product() {
        // <(1,2,3,4),(2,3,4,5)> = 2 + 6 + 12 + 20 = 40
        let a = vec![
            Scalar::from_u64(1),
            Scalar::from_u64(2),
            Scalar::from_u64(3),
            Scalar::from_u64(4),
        ];
        let b = vec![
            Scalar::from_u64(2),
            Scalar::from_u64(3),
            Scalar::from_u64(4),
            Scalar::from_u64(5),
        ];
        assert_eq!(Scalar::from_u64(40), inner_product(&a, &b));
    }
}
| verification_scalars | identifier_name |
inner_product_proof.rs | #![allow(non_snake_case)]
#![doc(include = "../docs/inner-product-protocol.md")]
use std::borrow::Borrow;
use std::iter;
use curve25519_dalek::ristretto::RistrettoPoint;
use curve25519_dalek::scalar::Scalar;
use curve25519_dalek::traits::VartimeMultiscalarMul;
use proof_transcript::ProofTranscript;
/// Inner-product argument produced by [`InnerProductProof::create`].
///
/// `L_vec` and `R_vec` hold one (L, R) commitment pair per halving round
/// of the recursive argument; `a` and `b` are the two scalars remaining
/// after the vectors have been folded down to length 1.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct InnerProductProof {
    pub(crate) L_vec: Vec<RistrettoPoint>,
    pub(crate) R_vec: Vec<RistrettoPoint>,
    pub(crate) a: Scalar,
    pub(crate) b: Scalar,
}
impl InnerProductProof {
    /// Create an inner-product proof.
    ///
    /// The proof is created with respect to the bases \\(G\\), \\(H'\\),
    /// where \\(H'\_i = H\_i \cdot \texttt{Hprime\\_factors}\_i\\).
    ///
    /// The `verifier` is passed in as a parameter so that the
    /// challenges depend on the *entire* transcript (including parent
    /// protocols).
    ///
    /// The four input vectors must share one length n; the halving loop
    /// assumes n is a power of two (presumably padded upstream — TODO confirm).
    pub fn create<I>(
        verifier: &mut ProofTranscript,
        Q: &RistrettoPoint,
        Hprime_factors: I,
        mut G_vec: Vec<RistrettoPoint>,
        mut H_vec: Vec<RistrettoPoint>,
        mut a_vec: Vec<Scalar>,
        mut b_vec: Vec<Scalar>,
    ) -> InnerProductProof
    where
        I: IntoIterator,
        I::Item: Borrow<Scalar>,
    {
        // Create slices G, H, a, b backed by their respective
        // vectors. This lets us reslice as we compress the lengths
        // of the vectors in the main loop below.
        let mut G = &mut G_vec[..];
        let mut H = &mut H_vec[..];
        let mut a = &mut a_vec[..];
        let mut b = &mut b_vec[..];
        let mut n = G.len();
        // All of the input vectors must have the same length.
        // NOTE(review): the first assert compares G.len() to itself and is
        // vacuous; only the H/a/b checks constrain anything.
        assert_eq!(G.len(), n);
        assert_eq!(H.len(), n);
        assert_eq!(a.len(), n);
        assert_eq!(b.len(), n);
        // Fold the H' factors into H in place, so the rest of the
        // protocol works with the bases G, H'.
        // XXX save these scalar mults by unrolling them into the
        // first iteration of the loop below
        for (H_i, h_i) in H.iter_mut().zip(Hprime_factors.into_iter()) {
            *H_i = (&*H_i) * h_i.borrow();
        }
        // lg_n rounds, each emitting one (L, R) pair.
        let lg_n = n.next_power_of_two().trailing_zeros() as usize;
        let mut L_vec = Vec::with_capacity(lg_n);
        let mut R_vec = Vec::with_capacity(lg_n);
        while n != 1 {
            // Split every vector into halves, commit to the two
            // cross-inner-products, then fold halves together using the
            // transcript challenge u.
            n = n / 2;
            let (a_L, a_R) = a.split_at_mut(n);
            let (b_L, b_R) = b.split_at_mut(n);
            let (G_L, G_R) = G.split_at_mut(n);
            let (H_L, H_R) = H.split_at_mut(n);
            let c_L = inner_product(&a_L, &b_R);
            let c_R = inner_product(&a_R, &b_L);
            let L = RistrettoPoint::vartime_multiscalar_mul(
                a_L.iter().chain(b_R.iter()).chain(iter::once(&c_L)),
                G_R.iter().chain(H_L.iter()).chain(iter::once(Q)),
            );
            let R = RistrettoPoint::vartime_multiscalar_mul(
                a_R.iter().chain(b_L.iter()).chain(iter::once(&c_R)),
                G_L.iter().chain(H_R.iter()).chain(iter::once(Q)),
            );
            L_vec.push(L);
            R_vec.push(R);
            // Commit L, R before drawing u: the challenge is bound to
            // this round's commitments (Fiat-Shamir).
            verifier.commit(L.compress().as_bytes());
            verifier.commit(R.compress().as_bytes());
            let u = verifier.challenge_scalar();
            let u_inv = u.invert();
            // Fold: a <- u*a_L + u^-1*a_R, b <- u^-1*b_L + u*b_R, and the
            // bases fold with the inverse weights so commitments stay consistent.
            for i in 0..n {
                a_L[i] = a_L[i] * u + u_inv * a_R[i];
                b_L[i] = b_L[i] * u_inv + u * b_R[i];
                G_L[i] = RistrettoPoint::vartime_multiscalar_mul(&[u_inv, u], &[G_L[i], G_R[i]]);
                H_L[i] = RistrettoPoint::vartime_multiscalar_mul(&[u, u_inv], &[H_L[i], H_R[i]]);
            }
            // Continue with the folded (half-length) front halves.
            a = a_L;
            b = b_L;
            G = G_L;
            H = H_L;
        }
        return InnerProductProof {
            L_vec: L_vec,
            R_vec: R_vec,
            a: a[0],
            b: b[0],
        };
    }
    /// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication
    /// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details.
    pub(crate) fn verification_scalars(
        &self,
        transcript: &mut ProofTranscript,
    ) -> (Vec<Scalar>, Vec<Scalar>, Vec<Scalar>) {
        let lg_n = self.L_vec.len();
        let n = 1 << lg_n;
        // 1. Recompute x_k,...,x_1 based on the proof transcript
        let mut challenges = Vec::with_capacity(lg_n);
        for (L, R) in self.L_vec.iter().zip(self.R_vec.iter()) {
            // XXX maybe avoid this compression when proof ser/de is sorted out
            transcript.commit(L.compress().as_bytes());
            transcript.commit(R.compress().as_bytes());
            challenges.push(transcript.challenge_scalar());
        }
        // 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1
        let mut challenges_inv = challenges.clone();
        // batch_invert inverts in place and returns the product of all inverses.
        let allinv = Scalar::batch_invert(&mut challenges_inv);
        // 3. Compute u_i^2 and (1/u_i)^2
        for i in 0..lg_n {
            // XXX missing square fn upstream
            challenges[i] = challenges[i] * challenges[i];
            challenges_inv[i] = challenges_inv[i] * challenges_inv[i];
        }
        let challenges_sq = challenges;
        let challenges_inv_sq = challenges_inv;
        // 4. Compute s values inductively.
        // s[0] = prod(1/u_i); each later s[i] reuses s[i - 2^lg(i)] times
        // the square of the challenge selected by the top set bit of i.
        let mut s = Vec::with_capacity(n);
        s.push(allinv);
        for i in 1..n {
            let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize;
            let k = 1 << lg_i;
            // The challenges are stored in "creation order" as [u_k,...,u_1],
            // so u_{lg(i)+1} = is indexed by (lg_n-1) - lg_i
            let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i];
            s.push(s[i - k] * u_lg_i_sq);
        }
        (challenges_sq, challenges_inv_sq, s)
    }
    /// This method is for testing that proof generation work,
    /// but for efficiency the actual protocols would use `verification_scalars`
    /// method to combine inner product verification with other checks
    /// in a single multiscalar multiplication.
    ///
    /// Returns `Ok(())` iff the recomputed commitment equals `*P`.
    #[allow(dead_code)]
    pub fn verify<I>(
        &self,
        transcript: &mut ProofTranscript,
        Hprime_factors: I,
        P: &RistrettoPoint,
        Q: &RistrettoPoint,
        G: &[RistrettoPoint],
        H: &[RistrettoPoint],
    ) -> Result<(), ()>
    where
        I: IntoIterator,
        I::Item: Borrow<Scalar>,
    {
        let (u_sq, u_inv_sq, s) = self.verification_scalars(transcript);
        let a_times_s = s.iter().map(|s_i| self.a * s_i);
        // 1/s[i] is s[!i], and !i runs from n-1 to 0 as i runs from 0 to n-1
        let inv_s = s.iter().rev();
        let h_times_b_div_s = Hprime_factors
            .into_iter()
            .zip(inv_s)
            .map(|(h_i, s_i_inv)| (self.b * s_i_inv) * h_i.borrow());
        let neg_u_sq = u_sq.iter().map(|ui| -ui);
        let neg_u_inv_sq = u_inv_sq.iter().map(|ui| -ui);
        // Single multiscalar multiplication evaluating
        // a*b*Q + sum a*s_i*G_i + sum b/s_i*h_i*H_i - sum u_i^2 L_i - sum u_i^-2 R_i.
        let expect_P = RistrettoPoint::vartime_multiscalar_mul(
            iter::once(self.a * self.b)
                .chain(a_times_s)
                .chain(h_times_b_div_s)
                .chain(neg_u_sq)
                .chain(neg_u_inv_sq),
            iter::once(Q)
                .chain(G.iter())
                .chain(H.iter())
                .chain(self.L_vec.iter())
                .chain(self.R_vec.iter()),
        );
        if expect_P == *P {
            Ok(())
        } else {
            Err(())
        }
    }
}
/// Computes the inner product
/// \\[
/// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i
/// \\]
/// of two scalar slices.
///
/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal.
pub fn inner_product(a: &[Scalar], b: &[Scalar]) -> Scalar {
    if a.len() != b.len() {
        panic!("inner_product(a,b): lengths of vectors do not match");
    }
    // Pair up the elements and fold their products into a running sum.
    a.iter()
        .zip(b.iter())
        .fold(Scalar::zero(), |acc, (a_i, b_i)| acc + a_i * b_i)
}
#[cfg(test)]
mod tests {
use super::*;
use rand::OsRng;
use sha2::Sha512;
use util;
fn test_helper_create(n: usize) {
let mut rng = OsRng::new().unwrap();
use generators::{Generators, PedersenGenerators};
let gens = Generators::new(PedersenGenerators::default(), n, 1);
let G = gens.share(0).G.to_vec();
let H = gens.share(0).H.to_vec();
// Q would be determined upstream in the protocol, so we pick a random one.
let Q = RistrettoPoint::hash_from_bytes::<Sha512>(b"test point");
// a and b are the vectors for which we want to prove c = <a,b>
let a: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect();
let b: Vec<_> = (0..n).map(|_| Scalar::random(&mut rng)).collect();
let c = inner_product(&a, &b);
// y_inv is (the inverse of) a random challenge
let y_inv = Scalar::random(&mut rng); | // where b' = b \circ y^(-n)
let b_prime = b.iter().zip(util::exp_iter(y_inv)).map(|(bi, yi)| bi * yi);
// a.iter() has Item=&Scalar, need Item=Scalar to chain with b_prime
let a_prime = a.iter().cloned();
let P = RistrettoPoint::vartime_multiscalar_mul(
a_prime.chain(b_prime).chain(iter::once(c)),
G.iter().chain(H.iter()).chain(iter::once(&Q)),
);
let mut verifier = ProofTranscript::new(b"innerproducttest");
let proof = InnerProductProof::create(
&mut verifier,
&Q,
util::exp_iter(y_inv),
G.clone(),
H.clone(),
a.clone(),
b.clone(),
);
let mut verifier = ProofTranscript::new(b"innerproducttest");
assert!(
proof
.verify(&mut verifier, util::exp_iter(y_inv), &P, &Q, &G, &H)
.is_ok()
);
}
#[test]
fn make_ipp_1() {
test_helper_create(1);
}
#[test]
fn make_ipp_2() {
test_helper_create(2);
}
#[test]
fn make_ipp_4() {
test_helper_create(4);
}
#[test]
fn make_ipp_32() {
test_helper_create(32);
}
#[test]
fn make_ipp_64() {
test_helper_create(64);
}
#[test]
fn test_inner_product() {
let a = vec![
Scalar::from_u64(1),
Scalar::from_u64(2),
Scalar::from_u64(3),
Scalar::from_u64(4),
];
let b = vec![
Scalar::from_u64(2),
Scalar::from_u64(3),
Scalar::from_u64(4),
Scalar::from_u64(5),
];
assert_eq!(Scalar::from_u64(40), inner_product(&a, &b));
}
} |
// P would be determined upstream, but we need a correct P to check the proof.
//
// To generate P = <a,G> + <b,H'> + <a,b> Q, compute
// P = <a,G> + <b',H> + <a,b> Q, | random_line_split |
spotutils.py | import numpy
import numpy.fft
import numpy.linalg
import copy
from astropy.io import fits
from scipy.interpolate import RectBivariateSpline
from scipy.signal import convolve
import offset_index
# some basic definitions
psSize = 9 # psSize x psSize postage stamps of stars
def RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1, zero_pad=False):
    """Build a RectBivariateSpline, optionally on a zero-padded grid.

    The original code contained an unconditional ``return`` that made the
    zero-padding branch unreachable dead code; the toggle is now an explicit,
    backward-compatible keyword (default ``zero_pad=False`` reproduces the
    old behavior exactly).

    Parameters
    ----------
    y1, x1 : 1-D arrays of grid coordinates (row, column).
    map1   : 2-D array of values on the (y1, x1) grid.
    kx, ky : spline degrees along x and y (default: bilinear).
    zero_pad : if True, surround the grid with one extra linearly-extrapolated
               coordinate on each side and zero map values there, so the
               spline falls to zero just outside the tabulated domain.

    Returns
    -------
    scipy.interpolate.RectBivariateSpline over the (possibly padded) grid.
    """
    if not zero_pad:
        return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)
    # Extend each coordinate axis by one linearly-extrapolated point per side.
    y2 = numpy.zeros(numpy.size(y1)+2)
    y2[1:-1] = y1
    y2[0] = 2*y2[1]-y2[2]
    y2[-1] = 2*y2[-2]-y2[-3]
    x2 = numpy.zeros(numpy.size(x1)+2)
    x2[1:-1] = x1
    x2[0] = 2*x2[1]-x2[2]
    x2[-1] = 2*x2[-2]-x2[-3]
    # Embed the map in a zero border so the spline decays to 0 at the edge.
    map2 = numpy.zeros((numpy.size(y1)+2, numpy.size(x1)+2))
    map2[1:-1,1:-1] = map1
    return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)
class EmptyClass():
    """Bare attribute container; used like types.SimpleNamespace for ad-hoc records."""
    pass
# spectral energy distribution class
class SpectralEnergyDistribution():
    """Source spectral energy distribution, evaluated as a photon rate density."""

    # make an SED -- several options for type
    def __init__(self, type, info):
        # Deep-copy info so later mutation by the caller cannot change this SED.
        self.type = type
        self.info = copy.deepcopy(info)

    # get Nlambda (photons/m^2/s/um) at lambda_ (um)
    def Nlambda(self, lambda_):
        # blackbody, info = [T (K), solidangle]
        if self.type=='BB':
            T = self.info[0]
            x = 14387.769/lambda_/T # hc/(kTlambda)
            # Planck law in photon units: 2c/lambda^4 / (e^x - 1) times the
            # solid angle; 2.99792458e14 is c in um/s and the e^-x/(1-e^-x)
            # form equals 1/(e^x-1) while avoiding overflow for large x.
            return(2/lambda_**4*2.99792458e14*1e12*numpy.exp(-x)/(1.-numpy.exp(-x))*self.info[1])
            # the 1e12 is the conversion from um^2 -> m^2
        else:
            print('ERROR: Invalid SED type')
            exit()
# filter class
class Filter():
    """Bandpass transmission curve evaluated as T(lambda)."""

    # construct a filter of the given type
    def __init__(self, type, info):
        # Keep a private copy so later mutation by the caller has no effect.
        self.type = type
        self.info = copy.deepcopy(info)

    # transmission at wavelength lambda_ (um)
    def Tlambda(self, lambda_):
        if self.type=='STH':
            # smoothed tophat: tanh edges whose widths are 2% of each cut wavelength
            lo = self.info[0]
            hi = self.info[1]
            rise = numpy.tanh((lambda_ - lo) / (.02 * lo))
            fall = numpy.tanh((lambda_ - hi) / (.02 * hi))
            return (rise - fall) / 2.
        if self.type=='interp':
            # tabulated curve, info[:,0] = wavelength, info[:,1] = throughput
            return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])
        print('ERROR: Invalid filter type')
        exit()
# load mask files
# Pupil masks for the 18 SCAs, read from FITS files at import time.
# maskfiles.rim / .full hold the normalized full-resolution arrays;
# .i_rim / .i_full hold bilinear interpolators at full and reduced resolutions.
maskfiles = EmptyClass()
maskfiles.D = 2292981.05344 # um  (telescope aperture diameter)
maskfiles.rim = []
maskfiles.full = []
maskfiles.i_rim = []
maskfiles.i_full = []
maskfiles.nSCA = 18
for k in range(18):
    # Rows are flipped ([::-1,:]) to match the code's pupil orientation.
    inFile = fits.open('pupils/SCA{:d}_rim_mask.fits'.format(k+1))
    maskfiles.rim += [numpy.copy(inFile[0].data[::-1,:])]
    inFile.close()
    inFile = fits.open('pupils/SCA{:d}_full_mask.fits'.format(k+1))
    maskfiles.full += [numpy.copy(inFile[0].data[::-1,:])]
    inFile.close()
    # normalize to peak = 1
    maskfiles.rim[k] /= numpy.amax(maskfiles.rim[k])
    maskfiles.full[k] /= numpy.amax(maskfiles.full[k])
    N_in = maskfiles.N_in = 2048
    x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)
    y_in = numpy.copy(x_in)
    interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.rim[k], kx=1, ky=1)
    maskfiles.i_rim += [interp_spline]
    interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.full[k], kx=1, ky=1)
    maskfiles.i_full += [interp_spline]
    # lower resolution masks (block-averaged by 2**ku per axis)
    # NOTE(review): appending the low-res splines inside the per-SCA loop makes
    # the list ordering SCA-major, but oversamp_psf indexes i_rim/i_full as
    # [scanum-1 + nSCA*imk] (resolution-major) -- confirm the intended ordering.
    maskfiles.n_lores = 7
    for ku in range(1,maskfiles.n_lores):
        N2 = N_in//2**ku
        x_in = numpy.linspace(-1+1/N2,1-1/N2,N2)
        y_in = numpy.copy(x_in)
        interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.rim[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
        maskfiles.i_rim += [interp_spline]
        interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.full[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
        maskfiles.i_full += [interp_spline]
# SCA locations
# Focal-plane geometry: per-SCA center coordinates (mm) and detector size.
sca = EmptyClass()
sca.size = 40.88 # mm  (side length of one SCA)
# Center coordinates of the 18 SCAs in the focal plane (mm).
sca.x = numpy.asarray([-22.14, -22.29, -22.44, -66.42, -66.92, -67.42, -110.70, -111.48, -112.64,
  22.14, 22.29, 22.44, 66.42, 66.92, 67.42, 110.70, 111.48, 112.64])
sca.y = numpy.asarray([12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06,
  12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06])
# scale factor used to normalize focal-plane coordinates (units per mm -- TODO confirm)
sca.scale = 133.08
# reference Zernikes
# 4 field-corner rows per SCA, 22 coefficients each; 1.38 rescales the table values.
ZernRef = EmptyClass()
ZernRef.data = numpy.loadtxt('pupils/zernike_ref.txt')[:,-22:] * 1.38
# filter data
# Columns: wavelength then per-band effective areas; divide by the aperture
# area (pi/4 D^2, D in m) to convert to dimensionless throughput.
FilterData = numpy.loadtxt('pupils/filter.dat')
FilterData[:,1:] /= numpy.pi/4.*(maskfiles.D/1e6)**2
# makes map of Zernikes of a given amplitude
# amp[0:Namp] = Z1 ... ZNamp
# on a spacing Ngrid (x, y = -(1-1/Ngrid) .. +(1-1/Ngrid) multiplied by scale)
#
def zernike_map_noll(amp, Ngrid, scale):
    """Sum of Noll-indexed Zernike polynomials on a square grid.

    amp[j-1] is the amplitude of Noll Zernike Z_j (Z1 = piston). Returns an
    (Ngrid, Ngrid) array sampled at x, y in (-1+1/Ngrid .. 1-1/Ngrid)*scale.

    Fix: `numpy.math.factorial` relied on the never-public `numpy.math` alias,
    which was removed in NumPy 2.0; use `math.factorial` instead.
    """
    import math  # stdlib factorial (numpy.math was removed in NumPy 2.0)
    xx = numpy.tile(numpy.linspace(-1+1/Ngrid,1-1/Ngrid,Ngrid), (Ngrid,1))
    yy = numpy.copy(xx.T)
    rho = numpy.sqrt(xx**2+yy**2)*scale
    phi = numpy.arctan2(yy,xx)
    output = numpy.zeros((Ngrid,Ngrid))
    # Smallest radial order nmax whose triangle covers namp coefficients.
    nmax = 0
    namp = numpy.size(amp)
    while namp>(nmax+1)*(nmax+2)//2: nmax+=1
    # Precompute rho^i and cos/sin(i*phi) tables (trigphi[-i] holds sin(i*phi)).
    rpows = numpy.ones((nmax+1,Ngrid,Ngrid))
    trigphi = numpy.ones((2*nmax+1,Ngrid,Ngrid))
    for i in range(1,nmax+1): rpows[i,:,:] = rho**i
    for i in range(0,nmax+1): trigphi[i,:,:] = numpy.cos(i*phi)
    for i in range(1,nmax+1): trigphi[-i,:,:] = numpy.sin(i*phi)
    # loop over Zernikes (radial order n, azimuthal order m)
    for n in range(nmax+1):
        for m in range(-n,n+1,2):
            # Radial polynomial R_n^{|m|}(rho).
            Z = numpy.zeros((Ngrid,Ngrid))
            for k in range((n-abs(m))//2+1):
                coef = (-1)**k * math.factorial(n-k)/math.factorial(k) \
                  /math.factorial((n-m)//2-k)/math.factorial((n+m)//2-k)
                Z += coef * rpows[n-2*k,:,:]
            # Azimuthal factor: cos(m*phi) for m>=0, sin(|m|*phi) for m<0.
            Z *= trigphi[m,:,:]
            # Noll index j for (n, m), with the parity rule on m's sign.
            j = n*(n+1)//2 + abs(m)
            if (-1)**j*(m+.5)<0 or m==0: j += 1
            # Noll normalization: sqrt(n+1), times sqrt(2) for m != 0.
            factor = numpy.sqrt(n+1)
            if m!=0: factor *= numpy.sqrt(2)
            if j<=namp: output += factor * amp[j-1] * Z
    return(output)
# make annular mask of given obstruction (fraction) and scale
def make_mask_annulus(obs, Nstep, scale):
    """0/1 float mask: 1 where obs <= rho < 1, on the standard pupil grid."""
    grid = numpy.linspace(-1 + 1/Nstep, 1 - 1/Nstep, Nstep)
    xx, yy = numpy.meshgrid(grid, grid)
    rho = numpy.hypot(xx, yy) * scale
    return ((rho >= obs) & (rho < 1)).astype(numpy.float64)
def test_zernike():
    # Manual check: print each of the first 36 Noll Zernikes sampled on a
    # 5x5 grid whose outermost points sit at rho = 1.
    for k in range(36):
        psi = numpy.zeros(36)
        psi[k] = 1
        N=5
        M = zernike_map_noll(psi, N, N/(N-1))
        print(' *** Zernike {:2d} ***'.format(k+1))
        for j in range(N):
            out = ''
            for i in range(N):
                out = out + ' {:10.5f}'.format(M[j,i])
            print(out)
        print('')
# psi is a vector of Zernikes, in wavelengths
# mask information: (currently none)
# scale = sampling (points per lambda/D)
# Nstep = # grid points
# output normalized to sum to 1
def mono_psf(psi, mask, scale, Nstep):
    """Monochromatic PSF from pupil-plane Zernike aberrations.

    The pupil amplitude is either an interpolated mask (when `mask` has an
    `N` attribute) apodized by the unit disc, or a plain annulus with a 0.32
    central obstruction. The PSF is |IFFT|^2 of the aberrated pupil,
    re-centered and normalized to unit sum.

    Fix: the manual four-quadrant swap is exactly numpy.fft.fftshift for even
    Nstep, and the manual version breaks (shape mismatch) for odd Nstep.
    """
    if hasattr(mask, 'N'):
        if hasattr(mask, 'spline'):
            interp_spline = mask.spline
        else:
            # Build a bilinear interpolator for the supplied mask array.
            N_in = 2048
            x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)
            y_in = numpy.copy(x_in)
            interp_spline = RectBivariateSplineZero(y_in, x_in, mask.array, kx=1, ky=1)
        # Sample the mask at the pupil-plane grid and clip to the unit disc.
        x2 = numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep)*scale
        y2 = numpy.copy(x2)
        amplitude = interp_spline(y2,x2).astype(numpy.complex128) * make_mask_annulus(0, Nstep, scale)
    else:
        # Default pupil: annulus with 32% central obstruction.
        amplitude = make_mask_annulus(.32, Nstep, scale).astype(numpy.complex128)
    # Apply the wavefront error as a phase screen (psi in waves).
    amplitude *= numpy.exp(2j * numpy.pi * zernike_map_noll(psi, Nstep, scale))
    amplitude = numpy.fft.ifft2(amplitude)
    power = numpy.abs(amplitude)**2
    # Move the zero-frequency (PSF peak) quadrants to the array center;
    # identical to the original explicit quadrant swap for even Nstep.
    newpower = numpy.fft.fftshift(power)
    return(newpower/numpy.sum(newpower))
# helper function
def onescut(n):
    """Trapezoid-rule weights for n intervals: n+1 points, endpoints halved, summing to 1."""
    w = numpy.full(n + 1, 1.0)
    w[0] = 0.5
    w[-1] = 0.5
    return w / n
# Gaussian quadrature weights across a filter
# sed = spectral energy distribution
# filter = filter information (incl. bandpass)
# nOrder = order of polynomial (number of nodes)
# wlrange = [lmin,lmax,npts] in um
#
# returns wavelengths, weights
def gq_weights(sed, filter, nOrder, wlrange):
    """Gaussian-quadrature nodes and weights for the weight function
    c(x) = sed.Nlambda(x) * filter.Tlambda(x) on [lmin, lmax].

    Fix: the lines tabulating c were missing, so the moment accumulation
    referenced an undefined variable `c` (NameError); they are restored.
    """
    # unpack info
    lmin = wlrange[0]; lmax = wlrange[1]; npts = wlrange[2]
    # tabulate the weight function c(x) = S(x) F(x) on a regular grid
    x = numpy.linspace(lmin,lmax,npts)
    c = numpy.zeros((npts))
    for i in range(npts):
        c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])
    # build integrals I_k = int x^k S(x) F(x) dx
    # (moments are taken about lctr for numerical conditioning)
    o = numpy.ones((npts))
    I = numpy.zeros((2*nOrder))
    lctr = numpy.mean(x)
    for k in range(2*nOrder):
        I[k] = numpy.sum(o*(x-lctr)**k*c)
    # orthogonal polynomial p_n
    # require sum_{j=0}^n coef_{n-j} I_{j+k} = 0 or
    # sum_{j=0}^{n-1} coef_{n-j} I_{j+k} = -I_{n+k} for k = 0 .. n-1
    coef = numpy.zeros((nOrder+1))
    coef[0] = 1.
    A = numpy.zeros((nOrder,nOrder))
    for k in range(nOrder):
        for j in range(nOrder):
            A[k,j] = I[j+k]
    coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]
    p = numpy.poly1d(coef)
    # quadrature nodes are the roots of p (shifted back by lctr below)
    xroot = numpy.sort(numpy.real(p.r))
    wroot = numpy.zeros_like(xroot)
    pprime = numpy.polyder(p)
    for i in range(nOrder):
        # weight_i from the Lagrange polynomial through the other roots
        px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i+1:])), r=True)
        wroot[i] = numpy.sum(px.c[::-1]*I[:nOrder]) / pprime(xroot[i])
    xroot = xroot + lctr
    return xroot,wroot
# psi is a vector of Zernikes, in microns
# mask information: (currently none)
# sed = spectral energy distribution
# scale = sampling (points per lambda/D @ 1 um)
# Nstep = # grid points
# filter = filter information (incl. bandpass)
# addInfo = class for general additional information
# output normalized to sum to 1
def poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):
    """Polychromatic PSF: weighted sum of monochromatic PSFs over the bandpass."""
    # integration steps: scan a fixed wavelength grid to find where the
    # filter transmits (> 1e-4), defining [wlmin, wlmax]
    hard_lmin = 0.4
    hard_lmax = 2.5
    hard_Nl = 420
    ilmin = hard_Nl-1; ilmax = 0
    for il in range(1,hard_Nl):
        wl = hard_lmin + il/hard_Nl*(hard_lmax-hard_lmin)
        if filter.Tlambda(wl)>1e-4:
            if il<ilmin:
                ilmin=il
                wlmin=wl
            if il>ilmax:
                ilmax=il
                wlmax=wl
    # trapezoid nodes: coarse outside the band, fine inside it
    na = ilmin//6 + 1
    nb = (hard_Nl-ilmax)//6 + 1
    wl = numpy.concatenate((numpy.linspace(hard_lmin,wlmin,na+1), numpy.linspace(wlmin,wlmax,ilmax-ilmin+1), numpy.linspace(wlmax,hard_lmax,nb+1)))
    dwl = numpy.concatenate(((wlmin-hard_lmin)*onescut(na), (wlmax-wlmin)*onescut(ilmax-ilmin), (hard_lmax-wlmax)*onescut(nb)))
    #print(wl,dwl,numpy.size(wl),numpy.size(dwl))
    # reduced coverage: replace the dense grid with 10 Gaussian-quadrature
    # nodes whose weights already absorb the SED*filter factor
    if hasattr(addInfo,'FastMode'):
        if addInfo.FastMode:
            wl, dwl = gq_weights(sed, filter, 10, [wlmin,wlmax,ilmax-ilmin+1])
    # make output PSF: accumulate c_i * monoPSF(lambda_i), then normalize
    sumc = 0.
    output = numpy.zeros((Nstep,Nstep))
    for i in range(numpy.size(wl)):
        c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]
        if hasattr(addInfo,'FastMode'):
            # in FastMode the GQ weights already include SED*filter
            if addInfo.FastMode: c = dwl[i]
        this_psi = numpy.copy(psi)/wl[i] # convert from um -> wavelengths of wavefront
        sumc += c
        output += c * mono_psf(this_psi, mask, scale_1um*wl[i], Nstep)
        #print('{:6.4f} {:11.5E}'.format(wl[i],filter.Tlambda(wl[i])))
    output /= sumc
    return(output)
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# ovsamp = oversampling factor
# Nstep = number of samples in each axis
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters
#   .par -> offset parameters
# addInfo = additional information class:
#   .ctr -> centroid (dx,dy)
def oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):
    """Oversampled PSF for one SCA/field position, including field-interpolated
    Zernikes, optional alignment offsets, the pupil mask, and a Gaussian jitter/
    charge-diffusion smoothing applied in Fourier space."""
    # get information
    parOn = False
    if hasattr(offsets, 'par'): parOn = True
    # get Zernikes in microns: bilinear interpolation of the 4 field-corner
    # rows of the reference table across the SCA
    ZR = ZernRef.data[4*(scanum-1):4*scanum,:]
    wt_L = .5 - pos[0]/sca.size
    wt_R = .5 + pos[0]/sca.size
    wt_B = .5 - pos[1]/sca.size
    wt_T = .5 + pos[1]/sca.size
    psi = wt_T*wt_L*ZR[0,:] + wt_B*wt_L*ZR[1,:] + wt_B*wt_R*ZR[2,:] + wt_T*wt_R*ZR[3,:]
    # focal-plane coordinates of this point (mm)
    xf = sca.x[scanum-1] + pos[0]
    yf = sca.y[scanum-1] + pos[1]
    # Zernike offsets: focus/astigmatism/coma adjustments plus a focus gradient
    # across the focal plane (Noll indices Z4..Z8 at psi[3..7])
    if parOn:
        psi[3] += offsets.par[offset_index.foc   ]
        psi[4] += offsets.par[offset_index.astig2]
        psi[5] += offsets.par[offset_index.astig1]
        psi[6] += offsets.par[offset_index.coma2 ]
        psi[7] += offsets.par[offset_index.coma1 ]
        psi[3] += (offsets.par[offset_index.focg1]*xf + offsets.par[offset_index.focg2]*yf)/sca.scale
    # pupil samples per lambda/D at 1 um: 0.11 arcsec pixel, D in um
    scale_1um = ovsamp / (.11*numpy.pi/648000) / maskfiles.D
    #print(scale_1um)
    # filter curves
    if filt=='K':
        filter = Filter('STH', [1.95,2.30])
    elif filt=='F':
        filter = Filter('interp', FilterData[:,(0,7)])
    elif filt=='H':
        filter = Filter('interp', FilterData[:,(0,6)])
    elif filt=='W':
        filter = Filter('interp', FilterData[:,(0,5)])
    elif filt=='J':
        filter = Filter('interp', FilterData[:,(0,4)])
    elif filt=='Y':
        filter = Filter('interp', FilterData[:,(0,3)])
    elif filt=='Z':
        filter = Filter('interp', FilterData[:,(0,2)])
    elif filt=='R':
        filter = Filter('interp', FilterData[:,(0,1)])
    else:
        print('Error: unknown filter')
        exit()
    # effective (throughput-weighted mean wavelength) pupil sampling
    la = numpy.linspace(.4, 2.5, 2101)
    fla = numpy.zeros(2101)
    for i in range(2101): fla[i] = filter.Tlambda(la[i])
    scale = scale_1um*numpy.sum(la*fla)/numpy.sum(fla)
    # get the mask: pick the coarsest pre-averaged resolution that still
    # resolves the sampled pupil
    mask = EmptyClass(); mask.N=1
    imk = 0
    while imk<maskfiles.n_lores-1 and Nstep/scale<maskfiles.N_in/2**(imk+1): imk+=1
    #print(' *** ', Nstep, scale, scale/scale_1um, imk)
    # NOTE(review): this indexing assumes i_rim/i_full are ordered
    # resolution-major ([all SCAs at res 0, then res 1, ...]) -- confirm
    # against the append order in the mask-loading loop.
    if filt=='F' or filt=='K':
        mask.spline = maskfiles.i_full[scanum-1 + maskfiles.nSCA*imk]
    else:
        mask.spline = maskfiles.i_rim[scanum-1 + maskfiles.nSCA*imk]
    # x & y offsets: shift the centroid via the tip/tilt Zernikes (psi[1:3])
    if hasattr(addInfo, 'ctr'):
        d = .5*(1-1/ovsamp)
        psi[1:3] -= (addInfo.ctr+d) * ovsamp / scale_1um / 4.
    output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)
    # smooth: Gaussian blur (jitter + diffusion), covariance in pixels^2
    Cxx = Cyy = .09; Cxy = 0.
    if parOn:
        Cxx = .09 + offsets.par[offset_index.jxx ]
        Cxy =       offsets.par[offset_index.jxy ]
        Cyy = .09 + offsets.par[offset_index.jyy ]
    # apply the blur as exp(-k^T C k / 2) in Fourier space
    output_fft = numpy.fft.fft2(output)
    kx = numpy.zeros((Nstep,Nstep))
    ky = numpy.zeros((Nstep,Nstep))
    for i in range(-Nstep//2, Nstep//2):
        kx[:,i] = abs(i)
        ky[i,:] = abs(i)
    kx *= 2.*numpy.pi*ovsamp/Nstep
    ky *= 2.*numpy.pi*ovsamp/Nstep
    output_fft = output_fft * numpy.exp(-Cxx*kx**2/2. - Cyy*ky**2/2. - Cxy*kx*ky)
    output = numpy.real(numpy.fft.ifft2(output_fft))
    return(output)
# parameters for next couple of functions
N_STD = 1024 # oversampled grid size per axis; must be a multiple of 4
OV_STD = 8   # default oversampling factor (sub-samples per detector pixel)
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters (placeholder)
# addInfo = additional information class:
#   .F -> total counts (in e)
#   .ctr -> centroid (dx,dy)
#   .many -> @ 5x5 grid of offsets
#
#   .bfe = add bfe (can include .bfe_a, .bfe_aplus)
#
#   .bfe_overwrite => special mode to compute BFE with time dependent PSF
#      .stamp_in = input stamp (so compute BFE from stamp_in *acting on* this PSF)
def postage_stamp(sed, filt, scanum, pos, offsets, addInfo):
    """psSize x psSize pixel stamp: bin the oversampled PSF into detector
    pixels, optionally with vertical trailing (vtpe), a 5x5 sub-pixel offset
    grid (many), and the brighter-fatter effect (bfe)."""
    N = N_STD # must be even
    ov = OV_STD
    if hasattr(addInfo,'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo,'FastMode'):
        if addInfo.FastMode:
            N = N//2
    # oversampled PSF scaled to the requested total flux (electrons)
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo) * addInfo.F
    # bin ov x ov sub-pixel blocks into detector pixels, centered on the array
    out = numpy.zeros((psSize, psSize))
    for i in range(psSize):
        x = N//2+(i-psSize//2)*ov
        for j in range(psSize):
            y = N//2+(j-psSize//2)*ov
            out[j,i] += numpy.sum(bigStamp[y:y+ov,x:x+ov])
            # vertical trailing: add a fraction of the pixel above
            if hasattr(addInfo, 'vtpe'):
                out[j,i] += addInfo.vtpe * numpy.sum(bigStamp[y+ov:y+2*ov,x:x+ov])
    # 'many' mode: 25 stamps on a 5x5 grid of 1-sub-pixel shifts
    if hasattr(addInfo,'many'):
        out = numpy.zeros((25, psSize, psSize))
        for i in range(psSize):
            x = N//2+(i-psSize//2)*ov
            for j in range(psSize):
                y = N//2+(j-psSize//2)*ov
                for k in range(25):
                    dy = k%5 - 2; dx = k//5 - 2
                    out[k,j,i] += numpy.sum(bigStamp[y+dy:y+dy+ov,x+dx:x+dx+ov])
    # BFE? shift charge across pixel boundaries in proportion to the
    # flux difference of the adjacent pixels (linearized, exposure-averaged)
    if hasattr(addInfo, 'bfe'):
        if hasattr(addInfo,'many'):
            print('Error -- cannot do both bfe and many in postage_stamp')
            exit()
        dout = numpy.zeros_like(out)
        # horizontal BFE: coefficient a + a_plus
        ah = 0
        if hasattr(addInfo, 'bfe_a'): ah += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'): ah += addInfo.bfe_aplus
        for i in range(psSize-1):
            x = N//2+(i-psSize//2)*ov
            for j in range(psSize):
                y = N//2+(j-psSize//2)*ov
                shift = ov * ah * (out[j,i+1]-out[j,i]) / 2. # in sub-pixels, average over exposure
                if hasattr(addInfo, 'bfe_overwrite'): shift = ov * ah * (addInfo.stamp_in[j,i+1]-addInfo.stamp_in[j,i]) / 2.
                # flux density at the shared boundary, from the two adjacent columns
                mflux = numpy.sum(bigStamp[y:y+ov,x+ov-1:x+ov+1])/2.
                dout[j,i]   += shift*mflux
                dout[j,i+1] -= shift*mflux
        # vertical BFE: coefficient a - a_plus
        av = 0
        if hasattr(addInfo, 'bfe_a'): av += addInfo.bfe_a
        if hasattr(addInfo, 'bfe_aplus'): av -= addInfo.bfe_aplus
        for i in range(psSize):
            x = N//2+(i-psSize//2)*ov
            for j in range(psSize-1):
                y = N//2+(j-psSize//2)*ov
                shift = ov * av * (out[j+1,i]-out[j,i]) / 2. # in sub-pixels, average over exposure
                if hasattr(addInfo, 'bfe_overwrite'): shift = ov * av * (addInfo.stamp_in[j+1,i]-addInfo.stamp_in[j,i]) / 2.
                mflux = numpy.sum(bigStamp[y+ov-1:y+ov+1,x:x+ov])/2.
                dout[j,i]   += shift*mflux
                dout[j+1,i] -= shift*mflux
        out+=dout
        # bfe_overwrite: return only the BFE perturbation, not the stamp itself
        if hasattr(addInfo, 'bfe_overwrite'): out=dout
    return(out)
#
# same input format but returns moments of the PSF
# A, xc, yc, T, e1, e2
def psfmoments(sed, filt, scanum, pos, offsets, addInfo):
    """Adaptive Gaussian-weighted moments of the pixel-convolved PSF.

    Returns [A, xc, yc, T, e1, e2]: amplitude, centroid (pixels), trace of the
    second-moment matrix (pixels^2), and the two ellipticity components.
    """
    N = N_STD # must be even
    ov = OV_STD
    if hasattr(addInfo,'many'):
        ov = addInfo.force_ov
    if hasattr(addInfo,'FastMode'):
        if addInfo.FastMode:
            N = N//2
    # zero the centroid and use unit flux for the moment measurement
    addInfoX = copy.deepcopy(addInfo); addInfoX.ctr = numpy.zeros((2)); addInfoX.F = 1.
    bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)
    # convolve with the ov x ov pixel tophat -> pixel-convolved PSF
    bigStamp = convolve(bigStamp, numpy.ones((ov,ov)), mode='full', method='direct')/ov**2
    Np = N+ov-1
    # moment format: A,x,y,Cxx,Cxy,Cyy (start from a round 2-subpixel-sigma guess)
    mom = numpy.asarray([1,0,0,4*ov**2,0,4*ov**2]).astype(numpy.float64)
    newmom = numpy.zeros_like(mom)
    con = .5 # convergence factor
    xx1 = numpy.tile(numpy.linspace(-(Np-1)/2., (Np-1)/2., Np), (Np,1))
    yy1 = numpy.copy(xx1.T)
    # fixed-point iteration for adaptive moments: weight by the current
    # best-fit Gaussian, remeasure, and relax toward the self-consistent value
    for iter in range(256):
        det = mom[3]*mom[5]-mom[4]**2
        xx = xx1-mom[1]
        yy = yy1-mom[2]
        G = numpy.exp((-mom[5]*xx**2 + 2*mom[4]*xx*yy - mom[3]*yy**2)/2./det) * bigStamp
        newmom[0] = numpy.sum(G)
        newmom[1] = numpy.sum(G*xx)
        newmom[2] = numpy.sum(G*yy)
        newmom[3] = numpy.sum(G*xx**2)
        newmom[4] = numpy.sum(G*xx*yy)
        newmom[5] = numpy.sum(G*yy**2)
        mom[0] = 2*newmom[0]
        # update: centroid error directly; covariance error relative to half
        # the current covariance (adaptive-moment consistency condition)
        err = newmom[1:]/newmom[0]; err[-3:] -= mom[-3:]/2.
        mom[1:] += err*con
    # convert to pixel units and (T, e1, e2) form
    return(numpy.array([mom[0], mom[1]/ov, mom[2]/ov, (mom[3]+mom[5])/ov**2, (mom[3]-mom[5])/(mom[3]+mom[5]), 2*mom[4]/(mom[3]+mom[5])]))
# returns chi^2
# var = read noise variance
def chi2_postage_stamp(obs, theory, var):
obs2 = numpy.maximum(obs+var, 1e-24)
return(numpy.sum(theory+var-obs2-obs2*numpy.log((theory+var)/obs2))*2) | c = numpy.zeros((npts))
for i in range(npts): | random_line_split |
spotutils.py | import numpy
import numpy.fft
import numpy.linalg
import copy
from astropy.io import fits
from scipy.interpolate import RectBivariateSpline
from scipy.signal import convolve
import offset_index
# some basic definitions
psSize = 9 # psSize x psSize postage stamps of stars
# zero padded RectBivariateSpline, if on
def RectBivariateSplineZero(y1,x1,map1,kx=1,ky=1):
return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)
y2 = numpy.zeros(numpy.size(y1)+2)
y2[1:-1] = y1
y2[0] = 2*y2[1]-y2[2]
y2[-1] = 2*y2[-2]-y2[-3]
x2 = numpy.zeros(numpy.size(x1)+2)
x2[1:-1] = x1
x2[0] = 2*x2[1]-x2[2]
x2[-1] = 2*x2[-2]-x2[-3]
map2 = numpy.zeros((numpy.size(y1)+2, numpy.size(x1)+2))
map2[1:-1,1:-1] = map1
return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)
class EmptyClass():
pass
# spectral energy distribution class
class SpectralEnergyDistribution():
# make an SED -- several options for type
def __init__(self, type, info):
self.type = type
self.info = copy.deepcopy(info)
# get Nlambda (photons/m^2/s/um) at lambda_ (um)
def Nlambda(self, lambda_):
# blackbody, info = [T (K), solidangle]
if self.type=='BB':
T = self.info[0]
x = 14387.769/lambda_/T # hc/(kTlambda)
return(2/lambda_**4*2.99792458e14*1e12*numpy.exp(-x)/(1.-numpy.exp(-x))*self.info[1])
# the 1e12 is the conversion from um^2 -> m^2
else:
print('ERROR: Invalid SED type')
exit()
# filter class
class Filter():
# make a filter -- several options for type
def __init__(self, type, info):
self.type = type
self.info = copy.deepcopy(info)
# get transmission
def Tlambda(self, lambda_):
# smoothed tophat
if self.type=='STH':
lmin = self.info[0]; dlmin = lmin*.02
lmax = self.info[1]; dlmax = lmax*.02
return((numpy.tanh((lambda_-lmin)/dlmin)-numpy.tanh((lambda_-lmax)/dlmax))/2.)
# interpolated file
# info shape (N,2) -- info[:,0] = wavelength, info[:,1] = throughput
elif self.type=='interp':
return(numpy.interp(lambda_, self.info[:,0], self.info[:,1]))
else:
print('ERROR: Invalid filter type')
exit()
# load mask files
maskfiles = EmptyClass()
maskfiles.D = 2292981.05344 # um
maskfiles.rim = []
maskfiles.full = []
maskfiles.i_rim = []
maskfiles.i_full = []
maskfiles.nSCA = 18
for k in range(18):
inFile = fits.open('pupils/SCA{:d}_rim_mask.fits'.format(k+1))
maskfiles.rim += [numpy.copy(inFile[0].data[::-1,:])]
inFile.close()
inFile = fits.open('pupils/SCA{:d}_full_mask.fits'.format(k+1))
maskfiles.full += [numpy.copy(inFile[0].data[::-1,:])]
inFile.close()
# normalize
maskfiles.rim[k] /= numpy.amax(maskfiles.rim[k])
maskfiles.full[k] /= numpy.amax(maskfiles.full[k])
N_in = maskfiles.N_in = 2048
x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.rim[k], kx=1, ky=1)
maskfiles.i_rim += [interp_spline]
interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.full[k], kx=1, ky=1)
maskfiles.i_full += [interp_spline]
# lower resolution masks
maskfiles.n_lores = 7
for ku in range(1,maskfiles.n_lores):
N2 = N_in//2**ku
x_in = numpy.linspace(-1+1/N2,1-1/N2,N2)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.rim[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
maskfiles.i_rim += [interp_spline]
interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.full[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
maskfiles.i_full += [interp_spline]
# SCA locations
sca = EmptyClass()
sca.size = 40.88 # mm
sca.x = numpy.asarray([-22.14, -22.29, -22.44, -66.42, -66.92, -67.42, -110.70, -111.48, -112.64,
22.14, 22.29, 22.44, 66.42, 66.92, 67.42, 110.70, 111.48, 112.64])
sca.y = numpy.asarray([12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06,
12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06])
sca.scale = 133.08
# reference Zernikes
ZernRef = EmptyClass()
ZernRef.data = numpy.loadtxt('pupils/zernike_ref.txt')[:,-22:] * 1.38
# filter data
FilterData = numpy.loadtxt('pupils/filter.dat')
FilterData[:,1:] /= numpy.pi/4.*(maskfiles.D/1e6)**2
# makes map of Zernikes of a given amplitude
# amp[0:Namp] = Z1 ... ZNamp
# on a spacing Ngrid (x, y = -(1-1/Ngrid) .. +(1-1/Ngrid) multiplied by scale)
#
def zernike_map_noll(amp, Ngrid, scale):
xx = numpy.tile(numpy.linspace(-1+1/Ngrid,1-1/Ngrid,Ngrid), (Ngrid,1))
yy = numpy.copy(xx.T)
rho = numpy.sqrt(xx**2+yy**2)*scale
phi = numpy.arctan2(yy,xx)
output = numpy.zeros((Ngrid,Ngrid))
nmax = 0
namp = numpy.size(amp)
while namp>(nmax+1)*(nmax+2)//2: nmax+=1
rpows = numpy.ones((nmax+1,Ngrid,Ngrid))
trigphi = numpy.ones((2*nmax+1,Ngrid,Ngrid))
for i in range(1,nmax+1): rpows[i,:,:] = rho**i
for i in range(0,nmax+1): trigphi[i,:,:] = numpy.cos(i*phi)
for i in range(1,nmax+1): trigphi[-i,:,:] = numpy.sin(i*phi)
# loop over Zernikes
for n in range(nmax+1):
for m in range(-n,n+1,2):
Z = numpy.zeros((Ngrid,Ngrid))
for k in range((n-abs(m))//2+1):
coef = (-1)**k * numpy.math.factorial(n-k)/numpy.math.factorial(k) \
/numpy.math.factorial((n-m)//2-k)/numpy.math.factorial((n+m)//2-k)
Z += coef * rpows[n-2*k,:,:]
#if m>=0:
# Z *= numpy.cos(m*phi)
#else:
# Z *= numpy.sin(-m*phi)
Z *= trigphi[m,:,:]
j = n*(n+1)//2 + abs(m)
if (-1)**j*(m+.5)<0 or m==0: j += 1
#print(n,m,j)
factor = numpy.sqrt(n+1)
if m!=0: factor *= numpy.sqrt(2)
if j<=namp: output += factor * amp[j-1] * Z
return(output)
# make annular mask of given obstruction (fraction) and scale
def make_mask_annulus(obs, Nstep, scale):
xx = numpy.tile(numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep), (Nstep,1))
yy = numpy.copy(xx.T)
rho = numpy.sqrt(xx**2+yy**2)*scale
return(numpy.where(numpy.logical_and(rho>=obs,rho<1),numpy.ones((Nstep,Nstep)),numpy.zeros((Nstep,Nstep))))
def test_zernike():
|
# psi is a vector of Zernikes, in wavelengths
# mask information: (currently none)
# scale = sampling (points per lambda/D)
# Nstep = # grid points
# output normalized to sum to 1
def mono_psf(psi, mask, scale, Nstep):
if hasattr(mask, 'N'):
if hasattr(mask, 'spline'):
interp_spline = mask.spline
else:
N_in = 2048
x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, mask.array, kx=1, ky=1)
x2 = numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep)*scale
y2 = numpy.copy(x2)
amplitude = interp_spline(y2,x2).astype(numpy.complex128) * make_mask_annulus(0, Nstep, scale)
else:
amplitude = make_mask_annulus(.32, Nstep, scale).astype(numpy.complex128)
amplitude *= numpy.exp(2j * numpy.pi * zernike_map_noll(psi, Nstep, scale))
amplitude = numpy.fft.ifft2(amplitude)
power = numpy.abs(amplitude)**2
# shift to center
newpower = numpy.zeros_like(power)
newpower[Nstep//2:Nstep,Nstep//2:Nstep] = power[0:Nstep//2,0:Nstep//2]
newpower[Nstep//2:Nstep,0:Nstep//2] = power[0:Nstep//2,Nstep//2:Nstep]
newpower[0:Nstep//2,Nstep//2:Nstep] = power[Nstep//2:Nstep,0:Nstep//2]
newpower[0:Nstep//2,0:Nstep//2] = power[Nstep//2:Nstep,Nstep//2:Nstep]
return(newpower/numpy.sum(newpower))
# helper function
def onescut(n):
array = numpy.ones((n+1))
array[0] = array[-1] = .5
return(array/n)
# Gaussian quadrature weights across a filter
# sed = spectral energy distribution
# filter = filter information (incl. bandpass)
# nOrder = order of polynomial (number of nodes)
# wlrange = [lmin,lmax,npts] in um
#
# returns wavelengths, weights
def gq_weights(sed, filter, nOrder, wlrange):
# unpack info
lmin = wlrange[0]; lmax = wlrange[1]; npts = wlrange[2]
# build integrals I_k = int x^k S(x) F(x) dx
x = numpy.linspace(lmin,lmax,npts)
c = numpy.zeros((npts))
for i in range(npts):
c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])
o = numpy.ones((npts))
I = numpy.zeros((2*nOrder))
lctr = numpy.mean(x)
for k in range(2*nOrder):
I[k] = numpy.sum(o*(x-lctr)**k*c)
# orthogonal polynomial p_n
# require sum_{j=0}^n coef_{n-j} I_{j+k} = 0 or
# sum_{j=0}^{n-1} coef_{n-j} I_{j+k} = -I_{n+k} for k = 0 .. n-1
coef = numpy.zeros((nOrder+1))
coef[0] = 1.
A = numpy.zeros((nOrder,nOrder))
for k in range(nOrder):
for j in range(nOrder):
A[k,j] = I[j+k]
coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]
p = numpy.poly1d(coef)
xroot = numpy.sort(numpy.real(p.r))
wroot = numpy.zeros_like(xroot)
pprime = numpy.polyder(p)
for i in range(nOrder):
px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i+1:])), r=True)
wroot[i] = numpy.sum(px.c[::-1]*I[:nOrder]) / pprime(xroot[i])
xroot = xroot + lctr
return xroot,wroot
# psi is a vector of Zernikes, in microns
# mask information: (currently none)
# sed = spectral energy distribution
# scale = sampling (points per lambda/D @ 1 um)
# Nstep = # grid points
# filter = filter information (incl. bandpass)
# addInfo = class for general additional information
# output normalized to sum to 1
def poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):
# integration steps
hard_lmin = 0.4
hard_lmax = 2.5
hard_Nl = 420
ilmin = hard_Nl-1; ilmax = 0
for il in range(1,hard_Nl):
wl = hard_lmin + il/hard_Nl*(hard_lmax-hard_lmin)
if filter.Tlambda(wl)>1e-4:
if il<ilmin:
ilmin=il
wlmin=wl
if il>ilmax:
ilmax=il
wlmax=wl
na = ilmin//6 + 1
nb = (hard_Nl-ilmax)//6 + 1
wl = numpy.concatenate((numpy.linspace(hard_lmin,wlmin,na+1), numpy.linspace(wlmin,wlmax,ilmax-ilmin+1), numpy.linspace(wlmax,hard_lmax,nb+1)))
dwl = numpy.concatenate(((wlmin-hard_lmin)*onescut(na), (wlmax-wlmin)*onescut(ilmax-ilmin), (hard_lmax-wlmax)*onescut(nb)))
#print(wl,dwl,numpy.size(wl),numpy.size(dwl))
# reduced coverage
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode:
wl, dwl = gq_weights(sed, filter, 10, [wlmin,wlmax,ilmax-ilmin+1])
# make output PSF
sumc = 0.
output = numpy.zeros((Nstep,Nstep))
for i in range(numpy.size(wl)):
c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode: c = dwl[i]
this_psi = numpy.copy(psi)/wl[i] # convert from um -> wavelengths of wavefront
sumc += c
output += c * mono_psf(this_psi, mask, scale_1um*wl[i], Nstep)
#print('{:6.4f} {:11.5E}'.format(wl[i],filter.Tlambda(wl[i])))
output /= sumc
return(output)
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# ovsamp = oversampling factor
# Nstep = number of samples in each axis
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters
# .par -> offset parameters
# addInfo = additional information class:
# .ctr -> centroid (dx,dy)
def oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):
# get information
parOn = False
if hasattr(offsets, 'par'): parOn = True
# get Zernikes in microns
ZR = ZernRef.data[4*(scanum-1):4*scanum,:]
wt_L = .5 - pos[0]/sca.size
wt_R = .5 + pos[0]/sca.size
wt_B = .5 - pos[1]/sca.size
wt_T = .5 + pos[1]/sca.size
psi = wt_T*wt_L*ZR[0,:] + wt_B*wt_L*ZR[1,:] + wt_B*wt_R*ZR[2,:] + wt_T*wt_R*ZR[3,:]
xf = sca.x[scanum-1] + pos[0]
yf = sca.y[scanum-1] + pos[1]
# Zernike offsets
if parOn:
psi[3] += offsets.par[offset_index.foc ]
psi[4] += offsets.par[offset_index.astig2]
psi[5] += offsets.par[offset_index.astig1]
psi[6] += offsets.par[offset_index.coma2]
psi[7] += offsets.par[offset_index.coma1]
psi[3] += (offsets.par[offset_index.focg1]*xf + offsets.par[offset_index.focg2]*yf)/sca.scale
scale_1um = ovsamp / (.11*numpy.pi/648000) / maskfiles.D
#print(scale_1um)
# filter curves
if filt=='K':
filter = Filter('STH', [1.95,2.30])
elif filt=='F':
filter = Filter('interp', FilterData[:,(0,7)])
elif filt=='H':
filter = Filter('interp', FilterData[:,(0,6)])
elif filt=='W':
filter = Filter('interp', FilterData[:,(0,5)])
elif filt=='J':
filter = Filter('interp', FilterData[:,(0,4)])
elif filt=='Y':
filter = Filter('interp', FilterData[:,(0,3)])
elif filt=='Z':
filter = Filter('interp', FilterData[:,(0,2)])
elif filt=='R':
filter = Filter('interp', FilterData[:,(0,1)])
else:
print('Error: unknown filter')
exit()
la = numpy.linspace(.4, 2.5, 2101)
fla = numpy.zeros(2101)
for i in range(2101): fla[i] = filter.Tlambda(la[i])
scale = scale_1um*numpy.sum(la*fla)/numpy.sum(fla)
# get the mask
mask = EmptyClass(); mask.N=1
imk = 0
while imk<maskfiles.n_lores-1 and Nstep/scale<maskfiles.N_in/2**(imk+1): imk+=1
#print(' *** ', Nstep, scale, scale/scale_1um, imk)
if filt=='F' or filt=='K':
mask.spline = maskfiles.i_full[scanum-1 + maskfiles.nSCA*imk]
else:
mask.spline = maskfiles.i_rim[scanum-1 + maskfiles.nSCA*imk]
# x & y offsets
if hasattr(addInfo, 'ctr'):
d = .5*(1-1/ovsamp)
psi[1:3] -= (addInfo.ctr+d) * ovsamp / scale_1um / 4.
output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)
# smooth
Cxx = Cyy = .09; Cxy = 0.
if parOn:
Cxx = .09 + offsets.par[offset_index.jxx ]
Cxy = offsets.par[offset_index.jxy ]
Cyy = .09 + offsets.par[offset_index.jyy ]
output_fft = numpy.fft.fft2(output)
kx = numpy.zeros((Nstep,Nstep))
ky = numpy.zeros((Nstep,Nstep))
for i in range(-Nstep//2, Nstep//2):
kx[:,i] = abs(i)
ky[i,:] = abs(i)
kx *= 2.*numpy.pi*ovsamp/Nstep
ky *= 2.*numpy.pi*ovsamp/Nstep
output_fft = output_fft * numpy.exp(-Cxx*kx**2/2. - Cyy*ky**2/2. - Cxy*kx*ky)
output = numpy.real(numpy.fft.ifft2(output_fft))
return(output)
# parameters for next couple of functions
N_STD = 1024 # must be a multiple of 4
OV_STD = 8
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters (placeholder)
# addInfo = additional information class:
# .F -> total counts (in e)
# .ctr -> centroid (dx,dy)
# .many -> @ 5x5 grid of offsets
#
# .bfe = add bfe (can include .bfe_a, .bfe_aplus)
#
# .bfe_overwrite => special mode to compute BFE with time dependent PSF
# .stamp_in = input stamp (so compute BFE from stamp_in *acting on* this PSF)
def postage_stamp(sed, filt, scanum, pos, offsets, addInfo):
N = N_STD # must be even
ov = OV_STD
if hasattr(addInfo,'many'):
ov = addInfo.force_ov
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode:
N = N//2
bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo) * addInfo.F
out = numpy.zeros((psSize, psSize))
for i in range(psSize):
x = N//2+(i-psSize//2)*ov
for j in range(psSize):
y = N//2+(j-psSize//2)*ov
out[j,i] += numpy.sum(bigStamp[y:y+ov,x:x+ov])
if hasattr(addInfo, 'vtpe'):
out[j,i] += addInfo.vtpe * numpy.sum(bigStamp[y+ov:y+2*ov,x:x+ov])
if hasattr(addInfo,'many'):
out = numpy.zeros((25, psSize, psSize))
for i in range(psSize):
x = N//2+(i-psSize//2)*ov
for j in range(psSize):
y = N//2+(j-psSize//2)*ov
for k in range(25):
dy = k%5 - 2; dx = k//5 - 2
out[k,j,i] += numpy.sum(bigStamp[y+dy:y+dy+ov,x+dx:x+dx+ov])
# BFE?
if hasattr(addInfo, 'bfe'):
if hasattr(addInfo,'many'):
print('Error -- cannot do both bfe and many in postage_stamp')
exit()
dout = numpy.zeros_like(out)
# horizontal BFE
ah = 0
if hasattr(addInfo, 'bfe_a'): ah += addInfo.bfe_a
if hasattr(addInfo, 'bfe_aplus'): ah += addInfo.bfe_aplus
for i in range(psSize-1):
x = N//2+(i-psSize//2)*ov
for j in range(psSize):
y = N//2+(j-psSize//2)*ov
shift = ov * ah * (out[j,i+1]-out[j,i]) / 2. # in sub-pixels, average over exposure
if hasattr(addInfo, 'bfe_overwrite'): shift = ov * ah * (addInfo.stamp_in[j,i+1]-addInfo.stamp_in[j,i]) / 2.
mflux = numpy.sum(bigStamp[y:y+ov,x+ov-1:x+ov+1])/2.
dout[j,i] += shift*mflux
dout[j,i+1] -= shift*mflux
# vertical BFE
av = 0
if hasattr(addInfo, 'bfe_a'): av += addInfo.bfe_a
if hasattr(addInfo, 'bfe_aplus'): av -= addInfo.bfe_aplus
for i in range(psSize):
x = N//2+(i-psSize//2)*ov
for j in range(psSize-1):
y = N//2+(j-psSize//2)*ov
shift = ov * av * (out[j+1,i]-out[j,i]) / 2. # in sub-pixels, average over exposure
if hasattr(addInfo, 'bfe_overwrite'): shift = ov * av * (addInfo.stamp_in[j+1,i]-addInfo.stamp_in[j,i]) / 2.
mflux = numpy.sum(bigStamp[y+ov-1:y+ov+1,x:x+ov])/2.
dout[j,i] += shift*mflux
dout[j+1,i] -= shift*mflux
out+=dout
if hasattr(addInfo, 'bfe_overwrite'): out=dout
return(out)
#
# same input format but returns moments of the PSF
# A, xc, yc, T, e1, e2
def psfmoments(sed, filt, scanum, pos, offsets, addInfo):
N = N_STD # must be even
ov = OV_STD
if hasattr(addInfo,'many'):
ov = addInfo.force_ov
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode:
N = N//2
addInfoX = copy.deepcopy(addInfo); addInfoX.ctr = numpy.zeros((2)); addInfoX.F = 1.
bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)
bigStamp = convolve(bigStamp, numpy.ones((ov,ov)), mode='full', method='direct')/ov**2
Np = N+ov-1
# moment format: A,x,y,Cxx,Cxy,Cyy
mom = numpy.asarray([1,0,0,4*ov**2,0,4*ov**2]).astype(numpy.float64)
newmom = numpy.zeros_like(mom)
con = .5 # convergence factor
xx1 = numpy.tile(numpy.linspace(-(Np-1)/2., (Np-1)/2., Np), (Np,1))
yy1 = numpy.copy(xx1.T)
for iter in range(256):
det = mom[3]*mom[5]-mom[4]**2
xx = xx1-mom[1]
yy = yy1-mom[2]
G = numpy.exp((-mom[5]*xx**2 + 2*mom[4]*xx*yy - mom[3]*yy**2)/2./det) * bigStamp
newmom[0] = numpy.sum(G)
newmom[1] = numpy.sum(G*xx)
newmom[2] = numpy.sum(G*yy)
newmom[3] = numpy.sum(G*xx**2)
newmom[4] = numpy.sum(G*xx*yy)
newmom[5] = numpy.sum(G*yy**2)
mom[0] = 2*newmom[0]
err = newmom[1:]/newmom[0]; err[-3:] -= mom[-3:]/2.
mom[1:] += err*con
return(numpy.array([mom[0], mom[1]/ov, mom[2]/ov, (mom[3]+mom[5])/ov**2, (mom[3]-mom[5])/(mom[3]+mom[5]), 2*mom[4]/(mom[3]+mom[5])]))
# returns chi^2
# var = read noise variance
def chi2_postage_stamp(obs, theory, var):
obs2 = numpy.maximum(obs+var, 1e-24)
return(numpy.sum(theory+var-obs2-obs2*numpy.log((theory+var)/obs2))*2)
| for k in range(36):
psi = numpy.zeros(36)
psi[k] = 1
N=5
M = zernike_map_noll(psi, N, N/(N-1))
print(' *** Zernike {:2d} ***'.format(k+1))
for j in range(N):
out = ''
for i in range(N):
out = out + ' {:10.5f}'.format(M[j,i])
print(out)
print('') | identifier_body |
spotutils.py | import numpy
import numpy.fft
import numpy.linalg
import copy
from astropy.io import fits
from scipy.interpolate import RectBivariateSpline
from scipy.signal import convolve
import offset_index
# some basic definitions
psSize = 9 # psSize x psSize postage stamps of stars
# zero padded RectBivariateSpline, if on
def RectBivariateSplineZero(y1,x1,map1,kx=1,ky=1):
return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)
y2 = numpy.zeros(numpy.size(y1)+2)
y2[1:-1] = y1
y2[0] = 2*y2[1]-y2[2]
y2[-1] = 2*y2[-2]-y2[-3]
x2 = numpy.zeros(numpy.size(x1)+2)
x2[1:-1] = x1
x2[0] = 2*x2[1]-x2[2]
x2[-1] = 2*x2[-2]-x2[-3]
map2 = numpy.zeros((numpy.size(y1)+2, numpy.size(x1)+2))
map2[1:-1,1:-1] = map1
return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)
class EmptyClass():
pass
# spectral energy distribution class
class SpectralEnergyDistribution():
# make an SED -- several options for type
def __init__(self, type, info):
self.type = type
self.info = copy.deepcopy(info)
# get Nlambda (photons/m^2/s/um) at lambda_ (um)
def Nlambda(self, lambda_):
# blackbody, info = [T (K), solidangle]
if self.type=='BB':
T = self.info[0]
x = 14387.769/lambda_/T # hc/(kTlambda)
return(2/lambda_**4*2.99792458e14*1e12*numpy.exp(-x)/(1.-numpy.exp(-x))*self.info[1])
# the 1e12 is the conversion from um^2 -> m^2
else:
print('ERROR: Invalid SED type')
exit()
# filter class
class Filter():
# make a filter -- several options for type
def __init__(self, type, info):
self.type = type
self.info = copy.deepcopy(info)
# get transmission
def Tlambda(self, lambda_):
# smoothed tophat
if self.type=='STH':
lmin = self.info[0]; dlmin = lmin*.02
lmax = self.info[1]; dlmax = lmax*.02
return((numpy.tanh((lambda_-lmin)/dlmin)-numpy.tanh((lambda_-lmax)/dlmax))/2.)
# interpolated file
# info shape (N,2) -- info[:,0] = wavelength, info[:,1] = throughput
elif self.type=='interp':
return(numpy.interp(lambda_, self.info[:,0], self.info[:,1]))
else:
print('ERROR: Invalid filter type')
exit()
# load mask files
maskfiles = EmptyClass()
maskfiles.D = 2292981.05344 # um
maskfiles.rim = []
maskfiles.full = []
maskfiles.i_rim = []
maskfiles.i_full = []
maskfiles.nSCA = 18
for k in range(18):
inFile = fits.open('pupils/SCA{:d}_rim_mask.fits'.format(k+1))
maskfiles.rim += [numpy.copy(inFile[0].data[::-1,:])]
inFile.close()
inFile = fits.open('pupils/SCA{:d}_full_mask.fits'.format(k+1))
maskfiles.full += [numpy.copy(inFile[0].data[::-1,:])]
inFile.close()
# normalize
maskfiles.rim[k] /= numpy.amax(maskfiles.rim[k])
maskfiles.full[k] /= numpy.amax(maskfiles.full[k])
N_in = maskfiles.N_in = 2048
x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.rim[k], kx=1, ky=1)
maskfiles.i_rim += [interp_spline]
interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.full[k], kx=1, ky=1)
maskfiles.i_full += [interp_spline]
# lower resolution masks
maskfiles.n_lores = 7
for ku in range(1,maskfiles.n_lores):
N2 = N_in//2**ku
x_in = numpy.linspace(-1+1/N2,1-1/N2,N2)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.rim[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
maskfiles.i_rim += [interp_spline]
interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.full[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
maskfiles.i_full += [interp_spline]
# SCA locations
sca = EmptyClass()
sca.size = 40.88 # mm
sca.x = numpy.asarray([-22.14, -22.29, -22.44, -66.42, -66.92, -67.42, -110.70, -111.48, -112.64,
22.14, 22.29, 22.44, 66.42, 66.92, 67.42, 110.70, 111.48, 112.64])
sca.y = numpy.asarray([12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06,
12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06])
sca.scale = 133.08
# reference Zernikes
ZernRef = EmptyClass()
ZernRef.data = numpy.loadtxt('pupils/zernike_ref.txt')[:,-22:] * 1.38
# filter data
FilterData = numpy.loadtxt('pupils/filter.dat')
FilterData[:,1:] /= numpy.pi/4.*(maskfiles.D/1e6)**2
# makes map of Zernikes of a given amplitude
# amp[0:Namp] = Z1 ... ZNamp
# on a spacing Ngrid (x, y = -(1-1/Ngrid) .. +(1-1/Ngrid) multiplied by scale)
#
def zernike_map_noll(amp, Ngrid, scale):
xx = numpy.tile(numpy.linspace(-1+1/Ngrid,1-1/Ngrid,Ngrid), (Ngrid,1))
yy = numpy.copy(xx.T)
rho = numpy.sqrt(xx**2+yy**2)*scale
phi = numpy.arctan2(yy,xx)
output = numpy.zeros((Ngrid,Ngrid))
nmax = 0
namp = numpy.size(amp)
while namp>(nmax+1)*(nmax+2)//2: nmax+=1
rpows = numpy.ones((nmax+1,Ngrid,Ngrid))
trigphi = numpy.ones((2*nmax+1,Ngrid,Ngrid))
for i in range(1,nmax+1): rpows[i,:,:] = rho**i
for i in range(0,nmax+1): trigphi[i,:,:] = numpy.cos(i*phi)
for i in range(1,nmax+1): trigphi[-i,:,:] = numpy.sin(i*phi)
# loop over Zernikes
for n in range(nmax+1):
for m in range(-n,n+1,2):
Z = numpy.zeros((Ngrid,Ngrid))
for k in range((n-abs(m))//2+1):
coef = (-1)**k * numpy.math.factorial(n-k)/numpy.math.factorial(k) \
/numpy.math.factorial((n-m)//2-k)/numpy.math.factorial((n+m)//2-k)
Z += coef * rpows[n-2*k,:,:]
#if m>=0:
# Z *= numpy.cos(m*phi)
#else:
# Z *= numpy.sin(-m*phi)
Z *= trigphi[m,:,:]
j = n*(n+1)//2 + abs(m)
if (-1)**j*(m+.5)<0 or m==0: j += 1
#print(n,m,j)
factor = numpy.sqrt(n+1)
if m!=0: factor *= numpy.sqrt(2)
if j<=namp: output += factor * amp[j-1] * Z
return(output)
# make annular mask of given obstruction (fraction) and scale
def make_mask_annulus(obs, Nstep, scale):
xx = numpy.tile(numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep), (Nstep,1))
yy = numpy.copy(xx.T)
rho = numpy.sqrt(xx**2+yy**2)*scale
return(numpy.where(numpy.logical_and(rho>=obs,rho<1),numpy.ones((Nstep,Nstep)),numpy.zeros((Nstep,Nstep))))
def test_zernike():
for k in range(36):
psi = numpy.zeros(36)
psi[k] = 1
N=5
M = zernike_map_noll(psi, N, N/(N-1))
print(' *** Zernike {:2d} ***'.format(k+1))
for j in range(N):
out = ''
for i in range(N):
out = out + ' {:10.5f}'.format(M[j,i])
print(out)
print('')
# psi is a vector of Zernikes, in wavelengths
# mask information: (currently none)
# scale = sampling (points per lambda/D)
# Nstep = # grid points
# output normalized to sum to 1
def mono_psf(psi, mask, scale, Nstep):
if hasattr(mask, 'N'):
if hasattr(mask, 'spline'):
interp_spline = mask.spline
else:
N_in = 2048
x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, mask.array, kx=1, ky=1)
x2 = numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep)*scale
y2 = numpy.copy(x2)
amplitude = interp_spline(y2,x2).astype(numpy.complex128) * make_mask_annulus(0, Nstep, scale)
else:
amplitude = make_mask_annulus(.32, Nstep, scale).astype(numpy.complex128)
amplitude *= numpy.exp(2j * numpy.pi * zernike_map_noll(psi, Nstep, scale))
amplitude = numpy.fft.ifft2(amplitude)
power = numpy.abs(amplitude)**2
# shift to center
newpower = numpy.zeros_like(power)
newpower[Nstep//2:Nstep,Nstep//2:Nstep] = power[0:Nstep//2,0:Nstep//2]
newpower[Nstep//2:Nstep,0:Nstep//2] = power[0:Nstep//2,Nstep//2:Nstep]
newpower[0:Nstep//2,Nstep//2:Nstep] = power[Nstep//2:Nstep,0:Nstep//2]
newpower[0:Nstep//2,0:Nstep//2] = power[Nstep//2:Nstep,Nstep//2:Nstep]
return(newpower/numpy.sum(newpower))
# helper function
def onescut(n):
array = numpy.ones((n+1))
array[0] = array[-1] = .5
return(array/n)
# Gaussian quadrature weights across a filter
# sed = spectral energy distribution
# filter = filter information (incl. bandpass)
# nOrder = order of polynomial (number of nodes)
# wlrange = [lmin,lmax,npts] in um
#
# returns wavelengths, weights
def gq_weights(sed, filter, nOrder, wlrange):
# unpack info
lmin = wlrange[0]; lmax = wlrange[1]; npts = wlrange[2]
# build integrals I_k = int x^k S(x) F(x) dx
x = numpy.linspace(lmin,lmax,npts)
c = numpy.zeros((npts))
for i in range(npts):
c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])
o = numpy.ones((npts))
I = numpy.zeros((2*nOrder))
lctr = numpy.mean(x)
for k in range(2*nOrder):
I[k] = numpy.sum(o*(x-lctr)**k*c)
# orthogonal polynomial p_n
# require sum_{j=0}^n coef_{n-j} I_{j+k} = 0 or
# sum_{j=0}^{n-1} coef_{n-j} I_{j+k} = -I_{n+k} for k = 0 .. n-1
coef = numpy.zeros((nOrder+1))
coef[0] = 1.
A = numpy.zeros((nOrder,nOrder))
for k in range(nOrder):
for j in range(nOrder):
A[k,j] = I[j+k]
coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]
p = numpy.poly1d(coef)
xroot = numpy.sort(numpy.real(p.r))
wroot = numpy.zeros_like(xroot)
pprime = numpy.polyder(p)
for i in range(nOrder):
px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i+1:])), r=True)
wroot[i] = numpy.sum(px.c[::-1]*I[:nOrder]) / pprime(xroot[i])
xroot = xroot + lctr
return xroot,wroot
# psi is a vector of Zernikes, in microns
# mask information: (currently none)
# sed = spectral energy distribution
# scale = sampling (points per lambda/D @ 1 um)
# Nstep = # grid points
# filter = filter information (incl. bandpass)
# addInfo = class for general additional information
# output normalized to sum to 1
def poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):
# integration steps
hard_lmin = 0.4
hard_lmax = 2.5
hard_Nl = 420
ilmin = hard_Nl-1; ilmax = 0
for il in range(1,hard_Nl):
wl = hard_lmin + il/hard_Nl*(hard_lmax-hard_lmin)
if filter.Tlambda(wl)>1e-4:
if il<ilmin:
ilmin=il
wlmin=wl
if il>ilmax:
ilmax=il
wlmax=wl
na = ilmin//6 + 1
nb = (hard_Nl-ilmax)//6 + 1
wl = numpy.concatenate((numpy.linspace(hard_lmin,wlmin,na+1), numpy.linspace(wlmin,wlmax,ilmax-ilmin+1), numpy.linspace(wlmax,hard_lmax,nb+1)))
dwl = numpy.concatenate(((wlmin-hard_lmin)*onescut(na), (wlmax-wlmin)*onescut(ilmax-ilmin), (hard_lmax-wlmax)*onescut(nb)))
#print(wl,dwl,numpy.size(wl),numpy.size(dwl))
# reduced coverage
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode:
wl, dwl = gq_weights(sed, filter, 10, [wlmin,wlmax,ilmax-ilmin+1])
# make output PSF
sumc = 0.
output = numpy.zeros((Nstep,Nstep))
for i in range(numpy.size(wl)):
c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]
if hasattr(addInfo,'FastMode'):
|
this_psi = numpy.copy(psi)/wl[i] # convert from um -> wavelengths of wavefront
sumc += c
output += c * mono_psf(this_psi, mask, scale_1um*wl[i], Nstep)
#print('{:6.4f} {:11.5E}'.format(wl[i],filter.Tlambda(wl[i])))
output /= sumc
return(output)
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# ovsamp = oversampling factor
# Nstep = number of samples in each axis
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters
# .par -> offset parameters
# addInfo = additional information class:
# .ctr -> centroid (dx,dy)
def oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):
# get information
parOn = False
if hasattr(offsets, 'par'): parOn = True
# get Zernikes in microns
ZR = ZernRef.data[4*(scanum-1):4*scanum,:]
wt_L = .5 - pos[0]/sca.size
wt_R = .5 + pos[0]/sca.size
wt_B = .5 - pos[1]/sca.size
wt_T = .5 + pos[1]/sca.size
psi = wt_T*wt_L*ZR[0,:] + wt_B*wt_L*ZR[1,:] + wt_B*wt_R*ZR[2,:] + wt_T*wt_R*ZR[3,:]
xf = sca.x[scanum-1] + pos[0]
yf = sca.y[scanum-1] + pos[1]
# Zernike offsets
if parOn:
psi[3] += offsets.par[offset_index.foc ]
psi[4] += offsets.par[offset_index.astig2]
psi[5] += offsets.par[offset_index.astig1]
psi[6] += offsets.par[offset_index.coma2]
psi[7] += offsets.par[offset_index.coma1]
psi[3] += (offsets.par[offset_index.focg1]*xf + offsets.par[offset_index.focg2]*yf)/sca.scale
scale_1um = ovsamp / (.11*numpy.pi/648000) / maskfiles.D
#print(scale_1um)
# filter curves
if filt=='K':
filter = Filter('STH', [1.95,2.30])
elif filt=='F':
filter = Filter('interp', FilterData[:,(0,7)])
elif filt=='H':
filter = Filter('interp', FilterData[:,(0,6)])
elif filt=='W':
filter = Filter('interp', FilterData[:,(0,5)])
elif filt=='J':
filter = Filter('interp', FilterData[:,(0,4)])
elif filt=='Y':
filter = Filter('interp', FilterData[:,(0,3)])
elif filt=='Z':
filter = Filter('interp', FilterData[:,(0,2)])
elif filt=='R':
filter = Filter('interp', FilterData[:,(0,1)])
else:
print('Error: unknown filter')
exit()
la = numpy.linspace(.4, 2.5, 2101)
fla = numpy.zeros(2101)
for i in range(2101): fla[i] = filter.Tlambda(la[i])
scale = scale_1um*numpy.sum(la*fla)/numpy.sum(fla)
# get the mask
mask = EmptyClass(); mask.N=1
imk = 0
while imk<maskfiles.n_lores-1 and Nstep/scale<maskfiles.N_in/2**(imk+1): imk+=1
#print(' *** ', Nstep, scale, scale/scale_1um, imk)
if filt=='F' or filt=='K':
mask.spline = maskfiles.i_full[scanum-1 + maskfiles.nSCA*imk]
else:
mask.spline = maskfiles.i_rim[scanum-1 + maskfiles.nSCA*imk]
# x & y offsets
if hasattr(addInfo, 'ctr'):
d = .5*(1-1/ovsamp)
psi[1:3] -= (addInfo.ctr+d) * ovsamp / scale_1um / 4.
output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)
# smooth
Cxx = Cyy = .09; Cxy = 0.
if parOn:
Cxx = .09 + offsets.par[offset_index.jxx ]
Cxy = offsets.par[offset_index.jxy ]
Cyy = .09 + offsets.par[offset_index.jyy ]
output_fft = numpy.fft.fft2(output)
kx = numpy.zeros((Nstep,Nstep))
ky = numpy.zeros((Nstep,Nstep))
for i in range(-Nstep//2, Nstep//2):
kx[:,i] = abs(i)
ky[i,:] = abs(i)
kx *= 2.*numpy.pi*ovsamp/Nstep
ky *= 2.*numpy.pi*ovsamp/Nstep
output_fft = output_fft * numpy.exp(-Cxx*kx**2/2. - Cyy*ky**2/2. - Cxy*kx*ky)
output = numpy.real(numpy.fft.ifft2(output_fft))
return(output)
# parameters for next couple of functions
N_STD = 1024 # must be a multiple of 4
OV_STD = 8
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters (placeholder)
# addInfo = additional information class:
# .F -> total counts (in e)
# .ctr -> centroid (dx,dy)
# .many -> @ 5x5 grid of offsets
#
# .bfe = add bfe (can include .bfe_a, .bfe_aplus)
#
# .bfe_overwrite => special mode to compute BFE with time dependent PSF
# .stamp_in = input stamp (so compute BFE from stamp_in *acting on* this PSF)
def postage_stamp(sed, filt, scanum, pos, offsets, addInfo):
N = N_STD # must be even
ov = OV_STD
if hasattr(addInfo,'many'):
ov = addInfo.force_ov
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode:
N = N//2
bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo) * addInfo.F
out = numpy.zeros((psSize, psSize))
for i in range(psSize):
x = N//2+(i-psSize//2)*ov
for j in range(psSize):
y = N//2+(j-psSize//2)*ov
out[j,i] += numpy.sum(bigStamp[y:y+ov,x:x+ov])
if hasattr(addInfo, 'vtpe'):
out[j,i] += addInfo.vtpe * numpy.sum(bigStamp[y+ov:y+2*ov,x:x+ov])
if hasattr(addInfo,'many'):
out = numpy.zeros((25, psSize, psSize))
for i in range(psSize):
x = N//2+(i-psSize//2)*ov
for j in range(psSize):
y = N//2+(j-psSize//2)*ov
for k in range(25):
dy = k%5 - 2; dx = k//5 - 2
out[k,j,i] += numpy.sum(bigStamp[y+dy:y+dy+ov,x+dx:x+dx+ov])
# BFE?
if hasattr(addInfo, 'bfe'):
if hasattr(addInfo,'many'):
print('Error -- cannot do both bfe and many in postage_stamp')
exit()
dout = numpy.zeros_like(out)
# horizontal BFE
ah = 0
if hasattr(addInfo, 'bfe_a'): ah += addInfo.bfe_a
if hasattr(addInfo, 'bfe_aplus'): ah += addInfo.bfe_aplus
for i in range(psSize-1):
x = N//2+(i-psSize//2)*ov
for j in range(psSize):
y = N//2+(j-psSize//2)*ov
shift = ov * ah * (out[j,i+1]-out[j,i]) / 2. # in sub-pixels, average over exposure
if hasattr(addInfo, 'bfe_overwrite'): shift = ov * ah * (addInfo.stamp_in[j,i+1]-addInfo.stamp_in[j,i]) / 2.
mflux = numpy.sum(bigStamp[y:y+ov,x+ov-1:x+ov+1])/2.
dout[j,i] += shift*mflux
dout[j,i+1] -= shift*mflux
# vertical BFE
av = 0
if hasattr(addInfo, 'bfe_a'): av += addInfo.bfe_a
if hasattr(addInfo, 'bfe_aplus'): av -= addInfo.bfe_aplus
for i in range(psSize):
x = N//2+(i-psSize//2)*ov
for j in range(psSize-1):
y = N//2+(j-psSize//2)*ov
shift = ov * av * (out[j+1,i]-out[j,i]) / 2. # in sub-pixels, average over exposure
if hasattr(addInfo, 'bfe_overwrite'): shift = ov * av * (addInfo.stamp_in[j+1,i]-addInfo.stamp_in[j,i]) / 2.
mflux = numpy.sum(bigStamp[y+ov-1:y+ov+1,x:x+ov])/2.
dout[j,i] += shift*mflux
dout[j+1,i] -= shift*mflux
out+=dout
if hasattr(addInfo, 'bfe_overwrite'): out=dout
return(out)
#
# same input format but returns moments of the PSF
# A, xc, yc, T, e1, e2
def psfmoments(sed, filt, scanum, pos, offsets, addInfo):
N = N_STD # must be even
ov = OV_STD
if hasattr(addInfo,'many'):
ov = addInfo.force_ov
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode:
N = N//2
addInfoX = copy.deepcopy(addInfo); addInfoX.ctr = numpy.zeros((2)); addInfoX.F = 1.
bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)
bigStamp = convolve(bigStamp, numpy.ones((ov,ov)), mode='full', method='direct')/ov**2
Np = N+ov-1
# moment format: A,x,y,Cxx,Cxy,Cyy
mom = numpy.asarray([1,0,0,4*ov**2,0,4*ov**2]).astype(numpy.float64)
newmom = numpy.zeros_like(mom)
con = .5 # convergence factor
xx1 = numpy.tile(numpy.linspace(-(Np-1)/2., (Np-1)/2., Np), (Np,1))
yy1 = numpy.copy(xx1.T)
for iter in range(256):
det = mom[3]*mom[5]-mom[4]**2
xx = xx1-mom[1]
yy = yy1-mom[2]
G = numpy.exp((-mom[5]*xx**2 + 2*mom[4]*xx*yy - mom[3]*yy**2)/2./det) * bigStamp
newmom[0] = numpy.sum(G)
newmom[1] = numpy.sum(G*xx)
newmom[2] = numpy.sum(G*yy)
newmom[3] = numpy.sum(G*xx**2)
newmom[4] = numpy.sum(G*xx*yy)
newmom[5] = numpy.sum(G*yy**2)
mom[0] = 2*newmom[0]
err = newmom[1:]/newmom[0]; err[-3:] -= mom[-3:]/2.
mom[1:] += err*con
return(numpy.array([mom[0], mom[1]/ov, mom[2]/ov, (mom[3]+mom[5])/ov**2, (mom[3]-mom[5])/(mom[3]+mom[5]), 2*mom[4]/(mom[3]+mom[5])]))
# returns chi^2
# var = read noise variance
def chi2_postage_stamp(obs, theory, var):
obs2 = numpy.maximum(obs+var, 1e-24)
return(numpy.sum(theory+var-obs2-obs2*numpy.log((theory+var)/obs2))*2)
| if addInfo.FastMode: c = dwl[i] | conditional_block |
spotutils.py | import numpy
import numpy.fft
import numpy.linalg
import copy
from astropy.io import fits
from scipy.interpolate import RectBivariateSpline
from scipy.signal import convolve
import offset_index
# some basic definitions
psSize = 9 # psSize x psSize postage stamps of stars
# zero padded RectBivariateSpline, if on
def RectBivariateSplineZero(y1,x1,map1,kx=1,ky=1):
return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)
y2 = numpy.zeros(numpy.size(y1)+2)
y2[1:-1] = y1
y2[0] = 2*y2[1]-y2[2]
y2[-1] = 2*y2[-2]-y2[-3]
x2 = numpy.zeros(numpy.size(x1)+2)
x2[1:-1] = x1
x2[0] = 2*x2[1]-x2[2]
x2[-1] = 2*x2[-2]-x2[-3]
map2 = numpy.zeros((numpy.size(y1)+2, numpy.size(x1)+2))
map2[1:-1,1:-1] = map1
return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)
class EmptyClass():
pass
# spectral energy distribution class
class SpectralEnergyDistribution():
# make an SED -- several options for type
def __init__(self, type, info):
self.type = type
self.info = copy.deepcopy(info)
# get Nlambda (photons/m^2/s/um) at lambda_ (um)
def Nlambda(self, lambda_):
# blackbody, info = [T (K), solidangle]
if self.type=='BB':
T = self.info[0]
x = 14387.769/lambda_/T # hc/(kTlambda)
return(2/lambda_**4*2.99792458e14*1e12*numpy.exp(-x)/(1.-numpy.exp(-x))*self.info[1])
# the 1e12 is the conversion from um^2 -> m^2
else:
print('ERROR: Invalid SED type')
exit()
# filter class
class Filter():
# make a filter -- several options for type
def __init__(self, type, info):
self.type = type
self.info = copy.deepcopy(info)
# get transmission
def | (self, lambda_):
# smoothed tophat
if self.type=='STH':
lmin = self.info[0]; dlmin = lmin*.02
lmax = self.info[1]; dlmax = lmax*.02
return((numpy.tanh((lambda_-lmin)/dlmin)-numpy.tanh((lambda_-lmax)/dlmax))/2.)
# interpolated file
# info shape (N,2) -- info[:,0] = wavelength, info[:,1] = throughput
elif self.type=='interp':
return(numpy.interp(lambda_, self.info[:,0], self.info[:,1]))
else:
print('ERROR: Invalid filter type')
exit()
# load mask files
maskfiles = EmptyClass()
maskfiles.D = 2292981.05344 # um
maskfiles.rim = []
maskfiles.full = []
maskfiles.i_rim = []
maskfiles.i_full = []
maskfiles.nSCA = 18
for k in range(18):
inFile = fits.open('pupils/SCA{:d}_rim_mask.fits'.format(k+1))
maskfiles.rim += [numpy.copy(inFile[0].data[::-1,:])]
inFile.close()
inFile = fits.open('pupils/SCA{:d}_full_mask.fits'.format(k+1))
maskfiles.full += [numpy.copy(inFile[0].data[::-1,:])]
inFile.close()
# normalize
maskfiles.rim[k] /= numpy.amax(maskfiles.rim[k])
maskfiles.full[k] /= numpy.amax(maskfiles.full[k])
N_in = maskfiles.N_in = 2048
x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.rim[k], kx=1, ky=1)
maskfiles.i_rim += [interp_spline]
interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.full[k], kx=1, ky=1)
maskfiles.i_full += [interp_spline]
# lower resolution masks
maskfiles.n_lores = 7
for ku in range(1,maskfiles.n_lores):
N2 = N_in//2**ku
x_in = numpy.linspace(-1+1/N2,1-1/N2,N2)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.rim[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
maskfiles.i_rim += [interp_spline]
interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.full[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
maskfiles.i_full += [interp_spline]
# SCA locations
sca = EmptyClass()
sca.size = 40.88 # mm
sca.x = numpy.asarray([-22.14, -22.29, -22.44, -66.42, -66.92, -67.42, -110.70, -111.48, -112.64,
22.14, 22.29, 22.44, 66.42, 66.92, 67.42, 110.70, 111.48, 112.64])
sca.y = numpy.asarray([12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06,
12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06])
sca.scale = 133.08
# reference Zernikes
ZernRef = EmptyClass()
ZernRef.data = numpy.loadtxt('pupils/zernike_ref.txt')[:,-22:] * 1.38
# filter data
FilterData = numpy.loadtxt('pupils/filter.dat')
FilterData[:,1:] /= numpy.pi/4.*(maskfiles.D/1e6)**2
# makes map of Zernikes of a given amplitude
# amp[0:Namp] = Z1 ... ZNamp
# on a spacing Ngrid (x, y = -(1-1/Ngrid) .. +(1-1/Ngrid) multiplied by scale)
#
def zernike_map_noll(amp, Ngrid, scale):
xx = numpy.tile(numpy.linspace(-1+1/Ngrid,1-1/Ngrid,Ngrid), (Ngrid,1))
yy = numpy.copy(xx.T)
rho = numpy.sqrt(xx**2+yy**2)*scale
phi = numpy.arctan2(yy,xx)
output = numpy.zeros((Ngrid,Ngrid))
nmax = 0
namp = numpy.size(amp)
while namp>(nmax+1)*(nmax+2)//2: nmax+=1
rpows = numpy.ones((nmax+1,Ngrid,Ngrid))
trigphi = numpy.ones((2*nmax+1,Ngrid,Ngrid))
for i in range(1,nmax+1): rpows[i,:,:] = rho**i
for i in range(0,nmax+1): trigphi[i,:,:] = numpy.cos(i*phi)
for i in range(1,nmax+1): trigphi[-i,:,:] = numpy.sin(i*phi)
# loop over Zernikes
for n in range(nmax+1):
for m in range(-n,n+1,2):
Z = numpy.zeros((Ngrid,Ngrid))
for k in range((n-abs(m))//2+1):
coef = (-1)**k * numpy.math.factorial(n-k)/numpy.math.factorial(k) \
/numpy.math.factorial((n-m)//2-k)/numpy.math.factorial((n+m)//2-k)
Z += coef * rpows[n-2*k,:,:]
#if m>=0:
# Z *= numpy.cos(m*phi)
#else:
# Z *= numpy.sin(-m*phi)
Z *= trigphi[m,:,:]
j = n*(n+1)//2 + abs(m)
if (-1)**j*(m+.5)<0 or m==0: j += 1
#print(n,m,j)
factor = numpy.sqrt(n+1)
if m!=0: factor *= numpy.sqrt(2)
if j<=namp: output += factor * amp[j-1] * Z
return(output)
# make annular mask of given obstruction (fraction) and scale
def make_mask_annulus(obs, Nstep, scale):
xx = numpy.tile(numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep), (Nstep,1))
yy = numpy.copy(xx.T)
rho = numpy.sqrt(xx**2+yy**2)*scale
return(numpy.where(numpy.logical_and(rho>=obs,rho<1),numpy.ones((Nstep,Nstep)),numpy.zeros((Nstep,Nstep))))
def test_zernike():
for k in range(36):
psi = numpy.zeros(36)
psi[k] = 1
N=5
M = zernike_map_noll(psi, N, N/(N-1))
print(' *** Zernike {:2d} ***'.format(k+1))
for j in range(N):
out = ''
for i in range(N):
out = out + ' {:10.5f}'.format(M[j,i])
print(out)
print('')
# psi is a vector of Zernikes, in wavelengths
# mask information: (currently none)
# scale = sampling (points per lambda/D)
# Nstep = # grid points
# output normalized to sum to 1
def mono_psf(psi, mask, scale, Nstep):
if hasattr(mask, 'N'):
if hasattr(mask, 'spline'):
interp_spline = mask.spline
else:
N_in = 2048
x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, mask.array, kx=1, ky=1)
x2 = numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep)*scale
y2 = numpy.copy(x2)
amplitude = interp_spline(y2,x2).astype(numpy.complex128) * make_mask_annulus(0, Nstep, scale)
else:
amplitude = make_mask_annulus(.32, Nstep, scale).astype(numpy.complex128)
amplitude *= numpy.exp(2j * numpy.pi * zernike_map_noll(psi, Nstep, scale))
amplitude = numpy.fft.ifft2(amplitude)
power = numpy.abs(amplitude)**2
# shift to center
newpower = numpy.zeros_like(power)
newpower[Nstep//2:Nstep,Nstep//2:Nstep] = power[0:Nstep//2,0:Nstep//2]
newpower[Nstep//2:Nstep,0:Nstep//2] = power[0:Nstep//2,Nstep//2:Nstep]
newpower[0:Nstep//2,Nstep//2:Nstep] = power[Nstep//2:Nstep,0:Nstep//2]
newpower[0:Nstep//2,0:Nstep//2] = power[Nstep//2:Nstep,Nstep//2:Nstep]
return(newpower/numpy.sum(newpower))
# helper function
def onescut(n):
array = numpy.ones((n+1))
array[0] = array[-1] = .5
return(array/n)
# Gaussian quadrature weights across a filter
# sed = spectral energy distribution
# filter = filter information (incl. bandpass)
# nOrder = order of polynomial (number of nodes)
# wlrange = [lmin,lmax,npts] in um
#
# returns wavelengths, weights
def gq_weights(sed, filter, nOrder, wlrange):
# unpack info
lmin = wlrange[0]; lmax = wlrange[1]; npts = wlrange[2]
# build integrals I_k = int x^k S(x) F(x) dx
x = numpy.linspace(lmin,lmax,npts)
c = numpy.zeros((npts))
for i in range(npts):
c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])
o = numpy.ones((npts))
I = numpy.zeros((2*nOrder))
lctr = numpy.mean(x)
for k in range(2*nOrder):
I[k] = numpy.sum(o*(x-lctr)**k*c)
# orthogonal polynomial p_n
# require sum_{j=0}^n coef_{n-j} I_{j+k} = 0 or
# sum_{j=0}^{n-1} coef_{n-j} I_{j+k} = -I_{n+k} for k = 0 .. n-1
coef = numpy.zeros((nOrder+1))
coef[0] = 1.
A = numpy.zeros((nOrder,nOrder))
for k in range(nOrder):
for j in range(nOrder):
A[k,j] = I[j+k]
coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]
p = numpy.poly1d(coef)
xroot = numpy.sort(numpy.real(p.r))
wroot = numpy.zeros_like(xroot)
pprime = numpy.polyder(p)
for i in range(nOrder):
px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i+1:])), r=True)
wroot[i] = numpy.sum(px.c[::-1]*I[:nOrder]) / pprime(xroot[i])
xroot = xroot + lctr
return xroot,wroot
# psi is a vector of Zernikes, in microns
# mask information: (currently none)
# sed = spectral energy distribution
# scale = sampling (points per lambda/D @ 1 um)
# Nstep = # grid points
# filter = filter information (incl. bandpass)
# addInfo = class for general additional information
# output normalized to sum to 1
def poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):
# integration steps
hard_lmin = 0.4
hard_lmax = 2.5
hard_Nl = 420
ilmin = hard_Nl-1; ilmax = 0
for il in range(1,hard_Nl):
wl = hard_lmin + il/hard_Nl*(hard_lmax-hard_lmin)
if filter.Tlambda(wl)>1e-4:
if il<ilmin:
ilmin=il
wlmin=wl
if il>ilmax:
ilmax=il
wlmax=wl
na = ilmin//6 + 1
nb = (hard_Nl-ilmax)//6 + 1
wl = numpy.concatenate((numpy.linspace(hard_lmin,wlmin,na+1), numpy.linspace(wlmin,wlmax,ilmax-ilmin+1), numpy.linspace(wlmax,hard_lmax,nb+1)))
dwl = numpy.concatenate(((wlmin-hard_lmin)*onescut(na), (wlmax-wlmin)*onescut(ilmax-ilmin), (hard_lmax-wlmax)*onescut(nb)))
#print(wl,dwl,numpy.size(wl),numpy.size(dwl))
# reduced coverage
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode:
wl, dwl = gq_weights(sed, filter, 10, [wlmin,wlmax,ilmax-ilmin+1])
# make output PSF
sumc = 0.
output = numpy.zeros((Nstep,Nstep))
for i in range(numpy.size(wl)):
c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode: c = dwl[i]
this_psi = numpy.copy(psi)/wl[i] # convert from um -> wavelengths of wavefront
sumc += c
output += c * mono_psf(this_psi, mask, scale_1um*wl[i], Nstep)
#print('{:6.4f} {:11.5E}'.format(wl[i],filter.Tlambda(wl[i])))
output /= sumc
return(output)
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# ovsamp = oversampling factor
# Nstep = number of samples in each axis
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters
# .par -> offset parameters
# addInfo = additional information class:
# .ctr -> centroid (dx,dy)
def oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):
# get information
parOn = False
if hasattr(offsets, 'par'): parOn = True
# get Zernikes in microns
ZR = ZernRef.data[4*(scanum-1):4*scanum,:]
wt_L = .5 - pos[0]/sca.size
wt_R = .5 + pos[0]/sca.size
wt_B = .5 - pos[1]/sca.size
wt_T = .5 + pos[1]/sca.size
psi = wt_T*wt_L*ZR[0,:] + wt_B*wt_L*ZR[1,:] + wt_B*wt_R*ZR[2,:] + wt_T*wt_R*ZR[3,:]
xf = sca.x[scanum-1] + pos[0]
yf = sca.y[scanum-1] + pos[1]
# Zernike offsets
if parOn:
psi[3] += offsets.par[offset_index.foc ]
psi[4] += offsets.par[offset_index.astig2]
psi[5] += offsets.par[offset_index.astig1]
psi[6] += offsets.par[offset_index.coma2]
psi[7] += offsets.par[offset_index.coma1]
psi[3] += (offsets.par[offset_index.focg1]*xf + offsets.par[offset_index.focg2]*yf)/sca.scale
scale_1um = ovsamp / (.11*numpy.pi/648000) / maskfiles.D
#print(scale_1um)
# filter curves
if filt=='K':
filter = Filter('STH', [1.95,2.30])
elif filt=='F':
filter = Filter('interp', FilterData[:,(0,7)])
elif filt=='H':
filter = Filter('interp', FilterData[:,(0,6)])
elif filt=='W':
filter = Filter('interp', FilterData[:,(0,5)])
elif filt=='J':
filter = Filter('interp', FilterData[:,(0,4)])
elif filt=='Y':
filter = Filter('interp', FilterData[:,(0,3)])
elif filt=='Z':
filter = Filter('interp', FilterData[:,(0,2)])
elif filt=='R':
filter = Filter('interp', FilterData[:,(0,1)])
else:
print('Error: unknown filter')
exit()
la = numpy.linspace(.4, 2.5, 2101)
fla = numpy.zeros(2101)
for i in range(2101): fla[i] = filter.Tlambda(la[i])
scale = scale_1um*numpy.sum(la*fla)/numpy.sum(fla)
# get the mask
mask = EmptyClass(); mask.N=1
imk = 0
while imk<maskfiles.n_lores-1 and Nstep/scale<maskfiles.N_in/2**(imk+1): imk+=1
#print(' *** ', Nstep, scale, scale/scale_1um, imk)
if filt=='F' or filt=='K':
mask.spline = maskfiles.i_full[scanum-1 + maskfiles.nSCA*imk]
else:
mask.spline = maskfiles.i_rim[scanum-1 + maskfiles.nSCA*imk]
# x & y offsets
if hasattr(addInfo, 'ctr'):
d = .5*(1-1/ovsamp)
psi[1:3] -= (addInfo.ctr+d) * ovsamp / scale_1um / 4.
output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)
# smooth
Cxx = Cyy = .09; Cxy = 0.
if parOn:
Cxx = .09 + offsets.par[offset_index.jxx ]
Cxy = offsets.par[offset_index.jxy ]
Cyy = .09 + offsets.par[offset_index.jyy ]
output_fft = numpy.fft.fft2(output)
kx = numpy.zeros((Nstep,Nstep))
ky = numpy.zeros((Nstep,Nstep))
for i in range(-Nstep//2, Nstep//2):
kx[:,i] = abs(i)
ky[i,:] = abs(i)
kx *= 2.*numpy.pi*ovsamp/Nstep
ky *= 2.*numpy.pi*ovsamp/Nstep
output_fft = output_fft * numpy.exp(-Cxx*kx**2/2. - Cyy*ky**2/2. - Cxy*kx*ky)
output = numpy.real(numpy.fft.ifft2(output_fft))
return(output)
# parameters for next couple of functions
N_STD = 1024 # must be a multiple of 4
OV_STD = 8
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters (placeholder)
# addInfo = additional information class:
# .F -> total counts (in e)
# .ctr -> centroid (dx,dy)
# .many -> @ 5x5 grid of offsets
#
# .bfe = add bfe (can include .bfe_a, .bfe_aplus)
#
# .bfe_overwrite => special mode to compute BFE with time dependent PSF
# .stamp_in = input stamp (so compute BFE from stamp_in *acting on* this PSF)
def postage_stamp(sed, filt, scanum, pos, offsets, addInfo):
N = N_STD # must be even
ov = OV_STD
if hasattr(addInfo,'many'):
ov = addInfo.force_ov
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode:
N = N//2
bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo) * addInfo.F
out = numpy.zeros((psSize, psSize))
for i in range(psSize):
x = N//2+(i-psSize//2)*ov
for j in range(psSize):
y = N//2+(j-psSize//2)*ov
out[j,i] += numpy.sum(bigStamp[y:y+ov,x:x+ov])
if hasattr(addInfo, 'vtpe'):
out[j,i] += addInfo.vtpe * numpy.sum(bigStamp[y+ov:y+2*ov,x:x+ov])
if hasattr(addInfo,'many'):
out = numpy.zeros((25, psSize, psSize))
for i in range(psSize):
x = N//2+(i-psSize//2)*ov
for j in range(psSize):
y = N//2+(j-psSize//2)*ov
for k in range(25):
dy = k%5 - 2; dx = k//5 - 2
out[k,j,i] += numpy.sum(bigStamp[y+dy:y+dy+ov,x+dx:x+dx+ov])
# BFE?
if hasattr(addInfo, 'bfe'):
if hasattr(addInfo,'many'):
print('Error -- cannot do both bfe and many in postage_stamp')
exit()
dout = numpy.zeros_like(out)
# horizontal BFE
ah = 0
if hasattr(addInfo, 'bfe_a'): ah += addInfo.bfe_a
if hasattr(addInfo, 'bfe_aplus'): ah += addInfo.bfe_aplus
for i in range(psSize-1):
x = N//2+(i-psSize//2)*ov
for j in range(psSize):
y = N//2+(j-psSize//2)*ov
shift = ov * ah * (out[j,i+1]-out[j,i]) / 2. # in sub-pixels, average over exposure
if hasattr(addInfo, 'bfe_overwrite'): shift = ov * ah * (addInfo.stamp_in[j,i+1]-addInfo.stamp_in[j,i]) / 2.
mflux = numpy.sum(bigStamp[y:y+ov,x+ov-1:x+ov+1])/2.
dout[j,i] += shift*mflux
dout[j,i+1] -= shift*mflux
# vertical BFE
av = 0
if hasattr(addInfo, 'bfe_a'): av += addInfo.bfe_a
if hasattr(addInfo, 'bfe_aplus'): av -= addInfo.bfe_aplus
for i in range(psSize):
x = N//2+(i-psSize//2)*ov
for j in range(psSize-1):
y = N//2+(j-psSize//2)*ov
shift = ov * av * (out[j+1,i]-out[j,i]) / 2. # in sub-pixels, average over exposure
if hasattr(addInfo, 'bfe_overwrite'): shift = ov * av * (addInfo.stamp_in[j+1,i]-addInfo.stamp_in[j,i]) / 2.
mflux = numpy.sum(bigStamp[y+ov-1:y+ov+1,x:x+ov])/2.
dout[j,i] += shift*mflux
dout[j+1,i] -= shift*mflux
out+=dout
if hasattr(addInfo, 'bfe_overwrite'): out=dout
return(out)
#
# same input format but returns moments of the PSF
# A, xc, yc, T, e1, e2
def psfmoments(sed, filt, scanum, pos, offsets, addInfo):
N = N_STD # must be even
ov = OV_STD
if hasattr(addInfo,'many'):
ov = addInfo.force_ov
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode:
N = N//2
addInfoX = copy.deepcopy(addInfo); addInfoX.ctr = numpy.zeros((2)); addInfoX.F = 1.
bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)
bigStamp = convolve(bigStamp, numpy.ones((ov,ov)), mode='full', method='direct')/ov**2
Np = N+ov-1
# moment format: A,x,y,Cxx,Cxy,Cyy
mom = numpy.asarray([1,0,0,4*ov**2,0,4*ov**2]).astype(numpy.float64)
newmom = numpy.zeros_like(mom)
con = .5 # convergence factor
xx1 = numpy.tile(numpy.linspace(-(Np-1)/2., (Np-1)/2., Np), (Np,1))
yy1 = numpy.copy(xx1.T)
for iter in range(256):
det = mom[3]*mom[5]-mom[4]**2
xx = xx1-mom[1]
yy = yy1-mom[2]
G = numpy.exp((-mom[5]*xx**2 + 2*mom[4]*xx*yy - mom[3]*yy**2)/2./det) * bigStamp
newmom[0] = numpy.sum(G)
newmom[1] = numpy.sum(G*xx)
newmom[2] = numpy.sum(G*yy)
newmom[3] = numpy.sum(G*xx**2)
newmom[4] = numpy.sum(G*xx*yy)
newmom[5] = numpy.sum(G*yy**2)
mom[0] = 2*newmom[0]
err = newmom[1:]/newmom[0]; err[-3:] -= mom[-3:]/2.
mom[1:] += err*con
return(numpy.array([mom[0], mom[1]/ov, mom[2]/ov, (mom[3]+mom[5])/ov**2, (mom[3]-mom[5])/(mom[3]+mom[5]), 2*mom[4]/(mom[3]+mom[5])]))
# returns chi^2
# var = read noise variance
def chi2_postage_stamp(obs, theory, var):
obs2 = numpy.maximum(obs+var, 1e-24)
return(numpy.sum(theory+var-obs2-obs2*numpy.log((theory+var)/obs2))*2)
| Tlambda | identifier_name |
pim_rpf_hash_bag.pb.go | /*
Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: pim_rpf_hash_bag.proto
package cisco_ios_xr_ipv4_pim_oper_pim_standby_vrfs_vrf_safs_saf_rpf_hash_sources_rpf_hash_source
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type PimRpfHashBag_KEYS struct {
VrfName string `protobuf:"bytes,1,opt,name=vrf_name,json=vrfName,proto3" json:"vrf_name,omitempty"`
SafName string `protobuf:"bytes,2,opt,name=saf_name,json=safName,proto3" json:"saf_name,omitempty"`
TopologyName string `protobuf:"bytes,3,opt,name=topology_name,json=topologyName,proto3" json:"topology_name,omitempty"`
SourceAddress string `protobuf:"bytes,4,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"`
Mofrr uint32 `protobuf:"varint,5,opt,name=mofrr,proto3" json:"mofrr,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PimRpfHashBag_KEYS) Reset() { *m = PimRpfHashBag_KEYS{} }
func (m *PimRpfHashBag_KEYS) String() string { return proto.CompactTextString(m) }
func (*PimRpfHashBag_KEYS) ProtoMessage() {}
func (*PimRpfHashBag_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_9bbebdbd6b24e885, []int{0}
}
func (m *PimRpfHashBag_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PimRpfHashBag_KEYS.Unmarshal(m, b)
}
func (m *PimRpfHashBag_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PimRpfHashBag_KEYS.Marshal(b, m, deterministic)
}
func (m *PimRpfHashBag_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_PimRpfHashBag_KEYS.Merge(m, src)
}
func (m *PimRpfHashBag_KEYS) XXX_Size() int {
return xxx_messageInfo_PimRpfHashBag_KEYS.Size(m)
}
func (m *PimRpfHashBag_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_PimRpfHashBag_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_PimRpfHashBag_KEYS proto.InternalMessageInfo
func (m *PimRpfHashBag_KEYS) GetVrfName() string {
if m != nil {
return m.VrfName
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetSafName() string {
if m != nil {
return m.SafName
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetTopologyName() string {
if m != nil |
return ""
}
func (m *PimRpfHashBag_KEYS) GetSourceAddress() string {
if m != nil {
return m.SourceAddress
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetMofrr() uint32 {
if m != nil {
return m.Mofrr
}
return 0
}
type PimAddrtype struct {
AfName string `protobuf:"bytes,1,opt,name=af_name,json=afName,proto3" json:"af_name,omitempty"`
Ipv4Address string `protobuf:"bytes,2,opt,name=ipv4_address,json=ipv4Address,proto3" json:"ipv4_address,omitempty"`
Ipv6Address string `protobuf:"bytes,3,opt,name=ipv6_address,json=ipv6Address,proto3" json:"ipv6_address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PimAddrtype) Reset() { *m = PimAddrtype{} }
func (m *PimAddrtype) String() string { return proto.CompactTextString(m) }
func (*PimAddrtype) ProtoMessage() {}
func (*PimAddrtype) Descriptor() ([]byte, []int) {
return fileDescriptor_9bbebdbd6b24e885, []int{1}
}
func (m *PimAddrtype) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PimAddrtype.Unmarshal(m, b)
}
func (m *PimAddrtype) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PimAddrtype.Marshal(b, m, deterministic)
}
func (m *PimAddrtype) XXX_Merge(src proto.Message) {
xxx_messageInfo_PimAddrtype.Merge(m, src)
}
func (m *PimAddrtype) XXX_Size() int {
return xxx_messageInfo_PimAddrtype.Size(m)
}
func (m *PimAddrtype) XXX_DiscardUnknown() {
xxx_messageInfo_PimAddrtype.DiscardUnknown(m)
}
var xxx_messageInfo_PimAddrtype proto.InternalMessageInfo
func (m *PimAddrtype) GetAfName() string {
if m != nil {
return m.AfName
}
return ""
}
func (m *PimAddrtype) GetIpv4Address() string {
if m != nil {
return m.Ipv4Address
}
return ""
}
func (m *PimAddrtype) GetIpv6Address() string {
if m != nil {
return m.Ipv6Address
}
return ""
}
type PimRpfHashBag struct {
NextHopMultipathEnabled bool `protobuf:"varint,50,opt,name=next_hop_multipath_enabled,json=nextHopMultipathEnabled,proto3" json:"next_hop_multipath_enabled,omitempty"`
NextHopAddress *PimAddrtype `protobuf:"bytes,51,opt,name=next_hop_address,json=nextHopAddress,proto3" json:"next_hop_address,omitempty"`
NextHopInterface string `protobuf:"bytes,52,opt,name=next_hop_interface,json=nextHopInterface,proto3" json:"next_hop_interface,omitempty"`
SecondaryNextHopAddress *PimAddrtype `protobuf:"bytes,53,opt,name=secondary_next_hop_address,json=secondaryNextHopAddress,proto3" json:"secondary_next_hop_address,omitempty"`
SecondaryNextHopInterface string `protobuf:"bytes,54,opt,name=secondary_next_hop_interface,json=secondaryNextHopInterface,proto3" json:"secondary_next_hop_interface,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PimRpfHashBag) Reset() { *m = PimRpfHashBag{} }
func (m *PimRpfHashBag) String() string { return proto.CompactTextString(m) }
func (*PimRpfHashBag) ProtoMessage() {}
func (*PimRpfHashBag) Descriptor() ([]byte, []int) {
return fileDescriptor_9bbebdbd6b24e885, []int{2}
}
func (m *PimRpfHashBag) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PimRpfHashBag.Unmarshal(m, b)
}
func (m *PimRpfHashBag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PimRpfHashBag.Marshal(b, m, deterministic)
}
func (m *PimRpfHashBag) XXX_Merge(src proto.Message) {
xxx_messageInfo_PimRpfHashBag.Merge(m, src)
}
func (m *PimRpfHashBag) XXX_Size() int {
return xxx_messageInfo_PimRpfHashBag.Size(m)
}
func (m *PimRpfHashBag) XXX_DiscardUnknown() {
xxx_messageInfo_PimRpfHashBag.DiscardUnknown(m)
}
var xxx_messageInfo_PimRpfHashBag proto.InternalMessageInfo
func (m *PimRpfHashBag) GetNextHopMultipathEnabled() bool {
if m != nil {
return m.NextHopMultipathEnabled
}
return false
}
func (m *PimRpfHashBag) GetNextHopAddress() *PimAddrtype {
if m != nil {
return m.NextHopAddress
}
return nil
}
func (m *PimRpfHashBag) GetNextHopInterface() string {
if m != nil {
return m.NextHopInterface
}
return ""
}
func (m *PimRpfHashBag) GetSecondaryNextHopAddress() *PimAddrtype {
if m != nil {
return m.SecondaryNextHopAddress
}
return nil
}
func (m *PimRpfHashBag) GetSecondaryNextHopInterface() string {
if m != nil {
return m.SecondaryNextHopInterface
}
return ""
}
func init() {
proto.RegisterType((*PimRpfHashBag_KEYS)(nil), "cisco_ios_xr_ipv4_pim_oper.pim.standby.vrfs.vrf.safs.saf.rpf_hash_sources.rpf_hash_source.pim_rpf_hash_bag_KEYS")
proto.RegisterType((*PimAddrtype)(nil), "cisco_ios_xr_ipv4_pim_oper.pim.standby.vrfs.vrf.safs.saf.rpf_hash_sources.rpf_hash_source.pim_addrtype")
proto.RegisterType((*PimRpfHashBag)(nil), "cisco_ios_xr_ipv4_pim_oper.pim.standby.vrfs.vrf.safs.saf.rpf_hash_sources.rpf_hash_source.pim_rpf_hash_bag")
}
func init() { proto.RegisterFile("pim_rpf_hash_bag.proto", fileDescriptor_9bbebdbd6b24e885) }
var fileDescriptor_9bbebdbd6b24e885 = []byte{
// 407 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x53, 0xbd, 0xce, 0xd3, 0x30,
0x00, 0x94, 0x29, 0xfd, 0xc1, 0xfd, 0x51, 0x65, 0x01, 0x4d, 0x2b, 0x86, 0x50, 0x84, 0x94, 0x01,
0x79, 0x68, 0x4b, 0x17, 0x06, 0xc4, 0x50, 0x09, 0x84, 0xe8, 0x10, 0xa6, 0x4e, 0x96, 0x93, 0x38,
0x4d, 0xa4, 0x26, 0xb6, 0x6c, 0x37, 0x6a, 0x1e, 0x83, 0x77, 0x60, 0xe7, 0xf9, 0xd8, 0x90, 0xe3,
0x24, 0xd0, 0xc0, 0xca, 0xb7, 0x58, 0xca, 0xdd, 0x39, 0xf7, 0x13, 0x05, 0x3e, 0x17, 0x69, 0x46,
0xa4, 0x88, 0x49, 0x42, 0x55, 0x42, 0x02, 0x7a, 0xc6, 0x42, 0x72, 0xcd, 0xd1, 0x29, 0x4c, 0x55,
0xc8, 0x49, 0xca, 0x15, 0xb9, 0x49, 0x92, 0x8a, 0x62, 0x47, 0x8c, 0x92, 0x0b, 0x26, 0xb1, 0x48,
0x33, 0xac, 0x34, 0xcd, 0xa3, 0xa0, 0xc4, 0x85, 0x8c, 0x95, 0x39, 0xb0, 0xa2, 0xb1, 0x32, 0x07,
0x6e, 0x5f, 0xa6, 0xf8, 0x55, 0x86, 0x4c, 0x75, 0x81, 0xf5, 0x0f, 0x00, 0x9f, 0x75, 0x5d, 0xc9,
0xe7, 0xc3, 0xe9, 0x2b, 0x5a, 0xc2, 0x51, 0x21, 0x63, 0x92, 0xd3, 0x8c, 0x39, 0xc0, 0x05, 0xde,
0x13, 0x7f, 0x58, 0xc8, 0xf8, 0x48, 0x33, 0x66, 0x28, 0x45, 0x6b, 0xea, 0x91, 0xa5, 0x14, 0xb5,
0xd4, 0x2b, 0x38, 0xd5, 0x5c, 0xf0, 0x0b, 0x3f, 0x97, 0x96, 0xef, 0x55, 0xfc, 0xa4, 0x01, 0x2b,
0xd1, 0x6b, 0x38, 0xb3, 0xf6, 0x84, 0x46, 0x91, 0x64, 0x4a, 0x39, 0x8f, 0x2b, 0xd5, 0xd4, 0xa2,
0x1f, 0x2c, 0x88, 0x9e, 0xc2, 0x7e, 0xc6, 0x63, 0x29, 0x9d, 0xbe, 0x0b, 0xbc, 0xa9, 0x6f, 0x1f,
0xd6, 0x19, 0x9c, 0x98, 0xc0, 0xe6, 0xa6, 0x2e, 0x05, 0x43, 0x0b, 0x38, 0xa4, 0x77, 0x31, 0x07,
0x75, 0x94, 0x97, 0x70, 0x52, 0x4d, 0xd5, 0x78, 0xd8, 0xa4, 0x63, 0x83, 0x35, 0x0e, 0x56, 0xb2,
0x6f, 0x25, 0xbd, 0x56, 0xb2, 0xaf, 0x25, 0xeb, 0x9f, 0x3d, 0x38, 0xef, 0x0e, 0x84, 0xde, 0xc1,
0x55, 0xce, 0x6e, 0x9a, 0x24, 0x5c, 0x90, 0xec, 0x7a, 0xd1, 0xa9, 0xa0, 0x3a, 0x21, 0x2c, 0xa7,
0xc1, 0x85, 0x45, 0xce, 0xc6, 0x05, 0xde, 0xc8, 0x5f, 0x18, 0xc5, 0x47, 0x2e, 0xbe, 0x34, 0xfc,
0xc1, 0xd2, 0xe8, 0x1b, 0x80, 0xf3, 0xf6, 0x76, 0xe3, 0xbc, 0x75, 0x81, 0x37, 0xde, 0x9c, 0xf1,
0x7f, 0xfb, 0xd2, 0xf8, 0xcf, 0xd1, 0xfc, 0x59, 0x1d, 0xae, 0x19, 0xe2, 0x0d, 0x44, 0x6d, 0xa4,
0x34, 0xd7, 0x4c, 0xc6, 0x34, 0x64, 0xce, 0xae, 0x9a, 0x63, 0x5e, 0x6b, 0x3f, 0x35, 0x38, 0xfa,
0x0e, 0xe0, 0x4a, 0xb1, 0x90, 0xe7, 0x11, 0x95, 0x25, 0xf9, 0xab, 0xcb, 0xdb, 0x87, 0xed, 0xb2,
0x68, 0xa3, 0x1c, 0xef, 0x4b, 0xbd, 0x87, 0x2f, 0xfe, 0x91, 0xf2, 0x77, 0xbd, 0x7d, 0x55, 0x6f,
0xd9, 0xbd, 0xde, 0xf6, 0x0c, 0x06, 0xd5, 0xef, 0xb7, 0xfd, 0x15, 0x00, 0x00, 0xff, 0xff, 0x30,
0x45, 0xe8, 0xb6, 0x98, 0x03, 0x00, 0x00,
}
| {
return m.TopologyName
} | conditional_block |
pim_rpf_hash_bag.pb.go | /*
Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: pim_rpf_hash_bag.proto
package cisco_ios_xr_ipv4_pim_oper_pim_standby_vrfs_vrf_safs_saf_rpf_hash_sources_rpf_hash_source
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type PimRpfHashBag_KEYS struct {
VrfName string `protobuf:"bytes,1,opt,name=vrf_name,json=vrfName,proto3" json:"vrf_name,omitempty"`
SafName string `protobuf:"bytes,2,opt,name=saf_name,json=safName,proto3" json:"saf_name,omitempty"`
TopologyName string `protobuf:"bytes,3,opt,name=topology_name,json=topologyName,proto3" json:"topology_name,omitempty"`
SourceAddress string `protobuf:"bytes,4,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"`
Mofrr uint32 `protobuf:"varint,5,opt,name=mofrr,proto3" json:"mofrr,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PimRpfHashBag_KEYS) Reset() { *m = PimRpfHashBag_KEYS{} }
func (m *PimRpfHashBag_KEYS) String() string { return proto.CompactTextString(m) }
func (*PimRpfHashBag_KEYS) ProtoMessage() {}
func (*PimRpfHashBag_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_9bbebdbd6b24e885, []int{0}
}
func (m *PimRpfHashBag_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PimRpfHashBag_KEYS.Unmarshal(m, b)
}
func (m *PimRpfHashBag_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PimRpfHashBag_KEYS.Marshal(b, m, deterministic)
}
func (m *PimRpfHashBag_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_PimRpfHashBag_KEYS.Merge(m, src)
}
func (m *PimRpfHashBag_KEYS) XXX_Size() int {
return xxx_messageInfo_PimRpfHashBag_KEYS.Size(m)
}
func (m *PimRpfHashBag_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_PimRpfHashBag_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_PimRpfHashBag_KEYS proto.InternalMessageInfo
func (m *PimRpfHashBag_KEYS) GetVrfName() string {
if m != nil {
return m.VrfName
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetSafName() string {
if m != nil {
return m.SafName
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetTopologyName() string {
if m != nil {
return m.TopologyName
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetSourceAddress() string {
if m != nil {
return m.SourceAddress
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetMofrr() uint32 {
if m != nil {
return m.Mofrr
}
return 0
}
type PimAddrtype struct {
AfName string `protobuf:"bytes,1,opt,name=af_name,json=afName,proto3" json:"af_name,omitempty"`
Ipv4Address string `protobuf:"bytes,2,opt,name=ipv4_address,json=ipv4Address,proto3" json:"ipv4_address,omitempty"`
Ipv6Address string `protobuf:"bytes,3,opt,name=ipv6_address,json=ipv6Address,proto3" json:"ipv6_address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PimAddrtype) Reset() { *m = PimAddrtype{} }
func (m *PimAddrtype) String() string { return proto.CompactTextString(m) }
func (*PimAddrtype) ProtoMessage() {}
func (*PimAddrtype) Descriptor() ([]byte, []int) {
return fileDescriptor_9bbebdbd6b24e885, []int{1}
}
func (m *PimAddrtype) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PimAddrtype.Unmarshal(m, b)
}
func (m *PimAddrtype) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PimAddrtype.Marshal(b, m, deterministic)
}
func (m *PimAddrtype) XXX_Merge(src proto.Message) {
xxx_messageInfo_PimAddrtype.Merge(m, src)
}
func (m *PimAddrtype) XXX_Size() int {
return xxx_messageInfo_PimAddrtype.Size(m)
}
func (m *PimAddrtype) XXX_DiscardUnknown() {
xxx_messageInfo_PimAddrtype.DiscardUnknown(m)
}
var xxx_messageInfo_PimAddrtype proto.InternalMessageInfo
func (m *PimAddrtype) GetAfName() string {
if m != nil {
return m.AfName
}
return ""
}
func (m *PimAddrtype) GetIpv4Address() string |
func (m *PimAddrtype) GetIpv6Address() string {
if m != nil {
return m.Ipv6Address
}
return ""
}
type PimRpfHashBag struct {
NextHopMultipathEnabled bool `protobuf:"varint,50,opt,name=next_hop_multipath_enabled,json=nextHopMultipathEnabled,proto3" json:"next_hop_multipath_enabled,omitempty"`
NextHopAddress *PimAddrtype `protobuf:"bytes,51,opt,name=next_hop_address,json=nextHopAddress,proto3" json:"next_hop_address,omitempty"`
NextHopInterface string `protobuf:"bytes,52,opt,name=next_hop_interface,json=nextHopInterface,proto3" json:"next_hop_interface,omitempty"`
SecondaryNextHopAddress *PimAddrtype `protobuf:"bytes,53,opt,name=secondary_next_hop_address,json=secondaryNextHopAddress,proto3" json:"secondary_next_hop_address,omitempty"`
SecondaryNextHopInterface string `protobuf:"bytes,54,opt,name=secondary_next_hop_interface,json=secondaryNextHopInterface,proto3" json:"secondary_next_hop_interface,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PimRpfHashBag) Reset() { *m = PimRpfHashBag{} }
func (m *PimRpfHashBag) String() string { return proto.CompactTextString(m) }
func (*PimRpfHashBag) ProtoMessage() {}
func (*PimRpfHashBag) Descriptor() ([]byte, []int) {
return fileDescriptor_9bbebdbd6b24e885, []int{2}
}
func (m *PimRpfHashBag) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PimRpfHashBag.Unmarshal(m, b)
}
func (m *PimRpfHashBag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PimRpfHashBag.Marshal(b, m, deterministic)
}
func (m *PimRpfHashBag) XXX_Merge(src proto.Message) {
xxx_messageInfo_PimRpfHashBag.Merge(m, src)
}
func (m *PimRpfHashBag) XXX_Size() int {
return xxx_messageInfo_PimRpfHashBag.Size(m)
}
func (m *PimRpfHashBag) XXX_DiscardUnknown() {
xxx_messageInfo_PimRpfHashBag.DiscardUnknown(m)
}
var xxx_messageInfo_PimRpfHashBag proto.InternalMessageInfo
func (m *PimRpfHashBag) GetNextHopMultipathEnabled() bool {
if m != nil {
return m.NextHopMultipathEnabled
}
return false
}
func (m *PimRpfHashBag) GetNextHopAddress() *PimAddrtype {
if m != nil {
return m.NextHopAddress
}
return nil
}
func (m *PimRpfHashBag) GetNextHopInterface() string {
if m != nil {
return m.NextHopInterface
}
return ""
}
func (m *PimRpfHashBag) GetSecondaryNextHopAddress() *PimAddrtype {
if m != nil {
return m.SecondaryNextHopAddress
}
return nil
}
func (m *PimRpfHashBag) GetSecondaryNextHopInterface() string {
if m != nil {
return m.SecondaryNextHopInterface
}
return ""
}
func init() {
proto.RegisterType((*PimRpfHashBag_KEYS)(nil), "cisco_ios_xr_ipv4_pim_oper.pim.standby.vrfs.vrf.safs.saf.rpf_hash_sources.rpf_hash_source.pim_rpf_hash_bag_KEYS")
proto.RegisterType((*PimAddrtype)(nil), "cisco_ios_xr_ipv4_pim_oper.pim.standby.vrfs.vrf.safs.saf.rpf_hash_sources.rpf_hash_source.pim_addrtype")
proto.RegisterType((*PimRpfHashBag)(nil), "cisco_ios_xr_ipv4_pim_oper.pim.standby.vrfs.vrf.safs.saf.rpf_hash_sources.rpf_hash_source.pim_rpf_hash_bag")
}
func init() { proto.RegisterFile("pim_rpf_hash_bag.proto", fileDescriptor_9bbebdbd6b24e885) }
var fileDescriptor_9bbebdbd6b24e885 = []byte{
// 407 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x53, 0xbd, 0xce, 0xd3, 0x30,
0x00, 0x94, 0x29, 0xfd, 0xc1, 0xfd, 0x51, 0x65, 0x01, 0x4d, 0x2b, 0x86, 0x50, 0x84, 0x94, 0x01,
0x79, 0x68, 0x4b, 0x17, 0x06, 0xc4, 0x50, 0x09, 0x84, 0xe8, 0x10, 0xa6, 0x4e, 0x96, 0x93, 0x38,
0x4d, 0xa4, 0x26, 0xb6, 0x6c, 0x37, 0x6a, 0x1e, 0x83, 0x77, 0x60, 0xe7, 0xf9, 0xd8, 0x90, 0xe3,
0x24, 0xd0, 0xc0, 0xca, 0xb7, 0x58, 0xca, 0xdd, 0x39, 0xf7, 0x13, 0x05, 0x3e, 0x17, 0x69, 0x46,
0xa4, 0x88, 0x49, 0x42, 0x55, 0x42, 0x02, 0x7a, 0xc6, 0x42, 0x72, 0xcd, 0xd1, 0x29, 0x4c, 0x55,
0xc8, 0x49, 0xca, 0x15, 0xb9, 0x49, 0x92, 0x8a, 0x62, 0x47, 0x8c, 0x92, 0x0b, 0x26, 0xb1, 0x48,
0x33, 0xac, 0x34, 0xcd, 0xa3, 0xa0, 0xc4, 0x85, 0x8c, 0x95, 0x39, 0xb0, 0xa2, 0xb1, 0x32, 0x07,
0x6e, 0x5f, 0xa6, 0xf8, 0x55, 0x86, 0x4c, 0x75, 0x81, 0xf5, 0x0f, 0x00, 0x9f, 0x75, 0x5d, 0xc9,
0xe7, 0xc3, 0xe9, 0x2b, 0x5a, 0xc2, 0x51, 0x21, 0x63, 0x92, 0xd3, 0x8c, 0x39, 0xc0, 0x05, 0xde,
0x13, 0x7f, 0x58, 0xc8, 0xf8, 0x48, 0x33, 0x66, 0x28, 0x45, 0x6b, 0xea, 0x91, 0xa5, 0x14, 0xb5,
0xd4, 0x2b, 0x38, 0xd5, 0x5c, 0xf0, 0x0b, 0x3f, 0x97, 0x96, 0xef, 0x55, 0xfc, 0xa4, 0x01, 0x2b,
0xd1, 0x6b, 0x38, 0xb3, 0xf6, 0x84, 0x46, 0x91, 0x64, 0x4a, 0x39, 0x8f, 0x2b, 0xd5, 0xd4, 0xa2,
0x1f, 0x2c, 0x88, 0x9e, 0xc2, 0x7e, 0xc6, 0x63, 0x29, 0x9d, 0xbe, 0x0b, 0xbc, 0xa9, 0x6f, 0x1f,
0xd6, 0x19, 0x9c, 0x98, 0xc0, 0xe6, 0xa6, 0x2e, 0x05, 0x43, 0x0b, 0x38, 0xa4, 0x77, 0x31, 0x07,
0x75, 0x94, 0x97, 0x70, 0x52, 0x4d, 0xd5, 0x78, 0xd8, 0xa4, 0x63, 0x83, 0x35, 0x0e, 0x56, 0xb2,
0x6f, 0x25, 0xbd, 0x56, 0xb2, 0xaf, 0x25, 0xeb, 0x9f, 0x3d, 0x38, 0xef, 0x0e, 0x84, 0xde, 0xc1,
0x55, 0xce, 0x6e, 0x9a, 0x24, 0x5c, 0x90, 0xec, 0x7a, 0xd1, 0xa9, 0xa0, 0x3a, 0x21, 0x2c, 0xa7,
0xc1, 0x85, 0x45, 0xce, 0xc6, 0x05, 0xde, 0xc8, 0x5f, 0x18, 0xc5, 0x47, 0x2e, 0xbe, 0x34, 0xfc,
0xc1, 0xd2, 0xe8, 0x1b, 0x80, 0xf3, 0xf6, 0x76, 0xe3, 0xbc, 0x75, 0x81, 0x37, 0xde, 0x9c, 0xf1,
0x7f, 0xfb, 0xd2, 0xf8, 0xcf, 0xd1, 0xfc, 0x59, 0x1d, 0xae, 0x19, 0xe2, 0x0d, 0x44, 0x6d, 0xa4,
0x34, 0xd7, 0x4c, 0xc6, 0x34, 0x64, 0xce, 0xae, 0x9a, 0x63, 0x5e, 0x6b, 0x3f, 0x35, 0x38, 0xfa,
0x0e, 0xe0, 0x4a, 0xb1, 0x90, 0xe7, 0x11, 0x95, 0x25, 0xf9, 0xab, 0xcb, 0xdb, 0x87, 0xed, 0xb2,
0x68, 0xa3, 0x1c, 0xef, 0x4b, 0xbd, 0x87, 0x2f, 0xfe, 0x91, 0xf2, 0x77, 0xbd, 0x7d, 0x55, 0x6f,
0xd9, 0xbd, 0xde, 0xf6, 0x0c, 0x06, 0xd5, 0xef, 0xb7, 0xfd, 0x15, 0x00, 0x00, 0xff, 0xff, 0x30,
0x45, 0xe8, 0xb6, 0x98, 0x03, 0x00, 0x00,
}
| {
if m != nil {
return m.Ipv4Address
}
return ""
} | identifier_body |
pim_rpf_hash_bag.pb.go | /*
Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: pim_rpf_hash_bag.proto
package cisco_ios_xr_ipv4_pim_oper_pim_standby_vrfs_vrf_safs_saf_rpf_hash_sources_rpf_hash_source
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type PimRpfHashBag_KEYS struct {
VrfName string `protobuf:"bytes,1,opt,name=vrf_name,json=vrfName,proto3" json:"vrf_name,omitempty"`
SafName string `protobuf:"bytes,2,opt,name=saf_name,json=safName,proto3" json:"saf_name,omitempty"`
TopologyName string `protobuf:"bytes,3,opt,name=topology_name,json=topologyName,proto3" json:"topology_name,omitempty"`
SourceAddress string `protobuf:"bytes,4,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"`
Mofrr uint32 `protobuf:"varint,5,opt,name=mofrr,proto3" json:"mofrr,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PimRpfHashBag_KEYS) Reset() { *m = PimRpfHashBag_KEYS{} }
func (m *PimRpfHashBag_KEYS) String() string { return proto.CompactTextString(m) }
func (*PimRpfHashBag_KEYS) ProtoMessage() {}
func (*PimRpfHashBag_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_9bbebdbd6b24e885, []int{0}
}
func (m *PimRpfHashBag_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PimRpfHashBag_KEYS.Unmarshal(m, b)
}
func (m *PimRpfHashBag_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PimRpfHashBag_KEYS.Marshal(b, m, deterministic)
}
func (m *PimRpfHashBag_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_PimRpfHashBag_KEYS.Merge(m, src)
}
func (m *PimRpfHashBag_KEYS) XXX_Size() int {
return xxx_messageInfo_PimRpfHashBag_KEYS.Size(m)
}
func (m *PimRpfHashBag_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_PimRpfHashBag_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_PimRpfHashBag_KEYS proto.InternalMessageInfo
func (m *PimRpfHashBag_KEYS) GetVrfName() string {
if m != nil {
return m.VrfName
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetSafName() string {
if m != nil {
return m.SafName
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetTopologyName() string {
if m != nil {
return m.TopologyName
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetSourceAddress() string {
if m != nil {
return m.SourceAddress
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetMofrr() uint32 {
if m != nil {
return m.Mofrr
}
return 0
}
type PimAddrtype struct {
AfName string `protobuf:"bytes,1,opt,name=af_name,json=afName,proto3" json:"af_name,omitempty"`
Ipv4Address string `protobuf:"bytes,2,opt,name=ipv4_address,json=ipv4Address,proto3" json:"ipv4_address,omitempty"`
Ipv6Address string `protobuf:"bytes,3,opt,name=ipv6_address,json=ipv6Address,proto3" json:"ipv6_address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PimAddrtype) Reset() { *m = PimAddrtype{} }
func (m *PimAddrtype) String() string { return proto.CompactTextString(m) }
func (*PimAddrtype) ProtoMessage() {}
func (*PimAddrtype) Descriptor() ([]byte, []int) {
return fileDescriptor_9bbebdbd6b24e885, []int{1}
}
func (m *PimAddrtype) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PimAddrtype.Unmarshal(m, b)
}
func (m *PimAddrtype) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PimAddrtype.Marshal(b, m, deterministic)
}
func (m *PimAddrtype) XXX_Merge(src proto.Message) {
xxx_messageInfo_PimAddrtype.Merge(m, src)
}
func (m *PimAddrtype) XXX_Size() int {
return xxx_messageInfo_PimAddrtype.Size(m)
}
func (m *PimAddrtype) XXX_DiscardUnknown() {
xxx_messageInfo_PimAddrtype.DiscardUnknown(m)
}
var xxx_messageInfo_PimAddrtype proto.InternalMessageInfo
func (m *PimAddrtype) GetAfName() string {
if m != nil {
return m.AfName
}
return ""
}
func (m *PimAddrtype) GetIpv4Address() string {
if m != nil {
return m.Ipv4Address
}
return ""
}
func (m *PimAddrtype) GetIpv6Address() string {
if m != nil {
return m.Ipv6Address
}
return ""
}
type PimRpfHashBag struct {
NextHopMultipathEnabled bool `protobuf:"varint,50,opt,name=next_hop_multipath_enabled,json=nextHopMultipathEnabled,proto3" json:"next_hop_multipath_enabled,omitempty"`
NextHopAddress *PimAddrtype `protobuf:"bytes,51,opt,name=next_hop_address,json=nextHopAddress,proto3" json:"next_hop_address,omitempty"`
NextHopInterface string `protobuf:"bytes,52,opt,name=next_hop_interface,json=nextHopInterface,proto3" json:"next_hop_interface,omitempty"`
SecondaryNextHopAddress *PimAddrtype `protobuf:"bytes,53,opt,name=secondary_next_hop_address,json=secondaryNextHopAddress,proto3" json:"secondary_next_hop_address,omitempty"`
SecondaryNextHopInterface string `protobuf:"bytes,54,opt,name=secondary_next_hop_interface,json=secondaryNextHopInterface,proto3" json:"secondary_next_hop_interface,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PimRpfHashBag) Reset() { *m = PimRpfHashBag{} }
func (m *PimRpfHashBag) String() string { return proto.CompactTextString(m) }
func (*PimRpfHashBag) ProtoMessage() {}
func (*PimRpfHashBag) Descriptor() ([]byte, []int) {
return fileDescriptor_9bbebdbd6b24e885, []int{2}
}
func (m *PimRpfHashBag) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PimRpfHashBag.Unmarshal(m, b)
}
func (m *PimRpfHashBag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PimRpfHashBag.Marshal(b, m, deterministic)
}
func (m *PimRpfHashBag) XXX_Merge(src proto.Message) {
xxx_messageInfo_PimRpfHashBag.Merge(m, src)
}
func (m *PimRpfHashBag) XXX_Size() int {
return xxx_messageInfo_PimRpfHashBag.Size(m)
}
func (m *PimRpfHashBag) XXX_DiscardUnknown() {
xxx_messageInfo_PimRpfHashBag.DiscardUnknown(m)
}
var xxx_messageInfo_PimRpfHashBag proto.InternalMessageInfo
func (m *PimRpfHashBag) GetNextHopMultipathEnabled() bool {
if m != nil {
return m.NextHopMultipathEnabled
}
return false
}
func (m *PimRpfHashBag) GetNextHopAddress() *PimAddrtype {
if m != nil {
return m.NextHopAddress
}
return nil
}
func (m *PimRpfHashBag) GetNextHopInterface() string {
if m != nil {
return m.NextHopInterface
}
return ""
}
func (m *PimRpfHashBag) GetSecondaryNextHopAddress() *PimAddrtype {
if m != nil {
return m.SecondaryNextHopAddress
}
return nil
}
func (m *PimRpfHashBag) GetSecondaryNextHopInterface() string {
if m != nil {
return m.SecondaryNextHopInterface
}
return ""
}
func | () {
proto.RegisterType((*PimRpfHashBag_KEYS)(nil), "cisco_ios_xr_ipv4_pim_oper.pim.standby.vrfs.vrf.safs.saf.rpf_hash_sources.rpf_hash_source.pim_rpf_hash_bag_KEYS")
proto.RegisterType((*PimAddrtype)(nil), "cisco_ios_xr_ipv4_pim_oper.pim.standby.vrfs.vrf.safs.saf.rpf_hash_sources.rpf_hash_source.pim_addrtype")
proto.RegisterType((*PimRpfHashBag)(nil), "cisco_ios_xr_ipv4_pim_oper.pim.standby.vrfs.vrf.safs.saf.rpf_hash_sources.rpf_hash_source.pim_rpf_hash_bag")
}
func init() { proto.RegisterFile("pim_rpf_hash_bag.proto", fileDescriptor_9bbebdbd6b24e885) }
var fileDescriptor_9bbebdbd6b24e885 = []byte{
// 407 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x53, 0xbd, 0xce, 0xd3, 0x30,
0x00, 0x94, 0x29, 0xfd, 0xc1, 0xfd, 0x51, 0x65, 0x01, 0x4d, 0x2b, 0x86, 0x50, 0x84, 0x94, 0x01,
0x79, 0x68, 0x4b, 0x17, 0x06, 0xc4, 0x50, 0x09, 0x84, 0xe8, 0x10, 0xa6, 0x4e, 0x96, 0x93, 0x38,
0x4d, 0xa4, 0x26, 0xb6, 0x6c, 0x37, 0x6a, 0x1e, 0x83, 0x77, 0x60, 0xe7, 0xf9, 0xd8, 0x90, 0xe3,
0x24, 0xd0, 0xc0, 0xca, 0xb7, 0x58, 0xca, 0xdd, 0x39, 0xf7, 0x13, 0x05, 0x3e, 0x17, 0x69, 0x46,
0xa4, 0x88, 0x49, 0x42, 0x55, 0x42, 0x02, 0x7a, 0xc6, 0x42, 0x72, 0xcd, 0xd1, 0x29, 0x4c, 0x55,
0xc8, 0x49, 0xca, 0x15, 0xb9, 0x49, 0x92, 0x8a, 0x62, 0x47, 0x8c, 0x92, 0x0b, 0x26, 0xb1, 0x48,
0x33, 0xac, 0x34, 0xcd, 0xa3, 0xa0, 0xc4, 0x85, 0x8c, 0x95, 0x39, 0xb0, 0xa2, 0xb1, 0x32, 0x07,
0x6e, 0x5f, 0xa6, 0xf8, 0x55, 0x86, 0x4c, 0x75, 0x81, 0xf5, 0x0f, 0x00, 0x9f, 0x75, 0x5d, 0xc9,
0xe7, 0xc3, 0xe9, 0x2b, 0x5a, 0xc2, 0x51, 0x21, 0x63, 0x92, 0xd3, 0x8c, 0x39, 0xc0, 0x05, 0xde,
0x13, 0x7f, 0x58, 0xc8, 0xf8, 0x48, 0x33, 0x66, 0x28, 0x45, 0x6b, 0xea, 0x91, 0xa5, 0x14, 0xb5,
0xd4, 0x2b, 0x38, 0xd5, 0x5c, 0xf0, 0x0b, 0x3f, 0x97, 0x96, 0xef, 0x55, 0xfc, 0xa4, 0x01, 0x2b,
0xd1, 0x6b, 0x38, 0xb3, 0xf6, 0x84, 0x46, 0x91, 0x64, 0x4a, 0x39, 0x8f, 0x2b, 0xd5, 0xd4, 0xa2,
0x1f, 0x2c, 0x88, 0x9e, 0xc2, 0x7e, 0xc6, 0x63, 0x29, 0x9d, 0xbe, 0x0b, 0xbc, 0xa9, 0x6f, 0x1f,
0xd6, 0x19, 0x9c, 0x98, 0xc0, 0xe6, 0xa6, 0x2e, 0x05, 0x43, 0x0b, 0x38, 0xa4, 0x77, 0x31, 0x07,
0x75, 0x94, 0x97, 0x70, 0x52, 0x4d, 0xd5, 0x78, 0xd8, 0xa4, 0x63, 0x83, 0x35, 0x0e, 0x56, 0xb2,
0x6f, 0x25, 0xbd, 0x56, 0xb2, 0xaf, 0x25, 0xeb, 0x9f, 0x3d, 0x38, 0xef, 0x0e, 0x84, 0xde, 0xc1,
0x55, 0xce, 0x6e, 0x9a, 0x24, 0x5c, 0x90, 0xec, 0x7a, 0xd1, 0xa9, 0xa0, 0x3a, 0x21, 0x2c, 0xa7,
0xc1, 0x85, 0x45, 0xce, 0xc6, 0x05, 0xde, 0xc8, 0x5f, 0x18, 0xc5, 0x47, 0x2e, 0xbe, 0x34, 0xfc,
0xc1, 0xd2, 0xe8, 0x1b, 0x80, 0xf3, 0xf6, 0x76, 0xe3, 0xbc, 0x75, 0x81, 0x37, 0xde, 0x9c, 0xf1,
0x7f, 0xfb, 0xd2, 0xf8, 0xcf, 0xd1, 0xfc, 0x59, 0x1d, 0xae, 0x19, 0xe2, 0x0d, 0x44, 0x6d, 0xa4,
0x34, 0xd7, 0x4c, 0xc6, 0x34, 0x64, 0xce, 0xae, 0x9a, 0x63, 0x5e, 0x6b, 0x3f, 0x35, 0x38, 0xfa,
0x0e, 0xe0, 0x4a, 0xb1, 0x90, 0xe7, 0x11, 0x95, 0x25, 0xf9, 0xab, 0xcb, 0xdb, 0x87, 0xed, 0xb2,
0x68, 0xa3, 0x1c, 0xef, 0x4b, 0xbd, 0x87, 0x2f, 0xfe, 0x91, 0xf2, 0x77, 0xbd, 0x7d, 0x55, 0x6f,
0xd9, 0xbd, 0xde, 0xf6, 0x0c, 0x06, 0xd5, 0xef, 0xb7, 0xfd, 0x15, 0x00, 0x00, 0xff, 0xff, 0x30,
0x45, 0xe8, 0xb6, 0x98, 0x03, 0x00, 0x00,
}
| init | identifier_name |
pim_rpf_hash_bag.pb.go | /*
Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: pim_rpf_hash_bag.proto
package cisco_ios_xr_ipv4_pim_oper_pim_standby_vrfs_vrf_safs_saf_rpf_hash_sources_rpf_hash_source
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type PimRpfHashBag_KEYS struct {
VrfName string `protobuf:"bytes,1,opt,name=vrf_name,json=vrfName,proto3" json:"vrf_name,omitempty"`
SafName string `protobuf:"bytes,2,opt,name=saf_name,json=safName,proto3" json:"saf_name,omitempty"`
TopologyName string `protobuf:"bytes,3,opt,name=topology_name,json=topologyName,proto3" json:"topology_name,omitempty"`
SourceAddress string `protobuf:"bytes,4,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"`
Mofrr uint32 `protobuf:"varint,5,opt,name=mofrr,proto3" json:"mofrr,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PimRpfHashBag_KEYS) Reset() { *m = PimRpfHashBag_KEYS{} }
func (m *PimRpfHashBag_KEYS) String() string { return proto.CompactTextString(m) }
func (*PimRpfHashBag_KEYS) ProtoMessage() {}
func (*PimRpfHashBag_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_9bbebdbd6b24e885, []int{0}
}
func (m *PimRpfHashBag_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PimRpfHashBag_KEYS.Unmarshal(m, b)
}
func (m *PimRpfHashBag_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PimRpfHashBag_KEYS.Marshal(b, m, deterministic)
}
func (m *PimRpfHashBag_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_PimRpfHashBag_KEYS.Merge(m, src)
}
func (m *PimRpfHashBag_KEYS) XXX_Size() int {
return xxx_messageInfo_PimRpfHashBag_KEYS.Size(m)
}
func (m *PimRpfHashBag_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_PimRpfHashBag_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_PimRpfHashBag_KEYS proto.InternalMessageInfo
func (m *PimRpfHashBag_KEYS) GetVrfName() string {
if m != nil {
return m.VrfName
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetSafName() string {
if m != nil {
return m.SafName
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetTopologyName() string {
if m != nil {
return m.TopologyName
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetSourceAddress() string {
if m != nil {
return m.SourceAddress
}
return ""
}
func (m *PimRpfHashBag_KEYS) GetMofrr() uint32 {
if m != nil {
return m.Mofrr
}
return 0
}
type PimAddrtype struct {
AfName string `protobuf:"bytes,1,opt,name=af_name,json=afName,proto3" json:"af_name,omitempty"`
Ipv4Address string `protobuf:"bytes,2,opt,name=ipv4_address,json=ipv4Address,proto3" json:"ipv4_address,omitempty"`
Ipv6Address string `protobuf:"bytes,3,opt,name=ipv6_address,json=ipv6Address,proto3" json:"ipv6_address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PimAddrtype) Reset() { *m = PimAddrtype{} }
func (m *PimAddrtype) String() string { return proto.CompactTextString(m) }
func (*PimAddrtype) ProtoMessage() {}
func (*PimAddrtype) Descriptor() ([]byte, []int) {
return fileDescriptor_9bbebdbd6b24e885, []int{1}
}
func (m *PimAddrtype) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PimAddrtype.Unmarshal(m, b)
}
func (m *PimAddrtype) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PimAddrtype.Marshal(b, m, deterministic)
}
func (m *PimAddrtype) XXX_Merge(src proto.Message) {
xxx_messageInfo_PimAddrtype.Merge(m, src)
}
func (m *PimAddrtype) XXX_Size() int {
return xxx_messageInfo_PimAddrtype.Size(m)
}
func (m *PimAddrtype) XXX_DiscardUnknown() {
xxx_messageInfo_PimAddrtype.DiscardUnknown(m)
}
var xxx_messageInfo_PimAddrtype proto.InternalMessageInfo
func (m *PimAddrtype) GetAfName() string {
if m != nil {
return m.AfName
}
return ""
}
func (m *PimAddrtype) GetIpv4Address() string {
if m != nil {
return m.Ipv4Address
}
return ""
}
func (m *PimAddrtype) GetIpv6Address() string { |
type PimRpfHashBag struct {
NextHopMultipathEnabled bool `protobuf:"varint,50,opt,name=next_hop_multipath_enabled,json=nextHopMultipathEnabled,proto3" json:"next_hop_multipath_enabled,omitempty"`
NextHopAddress *PimAddrtype `protobuf:"bytes,51,opt,name=next_hop_address,json=nextHopAddress,proto3" json:"next_hop_address,omitempty"`
NextHopInterface string `protobuf:"bytes,52,opt,name=next_hop_interface,json=nextHopInterface,proto3" json:"next_hop_interface,omitempty"`
SecondaryNextHopAddress *PimAddrtype `protobuf:"bytes,53,opt,name=secondary_next_hop_address,json=secondaryNextHopAddress,proto3" json:"secondary_next_hop_address,omitempty"`
SecondaryNextHopInterface string `protobuf:"bytes,54,opt,name=secondary_next_hop_interface,json=secondaryNextHopInterface,proto3" json:"secondary_next_hop_interface,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PimRpfHashBag) Reset() { *m = PimRpfHashBag{} }
func (m *PimRpfHashBag) String() string { return proto.CompactTextString(m) }
func (*PimRpfHashBag) ProtoMessage() {}
func (*PimRpfHashBag) Descriptor() ([]byte, []int) {
return fileDescriptor_9bbebdbd6b24e885, []int{2}
}
func (m *PimRpfHashBag) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PimRpfHashBag.Unmarshal(m, b)
}
func (m *PimRpfHashBag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PimRpfHashBag.Marshal(b, m, deterministic)
}
func (m *PimRpfHashBag) XXX_Merge(src proto.Message) {
xxx_messageInfo_PimRpfHashBag.Merge(m, src)
}
func (m *PimRpfHashBag) XXX_Size() int {
return xxx_messageInfo_PimRpfHashBag.Size(m)
}
func (m *PimRpfHashBag) XXX_DiscardUnknown() {
xxx_messageInfo_PimRpfHashBag.DiscardUnknown(m)
}
var xxx_messageInfo_PimRpfHashBag proto.InternalMessageInfo
func (m *PimRpfHashBag) GetNextHopMultipathEnabled() bool {
if m != nil {
return m.NextHopMultipathEnabled
}
return false
}
func (m *PimRpfHashBag) GetNextHopAddress() *PimAddrtype {
if m != nil {
return m.NextHopAddress
}
return nil
}
func (m *PimRpfHashBag) GetNextHopInterface() string {
if m != nil {
return m.NextHopInterface
}
return ""
}
func (m *PimRpfHashBag) GetSecondaryNextHopAddress() *PimAddrtype {
if m != nil {
return m.SecondaryNextHopAddress
}
return nil
}
func (m *PimRpfHashBag) GetSecondaryNextHopInterface() string {
if m != nil {
return m.SecondaryNextHopInterface
}
return ""
}
func init() {
proto.RegisterType((*PimRpfHashBag_KEYS)(nil), "cisco_ios_xr_ipv4_pim_oper.pim.standby.vrfs.vrf.safs.saf.rpf_hash_sources.rpf_hash_source.pim_rpf_hash_bag_KEYS")
proto.RegisterType((*PimAddrtype)(nil), "cisco_ios_xr_ipv4_pim_oper.pim.standby.vrfs.vrf.safs.saf.rpf_hash_sources.rpf_hash_source.pim_addrtype")
proto.RegisterType((*PimRpfHashBag)(nil), "cisco_ios_xr_ipv4_pim_oper.pim.standby.vrfs.vrf.safs.saf.rpf_hash_sources.rpf_hash_source.pim_rpf_hash_bag")
}
func init() { proto.RegisterFile("pim_rpf_hash_bag.proto", fileDescriptor_9bbebdbd6b24e885) }
var fileDescriptor_9bbebdbd6b24e885 = []byte{
// 407 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x53, 0xbd, 0xce, 0xd3, 0x30,
0x00, 0x94, 0x29, 0xfd, 0xc1, 0xfd, 0x51, 0x65, 0x01, 0x4d, 0x2b, 0x86, 0x50, 0x84, 0x94, 0x01,
0x79, 0x68, 0x4b, 0x17, 0x06, 0xc4, 0x50, 0x09, 0x84, 0xe8, 0x10, 0xa6, 0x4e, 0x96, 0x93, 0x38,
0x4d, 0xa4, 0x26, 0xb6, 0x6c, 0x37, 0x6a, 0x1e, 0x83, 0x77, 0x60, 0xe7, 0xf9, 0xd8, 0x90, 0xe3,
0x24, 0xd0, 0xc0, 0xca, 0xb7, 0x58, 0xca, 0xdd, 0x39, 0xf7, 0x13, 0x05, 0x3e, 0x17, 0x69, 0x46,
0xa4, 0x88, 0x49, 0x42, 0x55, 0x42, 0x02, 0x7a, 0xc6, 0x42, 0x72, 0xcd, 0xd1, 0x29, 0x4c, 0x55,
0xc8, 0x49, 0xca, 0x15, 0xb9, 0x49, 0x92, 0x8a, 0x62, 0x47, 0x8c, 0x92, 0x0b, 0x26, 0xb1, 0x48,
0x33, 0xac, 0x34, 0xcd, 0xa3, 0xa0, 0xc4, 0x85, 0x8c, 0x95, 0x39, 0xb0, 0xa2, 0xb1, 0x32, 0x07,
0x6e, 0x5f, 0xa6, 0xf8, 0x55, 0x86, 0x4c, 0x75, 0x81, 0xf5, 0x0f, 0x00, 0x9f, 0x75, 0x5d, 0xc9,
0xe7, 0xc3, 0xe9, 0x2b, 0x5a, 0xc2, 0x51, 0x21, 0x63, 0x92, 0xd3, 0x8c, 0x39, 0xc0, 0x05, 0xde,
0x13, 0x7f, 0x58, 0xc8, 0xf8, 0x48, 0x33, 0x66, 0x28, 0x45, 0x6b, 0xea, 0x91, 0xa5, 0x14, 0xb5,
0xd4, 0x2b, 0x38, 0xd5, 0x5c, 0xf0, 0x0b, 0x3f, 0x97, 0x96, 0xef, 0x55, 0xfc, 0xa4, 0x01, 0x2b,
0xd1, 0x6b, 0x38, 0xb3, 0xf6, 0x84, 0x46, 0x91, 0x64, 0x4a, 0x39, 0x8f, 0x2b, 0xd5, 0xd4, 0xa2,
0x1f, 0x2c, 0x88, 0x9e, 0xc2, 0x7e, 0xc6, 0x63, 0x29, 0x9d, 0xbe, 0x0b, 0xbc, 0xa9, 0x6f, 0x1f,
0xd6, 0x19, 0x9c, 0x98, 0xc0, 0xe6, 0xa6, 0x2e, 0x05, 0x43, 0x0b, 0x38, 0xa4, 0x77, 0x31, 0x07,
0x75, 0x94, 0x97, 0x70, 0x52, 0x4d, 0xd5, 0x78, 0xd8, 0xa4, 0x63, 0x83, 0x35, 0x0e, 0x56, 0xb2,
0x6f, 0x25, 0xbd, 0x56, 0xb2, 0xaf, 0x25, 0xeb, 0x9f, 0x3d, 0x38, 0xef, 0x0e, 0x84, 0xde, 0xc1,
0x55, 0xce, 0x6e, 0x9a, 0x24, 0x5c, 0x90, 0xec, 0x7a, 0xd1, 0xa9, 0xa0, 0x3a, 0x21, 0x2c, 0xa7,
0xc1, 0x85, 0x45, 0xce, 0xc6, 0x05, 0xde, 0xc8, 0x5f, 0x18, 0xc5, 0x47, 0x2e, 0xbe, 0x34, 0xfc,
0xc1, 0xd2, 0xe8, 0x1b, 0x80, 0xf3, 0xf6, 0x76, 0xe3, 0xbc, 0x75, 0x81, 0x37, 0xde, 0x9c, 0xf1,
0x7f, 0xfb, 0xd2, 0xf8, 0xcf, 0xd1, 0xfc, 0x59, 0x1d, 0xae, 0x19, 0xe2, 0x0d, 0x44, 0x6d, 0xa4,
0x34, 0xd7, 0x4c, 0xc6, 0x34, 0x64, 0xce, 0xae, 0x9a, 0x63, 0x5e, 0x6b, 0x3f, 0x35, 0x38, 0xfa,
0x0e, 0xe0, 0x4a, 0xb1, 0x90, 0xe7, 0x11, 0x95, 0x25, 0xf9, 0xab, 0xcb, 0xdb, 0x87, 0xed, 0xb2,
0x68, 0xa3, 0x1c, 0xef, 0x4b, 0xbd, 0x87, 0x2f, 0xfe, 0x91, 0xf2, 0x77, 0xbd, 0x7d, 0x55, 0x6f,
0xd9, 0xbd, 0xde, 0xf6, 0x0c, 0x06, 0xd5, 0xef, 0xb7, 0xfd, 0x15, 0x00, 0x00, 0xff, 0xff, 0x30,
0x45, 0xe8, 0xb6, 0x98, 0x03, 0x00, 0x00,
} | if m != nil {
return m.Ipv6Address
}
return ""
} | random_line_split |
index.js | 'use strict';
const
FFI = require('ffi'),
path = require('path'),
ref = require('ref'),
/**
* @typedef LogLevel
* @type {object}
* @property {int} NONE No logging.
* @property {int} NORMAL Normal logging of important events like errors and warnings. The default log level.
* @property {int} INFO Logs significantly more information about the internals of NAPI.
* @property {int} DEBUG The log level that will likely be used when working with Nymi Support.
* @property {int} VERBOSE Logs pretty much everything down to the Bluetooth level.
* @constant
*/
LogLevel = {
NONE: 0,
NORMAL: 1,
INFO: 2,
DEBUG: 3,
VERBOSE: 4
},
/**
* @typedef ConfigOutcome
* @type {object}
* @property {int} OKAY Configured successfully.
* @property {int} INVALID_PROVISION_JSON Provision information provided is invalid (likely invalid JSON).
* @property {int} MISSING_NEA_NAME Provision information does not include neaName.
* @property {int} FAILED_TO_INIT Configuration infomation is okay, but NAPI was unable to start successfully.
* @property {int} ERROR An error occurred, likely an exception, possibly involving the parameters provided.
* @property {int} IMPOSSIBLE
* @constant
*/
ConfigOutcome = {
OKAY: 0,
INVALID_PROVISION_JSON: 1,
MISSING_NEA_NAME: 2,
FAILED_TO_INIT: 3,
ERROR: 4,
IMPOSSIBLE: 5
},
/**
* @typedef PutOutcome
* @type {object}
* @property {int} OKAY Sending JSON was successful.
* @property {int} NAPI_NOT_RUNNING NAPI is not running – either napiConfigure did not complete or napiTerminate was already called.
* @property {int} UNPARSEABLE_JSON The provided string is not parseable as JSON.
* @property {int} ERROR An error occurred, likely an exception.
* @property {int} IMPOSSIBLE
* @constant
*/
PutOutcome = {
OKAY: 0,
NAPI_NOT_RUNNING: 1,
UNPARSEABLE_JSON: 2,
ERROR: 3,
IMPOSSIBLE: 4
},
/**
* @typedef GetOutcome
* @type {object}
* @property {int} OKAY A JSON string has been returned.
* @property {int} NAPI_NOT_RUNNING NAPI is not running – either napiConfigure did not complete or napiTerminate was already called.
* @property {int} BUFFER_TOO_SMALL The provided char* buffer is not long enough; the length value will contain the minimum required size.
* @property {int} NAPI_TERMINATED Napi::terminate was called. This outcome will be returned once. Afterwards, the outcome is NAPI_NOT_RUNNING.
* @property {int} ERROR An error occurred, likely an exception.
* @property {int} IMPOSSIBLE
* @constant
*/
GetOutcome = {
OKAY: 0,
NAPI_NOT_RUNNING: 1,
BUFFER_TOO_SMALL: 2,
NAPI_TERMINATED: 3,
ERROR: 4,
IMPOSSIBLE: 5
},
/**
* @typedef TryGetOutcome
* @type {object}
* @property {int} OKAY A JSON string has been returned.
* @property {int} NOTHING There is no JSON available at the time of the call.
* @property {int} NAPI_NOT_RUNNING NAPI is not running – either napiConfigure did not complete or napiTerminate was already called.
* @property {int} BUFFER_TOO_SMALL The provided char* buffer is not long enough; the length value will contain the minimum required size.
* @property {int} NAPI_TERMINATED Napi::terminate was called. This outcome will be returned once. Afterwards, the outcome is NAPI_NOT_RUNNING.
* @property {int} ERROR An error occurred, likely an exception.
* @property {int} IMPOSSIBLE
* @constant
*/
TryGetOutcome = {
OKAY: 0,
NOTHING: 1,
NAPI_NOT_RUNNING: 2,
BUFFER_TOO_SMALL: 3,
NAPI_TERMINATED: 4,
ERROR: 5,
IMPOSSIBLE: 6
},
stringPtr = ref.refType('string'),
intPtr = ref.refType('int'),
NapiInterface = {
napiConfigure: ['int', ['string', 'string', 'string', 'int', 'int', 'string']],
napiGet: ['int', [stringPtr, 'int', intPtr]],
napiTryGet: ['int', [stringPtr, 'int', intPtr]],
napiPut: ['int', ['string']],
napiTerminate: ['void', []]
};
let
priv = new WeakMap(),
privates = {},
_s = (scope, key, value) => {privates[key] = value; priv.set(scope, privates)},
_g = (scope, key) => priv.get(scope)[key];
/**
* <p><b>Class NapiBinding</b></p>
*
* @class NapuBinding
*/
class NapiBinding
{
/**
* GetOutcome
*
* @static
* @return {GetOutcome}
*/
static get GetOutcome ()
{
return GetOutcome;
}
/**
* TryGetOutcome
*
* @static
* @return {TryGetOutcome}
*/
static get TryGetOutcome ()
{
return TryGetOutcome;
}
/**
* PutOutcome
*
* @static
* @return {PutOutcome}
*/
static get PutOutcome ()
{
return PutOutcome;
}
/**
* ConfigOutcome
*
* @static
* @return {ConfigOutcome}
*/
static get ConfigOutcome ()
{
return ConfigOutcome;
}
/**
* LogLevel
*
* @static
* @return {LogLevel}
*/ | static get LogLevel ()
{
return LogLevel;
}
/**
* <p>Create bindings for the Nymi API</p>
*
* @constructor
* @param {boolean} [nymulator=false] TRUE create bindings for networked library, FALSE create bindings for native library.
*/
constructor (nymulator)
{
nymulator = nymulator || false;
let lib = process.platform === 'darwin' && nymulator ? './../bin/napi-net' : './../bin/napi';
_s(this, 'binding', new FFI.Library(path.resolve(__dirname, lib), NapiInterface));
}
/**
* <p>Configure and start NAPI.</p>
* <p>For most NEAs the default arguments are correct so the call would be similar to napiConfigure("root-directory-path");.
* The default host of "" is treated as "127.0.0.1". The default port of -1 will choose the port depending on platform (OS X or Windows) and libary (native or networked).
* The value of provisions should be the same as the last saved value.
* </p>
*
* @param {string} neaName Name of this NEA (used when provisioning). (6 to 18 characters)
* @param {string} logDirectory Path to a directory that will contain log files.
* @param {string} [provisions = '{}'] The provision data saved by previous runs of the NEA.
* @param {int} [logLevel=LogLevel.NORMAL] The log level to use (see LogLevel).
* @param {int} [port=-1] The default port for networked Nymi Bands (on Windows) or the Nymulator.
* @param {string} [host=''] The default host for networked Nymi Bands (on Windows) or the Nymulator.
* @return {ConfigOutcome}
*/
napiConfigure (neaName, logDirectory, provisions, logLevel, port, host)
{
neaName = String(neaName);
logDirectory = String(logDirectory);
provisions = String(provisions) || '{}';
logLevel = ~~logLevel || LogLevel.NORMAL;
port = ~~port || -1;
host = String(host) || '';
return _g(this, 'binding').napiConfigure(neaName, logDirectory, provisions, logLevel, port, host);
}
/**
* <p>Receive a JSON message from NAPI, blocks if nothing is available yet; standard usage.</p>
* <p>napiGet is a blocking call.
* If NAPI is not running, wait a short time and call napiGet again. No JSON messages are lost.
* </p>
* <b>This variant returns when:</b>
* <li>A message is available from NAPI (GetOutcome.OKAY)</li>
* <li>A message from NAPI is ready, but the provided buffer is too small (GetOutcome::BUFFER_TOO_SMALL)</li>
* <li>NAPI is not running (GetOutcome.NAPI_NOT_RUNNING)</li>
* <li>NAPI has terminated (GetOutcome.NAPI_TERMINATED)</li>
*
* @return {{outcome: GetOutcome, json: object}}
*/
napiGet ()
{
let outcome, buf, len,
json = null;
try {
buf = Buffer.alloc(4096);
len = ref.alloc('int');
buf.type = stringPtr;
outcome = _g(this, 'binding').napiGet(buf, buf.length, len);
if (outcome === NapiBinding.GetOutcome.BUFFER_TOO_SMALL) {
outcome = _g(this, 'binding').napiGet(buf, len.deref(), len);
}
if (outcome === NapiBinding.GetOutcome.OKAY) {
json = JSON.parse(buf.readCString(0));
}
} catch (err) {
outcome = NapiBinding.GetOutcome.ERROR;
}
return {outcome: outcome, json: json};
}
/**
* <p>Receive a JSON message from NAPI if one is available, non-blocking; standard usage.</p>
* <p>napiTryGet is a non-blocking call.
* If NAPI is not running, wait a short time and call napiTryGet again. No JSON messages are lost.
* </p>
* <b>This variant returns when:</b>
* <li>A message is available from NAPI (TryGetOutcome.OKAY)</li>
* <li>No message is available at the time of the call. (TryGetOutcome::NOTHING)</li>
* <li>A message from NAPI is ready, but the provided buffer is too small (TryGetOutcome::BUFFER_TOO_SMALL)</li>
* <li>NAPI is not running (TryGetOutcome.NAPI_NOT_RUNNING)</li>
* <li>NAPI has terminated (TryGetOutcome.NAPI_TERMINATED)</li>
*
* @return {{outcome: (TryGetOutcome), json: object}}
*/
napiTryGet ()
{
let outcome, buf, len,
json = null;
try {
buf = Buffer.alloc(4096);
len = ref.alloc('int');
buf.type = stringPtr;
outcome = _g(this, 'binding').napiTryGet(buf, buf.length, len);
if (outcome === NapiBinding.TryGetOutcome.BUFFER_TOO_SMALL) {
outcome = _g(this, 'binding').napiTryGet(buf, len.deref(), len);
}
if (outcome === NapiBinding.TryGetOutcome.OKAY) {
json = JSON.parse(buf.readCString(0));
}
} catch (err) {
outcome = NapiBinding.TryGetOutcome.ERROR;
}
return {outcome: outcome, json: json};
}
/**
* <p>Send a JSON message to NAPI.</p>
*
* @param {object} json Stringified JSON to send to NAPI.
* @return {PutOutcome}
*/
napiPut (json)
{
try {
return _g(this, 'binding').napiPut(JSON.stringify(json));
} catch (err) {
return PutOutcome.IMPOSSIBLE;
}
}
/**
* <p>Shutdown NAPI.</p>
* <p>The NEA should call this function before exiting.</p>
* <b>Note:</b>
* Calling this function, followed by a second call to napiConfigD, may now work (consider it beta functionality).
*
* @return {void}
*/
napiTerminate ()
{
_g(this, 'binding').napiTerminate();
}
}
module.exports = NapiBinding; | random_line_split | |
index.js | 'use strict';
const
FFI = require('ffi'),
path = require('path'),
ref = require('ref'),
/**
* @typedef LogLevel
* @type {object}
* @property {int} NONE No logging.
* @property {int} NORMAL Normal logging of important events like errors and warnings. The default log level.
* @property {int} INFO Logs significantly more information about the internals of NAPI.
* @property {int} DEBUG The log level that will likely be used when working with Nymi Support.
* @property {int} VERBOSE Logs pretty much everything down to the Bluetooth level.
* @constant
*/
LogLevel = {
NONE: 0,
NORMAL: 1,
INFO: 2,
DEBUG: 3,
VERBOSE: 4
},
/**
* @typedef ConfigOutcome
* @type {object}
* @property {int} OKAY Configured successfully.
* @property {int} INVALID_PROVISION_JSON Provision information provided is invalid (likely invalid JSON).
* @property {int} MISSING_NEA_NAME Provision information does not include neaName.
* @property {int} FAILED_TO_INIT Configuration infomation is okay, but NAPI was unable to start successfully.
* @property {int} ERROR An error occurred, likely an exception, possibly involving the parameters provided.
* @property {int} IMPOSSIBLE
* @constant
*/
ConfigOutcome = {
OKAY: 0,
INVALID_PROVISION_JSON: 1,
MISSING_NEA_NAME: 2,
FAILED_TO_INIT: 3,
ERROR: 4,
IMPOSSIBLE: 5
},
/**
* @typedef PutOutcome
* @type {object}
* @property {int} OKAY Sending JSON was successful.
* @property {int} NAPI_NOT_RUNNING NAPI is not running – either napiConfigure did not complete or napiTerminate was already called.
* @property {int} UNPARSEABLE_JSON The provided string is not parseable as JSON.
* @property {int} ERROR An error occurred, likely an exception.
* @property {int} IMPOSSIBLE
* @constant
*/
PutOutcome = {
OKAY: 0,
NAPI_NOT_RUNNING: 1,
UNPARSEABLE_JSON: 2,
ERROR: 3,
IMPOSSIBLE: 4
},
/**
* @typedef GetOutcome
* @type {object}
* @property {int} OKAY A JSON string has been returned.
* @property {int} NAPI_NOT_RUNNING NAPI is not running – either napiConfigure did not complete or napiTerminate was already called.
* @property {int} BUFFER_TOO_SMALL The provided char* buffer is not long enough; the length value will contain the minimum required size.
* @property {int} NAPI_TERMINATED Napi::terminate was called. This outcome will be returned once. Afterwards, the outcome is NAPI_NOT_RUNNING.
* @property {int} ERROR An error occurred, likely an exception.
* @property {int} IMPOSSIBLE
* @constant
*/
GetOutcome = {
OKAY: 0,
NAPI_NOT_RUNNING: 1,
BUFFER_TOO_SMALL: 2,
NAPI_TERMINATED: 3,
ERROR: 4,
IMPOSSIBLE: 5
},
/**
* @typedef TryGetOutcome
* @type {object}
* @property {int} OKAY A JSON string has been returned.
* @property {int} NOTHING There is no JSON available at the time of the call.
* @property {int} NAPI_NOT_RUNNING NAPI is not running – either napiConfigure did not complete or napiTerminate was already called.
* @property {int} BUFFER_TOO_SMALL The provided char* buffer is not long enough; the length value will contain the minimum required size.
* @property {int} NAPI_TERMINATED Napi::terminate was called. This outcome will be returned once. Afterwards, the outcome is NAPI_NOT_RUNNING.
* @property {int} ERROR An error occurred, likely an exception.
* @property {int} IMPOSSIBLE
* @constant
*/
TryGetOutcome = {
OKAY: 0,
NOTHING: 1,
NAPI_NOT_RUNNING: 2,
BUFFER_TOO_SMALL: 3,
NAPI_TERMINATED: 4,
ERROR: 5,
IMPOSSIBLE: 6
},
stringPtr = ref.refType('string'),
intPtr = ref.refType('int'),
NapiInterface = {
napiConfigure: ['int', ['string', 'string', 'string', 'int', 'int', 'string']],
napiGet: ['int', [stringPtr, 'int', intPtr]],
napiTryGet: ['int', [stringPtr, 'int', intPtr]],
napiPut: ['int', ['string']],
napiTerminate: ['void', []]
};
let
priv = new WeakMap(),
privates = {},
_s = (scope, key, value) => {privates[key] = value; priv.set(scope, privates)},
_g = (scope, key) => priv.get(scope)[key];
/**
* <p><b>Class NapiBinding</b></p>
*
* @class NapuBinding
*/
class NapiBinding
{
/**
* GetOutcome
*
* @static
* @return {GetOutcome}
*/
static get GetOutcome ()
{
return GetOutcome;
}
/**
* TryGetOutcome
*
* @static
* @return {TryGetOutcome}
*/
static get TryGetOutcome ()
{
return TryGetOutcome;
}
/**
* PutOutcome
*
* @static
* @return {PutOutcome}
*/
static get PutOutcome ()
{
return PutOutcome;
}
/**
* ConfigOutcome
*
* @static
* @return {ConfigOutcome}
*/
static get ConfigOutcome ()
{
return ConfigOutcome;
}
/**
* LogLevel
*
* @static
* @return {LogLevel}
*/
static get LogLev | {
return LogLevel;
}
/**
* <p>Create bindings for the Nymi API</p>
*
* @constructor
* @param {boolean} [nymulator=false] TRUE create bindings for networked library, FALSE create bindings for native library.
*/
constructor (nymulator)
{
nymulator = nymulator || false;
let lib = process.platform === 'darwin' && nymulator ? './../bin/napi-net' : './../bin/napi';
_s(this, 'binding', new FFI.Library(path.resolve(__dirname, lib), NapiInterface));
}
/**
* <p>Configure and start NAPI.</p>
* <p>For most NEAs the default arguments are correct so the call would be similar to napiConfigure("root-directory-path");.
* The default host of "" is treated as "127.0.0.1". The default port of -1 will choose the port depending on platform (OS X or Windows) and libary (native or networked).
* The value of provisions should be the same as the last saved value.
* </p>
*
* @param {string} neaName Name of this NEA (used when provisioning). (6 to 18 characters)
* @param {string} logDirectory Path to a directory that will contain log files.
* @param {string} [provisions = '{}'] The provision data saved by previous runs of the NEA.
* @param {int} [logLevel=LogLevel.NORMAL] The log level to use (see LogLevel).
* @param {int} [port=-1] The default port for networked Nymi Bands (on Windows) or the Nymulator.
* @param {string} [host=''] The default host for networked Nymi Bands (on Windows) or the Nymulator.
* @return {ConfigOutcome}
*/
napiConfigure (neaName, logDirectory, provisions, logLevel, port, host)
{
neaName = String(neaName);
logDirectory = String(logDirectory);
provisions = String(provisions) || '{}';
logLevel = ~~logLevel || LogLevel.NORMAL;
port = ~~port || -1;
host = String(host) || '';
return _g(this, 'binding').napiConfigure(neaName, logDirectory, provisions, logLevel, port, host);
}
/**
* <p>Receive a JSON message from NAPI, blocks if nothing is available yet; standard usage.</p>
* <p>napiGet is a blocking call.
* If NAPI is not running, wait a short time and call napiGet again. No JSON messages are lost.
* </p>
* <b>This variant returns when:</b>
* <li>A message is available from NAPI (GetOutcome.OKAY)</li>
* <li>A message from NAPI is ready, but the provided buffer is too small (GetOutcome::BUFFER_TOO_SMALL)</li>
* <li>NAPI is not running (GetOutcome.NAPI_NOT_RUNNING)</li>
* <li>NAPI has terminated (GetOutcome.NAPI_TERMINATED)</li>
*
* @return {{outcome: GetOutcome, json: object}}
*/
napiGet ()
{
let outcome, buf, len,
json = null;
try {
buf = Buffer.alloc(4096);
len = ref.alloc('int');
buf.type = stringPtr;
outcome = _g(this, 'binding').napiGet(buf, buf.length, len);
if (outcome === NapiBinding.GetOutcome.BUFFER_TOO_SMALL) {
outcome = _g(this, 'binding').napiGet(buf, len.deref(), len);
}
if (outcome === NapiBinding.GetOutcome.OKAY) {
json = JSON.parse(buf.readCString(0));
}
} catch (err) {
outcome = NapiBinding.GetOutcome.ERROR;
}
return {outcome: outcome, json: json};
}
/**
* <p>Receive a JSON message from NAPI if one is available, non-blocking; standard usage.</p>
* <p>napiTryGet is a non-blocking call.
* If NAPI is not running, wait a short time and call napiTryGet again. No JSON messages are lost.
* </p>
* <b>This variant returns when:</b>
* <li>A message is available from NAPI (TryGetOutcome.OKAY)</li>
* <li>No message is available at the time of the call. (TryGetOutcome::NOTHING)</li>
* <li>A message from NAPI is ready, but the provided buffer is too small (TryGetOutcome::BUFFER_TOO_SMALL)</li>
* <li>NAPI is not running (TryGetOutcome.NAPI_NOT_RUNNING)</li>
* <li>NAPI has terminated (TryGetOutcome.NAPI_TERMINATED)</li>
*
* @return {{outcome: (TryGetOutcome), json: object}}
*/
napiTryGet ()
{
let outcome, buf, len,
json = null;
try {
buf = Buffer.alloc(4096);
len = ref.alloc('int');
buf.type = stringPtr;
outcome = _g(this, 'binding').napiTryGet(buf, buf.length, len);
if (outcome === NapiBinding.TryGetOutcome.BUFFER_TOO_SMALL) {
outcome = _g(this, 'binding').napiTryGet(buf, len.deref(), len);
}
if (outcome === NapiBinding.TryGetOutcome.OKAY) {
json = JSON.parse(buf.readCString(0));
}
} catch (err) {
outcome = NapiBinding.TryGetOutcome.ERROR;
}
return {outcome: outcome, json: json};
}
/**
* <p>Send a JSON message to NAPI.</p>
*
* @param {object} json Stringified JSON to send to NAPI.
* @return {PutOutcome}
*/
napiPut (json)
{
try {
return _g(this, 'binding').napiPut(JSON.stringify(json));
} catch (err) {
return PutOutcome.IMPOSSIBLE;
}
}
/**
* <p>Shutdown NAPI.</p>
* <p>The NEA should call this function before exiting.</p>
* <b>Note:</b>
* Calling this function, followed by a second call to napiConfigD, may now work (consider it beta functionality).
*
* @return {void}
*/
napiTerminate ()
{
_g(this, 'binding').napiTerminate();
}
}
module.exports = NapiBinding; | el ()
| identifier_name |
index.js | 'use strict';
const
FFI = require('ffi'),
path = require('path'),
ref = require('ref'),
/**
* @typedef LogLevel
* @type {object}
* @property {int} NONE No logging.
* @property {int} NORMAL Normal logging of important events like errors and warnings. The default log level.
* @property {int} INFO Logs significantly more information about the internals of NAPI.
* @property {int} DEBUG The log level that will likely be used when working with Nymi Support.
* @property {int} VERBOSE Logs pretty much everything down to the Bluetooth level.
* @constant
*/
LogLevel = {
NONE: 0,
NORMAL: 1,
INFO: 2,
DEBUG: 3,
VERBOSE: 4
},
/**
* @typedef ConfigOutcome
* @type {object}
* @property {int} OKAY Configured successfully.
* @property {int} INVALID_PROVISION_JSON Provision information provided is invalid (likely invalid JSON).
* @property {int} MISSING_NEA_NAME Provision information does not include neaName.
* @property {int} FAILED_TO_INIT Configuration infomation is okay, but NAPI was unable to start successfully.
* @property {int} ERROR An error occurred, likely an exception, possibly involving the parameters provided.
* @property {int} IMPOSSIBLE
* @constant
*/
ConfigOutcome = {
OKAY: 0,
INVALID_PROVISION_JSON: 1,
MISSING_NEA_NAME: 2,
FAILED_TO_INIT: 3,
ERROR: 4,
IMPOSSIBLE: 5
},
/**
* @typedef PutOutcome
* @type {object}
* @property {int} OKAY Sending JSON was successful.
* @property {int} NAPI_NOT_RUNNING NAPI is not running – either napiConfigure did not complete or napiTerminate was already called.
* @property {int} UNPARSEABLE_JSON The provided string is not parseable as JSON.
* @property {int} ERROR An error occurred, likely an exception.
* @property {int} IMPOSSIBLE
* @constant
*/
PutOutcome = {
OKAY: 0,
NAPI_NOT_RUNNING: 1,
UNPARSEABLE_JSON: 2,
ERROR: 3,
IMPOSSIBLE: 4
},
/**
* @typedef GetOutcome
* @type {object}
* @property {int} OKAY A JSON string has been returned.
* @property {int} NAPI_NOT_RUNNING NAPI is not running – either napiConfigure did not complete or napiTerminate was already called.
* @property {int} BUFFER_TOO_SMALL The provided char* buffer is not long enough; the length value will contain the minimum required size.
* @property {int} NAPI_TERMINATED Napi::terminate was called. This outcome will be returned once. Afterwards, the outcome is NAPI_NOT_RUNNING.
* @property {int} ERROR An error occurred, likely an exception.
* @property {int} IMPOSSIBLE
* @constant
*/
GetOutcome = {
OKAY: 0,
NAPI_NOT_RUNNING: 1,
BUFFER_TOO_SMALL: 2,
NAPI_TERMINATED: 3,
ERROR: 4,
IMPOSSIBLE: 5
},
/**
* @typedef TryGetOutcome
* @type {object}
* @property {int} OKAY A JSON string has been returned.
* @property {int} NOTHING There is no JSON available at the time of the call.
* @property {int} NAPI_NOT_RUNNING NAPI is not running – either napiConfigure did not complete or napiTerminate was already called.
* @property {int} BUFFER_TOO_SMALL The provided char* buffer is not long enough; the length value will contain the minimum required size.
* @property {int} NAPI_TERMINATED Napi::terminate was called. This outcome will be returned once. Afterwards, the outcome is NAPI_NOT_RUNNING.
* @property {int} ERROR An error occurred, likely an exception.
* @property {int} IMPOSSIBLE
* @constant
*/
TryGetOutcome = {
OKAY: 0,
NOTHING: 1,
NAPI_NOT_RUNNING: 2,
BUFFER_TOO_SMALL: 3,
NAPI_TERMINATED: 4,
ERROR: 5,
IMPOSSIBLE: 6
},
stringPtr = ref.refType('string'),
intPtr = ref.refType('int'),
NapiInterface = {
napiConfigure: ['int', ['string', 'string', 'string', 'int', 'int', 'string']],
napiGet: ['int', [stringPtr, 'int', intPtr]],
napiTryGet: ['int', [stringPtr, 'int', intPtr]],
napiPut: ['int', ['string']],
napiTerminate: ['void', []]
};
let
priv = new WeakMap(),
privates = {},
_s = (scope, key, value) => {privates[key] = value; priv.set(scope, privates)},
_g = (scope, key) => priv.get(scope)[key];
/**
* <p><b>Class NapiBinding</b></p>
*
* @class NapuBinding
*/
class NapiBinding
{
/**
* GetOutcome
*
* @static
* @return {GetOutcome}
*/
static get GetOutcome ()
{
return GetOutcome;
}
/**
* TryGetOutcome
*
* @static
* @return {TryGetOutcome}
*/
static get TryGetOutcome ()
{
return TryGetOutcome;
}
/**
* PutOutcome
*
* @static
* @return {PutOutcome}
*/
static get PutOutcome ()
{
return PutOutcome;
}
/**
* ConfigOutcome
*
* @static
* @return {ConfigOutcome}
*/
static get ConfigOutcome ()
{
return ConfigOutcome;
}
/**
* LogLevel
*
* @static
* @return {LogLevel}
*/
static get LogLevel ()
{
return LogLevel;
}
/**
* <p>Create bindings for the Nymi API</p>
*
* @constructor
* @param {boolean} [nymulator=false] TRUE create bindings for networked library, FALSE create bindings for native library.
*/
constructor (nymulator)
{
nymulator = nymulator || false;
let lib = process.platform === 'darwin' && nymulator ? './../bin/napi-net' : './../bin/napi';
_s(this, 'binding', new FFI.Library(path.resolve(__dirname, lib), NapiInterface));
}
/**
* <p>Configure and start NAPI.</p>
* <p>For most NEAs the default arguments are correct so the call would be similar to napiConfigure("root-directory-path");.
* The default host of "" is treated as "127.0.0.1". The default port of -1 will choose the port depending on platform (OS X or Windows) and libary (native or networked).
* The value of provisions should be the same as the last saved value.
* </p>
*
* @param {string} neaName Name of this NEA (used when provisioning). (6 to 18 characters)
* @param {string} logDirectory Path to a directory that will contain log files.
* @param {string} [provisions = '{}'] The provision data saved by previous runs of the NEA.
* @param {int} [logLevel=LogLevel.NORMAL] The log level to use (see LogLevel).
* @param {int} [port=-1] The default port for networked Nymi Bands (on Windows) or the Nymulator.
* @param {string} [host=''] The default host for networked Nymi Bands (on Windows) or the Nymulator.
* @return {ConfigOutcome}
*/
napiConfigure (neaName, logDirectory, provisions, logLevel, port, host)
{
neaName = String(neaName);
logDirectory = String(logDirectory);
provisions = String(provisions) || '{}';
logLevel = ~~logLevel || LogLevel.NORMAL;
port = ~~port || -1;
host = String(host) || '';
return _g(this, 'binding').napiConfigure(neaName, logDirectory, provisions, logLevel, port, host);
}
/**
* <p>Receive a JSON message from NAPI, blocks if nothing is available yet; standard usage.</p>
* <p>napiGet is a blocking call.
* If NAPI is not running, wait a short time and call napiGet again. No JSON messages are lost.
* </p>
* <b>This variant returns when:</b>
* <li>A message is available from NAPI (GetOutcome.OKAY)</li>
* <li>A message from NAPI is ready, but the provided buffer is too small (GetOutcome::BUFFER_TOO_SMALL)</li>
* <li>NAPI is not running (GetOutcome.NAPI_NOT_RUNNING)</li>
* <li>NAPI has terminated (GetOutcome.NAPI_TERMINATED)</li>
*
* @return {{outcome: GetOutcome, json: object}}
*/
napiGet ()
{
let outcome, buf, len,
json = null;
try {
buf = Buffer.alloc(4096);
len = ref.alloc('int');
buf.type = stringPtr;
outcome = _g(this, 'binding').napiGet(buf, buf.length, len);
if (outcome === NapiBinding.GetOutcome.BUFFER_TOO_SMALL) {
| if (outcome === NapiBinding.GetOutcome.OKAY) {
json = JSON.parse(buf.readCString(0));
}
} catch (err) {
outcome = NapiBinding.GetOutcome.ERROR;
}
return {outcome: outcome, json: json};
}
/**
* <p>Receive a JSON message from NAPI if one is available, non-blocking; standard usage.</p>
* <p>napiTryGet is a non-blocking call.
* If NAPI is not running, wait a short time and call napiTryGet again. No JSON messages are lost.
* </p>
* <b>This variant returns when:</b>
* <li>A message is available from NAPI (TryGetOutcome.OKAY)</li>
* <li>No message is available at the time of the call. (TryGetOutcome::NOTHING)</li>
* <li>A message from NAPI is ready, but the provided buffer is too small (TryGetOutcome::BUFFER_TOO_SMALL)</li>
* <li>NAPI is not running (TryGetOutcome.NAPI_NOT_RUNNING)</li>
* <li>NAPI has terminated (TryGetOutcome.NAPI_TERMINATED)</li>
*
* @return {{outcome: (TryGetOutcome), json: object}}
*/
napiTryGet ()
{
let outcome, buf, len,
json = null;
try {
buf = Buffer.alloc(4096);
len = ref.alloc('int');
buf.type = stringPtr;
outcome = _g(this, 'binding').napiTryGet(buf, buf.length, len);
if (outcome === NapiBinding.TryGetOutcome.BUFFER_TOO_SMALL) {
outcome = _g(this, 'binding').napiTryGet(buf, len.deref(), len);
}
if (outcome === NapiBinding.TryGetOutcome.OKAY) {
json = JSON.parse(buf.readCString(0));
}
} catch (err) {
outcome = NapiBinding.TryGetOutcome.ERROR;
}
return {outcome: outcome, json: json};
}
/**
* <p>Send a JSON message to NAPI.</p>
*
* @param {object} json Stringified JSON to send to NAPI.
* @return {PutOutcome}
*/
napiPut (json)
{
try {
return _g(this, 'binding').napiPut(JSON.stringify(json));
} catch (err) {
return PutOutcome.IMPOSSIBLE;
}
}
/**
* <p>Shutdown NAPI.</p>
* <p>The NEA should call this function before exiting.</p>
* <b>Note:</b>
* Calling this function, followed by a second call to napiConfigD, may now work (consider it beta functionality).
*
* @return {void}
*/
napiTerminate ()
{
_g(this, 'binding').napiTerminate();
}
}
module.exports = NapiBinding; | outcome = _g(this, 'binding').napiGet(buf, len.deref(), len);
}
| conditional_block |
index.js | 'use strict';
const
FFI = require('ffi'),
path = require('path'),
ref = require('ref'),
/**
* @typedef LogLevel
* @type {object}
* @property {int} NONE No logging.
* @property {int} NORMAL Normal logging of important events like errors and warnings. The default log level.
* @property {int} INFO Logs significantly more information about the internals of NAPI.
* @property {int} DEBUG The log level that will likely be used when working with Nymi Support.
* @property {int} VERBOSE Logs pretty much everything down to the Bluetooth level.
* @constant
*/
LogLevel = {
NONE: 0,
NORMAL: 1,
INFO: 2,
DEBUG: 3,
VERBOSE: 4
},
/**
* @typedef ConfigOutcome
* @type {object}
* @property {int} OKAY Configured successfully.
* @property {int} INVALID_PROVISION_JSON Provision information provided is invalid (likely invalid JSON).
* @property {int} MISSING_NEA_NAME Provision information does not include neaName.
* @property {int} FAILED_TO_INIT Configuration infomation is okay, but NAPI was unable to start successfully.
* @property {int} ERROR An error occurred, likely an exception, possibly involving the parameters provided.
* @property {int} IMPOSSIBLE
* @constant
*/
ConfigOutcome = {
OKAY: 0,
INVALID_PROVISION_JSON: 1,
MISSING_NEA_NAME: 2,
FAILED_TO_INIT: 3,
ERROR: 4,
IMPOSSIBLE: 5
},
/**
* @typedef PutOutcome
* @type {object}
* @property {int} OKAY Sending JSON was successful.
* @property {int} NAPI_NOT_RUNNING NAPI is not running – either napiConfigure did not complete or napiTerminate was already called.
* @property {int} UNPARSEABLE_JSON The provided string is not parseable as JSON.
* @property {int} ERROR An error occurred, likely an exception.
* @property {int} IMPOSSIBLE
* @constant
*/
PutOutcome = {
OKAY: 0,
NAPI_NOT_RUNNING: 1,
UNPARSEABLE_JSON: 2,
ERROR: 3,
IMPOSSIBLE: 4
},
/**
* @typedef GetOutcome
* @type {object}
* @property {int} OKAY A JSON string has been returned.
* @property {int} NAPI_NOT_RUNNING NAPI is not running – either napiConfigure did not complete or napiTerminate was already called.
* @property {int} BUFFER_TOO_SMALL The provided char* buffer is not long enough; the length value will contain the minimum required size.
* @property {int} NAPI_TERMINATED Napi::terminate was called. This outcome will be returned once. Afterwards, the outcome is NAPI_NOT_RUNNING.
* @property {int} ERROR An error occurred, likely an exception.
* @property {int} IMPOSSIBLE
* @constant
*/
GetOutcome = {
OKAY: 0,
NAPI_NOT_RUNNING: 1,
BUFFER_TOO_SMALL: 2,
NAPI_TERMINATED: 3,
ERROR: 4,
IMPOSSIBLE: 5
},
/**
* @typedef TryGetOutcome
* @type {object}
* @property {int} OKAY A JSON string has been returned.
* @property {int} NOTHING There is no JSON available at the time of the call.
* @property {int} NAPI_NOT_RUNNING NAPI is not running – either napiConfigure did not complete or napiTerminate was already called.
* @property {int} BUFFER_TOO_SMALL The provided char* buffer is not long enough; the length value will contain the minimum required size.
* @property {int} NAPI_TERMINATED Napi::terminate was called. This outcome will be returned once. Afterwards, the outcome is NAPI_NOT_RUNNING.
* @property {int} ERROR An error occurred, likely an exception.
* @property {int} IMPOSSIBLE
* @constant
*/
TryGetOutcome = {
OKAY: 0,
NOTHING: 1,
NAPI_NOT_RUNNING: 2,
BUFFER_TOO_SMALL: 3,
NAPI_TERMINATED: 4,
ERROR: 5,
IMPOSSIBLE: 6
},
stringPtr = ref.refType('string'),
intPtr = ref.refType('int'),
NapiInterface = {
napiConfigure: ['int', ['string', 'string', 'string', 'int', 'int', 'string']],
napiGet: ['int', [stringPtr, 'int', intPtr]],
napiTryGet: ['int', [stringPtr, 'int', intPtr]],
napiPut: ['int', ['string']],
napiTerminate: ['void', []]
};
let
priv = new WeakMap(),
privates = {},
_s = (scope, key, value) => {privates[key] = value; priv.set(scope, privates)},
_g = (scope, key) => priv.get(scope)[key];
/**
* <p><b>Class NapiBinding</b></p>
*
* @class NapuBinding
*/
class NapiBinding
{
/**
* GetOutcome
*
* @static
* @return {GetOutcome}
*/
static get GetOutcome ()
{
return GetOutcome;
}
/**
* TryGetOutcome
*
* @static
* @return {TryGetOutcome}
*/
static get TryGetOutcome ()
{
return TryGetOutcome;
}
/**
* PutOutcome
*
* @static
* @return {PutOutcome}
*/
static get PutOutcome ()
{
return PutOutcome;
}
/**
* ConfigOutcome
*
* @static
* @return {ConfigOutcome}
*/
static get ConfigOutcome ()
{
| /**
* LogLevel
*
* @static
* @return {LogLevel}
*/
static get LogLevel ()
{
return LogLevel;
}
/**
* <p>Create bindings for the Nymi API</p>
*
* @constructor
* @param {boolean} [nymulator=false] TRUE create bindings for networked library, FALSE create bindings for native library.
*/
constructor (nymulator)
{
nymulator = nymulator || false;
let lib = process.platform === 'darwin' && nymulator ? './../bin/napi-net' : './../bin/napi';
_s(this, 'binding', new FFI.Library(path.resolve(__dirname, lib), NapiInterface));
}
/**
* <p>Configure and start NAPI.</p>
* <p>For most NEAs the default arguments are correct so the call would be similar to napiConfigure("root-directory-path");.
* The default host of "" is treated as "127.0.0.1". The default port of -1 will choose the port depending on platform (OS X or Windows) and libary (native or networked).
* The value of provisions should be the same as the last saved value.
* </p>
*
* @param {string} neaName Name of this NEA (used when provisioning). (6 to 18 characters)
* @param {string} logDirectory Path to a directory that will contain log files.
* @param {string} [provisions = '{}'] The provision data saved by previous runs of the NEA.
* @param {int} [logLevel=LogLevel.NORMAL] The log level to use (see LogLevel).
* @param {int} [port=-1] The default port for networked Nymi Bands (on Windows) or the Nymulator.
* @param {string} [host=''] The default host for networked Nymi Bands (on Windows) or the Nymulator.
* @return {ConfigOutcome}
*/
napiConfigure (neaName, logDirectory, provisions, logLevel, port, host)
{
neaName = String(neaName);
logDirectory = String(logDirectory);
provisions = String(provisions) || '{}';
logLevel = ~~logLevel || LogLevel.NORMAL;
port = ~~port || -1;
host = String(host) || '';
return _g(this, 'binding').napiConfigure(neaName, logDirectory, provisions, logLevel, port, host);
}
/**
* <p>Receive a JSON message from NAPI, blocks if nothing is available yet; standard usage.</p>
* <p>napiGet is a blocking call.
* If NAPI is not running, wait a short time and call napiGet again. No JSON messages are lost.
* </p>
* <b>This variant returns when:</b>
* <li>A message is available from NAPI (GetOutcome.OKAY)</li>
* <li>A message from NAPI is ready, but the provided buffer is too small (GetOutcome::BUFFER_TOO_SMALL)</li>
* <li>NAPI is not running (GetOutcome.NAPI_NOT_RUNNING)</li>
* <li>NAPI has terminated (GetOutcome.NAPI_TERMINATED)</li>
*
* @return {{outcome: GetOutcome, json: object}}
*/
napiGet ()
{
let outcome, buf, len,
json = null;
try {
buf = Buffer.alloc(4096);
len = ref.alloc('int');
buf.type = stringPtr;
outcome = _g(this, 'binding').napiGet(buf, buf.length, len);
if (outcome === NapiBinding.GetOutcome.BUFFER_TOO_SMALL) {
outcome = _g(this, 'binding').napiGet(buf, len.deref(), len);
}
if (outcome === NapiBinding.GetOutcome.OKAY) {
json = JSON.parse(buf.readCString(0));
}
} catch (err) {
outcome = NapiBinding.GetOutcome.ERROR;
}
return {outcome: outcome, json: json};
}
/**
* <p>Receive a JSON message from NAPI if one is available, non-blocking; standard usage.</p>
* <p>napiTryGet is a non-blocking call.
* If NAPI is not running, wait a short time and call napiTryGet again. No JSON messages are lost.
* </p>
* <b>This variant returns when:</b>
* <li>A message is available from NAPI (TryGetOutcome.OKAY)</li>
* <li>No message is available at the time of the call. (TryGetOutcome::NOTHING)</li>
* <li>A message from NAPI is ready, but the provided buffer is too small (TryGetOutcome::BUFFER_TOO_SMALL)</li>
* <li>NAPI is not running (TryGetOutcome.NAPI_NOT_RUNNING)</li>
* <li>NAPI has terminated (TryGetOutcome.NAPI_TERMINATED)</li>
*
* @return {{outcome: (TryGetOutcome), json: object}}
*/
napiTryGet ()
{
let outcome, buf, len,
json = null;
try {
buf = Buffer.alloc(4096);
len = ref.alloc('int');
buf.type = stringPtr;
outcome = _g(this, 'binding').napiTryGet(buf, buf.length, len);
if (outcome === NapiBinding.TryGetOutcome.BUFFER_TOO_SMALL) {
outcome = _g(this, 'binding').napiTryGet(buf, len.deref(), len);
}
if (outcome === NapiBinding.TryGetOutcome.OKAY) {
json = JSON.parse(buf.readCString(0));
}
} catch (err) {
outcome = NapiBinding.TryGetOutcome.ERROR;
}
return {outcome: outcome, json: json};
}
/**
* <p>Send a JSON message to NAPI.</p>
*
* @param {object} json Stringified JSON to send to NAPI.
* @return {PutOutcome}
*/
napiPut (json)
{
try {
return _g(this, 'binding').napiPut(JSON.stringify(json));
} catch (err) {
return PutOutcome.IMPOSSIBLE;
}
}
/**
* <p>Shutdown NAPI.</p>
* <p>The NEA should call this function before exiting.</p>
* <b>Note:</b>
* Calling this function, followed by a second call to napiConfigD, may now work (consider it beta functionality).
*
* @return {void}
*/
napiTerminate ()
{
_g(this, 'binding').napiTerminate();
}
}
module.exports = NapiBinding; | return ConfigOutcome;
}
| identifier_body |
run.py | #run.py
import hrr
import cleanup_lib.cleanup_utilities as cu
import cleanup_lib.learn_cleanup as lc
from stats.bootstrapci import bootstrapci
from ca.nengo.util.impl import NodeThreadPool
from optparse import OptionParser
import sys
import random
import ConfigParser
import numeric as np
import logging
def make_simple_test_schedule(learning, testing):
def f():
yield learning
yield testing
return f
def simple_noise(noise):
def f(input_vec):
v = input_vec + noise * hrr.HRR(D).v
v = v / np.sqrt(sum(v**2))
return v
return f
def hrr_noise(D, num):
noise_vocab = hrr.Vocabulary(D)
keys = [noise_vocab.parse(str(x)) for x in range(2*num+1)]
def f(input_vec):
input_vec = hrr.HRR(data=input_vec)
partner_key = random.choice(keys)
pair_keys = filter(lambda x: x != partner_key, keys)
pairs = random.sample(pair_keys, 2 * num)
p0 = (pairs[x] for x in range(0,len(pairs),2))
p1 = (pairs[x] for x in range(1,len(pairs),2))
S = map(lambda x, y: noise_vocab[x].convolve(noise_vocab[y]), p0, p1)
S = reduce(lambda x, y: x + y, S, noise_vocab[partner_key].convolve(input_vec))
S.normalize()
reconstruction = S.convolve(~noise_vocab[partner_key])
reconstruction.normalize()
return reconstruction.v
return f
def reduced_run(options):
radius = options.radius
options.cleanup_neurons = \
cu.minimum_neurons(options.Plo, options.Vlo,
options.threshold - options.testing_bias, options.D, (100, 5000))
options.threshold = (options.threshold, options.threshold)
options.max_rate = (options.max_rate, options.max_rate)
run(options)
def normal_run(options):
N = options.cleanup_neurons
D = options.D
P_hi = options.Phi
P_lo = options.Plo
V_hi = options.Vhi
V_lo = options.Vlo
_, threshold_lo = cu.minimum_threshold(P_lo, V_lo, N, D)
_, threshold_hi = cu.minimum_threshold(P_hi, V_hi, N, D)
options.radius = threshould_hi
options.max_rate = (100,200)
threshold_ratio = float(threshold_lo)/float(threshold_hi)
options.threshold = (threshold_ratio, threshold_ratio + 0.8 * (1 - threshold_ratio))
run(options)
def run(options):
logging.info("Threshold: " + str(options.threshold))
logging.info("Num cleanup neurons: " + str(options.cleanup_neurons))
logging.info("Radius: " + str(options.radius))
if options.noise_type == "hrr":
options.learning_noise = hrr_noise(options.D, options.learning_noise)
options.testing_noise = hrr_noise(options.D, options.testing_noise)
else:
options.learning_noise = simple_noise(options.learning_noise)
options.testing_noise = simple_noise(options.testing_noise)
options.schedule_func = make_simple_test_schedule(options.learningpres, options.testingpres)
options_dict = options.__dict__
network = lc.make_learnable_cleanup(**options_dict)
def parse_args():
parser = OptionParser()
parser.add_option("-N", "--numneurons", dest="cleanup_neurons", default=None, type="int",
help="Number of neurons in cleanup")
parser.add_option("-n", "--neuronsperdim", dest="neurons_per_dim", default=20, type="int",
help="Number of neurons per dimension for other ensembles")
parser.add_option("-D", "--dim", dest="D", default=16, type="int",
help="Dimension of the vectors that the cleanup operates on")
parser.add_option("-V", "--numvectors", dest="num_vecs", default=4, type="int",
help="Number of vectors that the cleanup will try to learn")
parser.add_option("--dt", default=0.001, type="float", help="Time step")
parser.add_option("--cleanup-pstc", dest='cleanup_pstc', default=0.02, type="float",
help="Time constant for post-synaptic current of cleanup neurons")
parser.add_option("-a", "--alpha", dest="learning_rate", default=5e-5, type="float", help="Learning rate")
parser.add_option("-L", "--triallength", dest="trial_length", default=100, type="int",
help="Length of each vector presentation, in timesteps")
parser.add_option("-P", "--learningpres", default=100, type="int",
help="Number of presentations during learning")
parser.add_option("-p", "--testingpres", default=20, type="int",
help="Number of presentations during testing")
parser.add_option("-T", "--learningnoise", dest="learning_noise", default=1, type="float",
help="Parameter for the noise during learning")
parser.add_option("-t", "--testingnoise", dest="testing_noise", default=1, type="float",
help="Parameter for the noise during testing")
parser.add_option("--noise-type", dest="noise_type", default="hrr",
help="Type of noise to use")
parser.add_option("-R", "--numruns", default=0, type="int",
help="Number of runs to do. We can reuse certain things between runs, \
so this speeds up the process of doing several runs at once")
parser.add_option("--control-bias", dest="user_control_bias", default=True,
help="Whether to use different biases during learning and testing")
parser.add_option("--control-learning", dest="user_control_learning", default=True,
help="Whether user controls learning schedule")
parser.add_option("-B", "--learningbias", dest="learning_bias", default=0.25, type="float",
help="Amount of bias during learning. Only has an effect if varthresh is True")
parser.add_option("-b", "--testingbias", dest="testing_bias", default=-0.1, type="float",
help="Amount of bias during testing. Only has an effect if varthresh is True")
parser.add_option("--Phi", default=.9, type="float", help="Probability for hi")
parser.add_option("--Plo", default=.9, type="float", help="Probability for low")
parser.add_option("--Vhi", default=10, type="int", help="Number of neurons for hi")
parser.add_option("--Vlo", default=50, type="int", help="Number of neurons for low")
parser.add_option("--threads", default=8, type="int",
help="Number of threads to use to run the simulation")
parser.add_option("--resultsfile", dest="results_file", default="results", help="Name of file to write results to")
parser.add_option("--logfile", default="log", help="Name of file to log to")
parser.add_option("-c", "--cleanlearning", dest="clean_learning", default=False,
help="Whether to set decoders using clean vectors (supervised) or not (unsupervised)")
parser.add_option("-I", "--neuralinput", dest="neural_input", default=False,
help="Whether the input should be passed through a neural population first")
parser.add_option("--replacecleanup", default=True,
help="Whether to generate a new cleanup neural population for each run.")
parser.add_option("--replacevectors", default=True,
help="Whether to generate new vocabulary vectors for each run.")
parser.add_option("--errorlearning", default=False,
help="Whether to use error for learning \
(alternative is to use the learning vector as is).")
parser.add_option("--dry-run", dest="dry_run", action='store_true', default=False,
help="Whether to exit right away (for testing purposes)")
parser.add_option("--reduced-mode", dest="reduced_mode", action='store_true', default=True,
help="In reduced mode, Vhi, Vlo, Phi, Plo all ignored. \
Uses max-firing-rate and radius")
parser.add_option("--max-rate", dest="max_rate", default=200, type="float",
help="Maximum firing rate of neurons")
parser.add_option("--radius", default=1.0, type="float",
help="Range of values neurons sensitive to. Only used in reduced mode.")
parser.add_option("--threshold", default=0.3, type="float",
help="Value for intercept of neural tuning curves. Only used in reduced mode")
(options, args) = parser.parse_args()
print "options: ", options
print "args: ", args
return (options, args)
def run_batch(network, options, args):
|
def dry_run(results_file):
logging.info("Dry run!")
config = ConfigParser.ConfigParser()
config.add_section('Error')
config.set('Error', 'mean', 0.0)
config.set('Error', 'var', 0.0)
f = open(results_file, 'w')
config.write(f)
f.close()
sys.exit()
def start():
(options, args) = parse_args()
logging.basicConfig(filename=options.logfile, filemode='w', level=logging.INFO)
logging.info("Parameters: " + str(options) + str(args))
command_line = 'cl' in args
if options.dry_run:
dry_run(options.results_file)
NodeThreadPool.setNumJavaThreads(options.threads)
if options.reduced_mode:
reduced_run(options)
else:
normal_run(options)
if __name__=="__main__":
start()
| run = 0
run_length = (options.learningpres + options.testingpres) * trial_length * options.dt
logging.info("Run length: %g" % (run_length))
controller = network._get_node('EC')
errors = []
while run < options.num_runs:
logging.info("Starting run: %d" % (run + 1))
network.run(run_length, options.dt)
network.reset()
if options.replacecleanup:
replace_cleanup(network, **options.__dict__)
if options.replacevectors:
controller.generate_vectors()
logging.info("Done run: %d" % (run + 1))
run += 1
errors.append(controller.latest_rmse)
logging.info("RMSE for run: %g" % (controller.latest_rmse))
if len(errors) > 0:
mean = np.mean
def var(x):
if len(x) > 1:
return float(sum((np.array(x) - mean(x))**2)) / float(len(x) - 1)
else:
return 0
ci = bootstrapci(errors, mean, n=999, p=.95)
config = ConfigParser.ConfigParser()
config.add_section('Options')
for attr in dir(options):
item = getattr(options, attr)
if not callable(item) and attr[0] != '_':
config.set('Options', attr, str(item))
config.add_section('Error')
config.set('Error', 'raw', str(errors))
config.set('Error', 'mean', str(mean(errors)))
config.set('Error', 'var', str(var(errors)))
config.set('Error', 'lowCI', str(ci[0]))
config.set('Error', 'hiCI', str(ci[1]))
f = open(options.results_file, 'w')
config.write(f)
f.close() | identifier_body |
run.py | #run.py
import hrr
import cleanup_lib.cleanup_utilities as cu
import cleanup_lib.learn_cleanup as lc
from stats.bootstrapci import bootstrapci
from ca.nengo.util.impl import NodeThreadPool
from optparse import OptionParser
import sys
import random
import ConfigParser
import numeric as np
import logging
def make_simple_test_schedule(learning, testing):
def | ():
yield learning
yield testing
return f
def simple_noise(noise):
def f(input_vec):
v = input_vec + noise * hrr.HRR(D).v
v = v / np.sqrt(sum(v**2))
return v
return f
def hrr_noise(D, num):
noise_vocab = hrr.Vocabulary(D)
keys = [noise_vocab.parse(str(x)) for x in range(2*num+1)]
def f(input_vec):
input_vec = hrr.HRR(data=input_vec)
partner_key = random.choice(keys)
pair_keys = filter(lambda x: x != partner_key, keys)
pairs = random.sample(pair_keys, 2 * num)
p0 = (pairs[x] for x in range(0,len(pairs),2))
p1 = (pairs[x] for x in range(1,len(pairs),2))
S = map(lambda x, y: noise_vocab[x].convolve(noise_vocab[y]), p0, p1)
S = reduce(lambda x, y: x + y, S, noise_vocab[partner_key].convolve(input_vec))
S.normalize()
reconstruction = S.convolve(~noise_vocab[partner_key])
reconstruction.normalize()
return reconstruction.v
return f
def reduced_run(options):
radius = options.radius
options.cleanup_neurons = \
cu.minimum_neurons(options.Plo, options.Vlo,
options.threshold - options.testing_bias, options.D, (100, 5000))
options.threshold = (options.threshold, options.threshold)
options.max_rate = (options.max_rate, options.max_rate)
run(options)
def normal_run(options):
N = options.cleanup_neurons
D = options.D
P_hi = options.Phi
P_lo = options.Plo
V_hi = options.Vhi
V_lo = options.Vlo
_, threshold_lo = cu.minimum_threshold(P_lo, V_lo, N, D)
_, threshold_hi = cu.minimum_threshold(P_hi, V_hi, N, D)
options.radius = threshould_hi
options.max_rate = (100,200)
threshold_ratio = float(threshold_lo)/float(threshold_hi)
options.threshold = (threshold_ratio, threshold_ratio + 0.8 * (1 - threshold_ratio))
run(options)
def run(options):
logging.info("Threshold: " + str(options.threshold))
logging.info("Num cleanup neurons: " + str(options.cleanup_neurons))
logging.info("Radius: " + str(options.radius))
if options.noise_type == "hrr":
options.learning_noise = hrr_noise(options.D, options.learning_noise)
options.testing_noise = hrr_noise(options.D, options.testing_noise)
else:
options.learning_noise = simple_noise(options.learning_noise)
options.testing_noise = simple_noise(options.testing_noise)
options.schedule_func = make_simple_test_schedule(options.learningpres, options.testingpres)
options_dict = options.__dict__
network = lc.make_learnable_cleanup(**options_dict)
def parse_args():
parser = OptionParser()
parser.add_option("-N", "--numneurons", dest="cleanup_neurons", default=None, type="int",
help="Number of neurons in cleanup")
parser.add_option("-n", "--neuronsperdim", dest="neurons_per_dim", default=20, type="int",
help="Number of neurons per dimension for other ensembles")
parser.add_option("-D", "--dim", dest="D", default=16, type="int",
help="Dimension of the vectors that the cleanup operates on")
parser.add_option("-V", "--numvectors", dest="num_vecs", default=4, type="int",
help="Number of vectors that the cleanup will try to learn")
parser.add_option("--dt", default=0.001, type="float", help="Time step")
parser.add_option("--cleanup-pstc", dest='cleanup_pstc', default=0.02, type="float",
help="Time constant for post-synaptic current of cleanup neurons")
parser.add_option("-a", "--alpha", dest="learning_rate", default=5e-5, type="float", help="Learning rate")
parser.add_option("-L", "--triallength", dest="trial_length", default=100, type="int",
help="Length of each vector presentation, in timesteps")
parser.add_option("-P", "--learningpres", default=100, type="int",
help="Number of presentations during learning")
parser.add_option("-p", "--testingpres", default=20, type="int",
help="Number of presentations during testing")
parser.add_option("-T", "--learningnoise", dest="learning_noise", default=1, type="float",
help="Parameter for the noise during learning")
parser.add_option("-t", "--testingnoise", dest="testing_noise", default=1, type="float",
help="Parameter for the noise during testing")
parser.add_option("--noise-type", dest="noise_type", default="hrr",
help="Type of noise to use")
parser.add_option("-R", "--numruns", default=0, type="int",
help="Number of runs to do. We can reuse certain things between runs, \
so this speeds up the process of doing several runs at once")
parser.add_option("--control-bias", dest="user_control_bias", default=True,
help="Whether to use different biases during learning and testing")
parser.add_option("--control-learning", dest="user_control_learning", default=True,
help="Whether user controls learning schedule")
parser.add_option("-B", "--learningbias", dest="learning_bias", default=0.25, type="float",
help="Amount of bias during learning. Only has an effect if varthresh is True")
parser.add_option("-b", "--testingbias", dest="testing_bias", default=-0.1, type="float",
help="Amount of bias during testing. Only has an effect if varthresh is True")
parser.add_option("--Phi", default=.9, type="float", help="Probability for hi")
parser.add_option("--Plo", default=.9, type="float", help="Probability for low")
parser.add_option("--Vhi", default=10, type="int", help="Number of neurons for hi")
parser.add_option("--Vlo", default=50, type="int", help="Number of neurons for low")
parser.add_option("--threads", default=8, type="int",
help="Number of threads to use to run the simulation")
parser.add_option("--resultsfile", dest="results_file", default="results", help="Name of file to write results to")
parser.add_option("--logfile", default="log", help="Name of file to log to")
parser.add_option("-c", "--cleanlearning", dest="clean_learning", default=False,
help="Whether to set decoders using clean vectors (supervised) or not (unsupervised)")
parser.add_option("-I", "--neuralinput", dest="neural_input", default=False,
help="Whether the input should be passed through a neural population first")
parser.add_option("--replacecleanup", default=True,
help="Whether to generate a new cleanup neural population for each run.")
parser.add_option("--replacevectors", default=True,
help="Whether to generate new vocabulary vectors for each run.")
parser.add_option("--errorlearning", default=False,
help="Whether to use error for learning \
(alternative is to use the learning vector as is).")
parser.add_option("--dry-run", dest="dry_run", action='store_true', default=False,
help="Whether to exit right away (for testing purposes)")
parser.add_option("--reduced-mode", dest="reduced_mode", action='store_true', default=True,
help="In reduced mode, Vhi, Vlo, Phi, Plo all ignored. \
Uses max-firing-rate and radius")
parser.add_option("--max-rate", dest="max_rate", default=200, type="float",
help="Maximum firing rate of neurons")
parser.add_option("--radius", default=1.0, type="float",
help="Range of values neurons sensitive to. Only used in reduced mode.")
parser.add_option("--threshold", default=0.3, type="float",
help="Value for intercept of neural tuning curves. Only used in reduced mode")
(options, args) = parser.parse_args()
print "options: ", options
print "args: ", args
return (options, args)
def run_batch(network, options, args):
run = 0
run_length = (options.learningpres + options.testingpres) * trial_length * options.dt
logging.info("Run length: %g" % (run_length))
controller = network._get_node('EC')
errors = []
while run < options.num_runs:
logging.info("Starting run: %d" % (run + 1))
network.run(run_length, options.dt)
network.reset()
if options.replacecleanup:
replace_cleanup(network, **options.__dict__)
if options.replacevectors:
controller.generate_vectors()
logging.info("Done run: %d" % (run + 1))
run += 1
errors.append(controller.latest_rmse)
logging.info("RMSE for run: %g" % (controller.latest_rmse))
if len(errors) > 0:
mean = np.mean
def var(x):
if len(x) > 1:
return float(sum((np.array(x) - mean(x))**2)) / float(len(x) - 1)
else:
return 0
ci = bootstrapci(errors, mean, n=999, p=.95)
config = ConfigParser.ConfigParser()
config.add_section('Options')
for attr in dir(options):
item = getattr(options, attr)
if not callable(item) and attr[0] != '_':
config.set('Options', attr, str(item))
config.add_section('Error')
config.set('Error', 'raw', str(errors))
config.set('Error', 'mean', str(mean(errors)))
config.set('Error', 'var', str(var(errors)))
config.set('Error', 'lowCI', str(ci[0]))
config.set('Error', 'hiCI', str(ci[1]))
f = open(options.results_file, 'w')
config.write(f)
f.close()
def dry_run(results_file):
logging.info("Dry run!")
config = ConfigParser.ConfigParser()
config.add_section('Error')
config.set('Error', 'mean', 0.0)
config.set('Error', 'var', 0.0)
f = open(results_file, 'w')
config.write(f)
f.close()
sys.exit()
def start():
(options, args) = parse_args()
logging.basicConfig(filename=options.logfile, filemode='w', level=logging.INFO)
logging.info("Parameters: " + str(options) + str(args))
command_line = 'cl' in args
if options.dry_run:
dry_run(options.results_file)
NodeThreadPool.setNumJavaThreads(options.threads)
if options.reduced_mode:
reduced_run(options)
else:
normal_run(options)
if __name__=="__main__":
start()
| f | identifier_name |
run.py | #run.py
import hrr
import cleanup_lib.cleanup_utilities as cu
import cleanup_lib.learn_cleanup as lc
from stats.bootstrapci import bootstrapci
from ca.nengo.util.impl import NodeThreadPool
from optparse import OptionParser
import sys
import random
import ConfigParser
import numeric as np
import logging
def make_simple_test_schedule(learning, testing):
def f():
yield learning
yield testing
return f
def simple_noise(noise):
def f(input_vec):
v = input_vec + noise * hrr.HRR(D).v
v = v / np.sqrt(sum(v**2))
return v
return f
def hrr_noise(D, num):
noise_vocab = hrr.Vocabulary(D)
keys = [noise_vocab.parse(str(x)) for x in range(2*num+1)]
def f(input_vec):
input_vec = hrr.HRR(data=input_vec)
partner_key = random.choice(keys)
pair_keys = filter(lambda x: x != partner_key, keys)
pairs = random.sample(pair_keys, 2 * num)
p0 = (pairs[x] for x in range(0,len(pairs),2))
p1 = (pairs[x] for x in range(1,len(pairs),2))
S = map(lambda x, y: noise_vocab[x].convolve(noise_vocab[y]), p0, p1)
S = reduce(lambda x, y: x + y, S, noise_vocab[partner_key].convolve(input_vec))
S.normalize()
reconstruction = S.convolve(~noise_vocab[partner_key])
reconstruction.normalize()
return reconstruction.v
return f
def reduced_run(options):
radius = options.radius
options.cleanup_neurons = \
cu.minimum_neurons(options.Plo, options.Vlo,
options.threshold - options.testing_bias, options.D, (100, 5000))
options.threshold = (options.threshold, options.threshold)
options.max_rate = (options.max_rate, options.max_rate)
run(options)
def normal_run(options):
N = options.cleanup_neurons
D = options.D
P_hi = options.Phi
P_lo = options.Plo
V_hi = options.Vhi
V_lo = options.Vlo
_, threshold_lo = cu.minimum_threshold(P_lo, V_lo, N, D)
_, threshold_hi = cu.minimum_threshold(P_hi, V_hi, N, D)
options.radius = threshould_hi
options.max_rate = (100,200)
threshold_ratio = float(threshold_lo)/float(threshold_hi)
options.threshold = (threshold_ratio, threshold_ratio + 0.8 * (1 - threshold_ratio))
run(options)
def run(options):
logging.info("Threshold: " + str(options.threshold))
logging.info("Num cleanup neurons: " + str(options.cleanup_neurons))
logging.info("Radius: " + str(options.radius))
if options.noise_type == "hrr":
options.learning_noise = hrr_noise(options.D, options.learning_noise)
options.testing_noise = hrr_noise(options.D, options.testing_noise)
else:
options.learning_noise = simple_noise(options.learning_noise)
options.testing_noise = simple_noise(options.testing_noise)
options.schedule_func = make_simple_test_schedule(options.learningpres, options.testingpres)
options_dict = options.__dict__
network = lc.make_learnable_cleanup(**options_dict)
def parse_args():
parser = OptionParser()
parser.add_option("-N", "--numneurons", dest="cleanup_neurons", default=None, type="int",
help="Number of neurons in cleanup")
parser.add_option("-n", "--neuronsperdim", dest="neurons_per_dim", default=20, type="int", | help="Dimension of the vectors that the cleanup operates on")
parser.add_option("-V", "--numvectors", dest="num_vecs", default=4, type="int",
help="Number of vectors that the cleanup will try to learn")
parser.add_option("--dt", default=0.001, type="float", help="Time step")
parser.add_option("--cleanup-pstc", dest='cleanup_pstc', default=0.02, type="float",
help="Time constant for post-synaptic current of cleanup neurons")
parser.add_option("-a", "--alpha", dest="learning_rate", default=5e-5, type="float", help="Learning rate")
parser.add_option("-L", "--triallength", dest="trial_length", default=100, type="int",
help="Length of each vector presentation, in timesteps")
parser.add_option("-P", "--learningpres", default=100, type="int",
help="Number of presentations during learning")
parser.add_option("-p", "--testingpres", default=20, type="int",
help="Number of presentations during testing")
parser.add_option("-T", "--learningnoise", dest="learning_noise", default=1, type="float",
help="Parameter for the noise during learning")
parser.add_option("-t", "--testingnoise", dest="testing_noise", default=1, type="float",
help="Parameter for the noise during testing")
parser.add_option("--noise-type", dest="noise_type", default="hrr",
help="Type of noise to use")
parser.add_option("-R", "--numruns", default=0, type="int",
help="Number of runs to do. We can reuse certain things between runs, \
so this speeds up the process of doing several runs at once")
parser.add_option("--control-bias", dest="user_control_bias", default=True,
help="Whether to use different biases during learning and testing")
parser.add_option("--control-learning", dest="user_control_learning", default=True,
help="Whether user controls learning schedule")
parser.add_option("-B", "--learningbias", dest="learning_bias", default=0.25, type="float",
help="Amount of bias during learning. Only has an effect if varthresh is True")
parser.add_option("-b", "--testingbias", dest="testing_bias", default=-0.1, type="float",
help="Amount of bias during testing. Only has an effect if varthresh is True")
parser.add_option("--Phi", default=.9, type="float", help="Probability for hi")
parser.add_option("--Plo", default=.9, type="float", help="Probability for low")
parser.add_option("--Vhi", default=10, type="int", help="Number of neurons for hi")
parser.add_option("--Vlo", default=50, type="int", help="Number of neurons for low")
parser.add_option("--threads", default=8, type="int",
help="Number of threads to use to run the simulation")
parser.add_option("--resultsfile", dest="results_file", default="results", help="Name of file to write results to")
parser.add_option("--logfile", default="log", help="Name of file to log to")
parser.add_option("-c", "--cleanlearning", dest="clean_learning", default=False,
help="Whether to set decoders using clean vectors (supervised) or not (unsupervised)")
parser.add_option("-I", "--neuralinput", dest="neural_input", default=False,
help="Whether the input should be passed through a neural population first")
parser.add_option("--replacecleanup", default=True,
help="Whether to generate a new cleanup neural population for each run.")
parser.add_option("--replacevectors", default=True,
help="Whether to generate new vocabulary vectors for each run.")
parser.add_option("--errorlearning", default=False,
help="Whether to use error for learning \
(alternative is to use the learning vector as is).")
parser.add_option("--dry-run", dest="dry_run", action='store_true', default=False,
help="Whether to exit right away (for testing purposes)")
parser.add_option("--reduced-mode", dest="reduced_mode", action='store_true', default=True,
help="In reduced mode, Vhi, Vlo, Phi, Plo all ignored. \
Uses max-firing-rate and radius")
parser.add_option("--max-rate", dest="max_rate", default=200, type="float",
help="Maximum firing rate of neurons")
parser.add_option("--radius", default=1.0, type="float",
help="Range of values neurons sensitive to. Only used in reduced mode.")
parser.add_option("--threshold", default=0.3, type="float",
help="Value for intercept of neural tuning curves. Only used in reduced mode")
(options, args) = parser.parse_args()
print "options: ", options
print "args: ", args
return (options, args)
def run_batch(network, options, args):
run = 0
run_length = (options.learningpres + options.testingpres) * trial_length * options.dt
logging.info("Run length: %g" % (run_length))
controller = network._get_node('EC')
errors = []
while run < options.num_runs:
logging.info("Starting run: %d" % (run + 1))
network.run(run_length, options.dt)
network.reset()
if options.replacecleanup:
replace_cleanup(network, **options.__dict__)
if options.replacevectors:
controller.generate_vectors()
logging.info("Done run: %d" % (run + 1))
run += 1
errors.append(controller.latest_rmse)
logging.info("RMSE for run: %g" % (controller.latest_rmse))
if len(errors) > 0:
mean = np.mean
def var(x):
if len(x) > 1:
return float(sum((np.array(x) - mean(x))**2)) / float(len(x) - 1)
else:
return 0
ci = bootstrapci(errors, mean, n=999, p=.95)
config = ConfigParser.ConfigParser()
config.add_section('Options')
for attr in dir(options):
item = getattr(options, attr)
if not callable(item) and attr[0] != '_':
config.set('Options', attr, str(item))
config.add_section('Error')
config.set('Error', 'raw', str(errors))
config.set('Error', 'mean', str(mean(errors)))
config.set('Error', 'var', str(var(errors)))
config.set('Error', 'lowCI', str(ci[0]))
config.set('Error', 'hiCI', str(ci[1]))
f = open(options.results_file, 'w')
config.write(f)
f.close()
def dry_run(results_file):
logging.info("Dry run!")
config = ConfigParser.ConfigParser()
config.add_section('Error')
config.set('Error', 'mean', 0.0)
config.set('Error', 'var', 0.0)
f = open(results_file, 'w')
config.write(f)
f.close()
sys.exit()
def start():
(options, args) = parse_args()
logging.basicConfig(filename=options.logfile, filemode='w', level=logging.INFO)
logging.info("Parameters: " + str(options) + str(args))
command_line = 'cl' in args
if options.dry_run:
dry_run(options.results_file)
NodeThreadPool.setNumJavaThreads(options.threads)
if options.reduced_mode:
reduced_run(options)
else:
normal_run(options)
if __name__=="__main__":
start() | help="Number of neurons per dimension for other ensembles")
parser.add_option("-D", "--dim", dest="D", default=16, type="int", | random_line_split |
run.py | #run.py
import hrr
import cleanup_lib.cleanup_utilities as cu
import cleanup_lib.learn_cleanup as lc
from stats.bootstrapci import bootstrapci
from ca.nengo.util.impl import NodeThreadPool
from optparse import OptionParser
import sys
import random
import ConfigParser
import numeric as np
import logging
def make_simple_test_schedule(learning, testing):
def f():
yield learning
yield testing
return f
def simple_noise(noise):
def f(input_vec):
v = input_vec + noise * hrr.HRR(D).v
v = v / np.sqrt(sum(v**2))
return v
return f
def hrr_noise(D, num):
noise_vocab = hrr.Vocabulary(D)
keys = [noise_vocab.parse(str(x)) for x in range(2*num+1)]
def f(input_vec):
input_vec = hrr.HRR(data=input_vec)
partner_key = random.choice(keys)
pair_keys = filter(lambda x: x != partner_key, keys)
pairs = random.sample(pair_keys, 2 * num)
p0 = (pairs[x] for x in range(0,len(pairs),2))
p1 = (pairs[x] for x in range(1,len(pairs),2))
S = map(lambda x, y: noise_vocab[x].convolve(noise_vocab[y]), p0, p1)
S = reduce(lambda x, y: x + y, S, noise_vocab[partner_key].convolve(input_vec))
S.normalize()
reconstruction = S.convolve(~noise_vocab[partner_key])
reconstruction.normalize()
return reconstruction.v
return f
def reduced_run(options):
radius = options.radius
options.cleanup_neurons = \
cu.minimum_neurons(options.Plo, options.Vlo,
options.threshold - options.testing_bias, options.D, (100, 5000))
options.threshold = (options.threshold, options.threshold)
options.max_rate = (options.max_rate, options.max_rate)
run(options)
def normal_run(options):
N = options.cleanup_neurons
D = options.D
P_hi = options.Phi
P_lo = options.Plo
V_hi = options.Vhi
V_lo = options.Vlo
_, threshold_lo = cu.minimum_threshold(P_lo, V_lo, N, D)
_, threshold_hi = cu.minimum_threshold(P_hi, V_hi, N, D)
options.radius = threshould_hi
options.max_rate = (100,200)
threshold_ratio = float(threshold_lo)/float(threshold_hi)
options.threshold = (threshold_ratio, threshold_ratio + 0.8 * (1 - threshold_ratio))
run(options)
def run(options):
logging.info("Threshold: " + str(options.threshold))
logging.info("Num cleanup neurons: " + str(options.cleanup_neurons))
logging.info("Radius: " + str(options.radius))
if options.noise_type == "hrr":
|
else:
options.learning_noise = simple_noise(options.learning_noise)
options.testing_noise = simple_noise(options.testing_noise)
options.schedule_func = make_simple_test_schedule(options.learningpres, options.testingpres)
options_dict = options.__dict__
network = lc.make_learnable_cleanup(**options_dict)
def parse_args():
parser = OptionParser()
parser.add_option("-N", "--numneurons", dest="cleanup_neurons", default=None, type="int",
help="Number of neurons in cleanup")
parser.add_option("-n", "--neuronsperdim", dest="neurons_per_dim", default=20, type="int",
help="Number of neurons per dimension for other ensembles")
parser.add_option("-D", "--dim", dest="D", default=16, type="int",
help="Dimension of the vectors that the cleanup operates on")
parser.add_option("-V", "--numvectors", dest="num_vecs", default=4, type="int",
help="Number of vectors that the cleanup will try to learn")
parser.add_option("--dt", default=0.001, type="float", help="Time step")
parser.add_option("--cleanup-pstc", dest='cleanup_pstc', default=0.02, type="float",
help="Time constant for post-synaptic current of cleanup neurons")
parser.add_option("-a", "--alpha", dest="learning_rate", default=5e-5, type="float", help="Learning rate")
parser.add_option("-L", "--triallength", dest="trial_length", default=100, type="int",
help="Length of each vector presentation, in timesteps")
parser.add_option("-P", "--learningpres", default=100, type="int",
help="Number of presentations during learning")
parser.add_option("-p", "--testingpres", default=20, type="int",
help="Number of presentations during testing")
parser.add_option("-T", "--learningnoise", dest="learning_noise", default=1, type="float",
help="Parameter for the noise during learning")
parser.add_option("-t", "--testingnoise", dest="testing_noise", default=1, type="float",
help="Parameter for the noise during testing")
parser.add_option("--noise-type", dest="noise_type", default="hrr",
help="Type of noise to use")
parser.add_option("-R", "--numruns", default=0, type="int",
help="Number of runs to do. We can reuse certain things between runs, \
so this speeds up the process of doing several runs at once")
parser.add_option("--control-bias", dest="user_control_bias", default=True,
help="Whether to use different biases during learning and testing")
parser.add_option("--control-learning", dest="user_control_learning", default=True,
help="Whether user controls learning schedule")
parser.add_option("-B", "--learningbias", dest="learning_bias", default=0.25, type="float",
help="Amount of bias during learning. Only has an effect if varthresh is True")
parser.add_option("-b", "--testingbias", dest="testing_bias", default=-0.1, type="float",
help="Amount of bias during testing. Only has an effect if varthresh is True")
parser.add_option("--Phi", default=.9, type="float", help="Probability for hi")
parser.add_option("--Plo", default=.9, type="float", help="Probability for low")
parser.add_option("--Vhi", default=10, type="int", help="Number of neurons for hi")
parser.add_option("--Vlo", default=50, type="int", help="Number of neurons for low")
parser.add_option("--threads", default=8, type="int",
help="Number of threads to use to run the simulation")
parser.add_option("--resultsfile", dest="results_file", default="results", help="Name of file to write results to")
parser.add_option("--logfile", default="log", help="Name of file to log to")
parser.add_option("-c", "--cleanlearning", dest="clean_learning", default=False,
help="Whether to set decoders using clean vectors (supervised) or not (unsupervised)")
parser.add_option("-I", "--neuralinput", dest="neural_input", default=False,
help="Whether the input should be passed through a neural population first")
parser.add_option("--replacecleanup", default=True,
help="Whether to generate a new cleanup neural population for each run.")
parser.add_option("--replacevectors", default=True,
help="Whether to generate new vocabulary vectors for each run.")
parser.add_option("--errorlearning", default=False,
help="Whether to use error for learning \
(alternative is to use the learning vector as is).")
parser.add_option("--dry-run", dest="dry_run", action='store_true', default=False,
help="Whether to exit right away (for testing purposes)")
parser.add_option("--reduced-mode", dest="reduced_mode", action='store_true', default=True,
help="In reduced mode, Vhi, Vlo, Phi, Plo all ignored. \
Uses max-firing-rate and radius")
parser.add_option("--max-rate", dest="max_rate", default=200, type="float",
help="Maximum firing rate of neurons")
parser.add_option("--radius", default=1.0, type="float",
help="Range of values neurons sensitive to. Only used in reduced mode.")
parser.add_option("--threshold", default=0.3, type="float",
help="Value for intercept of neural tuning curves. Only used in reduced mode")
(options, args) = parser.parse_args()
print "options: ", options
print "args: ", args
return (options, args)
def run_batch(network, options, args):
run = 0
run_length = (options.learningpres + options.testingpres) * trial_length * options.dt
logging.info("Run length: %g" % (run_length))
controller = network._get_node('EC')
errors = []
while run < options.num_runs:
logging.info("Starting run: %d" % (run + 1))
network.run(run_length, options.dt)
network.reset()
if options.replacecleanup:
replace_cleanup(network, **options.__dict__)
if options.replacevectors:
controller.generate_vectors()
logging.info("Done run: %d" % (run + 1))
run += 1
errors.append(controller.latest_rmse)
logging.info("RMSE for run: %g" % (controller.latest_rmse))
if len(errors) > 0:
mean = np.mean
def var(x):
if len(x) > 1:
return float(sum((np.array(x) - mean(x))**2)) / float(len(x) - 1)
else:
return 0
ci = bootstrapci(errors, mean, n=999, p=.95)
config = ConfigParser.ConfigParser()
config.add_section('Options')
for attr in dir(options):
item = getattr(options, attr)
if not callable(item) and attr[0] != '_':
config.set('Options', attr, str(item))
config.add_section('Error')
config.set('Error', 'raw', str(errors))
config.set('Error', 'mean', str(mean(errors)))
config.set('Error', 'var', str(var(errors)))
config.set('Error', 'lowCI', str(ci[0]))
config.set('Error', 'hiCI', str(ci[1]))
f = open(options.results_file, 'w')
config.write(f)
f.close()
def dry_run(results_file):
logging.info("Dry run!")
config = ConfigParser.ConfigParser()
config.add_section('Error')
config.set('Error', 'mean', 0.0)
config.set('Error', 'var', 0.0)
f = open(results_file, 'w')
config.write(f)
f.close()
sys.exit()
def start():
(options, args) = parse_args()
logging.basicConfig(filename=options.logfile, filemode='w', level=logging.INFO)
logging.info("Parameters: " + str(options) + str(args))
command_line = 'cl' in args
if options.dry_run:
dry_run(options.results_file)
NodeThreadPool.setNumJavaThreads(options.threads)
if options.reduced_mode:
reduced_run(options)
else:
normal_run(options)
if __name__=="__main__":
start()
| options.learning_noise = hrr_noise(options.D, options.learning_noise)
options.testing_noise = hrr_noise(options.D, options.testing_noise) | conditional_block |
tab_switch_cuj.go | // Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package tabswitchcuj contains the test code for TabSwitchCUJ. The test is
// extracted into this package to be shared between TabSwitchCUJRecorder and
// TabSwitchCUJ.
//
// Steps to update the test:
// 1. Make changes in this package.
// 2. "tast run $IP ui.TabSwitchCUJRecorder" to record the contents.
// Look for the recorded wpr archive in /tmp/tab_switch_cuj.wprgo.
// 3. Update the recorded wpr archive to cloud storage under
// gs://chromiumos-test-assets-public/tast/cros/ui/
// It is recommended to add a date suffix to make it easier to change.
// 4. Update "tab_switch_cuj.wprgo.external" file under ui/data.
// 5. "tast run $IP ui.TabSwitchCUJ" locally to make sure tests works
// with the new recorded contents.
// 6. Submit the changes here with updated external data reference.
package tabswitchcuj
import (
"context"
"time"
"chromiumos/tast/common/action"
"chromiumos/tast/common/perf"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
sim "chromiumos/tast/local/chrome/cuj/inputsimulations"
"chromiumos/tast/local/chrome/display"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/event"
"chromiumos/tast/local/chrome/uiauto/mouse"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/webutil"
"chromiumos/tast/local/input"
"chromiumos/tast/local/ui/cujrecorder"
"chromiumos/tast/testing"
)
const (
// WPRArchiveName is used as the external file name of the wpr archive for
// TabSwitchCuj and as the output filename under "/tmp" for
// TabSwitchCujRecorder.
WPRArchiveName = "tab_switch_cuj.wprgo"
)
// TabSwitchParam holds parameters of tab switch cuj test variations.
type TabSwitchParam struct {
BrowserType browser.Type // Chrome type.
}
// tabSwitchVariables holds all the necessary variables used by the test.
type tabSwitchVariables struct {
param TabSwitchParam // Test Parameters
webPages []webPage // List of sites to visit
cr *chrome.Chrome
br *browser.Browser
closeBrowser func(context.Context) error
tconn *chrome.TestConn
bTconn *chrome.TestConn
recorder *cujrecorder.Recorder
}
// webPage holds the info used to visit new sites in the test.
type webPage struct {
name string // Display Name of the Website
startURL string // Base URL to the Website
urlPattern string // RegExp Pattern to Open Relevant Links on the Website
}
// coreTestDuration is a minimum duration for the core part of the test.
// The actual test duration could be longer because of various setup.
const coreTestDuration = 10 * time.Minute
func runSetup(ctx context.Context, s *testing.State) (*tabSwitchVariables, error) {
vars := tabSwitchVariables{
param: s.Param().(TabSwitchParam),
webPages: getTestWebpages(),
cr: s.FixtValue().(chrome.HasChrome).Chrome(),
}
var err error
vars.br, vars.closeBrowser, err = browserfixt.SetUp(ctx, vars.cr, vars.param.BrowserType)
if err != nil |
vars.bTconn, err = vars.br.TestAPIConn(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get browser TestAPIConn")
}
vars.recorder, err = cujrecorder.NewRecorder(ctx, vars.cr, vars.bTconn, nil, cujrecorder.RecorderOptions{})
if err != nil {
return nil, errors.Wrap(err, "failed to create a recorder")
}
metricsSuccessfullyAdded := false
defer func(ctx context.Context) {
if metricsSuccessfullyAdded {
return
}
vars.closeBrowser(ctx)
vars.recorder.Close(ctx)
}(ctx)
vars.tconn, err = vars.cr.TestAPIConn(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get ash-chrome test connection")
}
if err := vars.recorder.AddCommonMetrics(vars.tconn, vars.bTconn); err != nil {
s.Fatal("Failed to add common metrics to the recorder: ", err)
}
vars.recorder.EnableTracing(s.OutDir(), s.DataPath(cujrecorder.SystemTraceConfigFile))
if _, ok := s.Var("record"); ok {
if err := vars.recorder.AddScreenRecorder(ctx, vars.tconn, s.TestName()); err != nil {
s.Fatal("Failed to add screen recorder: ", err)
}
}
// Add an empty screenshot recorder.
if err := vars.recorder.AddScreenshotRecorder(ctx, 0, 0); err != nil {
s.Log("Failed to add screenshot recorder: ", err)
}
metricsSuccessfullyAdded = true
return &vars, nil
}
func getTestWebpages() []webPage {
CNN := webPage{
name: "CNN",
startURL: "https://cnn.com",
urlPattern: `^.*://www.cnn.com/\d{4}/\d{2}/\d{2}/`,
}
Reddit := webPage{
name: "Reddit",
startURL: "https://reddit.com",
urlPattern: `^.*://www.reddit.com/r/[^/]+/comments/[^/]+/`,
}
return []webPage{CNN, Reddit}
}
func muteDevice(ctx context.Context, s *testing.State) error {
// The custom variable for the developer to mute the device before the test,
// so it doesn't make any noise when some of the visited pages play video.
if _, ok := s.Var("mute"); !ok {
return nil
}
kw, err := input.Keyboard(ctx)
if err != nil {
return errors.Wrap(err, "failed to open the keyboard")
}
defer kw.Close()
topRow, err := input.KeyboardTopRowLayout(ctx, kw)
if err != nil {
return errors.Wrap(err, "failed to obtain the top-row layout")
}
if err = kw.Accel(ctx, topRow.VolumeMute); err != nil {
return errors.Wrap(err, "failed to press mute key")
}
return nil
}
// findAnchorURLs returns the unique URLs of the anchors, which matches the pattern.
// If it finds more than limit, returns the first limit elements.
func findAnchorURLs(ctx context.Context, c *chrome.Conn, pattern string, limit int) ([]string, error) {
var urls []string
if err := c.Call(ctx, &urls, `(pattern, limit) => {
const anchors = [...document.getElementsByTagName('A')];
const founds = new Set();
const results = [];
const regexp = new RegExp(pattern);
for (let i = 0; i < anchors.length && results.length < limit; i++) {
const href = new URL(anchors[i].href).toString();
if (founds.has(href)) {
continue;
}
founds.add(href);
if (regexp.test(href)) {
results.push(href);
}
}
return results;
}`, pattern, limit); err != nil {
return nil, err
}
if len(urls) == 0 {
return nil, errors.New("no urls found")
}
return urls, nil
}
func waitUntilAllTabsLoaded(ctx context.Context, tconn *chrome.TestConn, timeout time.Duration) error {
query := map[string]interface{}{
"status": "loading",
"currentWindow": true,
}
return testing.Poll(ctx, func(ctx context.Context) error {
var tabs []map[string]interface{}
if err := tconn.Call(ctx, &tabs, `tast.promisify(chrome.tabs.query)`, query); err != nil {
return testing.PollBreak(err)
}
if len(tabs) != 0 {
return errors.Errorf("still %d tabs are loading", len(tabs))
}
return nil
}, &testing.PollOptions{Timeout: timeout})
}
func retrieveAllTabs(ctx context.Context, tconn *chrome.TestConn, timeout time.Duration) ([]map[string]interface{}, error) {
emptyQuery := map[string]interface{}{}
// Get all tabs
var tabs []map[string]interface{}
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
err := tconn.Call(ctx, &tabs, `tast.promisify(chrome.tabs.query)`, emptyQuery)
return tabs, err
}
func focusTab(ctx context.Context, tconn *chrome.TestConn, tabs *[]map[string]interface{}, tabIndexWithinWindow int, timeout time.Duration) error {
// Define parameters for API calls
activateTabProperties := map[string]interface{}{
"active": true,
}
// Find id of tab with positional index.
tabID := int((*tabs)[tabIndexWithinWindow]["id"].(float64))
// Switch to this tab as the active window
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return tconn.Call(ctx, nil, `tast.promisify(chrome.tabs.update)`, tabID, activateTabProperties)
}
func testBody(ctx context.Context, test *tabSwitchVariables) error {
const (
numPages = 7
tabSwitchTimeout = 20 * time.Second
)
info, err := display.GetPrimaryInfo(ctx, test.tconn)
if err != nil {
return errors.Wrap(err, "failed to get the primary display info")
}
kw, err := input.Keyboard(ctx)
if err != nil {
return errors.Wrap(err, "failed to open the keyboard")
}
defer kw.Close()
// Create a virtual mouse.
mw, err := input.Mouse(ctx)
if err != nil {
return errors.Wrap(err, "failed to create a mouse")
}
defer mw.Close()
ac := uiauto.New(test.tconn)
for _, data := range test.webPages {
conns := make([]*chrome.Conn, 0, numPages)
// Create the homepage of the site.
firstPage, err := test.br.NewConn(ctx, data.startURL)
if err != nil {
return errors.Wrapf(err, "failed to open %s", data.startURL)
}
conns = append(conns, firstPage)
if test.param.BrowserType == browser.TypeLacros {
if err := browser.CloseTabByTitle(ctx, test.bTconn, "New Tab"); err != nil {
return errors.Wrap(err, `failed to close "New Tab" tab`)
}
}
// Find extra urls to navigate to.
urls, err := findAnchorURLs(ctx, firstPage, data.urlPattern, numPages-1)
if err != nil {
return errors.Wrapf(err, "failed to get URLs for %s", data.startURL)
}
// Open those found URLs as new tabs.
for _, url := range urls {
newConnection, err := test.br.NewConn(ctx, url)
if err != nil {
return errors.Wrapf(err, "failed to open the URL %s", url)
}
conns = append(conns, newConnection)
}
// Ensure that all tabs are properly loaded before starting test.
if err := waitUntilAllTabsLoaded(ctx, test.bTconn, time.Minute); err != nil {
testing.ContextLog(ctx, "Some tabs are still in loading state, but proceeding with the test: ", err)
}
// Repeat the test as many times as necessary to fulfill its time requirements.
// e.g. If there are two windows that need to be tested sequentially, and the
// total core test duration is 10 mins, each window will be tested for 5 mins.
//
// Note: Test runs for coreTestDuration minutes.
if len(test.webPages) == 0 {
return errors.New("test scenario does not specify any web pages")
}
testing.ContextLog(ctx, "Start switching tabs")
// Switch through tabs in a skip-order fashion.
// Note: when skipSize = N-1, then the skip-order is 1,1,1,1 ... N times
// Therefore i + skipSize + 1 % N holds when 0 <= skipSize < N-1
skipSize := 0
i := 0
currentTab := 0
endTime := time.Now().Add(coreTestDuration/time.Duration(len(test.webPages)) + time.Second)
for time.Now().Before(endTime) {
tabToClick := nodewith.HasClass("TabIcon").Nth(currentTab)
if err := action.Combine(
"click on tab and move mouse back to the center of the display",
ac.MouseMoveTo(tabToClick, 500*time.Millisecond),
ac.LeftClick(tabToClick),
mouse.Move(test.tconn, info.Bounds.CenterPoint(), 500*time.Millisecond),
)(ctx); err != nil {
return err
}
if err := webutil.WaitForQuiescence(ctx, conns[currentTab], tabSwitchTimeout); err != nil {
return errors.Wrap(err, "failed to wait for the tab to quiesce")
}
for _, key := range []string{"Down", "Up"} {
if err := sim.RepeatKeyPress(ctx, kw, key, 200*time.Millisecond, 3); err != nil {
return errors.Wrapf(err, "failed to repeatedly press %s in between tab switches", key)
}
}
for _, scrollDown := range []bool{true, false} {
if err := sim.RepeatMouseScroll(ctx, mw, scrollDown, 50*time.Millisecond, 20); err != nil {
return errors.Wrap(err, "failed to scroll in between tab switches")
}
}
if err := ac.WithInterval(time.Second).WithTimeout(5*time.Second).WaitUntilNoEvent(nodewith.Root(), event.LocationChanged)(ctx); err != nil {
testing.ContextLog(ctx, "Scroll animations haven't stabilized yet, continuing anyway: ", err)
}
if err := sim.RunDragMouseCycle(ctx, test.tconn, info); err != nil {
return errors.Wrap(err, "failed to run the mouse drag cycle")
}
currentTab = (currentTab + skipSize + 1) % len(conns)
// Once we have seen every tab, adjust the skipSize to
// vary the tab visitation order.
if i == len(conns)-1 {
i = 0
currentTab = 0
skipSize = (skipSize + 1) % len(conns)
} else {
i++
}
}
// Take a screenshot to see the status of the CNN/Reddit
// window before closing it.
test.recorder.CustomScreenshot(ctx)
switch test.param.BrowserType {
case browser.TypeLacros:
if err := browser.ReplaceAllTabsWithSingleNewTab(ctx, test.bTconn); err != nil {
return errors.Wrap(err, "failed to close all tabs and leave a single new tab open")
}
case browser.TypeAsh:
if err := browser.CloseAllTabs(ctx, test.bTconn); err != nil {
return errors.Wrap(err, "failed to close all tabs")
}
default:
return errors.Errorf("unsupported browser type %v", test.param.BrowserType)
}
}
return nil
}
// Run runs the setup, core part of the TabSwitchCUJ test, and cleanup.
func Run(ctx context.Context, s *testing.State) {
// Reserve time for cleanup
closeCtx := ctx
ctx, cancel := ctxutil.Shorten(ctx, 2*time.Second)
defer cancel()
// Perform initial test setup
setupVars, err := runSetup(ctx, s)
if err != nil {
s.Fatal("Failed to run setup: ", err)
}
defer setupVars.closeBrowser(closeCtx)
defer setupVars.recorder.Close(closeCtx)
if err := muteDevice(ctx, s); err != nil {
s.Log("(non-error) Failed to mute device: ", err)
}
// Execute Test
if err := setupVars.recorder.Run(ctx, func(ctx context.Context) error {
return testBody(ctx, setupVars)
}); err != nil {
s.Fatal("Failed to conduct the test scenario, or collect the histogram data: ", err)
}
// Write out values
pv := perf.NewValues()
if err := setupVars.recorder.Record(ctx, pv); err != nil {
s.Fatal("Failed to report: ", err)
}
if err := pv.Save(s.OutDir()); err != nil {
s.Error("Failed to store values: ", err)
}
}
| {
return nil, errors.Wrap(err, "failed to open the browser")
} | conditional_block |
tab_switch_cuj.go | // Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package tabswitchcuj contains the test code for TabSwitchCUJ. The test is
// extracted into this package to be shared between TabSwitchCUJRecorder and
// TabSwitchCUJ.
//
// Steps to update the test:
// 1. Make changes in this package.
// 2. "tast run $IP ui.TabSwitchCUJRecorder" to record the contents.
// Look for the recorded wpr archive in /tmp/tab_switch_cuj.wprgo.
// 3. Update the recorded wpr archive to cloud storage under
// gs://chromiumos-test-assets-public/tast/cros/ui/
// It is recommended to add a date suffix to make it easier to change.
// 4. Update "tab_switch_cuj.wprgo.external" file under ui/data.
// 5. "tast run $IP ui.TabSwitchCUJ" locally to make sure tests works
// with the new recorded contents.
// 6. Submit the changes here with updated external data reference.
package tabswitchcuj
import (
"context"
"time"
"chromiumos/tast/common/action"
"chromiumos/tast/common/perf"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
sim "chromiumos/tast/local/chrome/cuj/inputsimulations"
"chromiumos/tast/local/chrome/display"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/event"
"chromiumos/tast/local/chrome/uiauto/mouse"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/webutil"
"chromiumos/tast/local/input"
"chromiumos/tast/local/ui/cujrecorder"
"chromiumos/tast/testing"
)
const (
// WPRArchiveName is used as the external file name of the wpr archive for
// TabSwitchCuj and as the output filename under "/tmp" for
// TabSwitchCujRecorder.
WPRArchiveName = "tab_switch_cuj.wprgo"
)
// TabSwitchParam holds parameters of tab switch cuj test variations.
type TabSwitchParam struct {
BrowserType browser.Type // Chrome type.
}
// tabSwitchVariables holds all the necessary variables used by the test.
type tabSwitchVariables struct {
param TabSwitchParam // Test Parameters
webPages []webPage // List of sites to visit
cr *chrome.Chrome
br *browser.Browser
closeBrowser func(context.Context) error
tconn *chrome.TestConn
bTconn *chrome.TestConn
recorder *cujrecorder.Recorder
}
// webPage holds the info used to visit new sites in the test.
type webPage struct {
name string // Display Name of the Website
startURL string // Base URL to the Website
urlPattern string // RegExp Pattern to Open Relevant Links on the Website
}
// coreTestDuration is a minimum duration for the core part of the test.
// The actual test duration could be longer because of various setup.
const coreTestDuration = 10 * time.Minute
func runSetup(ctx context.Context, s *testing.State) (*tabSwitchVariables, error) {
vars := tabSwitchVariables{
param: s.Param().(TabSwitchParam),
webPages: getTestWebpages(),
cr: s.FixtValue().(chrome.HasChrome).Chrome(),
}
var err error
vars.br, vars.closeBrowser, err = browserfixt.SetUp(ctx, vars.cr, vars.param.BrowserType)
if err != nil {
return nil, errors.Wrap(err, "failed to open the browser")
}
vars.bTconn, err = vars.br.TestAPIConn(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get browser TestAPIConn")
}
vars.recorder, err = cujrecorder.NewRecorder(ctx, vars.cr, vars.bTconn, nil, cujrecorder.RecorderOptions{})
if err != nil {
return nil, errors.Wrap(err, "failed to create a recorder")
}
metricsSuccessfullyAdded := false
defer func(ctx context.Context) {
if metricsSuccessfullyAdded {
return
}
vars.closeBrowser(ctx)
vars.recorder.Close(ctx)
}(ctx)
vars.tconn, err = vars.cr.TestAPIConn(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get ash-chrome test connection")
}
if err := vars.recorder.AddCommonMetrics(vars.tconn, vars.bTconn); err != nil {
s.Fatal("Failed to add common metrics to the recorder: ", err)
}
vars.recorder.EnableTracing(s.OutDir(), s.DataPath(cujrecorder.SystemTraceConfigFile))
if _, ok := s.Var("record"); ok {
if err := vars.recorder.AddScreenRecorder(ctx, vars.tconn, s.TestName()); err != nil {
s.Fatal("Failed to add screen recorder: ", err)
}
}
// Add an empty screenshot recorder.
if err := vars.recorder.AddScreenshotRecorder(ctx, 0, 0); err != nil {
s.Log("Failed to add screenshot recorder: ", err)
}
metricsSuccessfullyAdded = true
return &vars, nil
}
func getTestWebpages() []webPage {
CNN := webPage{
name: "CNN",
startURL: "https://cnn.com",
urlPattern: `^.*://www.cnn.com/\d{4}/\d{2}/\d{2}/`,
}
Reddit := webPage{
name: "Reddit",
startURL: "https://reddit.com",
urlPattern: `^.*://www.reddit.com/r/[^/]+/comments/[^/]+/`,
}
return []webPage{CNN, Reddit}
}
func muteDevice(ctx context.Context, s *testing.State) error {
// The custom variable for the developer to mute the device before the test,
// so it doesn't make any noise when some of the visited pages play video.
if _, ok := s.Var("mute"); !ok {
return nil
}
kw, err := input.Keyboard(ctx)
if err != nil {
return errors.Wrap(err, "failed to open the keyboard")
}
defer kw.Close()
topRow, err := input.KeyboardTopRowLayout(ctx, kw)
if err != nil {
return errors.Wrap(err, "failed to obtain the top-row layout")
}
if err = kw.Accel(ctx, topRow.VolumeMute); err != nil {
return errors.Wrap(err, "failed to press mute key")
}
return nil
}
// findAnchorURLs returns the unique URLs of the anchors, which matches the pattern.
// If it finds more than limit, returns the first limit elements.
func findAnchorURLs(ctx context.Context, c *chrome.Conn, pattern string, limit int) ([]string, error) {
var urls []string
if err := c.Call(ctx, &urls, `(pattern, limit) => {
const anchors = [...document.getElementsByTagName('A')];
const founds = new Set();
const results = [];
const regexp = new RegExp(pattern);
for (let i = 0; i < anchors.length && results.length < limit; i++) {
const href = new URL(anchors[i].href).toString();
if (founds.has(href)) {
continue;
}
founds.add(href);
if (regexp.test(href)) {
results.push(href);
}
}
return results;
}`, pattern, limit); err != nil {
return nil, err
}
if len(urls) == 0 {
return nil, errors.New("no urls found")
}
return urls, nil
}
func waitUntilAllTabsLoaded(ctx context.Context, tconn *chrome.TestConn, timeout time.Duration) error {
query := map[string]interface{}{
"status": "loading",
"currentWindow": true,
}
return testing.Poll(ctx, func(ctx context.Context) error {
var tabs []map[string]interface{}
if err := tconn.Call(ctx, &tabs, `tast.promisify(chrome.tabs.query)`, query); err != nil {
return testing.PollBreak(err)
}
if len(tabs) != 0 {
return errors.Errorf("still %d tabs are loading", len(tabs))
}
return nil
}, &testing.PollOptions{Timeout: timeout})
}
func retrieveAllTabs(ctx context.Context, tconn *chrome.TestConn, timeout time.Duration) ([]map[string]interface{}, error) {
emptyQuery := map[string]interface{}{}
// Get all tabs
var tabs []map[string]interface{}
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
err := tconn.Call(ctx, &tabs, `tast.promisify(chrome.tabs.query)`, emptyQuery)
return tabs, err
}
func | (ctx context.Context, tconn *chrome.TestConn, tabs *[]map[string]interface{}, tabIndexWithinWindow int, timeout time.Duration) error {
// Define parameters for API calls
activateTabProperties := map[string]interface{}{
"active": true,
}
// Find id of tab with positional index.
tabID := int((*tabs)[tabIndexWithinWindow]["id"].(float64))
// Switch to this tab as the active window
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return tconn.Call(ctx, nil, `tast.promisify(chrome.tabs.update)`, tabID, activateTabProperties)
}
func testBody(ctx context.Context, test *tabSwitchVariables) error {
const (
numPages = 7
tabSwitchTimeout = 20 * time.Second
)
info, err := display.GetPrimaryInfo(ctx, test.tconn)
if err != nil {
return errors.Wrap(err, "failed to get the primary display info")
}
kw, err := input.Keyboard(ctx)
if err != nil {
return errors.Wrap(err, "failed to open the keyboard")
}
defer kw.Close()
// Create a virtual mouse.
mw, err := input.Mouse(ctx)
if err != nil {
return errors.Wrap(err, "failed to create a mouse")
}
defer mw.Close()
ac := uiauto.New(test.tconn)
for _, data := range test.webPages {
conns := make([]*chrome.Conn, 0, numPages)
// Create the homepage of the site.
firstPage, err := test.br.NewConn(ctx, data.startURL)
if err != nil {
return errors.Wrapf(err, "failed to open %s", data.startURL)
}
conns = append(conns, firstPage)
if test.param.BrowserType == browser.TypeLacros {
if err := browser.CloseTabByTitle(ctx, test.bTconn, "New Tab"); err != nil {
return errors.Wrap(err, `failed to close "New Tab" tab`)
}
}
// Find extra urls to navigate to.
urls, err := findAnchorURLs(ctx, firstPage, data.urlPattern, numPages-1)
if err != nil {
return errors.Wrapf(err, "failed to get URLs for %s", data.startURL)
}
// Open those found URLs as new tabs.
for _, url := range urls {
newConnection, err := test.br.NewConn(ctx, url)
if err != nil {
return errors.Wrapf(err, "failed to open the URL %s", url)
}
conns = append(conns, newConnection)
}
// Ensure that all tabs are properly loaded before starting test.
if err := waitUntilAllTabsLoaded(ctx, test.bTconn, time.Minute); err != nil {
testing.ContextLog(ctx, "Some tabs are still in loading state, but proceeding with the test: ", err)
}
// Repeat the test as many times as necessary to fulfill its time requirements.
// e.g. If there are two windows that need to be tested sequentially, and the
// total core test duration is 10 mins, each window will be tested for 5 mins.
//
// Note: Test runs for coreTestDuration minutes.
if len(test.webPages) == 0 {
return errors.New("test scenario does not specify any web pages")
}
testing.ContextLog(ctx, "Start switching tabs")
// Switch through tabs in a skip-order fashion.
// Note: when skipSize = N-1, then the skip-order is 1,1,1,1 ... N times
// Therefore i + skipSize + 1 % N holds when 0 <= skipSize < N-1
skipSize := 0
i := 0
currentTab := 0
endTime := time.Now().Add(coreTestDuration/time.Duration(len(test.webPages)) + time.Second)
for time.Now().Before(endTime) {
tabToClick := nodewith.HasClass("TabIcon").Nth(currentTab)
if err := action.Combine(
"click on tab and move mouse back to the center of the display",
ac.MouseMoveTo(tabToClick, 500*time.Millisecond),
ac.LeftClick(tabToClick),
mouse.Move(test.tconn, info.Bounds.CenterPoint(), 500*time.Millisecond),
)(ctx); err != nil {
return err
}
if err := webutil.WaitForQuiescence(ctx, conns[currentTab], tabSwitchTimeout); err != nil {
return errors.Wrap(err, "failed to wait for the tab to quiesce")
}
for _, key := range []string{"Down", "Up"} {
if err := sim.RepeatKeyPress(ctx, kw, key, 200*time.Millisecond, 3); err != nil {
return errors.Wrapf(err, "failed to repeatedly press %s in between tab switches", key)
}
}
for _, scrollDown := range []bool{true, false} {
if err := sim.RepeatMouseScroll(ctx, mw, scrollDown, 50*time.Millisecond, 20); err != nil {
return errors.Wrap(err, "failed to scroll in between tab switches")
}
}
if err := ac.WithInterval(time.Second).WithTimeout(5*time.Second).WaitUntilNoEvent(nodewith.Root(), event.LocationChanged)(ctx); err != nil {
testing.ContextLog(ctx, "Scroll animations haven't stabilized yet, continuing anyway: ", err)
}
if err := sim.RunDragMouseCycle(ctx, test.tconn, info); err != nil {
return errors.Wrap(err, "failed to run the mouse drag cycle")
}
currentTab = (currentTab + skipSize + 1) % len(conns)
// Once we have seen every tab, adjust the skipSize to
// vary the tab visitation order.
if i == len(conns)-1 {
i = 0
currentTab = 0
skipSize = (skipSize + 1) % len(conns)
} else {
i++
}
}
// Take a screenshot to see the status of the CNN/Reddit
// window before closing it.
test.recorder.CustomScreenshot(ctx)
switch test.param.BrowserType {
case browser.TypeLacros:
if err := browser.ReplaceAllTabsWithSingleNewTab(ctx, test.bTconn); err != nil {
return errors.Wrap(err, "failed to close all tabs and leave a single new tab open")
}
case browser.TypeAsh:
if err := browser.CloseAllTabs(ctx, test.bTconn); err != nil {
return errors.Wrap(err, "failed to close all tabs")
}
default:
return errors.Errorf("unsupported browser type %v", test.param.BrowserType)
}
}
return nil
}
// Run runs the setup, core part of the TabSwitchCUJ test, and cleanup.
func Run(ctx context.Context, s *testing.State) {
// Reserve time for cleanup
closeCtx := ctx
ctx, cancel := ctxutil.Shorten(ctx, 2*time.Second)
defer cancel()
// Perform initial test setup
setupVars, err := runSetup(ctx, s)
if err != nil {
s.Fatal("Failed to run setup: ", err)
}
defer setupVars.closeBrowser(closeCtx)
defer setupVars.recorder.Close(closeCtx)
if err := muteDevice(ctx, s); err != nil {
s.Log("(non-error) Failed to mute device: ", err)
}
// Execute Test
if err := setupVars.recorder.Run(ctx, func(ctx context.Context) error {
return testBody(ctx, setupVars)
}); err != nil {
s.Fatal("Failed to conduct the test scenario, or collect the histogram data: ", err)
}
// Write out values
pv := perf.NewValues()
if err := setupVars.recorder.Record(ctx, pv); err != nil {
s.Fatal("Failed to report: ", err)
}
if err := pv.Save(s.OutDir()); err != nil {
s.Error("Failed to store values: ", err)
}
}
| focusTab | identifier_name |
tab_switch_cuj.go | // Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package tabswitchcuj contains the test code for TabSwitchCUJ. The test is
// extracted into this package to be shared between TabSwitchCUJRecorder and
// TabSwitchCUJ.
//
// Steps to update the test:
// 1. Make changes in this package.
// 2. "tast run $IP ui.TabSwitchCUJRecorder" to record the contents.
// Look for the recorded wpr archive in /tmp/tab_switch_cuj.wprgo.
// 3. Update the recorded wpr archive to cloud storage under
// gs://chromiumos-test-assets-public/tast/cros/ui/
// It is recommended to add a date suffix to make it easier to change.
// 4. Update "tab_switch_cuj.wprgo.external" file under ui/data.
// 5. "tast run $IP ui.TabSwitchCUJ" locally to make sure tests works
// with the new recorded contents.
// 6. Submit the changes here with updated external data reference.
package tabswitchcuj
import (
"context"
"time"
"chromiumos/tast/common/action"
"chromiumos/tast/common/perf"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
sim "chromiumos/tast/local/chrome/cuj/inputsimulations"
"chromiumos/tast/local/chrome/display"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/event"
"chromiumos/tast/local/chrome/uiauto/mouse"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/webutil"
"chromiumos/tast/local/input"
"chromiumos/tast/local/ui/cujrecorder"
"chromiumos/tast/testing"
)
const (
// WPRArchiveName is used as the external file name of the wpr archive for
// TabSwitchCuj and as the output filename under "/tmp" for
// TabSwitchCujRecorder.
WPRArchiveName = "tab_switch_cuj.wprgo"
)
// TabSwitchParam holds parameters of tab switch cuj test variations.
type TabSwitchParam struct {
BrowserType browser.Type // Chrome type.
}
// tabSwitchVariables holds all the necessary variables used by the test.
type tabSwitchVariables struct {
param TabSwitchParam // Test Parameters
webPages []webPage // List of sites to visit
cr *chrome.Chrome
br *browser.Browser
closeBrowser func(context.Context) error
tconn *chrome.TestConn
bTconn *chrome.TestConn
recorder *cujrecorder.Recorder
}
// webPage holds the info used to visit new sites in the test.
type webPage struct {
name string // Display Name of the Website
startURL string // Base URL to the Website
urlPattern string // RegExp Pattern to Open Relevant Links on the Website
}
// coreTestDuration is a minimum duration for the core part of the test.
// The actual test duration could be longer because of various setup.
const coreTestDuration = 10 * time.Minute
func runSetup(ctx context.Context, s *testing.State) (*tabSwitchVariables, error) {
vars := tabSwitchVariables{
param: s.Param().(TabSwitchParam),
webPages: getTestWebpages(),
cr: s.FixtValue().(chrome.HasChrome).Chrome(),
}
var err error
vars.br, vars.closeBrowser, err = browserfixt.SetUp(ctx, vars.cr, vars.param.BrowserType)
if err != nil {
return nil, errors.Wrap(err, "failed to open the browser")
}
vars.bTconn, err = vars.br.TestAPIConn(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get browser TestAPIConn")
}
vars.recorder, err = cujrecorder.NewRecorder(ctx, vars.cr, vars.bTconn, nil, cujrecorder.RecorderOptions{})
if err != nil {
return nil, errors.Wrap(err, "failed to create a recorder")
}
metricsSuccessfullyAdded := false
defer func(ctx context.Context) {
if metricsSuccessfullyAdded {
return
}
vars.closeBrowser(ctx)
vars.recorder.Close(ctx)
}(ctx)
vars.tconn, err = vars.cr.TestAPIConn(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get ash-chrome test connection")
}
if err := vars.recorder.AddCommonMetrics(vars.tconn, vars.bTconn); err != nil {
s.Fatal("Failed to add common metrics to the recorder: ", err)
}
vars.recorder.EnableTracing(s.OutDir(), s.DataPath(cujrecorder.SystemTraceConfigFile))
if _, ok := s.Var("record"); ok {
if err := vars.recorder.AddScreenRecorder(ctx, vars.tconn, s.TestName()); err != nil {
s.Fatal("Failed to add screen recorder: ", err)
}
}
// Add an empty screenshot recorder.
if err := vars.recorder.AddScreenshotRecorder(ctx, 0, 0); err != nil {
s.Log("Failed to add screenshot recorder: ", err)
}
metricsSuccessfullyAdded = true
return &vars, nil
}
func getTestWebpages() []webPage {
CNN := webPage{
name: "CNN",
startURL: "https://cnn.com",
urlPattern: `^.*://www.cnn.com/\d{4}/\d{2}/\d{2}/`,
}
Reddit := webPage{
name: "Reddit",
startURL: "https://reddit.com",
urlPattern: `^.*://www.reddit.com/r/[^/]+/comments/[^/]+/`,
}
return []webPage{CNN, Reddit}
}
func muteDevice(ctx context.Context, s *testing.State) error {
// The custom variable for the developer to mute the device before the test,
// so it doesn't make any noise when some of the visited pages play video.
if _, ok := s.Var("mute"); !ok {
return nil
}
kw, err := input.Keyboard(ctx)
if err != nil { |
topRow, err := input.KeyboardTopRowLayout(ctx, kw)
if err != nil {
return errors.Wrap(err, "failed to obtain the top-row layout")
}
if err = kw.Accel(ctx, topRow.VolumeMute); err != nil {
return errors.Wrap(err, "failed to press mute key")
}
return nil
}
// findAnchorURLs returns the unique URLs of the anchors, which matches the pattern.
// If it finds more than limit, returns the first limit elements.
func findAnchorURLs(ctx context.Context, c *chrome.Conn, pattern string, limit int) ([]string, error) {
var urls []string
if err := c.Call(ctx, &urls, `(pattern, limit) => {
const anchors = [...document.getElementsByTagName('A')];
const founds = new Set();
const results = [];
const regexp = new RegExp(pattern);
for (let i = 0; i < anchors.length && results.length < limit; i++) {
const href = new URL(anchors[i].href).toString();
if (founds.has(href)) {
continue;
}
founds.add(href);
if (regexp.test(href)) {
results.push(href);
}
}
return results;
}`, pattern, limit); err != nil {
return nil, err
}
if len(urls) == 0 {
return nil, errors.New("no urls found")
}
return urls, nil
}
// waitUntilAllTabsLoaded polls until no tab in the current window reports
// the "loading" status, or until timeout elapses.
func waitUntilAllTabsLoaded(ctx context.Context, tconn *chrome.TestConn, timeout time.Duration) error {
	// Query only tabs that are still loading in the focused window.
	loadingQuery := map[string]interface{}{
		"status":        "loading",
		"currentWindow": true,
	}
	return testing.Poll(ctx, func(ctx context.Context) error {
		var loading []map[string]interface{}
		if err := tconn.Call(ctx, &loading, `tast.promisify(chrome.tabs.query)`, loadingQuery); err != nil {
			// A failing API call will not fix itself; stop polling early.
			return testing.PollBreak(err)
		}
		if len(loading) != 0 {
			return errors.Errorf("still %d tabs are loading", len(loading))
		}
		return nil
	}, &testing.PollOptions{Timeout: timeout})
}
func retrieveAllTabs(ctx context.Context, tconn *chrome.TestConn, timeout time.Duration) ([]map[string]interface{}, error) {
emptyQuery := map[string]interface{}{}
// Get all tabs
var tabs []map[string]interface{}
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
err := tconn.Call(ctx, &tabs, `tast.promisify(chrome.tabs.query)`, emptyQuery)
return tabs, err
}
func focusTab(ctx context.Context, tconn *chrome.TestConn, tabs *[]map[string]interface{}, tabIndexWithinWindow int, timeout time.Duration) error {
// Define parameters for API calls
activateTabProperties := map[string]interface{}{
"active": true,
}
// Find id of tab with positional index.
tabID := int((*tabs)[tabIndexWithinWindow]["id"].(float64))
// Switch to this tab as the active window
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return tconn.Call(ctx, nil, `tast.promisify(chrome.tabs.update)`, tabID, activateTabProperties)
}
func testBody(ctx context.Context, test *tabSwitchVariables) error {
const (
numPages = 7
tabSwitchTimeout = 20 * time.Second
)
info, err := display.GetPrimaryInfo(ctx, test.tconn)
if err != nil {
return errors.Wrap(err, "failed to get the primary display info")
}
kw, err := input.Keyboard(ctx)
if err != nil {
return errors.Wrap(err, "failed to open the keyboard")
}
defer kw.Close()
// Create a virtual mouse.
mw, err := input.Mouse(ctx)
if err != nil {
return errors.Wrap(err, "failed to create a mouse")
}
defer mw.Close()
ac := uiauto.New(test.tconn)
for _, data := range test.webPages {
conns := make([]*chrome.Conn, 0, numPages)
// Create the homepage of the site.
firstPage, err := test.br.NewConn(ctx, data.startURL)
if err != nil {
return errors.Wrapf(err, "failed to open %s", data.startURL)
}
conns = append(conns, firstPage)
if test.param.BrowserType == browser.TypeLacros {
if err := browser.CloseTabByTitle(ctx, test.bTconn, "New Tab"); err != nil {
return errors.Wrap(err, `failed to close "New Tab" tab`)
}
}
// Find extra urls to navigate to.
urls, err := findAnchorURLs(ctx, firstPage, data.urlPattern, numPages-1)
if err != nil {
return errors.Wrapf(err, "failed to get URLs for %s", data.startURL)
}
// Open those found URLs as new tabs.
for _, url := range urls {
newConnection, err := test.br.NewConn(ctx, url)
if err != nil {
return errors.Wrapf(err, "failed to open the URL %s", url)
}
conns = append(conns, newConnection)
}
// Ensure that all tabs are properly loaded before starting test.
if err := waitUntilAllTabsLoaded(ctx, test.bTconn, time.Minute); err != nil {
testing.ContextLog(ctx, "Some tabs are still in loading state, but proceeding with the test: ", err)
}
// Repeat the test as many times as necessary to fulfill its time requirements.
// e.g. If there are two windows that need to be tested sequentially, and the
// total core test duration is 10 mins, each window will be tested for 5 mins.
//
// Note: Test runs for coreTestDuration minutes.
if len(test.webPages) == 0 {
return errors.New("test scenario does not specify any web pages")
}
testing.ContextLog(ctx, "Start switching tabs")
// Switch through tabs in a skip-order fashion.
// Note: when skipSize = N-1, then the skip-order is 1,1,1,1 ... N times
// Therefore i + skipSize + 1 % N holds when 0 <= skipSize < N-1
skipSize := 0
i := 0
currentTab := 0
endTime := time.Now().Add(coreTestDuration/time.Duration(len(test.webPages)) + time.Second)
for time.Now().Before(endTime) {
tabToClick := nodewith.HasClass("TabIcon").Nth(currentTab)
if err := action.Combine(
"click on tab and move mouse back to the center of the display",
ac.MouseMoveTo(tabToClick, 500*time.Millisecond),
ac.LeftClick(tabToClick),
mouse.Move(test.tconn, info.Bounds.CenterPoint(), 500*time.Millisecond),
)(ctx); err != nil {
return err
}
if err := webutil.WaitForQuiescence(ctx, conns[currentTab], tabSwitchTimeout); err != nil {
return errors.Wrap(err, "failed to wait for the tab to quiesce")
}
for _, key := range []string{"Down", "Up"} {
if err := sim.RepeatKeyPress(ctx, kw, key, 200*time.Millisecond, 3); err != nil {
return errors.Wrapf(err, "failed to repeatedly press %s in between tab switches", key)
}
}
for _, scrollDown := range []bool{true, false} {
if err := sim.RepeatMouseScroll(ctx, mw, scrollDown, 50*time.Millisecond, 20); err != nil {
return errors.Wrap(err, "failed to scroll in between tab switches")
}
}
if err := ac.WithInterval(time.Second).WithTimeout(5*time.Second).WaitUntilNoEvent(nodewith.Root(), event.LocationChanged)(ctx); err != nil {
testing.ContextLog(ctx, "Scroll animations haven't stabilized yet, continuing anyway: ", err)
}
if err := sim.RunDragMouseCycle(ctx, test.tconn, info); err != nil {
return errors.Wrap(err, "failed to run the mouse drag cycle")
}
currentTab = (currentTab + skipSize + 1) % len(conns)
// Once we have seen every tab, adjust the skipSize to
// vary the tab visitation order.
if i == len(conns)-1 {
i = 0
currentTab = 0
skipSize = (skipSize + 1) % len(conns)
} else {
i++
}
}
// Take a screenshot to see the status of the CNN/Reddit
// window before closing it.
test.recorder.CustomScreenshot(ctx)
switch test.param.BrowserType {
case browser.TypeLacros:
if err := browser.ReplaceAllTabsWithSingleNewTab(ctx, test.bTconn); err != nil {
return errors.Wrap(err, "failed to close all tabs and leave a single new tab open")
}
case browser.TypeAsh:
if err := browser.CloseAllTabs(ctx, test.bTconn); err != nil {
return errors.Wrap(err, "failed to close all tabs")
}
default:
return errors.Errorf("unsupported browser type %v", test.param.BrowserType)
}
}
return nil
}
// Run runs the setup, core part of the TabSwitchCUJ test, and cleanup.
func Run(ctx context.Context, s *testing.State) {
// Reserve time for cleanup
closeCtx := ctx
ctx, cancel := ctxutil.Shorten(ctx, 2*time.Second)
defer cancel()
// Perform initial test setup
setupVars, err := runSetup(ctx, s)
if err != nil {
s.Fatal("Failed to run setup: ", err)
}
defer setupVars.closeBrowser(closeCtx)
defer setupVars.recorder.Close(closeCtx)
if err := muteDevice(ctx, s); err != nil {
s.Log("(non-error) Failed to mute device: ", err)
}
// Execute Test
if err := setupVars.recorder.Run(ctx, func(ctx context.Context) error {
return testBody(ctx, setupVars)
}); err != nil {
s.Fatal("Failed to conduct the test scenario, or collect the histogram data: ", err)
}
// Write out values
pv := perf.NewValues()
if err := setupVars.recorder.Record(ctx, pv); err != nil {
s.Fatal("Failed to report: ", err)
}
if err := pv.Save(s.OutDir()); err != nil {
s.Error("Failed to store values: ", err)
}
} | return errors.Wrap(err, "failed to open the keyboard")
}
defer kw.Close() | random_line_split |
tab_switch_cuj.go | // Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package tabswitchcuj contains the test code for TabSwitchCUJ. The test is
// extracted into this package to be shared between TabSwitchCUJRecorder and
// TabSwitchCUJ.
//
// Steps to update the test:
// 1. Make changes in this package.
// 2. "tast run $IP ui.TabSwitchCUJRecorder" to record the contents.
// Look for the recorded wpr archive in /tmp/tab_switch_cuj.wprgo.
// 3. Update the recorded wpr archive to cloud storage under
// gs://chromiumos-test-assets-public/tast/cros/ui/
// It is recommended to add a date suffix to make it easier to change.
// 4. Update "tab_switch_cuj.wprgo.external" file under ui/data.
// 5. "tast run $IP ui.TabSwitchCUJ" locally to make sure tests works
// with the new recorded contents.
// 6. Submit the changes here with updated external data reference.
package tabswitchcuj
import (
"context"
"time"
"chromiumos/tast/common/action"
"chromiumos/tast/common/perf"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
sim "chromiumos/tast/local/chrome/cuj/inputsimulations"
"chromiumos/tast/local/chrome/display"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/event"
"chromiumos/tast/local/chrome/uiauto/mouse"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/webutil"
"chromiumos/tast/local/input"
"chromiumos/tast/local/ui/cujrecorder"
"chromiumos/tast/testing"
)
const (
// WPRArchiveName is used as the external file name of the wpr archive for
// TabSwitchCuj and as the output filename under "/tmp" for
// TabSwitchCujRecorder.
WPRArchiveName = "tab_switch_cuj.wprgo"
)
// TabSwitchParam holds parameters of tab switch cuj test variations.
type TabSwitchParam struct {
BrowserType browser.Type // Chrome type.
}
// tabSwitchVariables holds all the necessary variables used by the test.
type tabSwitchVariables struct {
param TabSwitchParam // Test Parameters
webPages []webPage // List of sites to visit
cr *chrome.Chrome
br *browser.Browser
closeBrowser func(context.Context) error
tconn *chrome.TestConn
bTconn *chrome.TestConn
recorder *cujrecorder.Recorder
}
// webPage holds the info used to visit new sites in the test.
type webPage struct {
name string // Display Name of the Website
startURL string // Base URL to the Website
urlPattern string // RegExp Pattern to Open Relevant Links on the Website
}
// coreTestDuration is a minimum duration for the core part of the test.
// The actual test duration could be longer because of various setup.
const coreTestDuration = 10 * time.Minute
// runSetup opens the browser of the requested type, creates the CUJ recorder,
// and registers all metrics/recorders needed by the test.
//
// On success the caller owns closing vars.closeBrowser and vars.recorder; on
// error, everything opened so far has already been cleaned up by the deferred
// teardown below.
func runSetup(ctx context.Context, s *testing.State) (*tabSwitchVariables, error) {
	vars := tabSwitchVariables{
		param:    s.Param().(TabSwitchParam),
		webPages: getTestWebpages(),
		cr:       s.FixtValue().(chrome.HasChrome).Chrome(),
	}

	var err error
	vars.br, vars.closeBrowser, err = browserfixt.SetUp(ctx, vars.cr, vars.param.BrowserType)
	if err != nil {
		return nil, errors.Wrap(err, "failed to open the browser")
	}
	vars.bTconn, err = vars.br.TestAPIConn(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get browser TestAPIConn")
	}
	vars.recorder, err = cujrecorder.NewRecorder(ctx, vars.cr, vars.bTconn, nil, cujrecorder.RecorderOptions{})
	if err != nil {
		return nil, errors.Wrap(err, "failed to create a recorder")
	}

	// Until all metrics are registered, any failure must tear down the
	// browser and the recorder; once metricsSuccessfullyAdded is set, the
	// caller owns both.
	metricsSuccessfullyAdded := false
	defer func(ctx context.Context) {
		if metricsSuccessfullyAdded {
			return
		}
		vars.closeBrowser(ctx)
		vars.recorder.Close(ctx)
	}(ctx)

	vars.tconn, err = vars.cr.TestAPIConn(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get ash-chrome test connection")
	}
	// Return wrapped errors instead of s.Fatal so failures flow through the
	// deferred cleanup above and are reported consistently by the caller.
	if err := vars.recorder.AddCommonMetrics(vars.tconn, vars.bTconn); err != nil {
		return nil, errors.Wrap(err, "failed to add common metrics to the recorder")
	}
	vars.recorder.EnableTracing(s.OutDir(), s.DataPath(cujrecorder.SystemTraceConfigFile))
	// Screen recording is opt-in via the "record" runtime variable.
	if _, ok := s.Var("record"); ok {
		if err := vars.recorder.AddScreenRecorder(ctx, vars.tconn, s.TestName()); err != nil {
			return nil, errors.Wrap(err, "failed to add screen recorder")
		}
	}
	// Add an empty screenshot recorder. Best-effort: failure is only logged.
	if err := vars.recorder.AddScreenshotRecorder(ctx, 0, 0); err != nil {
		s.Log("Failed to add screenshot recorder: ", err)
	}

	metricsSuccessfullyAdded = true
	return &vars, nil
}
// getTestWebpages returns the websites the test cycles through, together
// with the URL patterns used to discover related article/comment links.
func getTestWebpages() []webPage {
	return []webPage{
		{
			name:       "CNN",
			startURL:   "https://cnn.com",
			urlPattern: `^.*://www.cnn.com/\d{4}/\d{2}/\d{2}/`,
		},
		{
			name:       "Reddit",
			startURL:   "https://reddit.com",
			urlPattern: `^.*://www.reddit.com/r/[^/]+/comments/[^/]+/`,
		},
	}
}
// muteDevice mutes the device by pressing the keyboard mute key, so visited
// pages that autoplay video make no noise. Muting is opt-in via the custom
// "mute" runtime variable; without it this function is a no-op.
func muteDevice(ctx context.Context, s *testing.State) error {
	_, wantMute := s.Var("mute")
	if !wantMute {
		return nil
	}
	keyboard, err := input.Keyboard(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to open the keyboard")
	}
	defer keyboard.Close()
	// The mute key position depends on the keyboard's top-row layout.
	layout, err := input.KeyboardTopRowLayout(ctx, keyboard)
	if err != nil {
		return errors.Wrap(err, "failed to obtain the top-row layout")
	}
	if err := keyboard.Accel(ctx, layout.VolumeMute); err != nil {
		return errors.Wrap(err, "failed to press mute key")
	}
	return nil
}
// findAnchorURLs returns the unique URLs of the anchors, which matches the pattern.
// If it finds more than limit, returns the first limit elements.
// The scan runs inside the page through the DevTools connection c, so only
// the matching hrefs are transferred back.
func findAnchorURLs(ctx context.Context, c *chrome.Conn, pattern string, limit int) ([]string, error) {
	var urls []string
	// Collect the hrefs of all <a> elements, normalized via the URL
	// constructor, de-duplicated with a Set, and filtered by the regexp.
	if err := c.Call(ctx, &urls, `(pattern, limit) => {
	const anchors = [...document.getElementsByTagName('A')];
	const founds = new Set();
	const results = [];
	const regexp = new RegExp(pattern);
	for (let i = 0; i < anchors.length && results.length < limit; i++) {
		const href = new URL(anchors[i].href).toString();
		if (founds.has(href)) {
			continue;
		}
		founds.add(href);
		if (regexp.test(href)) {
			results.push(href);
		}
	}
	return results;
	}`, pattern, limit); err != nil {
		return nil, err
	}
	// An empty result is treated as an error: callers rely on having links
	// to open as additional tabs.
	if len(urls) == 0 {
		return nil, errors.New("no urls found")
	}
	return urls, nil
}
func waitUntilAllTabsLoaded(ctx context.Context, tconn *chrome.TestConn, timeout time.Duration) error |
// retrieveAllTabs queries the chrome.tabs API for every open tab (across all
// windows) and returns the raw tab objects. The call is bounded by timeout.
func retrieveAllTabs(ctx context.Context, tconn *chrome.TestConn, timeout time.Duration) ([]map[string]interface{}, error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	// An empty query object matches all tabs in all windows.
	var tabs []map[string]interface{}
	err := tconn.Call(ctx, &tabs, `tast.promisify(chrome.tabs.query)`, map[string]interface{}{})
	return tabs, err
}
// focusTab activates the tab at positional index tabIndexWithinWindow within
// the given tab list, bringing it to the foreground of its window. The API
// call is bounded by timeout.
func focusTab(ctx context.Context, tconn *chrome.TestConn, tabs *[]map[string]interface{}, tabIndexWithinWindow int, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	// chrome.tabs returns IDs as JSON numbers (float64); convert back to int.
	id := int((*tabs)[tabIndexWithinWindow]["id"].(float64))
	// Setting "active" switches the window's foreground tab.
	props := map[string]interface{}{
		"active": true,
	}
	return tconn.Call(ctx, nil, `tast.promisify(chrome.tabs.update)`, id, props)
}
// testBody is the core scenario: for each configured website it opens the
// homepage plus several linked pages as tabs, then repeatedly switches
// between the tabs in a skip-order pattern while generating keyboard and
// mouse activity, for an equal share of coreTestDuration per site. All tabs
// are closed before moving to the next site.
func testBody(ctx context.Context, test *tabSwitchVariables) error {
	const (
		// numPages is the total number of tabs opened per website.
		numPages = 7
		// tabSwitchTimeout bounds waiting for a tab to quiesce after a switch.
		tabSwitchTimeout = 20 * time.Second
	)
	info, err := display.GetPrimaryInfo(ctx, test.tconn)
	if err != nil {
		return errors.Wrap(err, "failed to get the primary display info")
	}
	kw, err := input.Keyboard(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to open the keyboard")
	}
	defer kw.Close()
	// Create a virtual mouse.
	mw, err := input.Mouse(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to create a mouse")
	}
	defer mw.Close()
	ac := uiauto.New(test.tconn)
	for _, data := range test.webPages {
		conns := make([]*chrome.Conn, 0, numPages)
		// Create the homepage of the site.
		firstPage, err := test.br.NewConn(ctx, data.startURL)
		if err != nil {
			return errors.Wrapf(err, "failed to open %s", data.startURL)
		}
		conns = append(conns, firstPage)
		// Lacros starts with an extra "New Tab" tab that would skew the tab
		// count; close it.
		if test.param.BrowserType == browser.TypeLacros {
			if err := browser.CloseTabByTitle(ctx, test.bTconn, "New Tab"); err != nil {
				return errors.Wrap(err, `failed to close "New Tab" tab`)
			}
		}
		// Find extra urls to navigate to.
		urls, err := findAnchorURLs(ctx, firstPage, data.urlPattern, numPages-1)
		if err != nil {
			return errors.Wrapf(err, "failed to get URLs for %s", data.startURL)
		}
		// Open those found URLs as new tabs.
		for _, url := range urls {
			newConnection, err := test.br.NewConn(ctx, url)
			if err != nil {
				return errors.Wrapf(err, "failed to open the URL %s", url)
			}
			conns = append(conns, newConnection)
		}
		// Ensure that all tabs are properly loaded before starting test.
		// Best-effort: a slow page should not abort the whole scenario.
		if err := waitUntilAllTabsLoaded(ctx, test.bTconn, time.Minute); err != nil {
			testing.ContextLog(ctx, "Some tabs are still in loading state, but proceeding with the test: ", err)
		}
		// Repeat the test as many times as necessary to fulfill its time requirements.
		// e.g. If there are two windows that need to be tested sequentially, and the
		// total core test duration is 10 mins, each window will be tested for 5 mins.
		//
		// Note: Test runs for coreTestDuration minutes.
		// NOTE(review): this check sits inside the range loop over
		// test.webPages, so it can never trigger (the body does not run when
		// the slice is empty); it likely belongs before the loop — confirm.
		if len(test.webPages) == 0 {
			return errors.New("test scenario does not specify any web pages")
		}
		testing.ContextLog(ctx, "Start switching tabs")
		// Switch through tabs in a skip-order fashion.
		// Note: when skipSize = N-1, then the skip-order is 1,1,1,1 ... N times
		// Therefore i + skipSize + 1 % N holds when 0 <= skipSize < N-1
		skipSize := 0
		i := 0
		currentTab := 0
		// Each site gets an equal slice of the total core duration, plus a
		// second of slack.
		endTime := time.Now().Add(coreTestDuration/time.Duration(len(test.webPages)) + time.Second)
		for time.Now().Before(endTime) {
			tabToClick := nodewith.HasClass("TabIcon").Nth(currentTab)
			if err := action.Combine(
				"click on tab and move mouse back to the center of the display",
				ac.MouseMoveTo(tabToClick, 500*time.Millisecond),
				ac.LeftClick(tabToClick),
				mouse.Move(test.tconn, info.Bounds.CenterPoint(), 500*time.Millisecond),
			)(ctx); err != nil {
				return err
			}
			if err := webutil.WaitForQuiescence(ctx, conns[currentTab], tabSwitchTimeout); err != nil {
				return errors.Wrap(err, "failed to wait for the tab to quiesce")
			}
			// Generate keyboard scrolling activity on the current tab.
			for _, key := range []string{"Down", "Up"} {
				if err := sim.RepeatKeyPress(ctx, kw, key, 200*time.Millisecond, 3); err != nil {
					return errors.Wrapf(err, "failed to repeatedly press %s in between tab switches", key)
				}
			}
			// Generate mouse-wheel scrolling, down then back up.
			for _, scrollDown := range []bool{true, false} {
				if err := sim.RepeatMouseScroll(ctx, mw, scrollDown, 50*time.Millisecond, 20); err != nil {
					return errors.Wrap(err, "failed to scroll in between tab switches")
				}
			}
			// Wait for scroll animations to settle; a timeout here is only
			// logged, not fatal.
			if err := ac.WithInterval(time.Second).WithTimeout(5*time.Second).WaitUntilNoEvent(nodewith.Root(), event.LocationChanged)(ctx); err != nil {
				testing.ContextLog(ctx, "Scroll animations haven't stabilized yet, continuing anyway: ", err)
			}
			if err := sim.RunDragMouseCycle(ctx, test.tconn, info); err != nil {
				return errors.Wrap(err, "failed to run the mouse drag cycle")
			}
			currentTab = (currentTab + skipSize + 1) % len(conns)
			// Once we have seen every tab, adjust the skipSize to
			// vary the tab visitation order.
			if i == len(conns)-1 {
				i = 0
				currentTab = 0
				skipSize = (skipSize + 1) % len(conns)
			} else {
				i++
			}
		}
		// Take a screenshot to see the status of the CNN/Reddit
		// window before closing it.
		test.recorder.CustomScreenshot(ctx)
		// Close this site's tabs before moving on; the mechanism differs per
		// browser type.
		switch test.param.BrowserType {
		case browser.TypeLacros:
			if err := browser.ReplaceAllTabsWithSingleNewTab(ctx, test.bTconn); err != nil {
				return errors.Wrap(err, "failed to close all tabs and leave a single new tab open")
			}
		case browser.TypeAsh:
			if err := browser.CloseAllTabs(ctx, test.bTconn); err != nil {
				return errors.Wrap(err, "failed to close all tabs")
			}
		default:
			return errors.Errorf("unsupported browser type %v", test.param.BrowserType)
		}
	}
	return nil
}
// Run runs the setup, core part of the TabSwitchCUJ test, and cleanup.
// It fails the test via s.Fatal on any unrecoverable error and stores the
// collected perf values in the test's output directory.
func Run(ctx context.Context, s *testing.State) {
	// Reserve time for cleanup: keep the original context for teardown and
	// shorten the working one so deferred calls still have time to run.
	closeCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 2*time.Second)
	defer cancel()
	// Perform initial test setup
	setupVars, err := runSetup(ctx, s)
	if err != nil {
		s.Fatal("Failed to run setup: ", err)
	}
	defer setupVars.closeBrowser(closeCtx)
	defer setupVars.recorder.Close(closeCtx)
	// Muting is best-effort; failure is logged but does not abort the test.
	if err := muteDevice(ctx, s); err != nil {
		s.Log("(non-error) Failed to mute device: ", err)
	}
	// Execute Test under the recorder so CUJ metrics are collected.
	if err := setupVars.recorder.Run(ctx, func(ctx context.Context) error {
		return testBody(ctx, setupVars)
	}); err != nil {
		s.Fatal("Failed to conduct the test scenario, or collect the histogram data: ", err)
	}
	// Write out values collected by the recorder to the output directory.
	pv := perf.NewValues()
	if err := setupVars.recorder.Record(ctx, pv); err != nil {
		s.Fatal("Failed to report: ", err)
	}
	if err := pv.Save(s.OutDir()); err != nil {
		s.Error("Failed to store values: ", err)
	}
}
| {
query := map[string]interface{}{
"status": "loading",
"currentWindow": true,
}
return testing.Poll(ctx, func(ctx context.Context) error {
var tabs []map[string]interface{}
if err := tconn.Call(ctx, &tabs, `tast.promisify(chrome.tabs.query)`, query); err != nil {
return testing.PollBreak(err)
}
if len(tabs) != 0 {
return errors.Errorf("still %d tabs are loading", len(tabs))
}
return nil
}, &testing.PollOptions{Timeout: timeout})
} | identifier_body |
main.rs | #[macro_use] extern crate errln;
#[macro_use] extern crate error_chain;
extern crate clap;
extern crate hex;
extern crate lalrpop_util;
extern crate parser_haskell;
extern crate regex;
extern crate tempdir;
extern crate walkdir;
extern crate corollary;
extern crate inflector;
use parser_haskell::util::{print_parse_error, simplify_parse_error};
use clap::{Arg, App};
use regex::Regex;
use std::fmt::Write;
use std::fs::{File, create_dir_all};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::Command;
use tempdir::TempDir;
use corollary::print_item_list;
use corollary::ir::PrintState;
// Define error chain.
// Wires third-party error types into this crate's error-chain hierarchy so
// `?` can convert walkdir/io/fmt errors into the generated `errors::Error`.
mod errors {
    error_chain! {
        foreign_links {
            Walkdir(::walkdir::Error);
            Io(::std::io::Error);
            Fmt(::std::fmt::Error);
        }
    }
}
use errors::*;
#[test] #[ignore]
fn test_single_file() {
    // Manual smoke test: parse one specific Haskell file end-to-end.
    // Ignored by default; run explicitly when debugging the parser against
    // one of the paths below.
    let a = "./corrode/src/Language/Rust/Corrode/C.lhs";
    // let a = "./corrode/src/Language/Rust/Corrode/C.hs";
    // let a = "./test/input.hs";
    println!("file: {}", a);
    let mut file = File::open(a).unwrap();
    let mut contents = String::new();
    file.read_to_string(&mut contents).unwrap();
    // Literate Haskell must first be reduced to plain Haskell.
    if a.ends_with(".lhs") {
        contents = strip_lhs(&contents);
    }
    let contents = parser_haskell::preprocess(&contents);
    // let mut a = ::std::fs::File::create("temp.txt").unwrap();
    // a.write_all(contents.as_bytes());
    let mut errors = Vec::new();
    match parser_haskell::parse(&mut errors, &contents) {
        Ok(okay) => println!("{:#?}", okay),
        Err(e) => {
            // Show a readable, location-annotated error before failing.
            let e = simplify_parse_error(e);
            print_parse_error(&contents, &e);
            panic!(e);
        }
    }
}
#[test]
fn test_no_regressions() {
    // Regression suite: every file in this list must parse without error.
    // The list covers the corrode and language-c sources plus the generated
    // parser-c lexer/parser.
    let a = vec![
        "../deps/corrode/src/Language/Rust/AST.hs",
        "../deps/corrode/src/Language/Rust/Corrode/C.lhs",
        "../deps/corrode/src/Language/Rust/Corrode/CFG.lhs",
        "../deps/corrode/src/Language/Rust/Corrode/CrateMap.hs",
        "../deps/corrode/src/Language/Rust/Idiomatic.hs",
        "../deps/corrode/src/Language/Rust.hs",
        "../deps/language-c/src/Language/C/Analysis/AstAnalysis.hs",
        "../deps/language-c/src/Language/C/Analysis/Builtins.hs",
        "../deps/language-c/src/Language/C/Analysis/ConstEval.hs",
        "../deps/language-c/src/Language/C/Analysis/Debug.hs",
        "../deps/language-c/src/Language/C/Analysis/DeclAnalysis.hs",
        "../deps/language-c/src/Language/C/Analysis/DefTable.hs",
        "../deps/language-c/src/Language/C/Analysis/Export.hs",
        "../deps/language-c/src/Language/C/Analysis/NameSpaceMap.hs",
        "../deps/language-c/src/Language/C/Analysis/SemError.hs",
        "../deps/language-c/src/Language/C/Analysis/SemRep.hs",
        "../deps/language-c/src/Language/C/Analysis/TravMonad.hs",
        "../deps/language-c/src/Language/C/Analysis/TypeCheck.hs",
        "../deps/language-c/src/Language/C/Analysis/TypeConversions.hs",
        "../deps/language-c/src/Language/C/Analysis/TypeUtils.hs",
        "../deps/language-c/src/Language/C/Analysis.hs",
        "../deps/language-c/src/Language/C/Data/Error.hs",
        "../deps/language-c/src/Language/C/Data/Ident.hs",
        "../deps/language-c/src/Language/C/Data/InputStream.hs",
        "../deps/language-c/src/Language/C/Data/Name.hs",
        "../deps/language-c/src/Language/C/Data/Node.hs",
        "../deps/language-c/src/Language/C/Data/Position.hs",
        "../deps/language-c/src/Language/C/Data/RList.hs",
        "../deps/language-c/src/Language/C/Data.hs",
        "../deps/language-c/src/Language/C/Parser/Builtin.hs",
        "../deps/language-c/src/Language/C/Parser/ParserMonad.hs",
        "../deps/language-c/src/Language/C/Parser/Tokens.hs",
        "../deps/language-c/src/Language/C/Parser.hs",
        "../deps/language-c/src/Language/C/Pretty.hs",
        "../deps/language-c/src/Language/C/Syntax/AST.hs",
        "../deps/language-c/src/Language/C/Syntax/Constants.hs",
        "../deps/language-c/src/Language/C/Syntax/Ops.hs",
        "../deps/language-c/src/Language/C/Syntax/Utils.hs",
        "../deps/language-c/src/Language/C/Syntax.hs",
        "../deps/language-c/src/Language/C/System/GCC.hs",
        "../deps/language-c/src/Language/C/System/Preprocess.hs",
        "../parser-c/gen/Lexer.hs",
        "../parser-c/gen/Parser.hs",
    ];
    for path in a {
        let mut file = File::open(path).unwrap();
        let mut contents = String::new();
        file.read_to_string(&mut contents).unwrap();
        // Literate Haskell is reduced to plain Haskell before preprocessing.
        if path.ends_with(".lhs") {
            contents = strip_lhs(&contents);
        }
        let contents = parser_haskell::preprocess(&contents);
        // Do not output preprocessed data temp.txt
        println!("{:?}", path);
        // use ::std::io::Write;
        // let mut a = ::std::fs::File::create("temp.txt").unwrap();
        // a.write_all(contents.as_bytes());
        let mut errors = Vec::new();
        match parser_haskell::parse(&mut errors, &contents) {
            Ok(_) => {
                // OK
            }
            Err(e) => {
                //TODO print_parse_error return string, feed to panic
                print_parse_error(&contents, &simplify_parse_error(e));
                panic!("cannot convert file {:?}", path);
            }
        }
    }
}
fn strip_lhs(s: &str) -> String {
let re = Regex::new(r"([ \t]*)```haskell([\s\S]*?)```").unwrap();
let mut out = vec![];
for cap in re.captures_iter(&s) {
let indent = cap[1].to_string().len();
let group = cap[2].to_string()
.lines()
.map(|x| {
x.chars().skip(indent).collect::<String>()
})
.collect::<Vec<_>>()
.join("\n");
out.push(group);
}
out.join("\n\n")
}
/// Converts a Haskell file by its path into a Rust module.
fn convert_file(input: &str, p: &Path, inline_mod: bool, dump_ast: bool) -> Result<(String, String)> |
quick_main!(run);
fn run() -> Result<()> {
use std::io::Write;
let matches = App::new("corollary")
.version("0.1")
.about("Converts Haskell to Rust")
.arg(Arg::with_name("run")
.short("r")
.long("run")
.help("Runs the file"))
.arg(Arg::with_name("out")
.short("o")
.long("out")
.help("Output path")
.takes_value(true))
.arg(Arg::with_name("ast")
.short("a")
.long("ast")
.help("Dump AST"))
.arg(Arg::with_name("INPUT")
.help("Sets the input file to use")
.required(true)
.index(1))
.get_matches();
let arg_input = matches.value_of("INPUT").unwrap();
let arg_run = matches.is_present("run");
let arg_out: Option<_> = matches.value_of("out");
let arg_ast = matches.is_present("ast");
if arg_run && arg_out.is_some() {
bail!("Cannot use --out and --run at the same time.");
}
if (arg_run || arg_out.is_some()) && arg_ast {
bail!("Cannot use --ast and (--run or --out) at the same time.");
}
// Starting message.
if arg_run {
errln!("running {:?}...", arg_input);
} else {
errln!("cross-compiling {:?}...", arg_input);
}
// Read file contents.
let mut file = File::open(arg_input)
.chain_err(|| format!("Could not open {:?}", arg_input))?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
// Preprocess the file. Translate .lhs.
if arg_input.ends_with(".lhs") {
contents = strip_lhs(&contents);
}
let (mut file_section, rust_section) = convert_file(&contents, &PathBuf::from(arg_input), false, arg_ast)?;
if arg_ast {
return Ok(());
}
// Add Rust segments RUST ... /RUST and Haskell support code.
let _ = writeln!(file_section, "");
let _ = writeln!(file_section, "");
if rust_section.len() > 0 {
let _ = writeln!(file_section, "/* RUST ... /RUST */");
let _ = writeln!(file_section, "{}", rust_section);
}
if let Some(out_path) = arg_out {
// Create directory.
let _ = create_dir_all(&Path::new(&arg_out.unwrap()).parent().unwrap());
// Write file to path.
errln!("... outputting to {:?}", out_path);
let mut f = File::create(&out_path)?;
let _ = f.write_all(file_section.as_bytes());
} else if !arg_run {
// Print file to stdout.
print!("{}", file_section);
} else if arg_run {
// Run the file.
let dir = TempDir::new("corollary")?;
let file_path = dir.path().join("script.rs");
let mut f = File::create(&file_path)?;
let _ = f.write_all(b"// cargo-deps: corollary-support={path=\"/Users/trim/Desktop/corrode-but-in-rust/corollary-support\"}\n\nextern crate corollary_support;\n\n");
let _ = f.write_all(file_section.as_bytes());
if rust_section.len() == 0 {
let _ = f.write_all(b"\n\nfn main() { let _ = __main(); }\n");
}
drop(f);
let output = Command::new("cargo")
.args(&["script", &file_path.display().to_string()])
.output()
.expect("failed to execute process");
if !output.status.success() {
err!("{}", String::from_utf8_lossy(&output.stderr));
}
err!("{}", String::from_utf8_lossy(&output.stdout));
::std::process::exit(output.status.code().unwrap());
}
Ok(())
}
| {
let mut contents = input.to_string();
let mut file_out = String::new();
let mut rust_out = String::new();
// Parse out HASKELL /HASKELL RUST /RUST sections.
let re = Regex::new(r#"HASKELL[\s\S]*?/HASKELL"#).unwrap();
contents = re.replace(&contents, "").to_string();
let re = Regex::new(r#"RUST([\s\S]*?)/RUST"#).unwrap();
if let Some(cap) = re.captures(&contents) {
rust_out.push_str(&cap.get(1).unwrap().as_str().to_string());
}
contents = re.replace(&contents, "").to_string();
// Preprocess the file.
let contents = parser_haskell::preprocess(&contents);
// errln!("{}", contents);
// Parse the file.
let mut errors = Vec::new();
match parser_haskell::parse(&mut errors, &contents) {
Ok(v) => {
// errln!("{:?}", v);
if dump_ast {
println!("{}", format!("{:#?}", v).replace(" ", " "));
} else {
writeln!(file_out, "// Original file: {:?}", p.file_name().unwrap())?;
writeln!(file_out, "// File auto-generated using Corollary.")?;
writeln!(file_out, "")?;
if inline_mod {
writeln!(file_out, "pub mod {} {{", v.name.0.replace(".", "_"))?;
writeln!(file_out, " use haskell_support::*;")?;
writeln!(file_out, "")?;
let state = PrintState::new();
writeln!(file_out, "{}", print_item_list(state.tab(), &v.items, true))?;
writeln!(file_out, "}}\n")?;
} else {
writeln!(file_out, "#[macro_use] use corollary_support::*;")?;
writeln!(file_out, "")?;
let state = PrintState::new();
writeln!(file_out, "{}", print_item_list(state, &v.items, true))?;
}
}
}
Err(e) => {
errln!("/* ERROR: cannot convert file {:?}" ,p);
// TODO have this write to Format
print_parse_error(&contents, &simplify_parse_error(e));
errln!("*/");
panic!("COULDN'T PARSE");
}
}
Ok((file_out, rust_out))
} | identifier_body |
main.rs | #[macro_use] extern crate errln;
#[macro_use] extern crate error_chain;
extern crate clap;
extern crate hex;
extern crate lalrpop_util;
extern crate parser_haskell;
extern crate regex;
extern crate tempdir;
extern crate walkdir;
extern crate corollary;
extern crate inflector;
use parser_haskell::util::{print_parse_error, simplify_parse_error};
use clap::{Arg, App};
use regex::Regex;
use std::fmt::Write;
use std::fs::{File, create_dir_all};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::Command;
use tempdir::TempDir;
use corollary::print_item_list;
use corollary::ir::PrintState;
// Define error chain.
mod errors {
error_chain! {
foreign_links {
Walkdir(::walkdir::Error);
Io(::std::io::Error);
Fmt(::std::fmt::Error);
}
}
}
use errors::*;
#[test] #[ignore]
fn test_single_file() {
let a = "./corrode/src/Language/Rust/Corrode/C.lhs";
// let a = "./corrode/src/Language/Rust/Corrode/C.hs";
// let a = "./test/input.hs";
println!("file: {}", a);
let mut file = File::open(a).unwrap();
let mut contents = String::new();
file.read_to_string(&mut contents).unwrap();
if a.ends_with(".lhs") {
contents = strip_lhs(&contents);
}
let contents = parser_haskell::preprocess(&contents);
// let mut a = ::std::fs::File::create("temp.txt").unwrap();
// a.write_all(contents.as_bytes());
let mut errors = Vec::new();
match parser_haskell::parse(&mut errors, &contents) {
Ok(okay) => println!("{:#?}", okay),
Err(e) => {
let e = simplify_parse_error(e);
print_parse_error(&contents, &e);
panic!(e);
}
}
}
#[test]
fn test_no_regressions() {
let a = vec![
"../deps/corrode/src/Language/Rust/AST.hs",
"../deps/corrode/src/Language/Rust/Corrode/C.lhs",
"../deps/corrode/src/Language/Rust/Corrode/CFG.lhs",
"../deps/corrode/src/Language/Rust/Corrode/CrateMap.hs",
"../deps/corrode/src/Language/Rust/Idiomatic.hs",
"../deps/corrode/src/Language/Rust.hs",
"../deps/language-c/src/Language/C/Analysis/AstAnalysis.hs",
"../deps/language-c/src/Language/C/Analysis/Builtins.hs",
"../deps/language-c/src/Language/C/Analysis/ConstEval.hs",
"../deps/language-c/src/Language/C/Analysis/Debug.hs",
"../deps/language-c/src/Language/C/Analysis/DeclAnalysis.hs",
"../deps/language-c/src/Language/C/Analysis/DefTable.hs",
"../deps/language-c/src/Language/C/Analysis/Export.hs",
"../deps/language-c/src/Language/C/Analysis/NameSpaceMap.hs",
"../deps/language-c/src/Language/C/Analysis/SemError.hs",
"../deps/language-c/src/Language/C/Analysis/SemRep.hs",
"../deps/language-c/src/Language/C/Analysis/TravMonad.hs",
"../deps/language-c/src/Language/C/Analysis/TypeCheck.hs",
"../deps/language-c/src/Language/C/Analysis/TypeConversions.hs",
"../deps/language-c/src/Language/C/Analysis/TypeUtils.hs",
"../deps/language-c/src/Language/C/Analysis.hs",
"../deps/language-c/src/Language/C/Data/Error.hs",
"../deps/language-c/src/Language/C/Data/Ident.hs",
"../deps/language-c/src/Language/C/Data/InputStream.hs",
"../deps/language-c/src/Language/C/Data/Name.hs",
"../deps/language-c/src/Language/C/Data/Node.hs",
"../deps/language-c/src/Language/C/Data/Position.hs",
"../deps/language-c/src/Language/C/Data/RList.hs",
"../deps/language-c/src/Language/C/Data.hs",
"../deps/language-c/src/Language/C/Parser/Builtin.hs",
"../deps/language-c/src/Language/C/Parser/ParserMonad.hs",
"../deps/language-c/src/Language/C/Parser/Tokens.hs",
"../deps/language-c/src/Language/C/Parser.hs",
"../deps/language-c/src/Language/C/Pretty.hs",
"../deps/language-c/src/Language/C/Syntax/AST.hs",
"../deps/language-c/src/Language/C/Syntax/Constants.hs",
"../deps/language-c/src/Language/C/Syntax/Ops.hs",
"../deps/language-c/src/Language/C/Syntax/Utils.hs",
"../deps/language-c/src/Language/C/Syntax.hs",
"../deps/language-c/src/Language/C/System/GCC.hs",
"../deps/language-c/src/Language/C/System/Preprocess.hs",
"../parser-c/gen/Lexer.hs",
"../parser-c/gen/Parser.hs",
];
for path in a {
let mut file = File::open(path).unwrap();
let mut contents = String::new();
file.read_to_string(&mut contents).unwrap();
if path.ends_with(".lhs") {
contents = strip_lhs(&contents);
}
let contents = parser_haskell::preprocess(&contents);
// Do not output preprocessed data temp.txt
println!("{:?}", path);
// use ::std::io::Write;
// let mut a = ::std::fs::File::create("temp.txt").unwrap();
// a.write_all(contents.as_bytes());
let mut errors = Vec::new();
match parser_haskell::parse(&mut errors, &contents) {
Ok(_) => {
// OK
}
Err(e) => {
//TODO print_parse_error return string, feed to panic
print_parse_error(&contents, &simplify_parse_error(e));
panic!("cannot convert file {:?}", path);
}
}
}
}
fn strip_lhs(s: &str) -> String {
let re = Regex::new(r"([ \t]*)```haskell([\s\S]*?)```").unwrap();
let mut out = vec![];
for cap in re.captures_iter(&s) {
let indent = cap[1].to_string().len();
let group = cap[2].to_string()
.lines()
.map(|x| {
x.chars().skip(indent).collect::<String>()
})
.collect::<Vec<_>>()
.join("\n");
out.push(group);
}
out.join("\n\n")
}
/// Converts a Haskell file by its path into a Rust module.
fn convert_file(input: &str, p: &Path, inline_mod: bool, dump_ast: bool) -> Result<(String, String)> {
let mut contents = input.to_string();
let mut file_out = String::new();
let mut rust_out = String::new();
// Parse out HASKELL /HASKELL RUST /RUST sections.
let re = Regex::new(r#"HASKELL[\s\S]*?/HASKELL"#).unwrap();
contents = re.replace(&contents, "").to_string();
let re = Regex::new(r#"RUST([\s\S]*?)/RUST"#).unwrap();
if let Some(cap) = re.captures(&contents) {
rust_out.push_str(&cap.get(1).unwrap().as_str().to_string());
}
contents = re.replace(&contents, "").to_string();
// Preprocess the file.
let contents = parser_haskell::preprocess(&contents);
// errln!("{}", contents);
// Parse the file.
let mut errors = Vec::new();
match parser_haskell::parse(&mut errors, &contents) {
Ok(v) => {
// errln!("{:?}", v);
if dump_ast {
println!("{}", format!("{:#?}", v).replace(" ", " "));
} else {
writeln!(file_out, "// Original file: {:?}", p.file_name().unwrap())?;
writeln!(file_out, "// File auto-generated using Corollary.")?;
writeln!(file_out, "")?;
if inline_mod {
writeln!(file_out, "pub mod {} {{", v.name.0.replace(".", "_"))?;
writeln!(file_out, " use haskell_support::*;")?;
writeln!(file_out, "")?;
let state = PrintState::new();
writeln!(file_out, "{}", print_item_list(state.tab(), &v.items, true))?;
writeln!(file_out, "}}\n")?;
} else {
writeln!(file_out, "#[macro_use] use corollary_support::*;")?;
writeln!(file_out, "")?;
let state = PrintState::new();
writeln!(file_out, "{}", print_item_list(state, &v.items, true))?;
}
}
}
Err(e) => {
errln!("/* ERROR: cannot convert file {:?}" ,p);
// TODO have this write to Format
print_parse_error(&contents, &simplify_parse_error(e));
errln!("*/");
panic!("COULDN'T PARSE");
}
}
Ok((file_out, rust_out))
}
quick_main!(run);
fn run() -> Result<()> {
use std::io::Write;
let matches = App::new("corollary")
.version("0.1")
.about("Converts Haskell to Rust")
.arg(Arg::with_name("run")
.short("r")
.long("run")
.help("Runs the file"))
.arg(Arg::with_name("out")
.short("o")
.long("out")
.help("Output path")
.takes_value(true))
.arg(Arg::with_name("ast")
.short("a")
.long("ast")
.help("Dump AST"))
.arg(Arg::with_name("INPUT")
.help("Sets the input file to use")
.required(true)
.index(1))
.get_matches();
let arg_input = matches.value_of("INPUT").unwrap();
let arg_run = matches.is_present("run");
let arg_out: Option<_> = matches.value_of("out");
let arg_ast = matches.is_present("ast");
if arg_run && arg_out.is_some() {
bail!("Cannot use --out and --run at the same time."); | }
// Starting message.
if arg_run {
errln!("running {:?}...", arg_input);
} else {
errln!("cross-compiling {:?}...", arg_input);
}
// Read file contents.
let mut file = File::open(arg_input)
.chain_err(|| format!("Could not open {:?}", arg_input))?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
// Preprocess the file. Translate .lhs.
if arg_input.ends_with(".lhs") {
contents = strip_lhs(&contents);
}
let (mut file_section, rust_section) = convert_file(&contents, &PathBuf::from(arg_input), false, arg_ast)?;
if arg_ast {
return Ok(());
}
// Add Rust segments RUST ... /RUST and Haskell support code.
let _ = writeln!(file_section, "");
let _ = writeln!(file_section, "");
if rust_section.len() > 0 {
let _ = writeln!(file_section, "/* RUST ... /RUST */");
let _ = writeln!(file_section, "{}", rust_section);
}
if let Some(out_path) = arg_out {
// Create directory.
let _ = create_dir_all(&Path::new(&arg_out.unwrap()).parent().unwrap());
// Write file to path.
errln!("... outputting to {:?}", out_path);
let mut f = File::create(&out_path)?;
let _ = f.write_all(file_section.as_bytes());
} else if !arg_run {
// Print file to stdout.
print!("{}", file_section);
} else if arg_run {
// Run the file.
let dir = TempDir::new("corollary")?;
let file_path = dir.path().join("script.rs");
let mut f = File::create(&file_path)?;
let _ = f.write_all(b"// cargo-deps: corollary-support={path=\"/Users/trim/Desktop/corrode-but-in-rust/corollary-support\"}\n\nextern crate corollary_support;\n\n");
let _ = f.write_all(file_section.as_bytes());
if rust_section.len() == 0 {
let _ = f.write_all(b"\n\nfn main() { let _ = __main(); }\n");
}
drop(f);
let output = Command::new("cargo")
.args(&["script", &file_path.display().to_string()])
.output()
.expect("failed to execute process");
if !output.status.success() {
err!("{}", String::from_utf8_lossy(&output.stderr));
}
err!("{}", String::from_utf8_lossy(&output.stdout));
::std::process::exit(output.status.code().unwrap());
}
Ok(())
} | }
if (arg_run || arg_out.is_some()) && arg_ast {
bail!("Cannot use --ast and (--run or --out) at the same time."); | random_line_split |
main.rs | #[macro_use] extern crate errln;
#[macro_use] extern crate error_chain;
extern crate clap;
extern crate hex;
extern crate lalrpop_util;
extern crate parser_haskell;
extern crate regex;
extern crate tempdir;
extern crate walkdir;
extern crate corollary;
extern crate inflector;
use parser_haskell::util::{print_parse_error, simplify_parse_error};
use clap::{Arg, App};
use regex::Regex;
use std::fmt::Write;
use std::fs::{File, create_dir_all};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::Command;
use tempdir::TempDir;
use corollary::print_item_list;
use corollary::ir::PrintState;
// Define error chain.
mod errors {
error_chain! {
foreign_links {
Walkdir(::walkdir::Error);
Io(::std::io::Error);
Fmt(::std::fmt::Error);
}
}
}
use errors::*;
#[test] #[ignore]
fn test_single_file() {
let a = "./corrode/src/Language/Rust/Corrode/C.lhs";
// let a = "./corrode/src/Language/Rust/Corrode/C.hs";
// let a = "./test/input.hs";
println!("file: {}", a);
let mut file = File::open(a).unwrap();
let mut contents = String::new();
file.read_to_string(&mut contents).unwrap();
if a.ends_with(".lhs") {
contents = strip_lhs(&contents);
}
let contents = parser_haskell::preprocess(&contents);
// let mut a = ::std::fs::File::create("temp.txt").unwrap();
// a.write_all(contents.as_bytes());
let mut errors = Vec::new();
match parser_haskell::parse(&mut errors, &contents) {
Ok(okay) => println!("{:#?}", okay),
Err(e) => {
let e = simplify_parse_error(e);
print_parse_error(&contents, &e);
panic!(e);
}
}
}
#[test]
fn test_no_regressions() {
let a = vec![
"../deps/corrode/src/Language/Rust/AST.hs",
"../deps/corrode/src/Language/Rust/Corrode/C.lhs",
"../deps/corrode/src/Language/Rust/Corrode/CFG.lhs",
"../deps/corrode/src/Language/Rust/Corrode/CrateMap.hs",
"../deps/corrode/src/Language/Rust/Idiomatic.hs",
"../deps/corrode/src/Language/Rust.hs",
"../deps/language-c/src/Language/C/Analysis/AstAnalysis.hs",
"../deps/language-c/src/Language/C/Analysis/Builtins.hs",
"../deps/language-c/src/Language/C/Analysis/ConstEval.hs",
"../deps/language-c/src/Language/C/Analysis/Debug.hs",
"../deps/language-c/src/Language/C/Analysis/DeclAnalysis.hs",
"../deps/language-c/src/Language/C/Analysis/DefTable.hs",
"../deps/language-c/src/Language/C/Analysis/Export.hs",
"../deps/language-c/src/Language/C/Analysis/NameSpaceMap.hs",
"../deps/language-c/src/Language/C/Analysis/SemError.hs",
"../deps/language-c/src/Language/C/Analysis/SemRep.hs",
"../deps/language-c/src/Language/C/Analysis/TravMonad.hs",
"../deps/language-c/src/Language/C/Analysis/TypeCheck.hs",
"../deps/language-c/src/Language/C/Analysis/TypeConversions.hs",
"../deps/language-c/src/Language/C/Analysis/TypeUtils.hs",
"../deps/language-c/src/Language/C/Analysis.hs",
"../deps/language-c/src/Language/C/Data/Error.hs",
"../deps/language-c/src/Language/C/Data/Ident.hs",
"../deps/language-c/src/Language/C/Data/InputStream.hs",
"../deps/language-c/src/Language/C/Data/Name.hs",
"../deps/language-c/src/Language/C/Data/Node.hs",
"../deps/language-c/src/Language/C/Data/Position.hs",
"../deps/language-c/src/Language/C/Data/RList.hs",
"../deps/language-c/src/Language/C/Data.hs",
"../deps/language-c/src/Language/C/Parser/Builtin.hs",
"../deps/language-c/src/Language/C/Parser/ParserMonad.hs",
"../deps/language-c/src/Language/C/Parser/Tokens.hs",
"../deps/language-c/src/Language/C/Parser.hs",
"../deps/language-c/src/Language/C/Pretty.hs",
"../deps/language-c/src/Language/C/Syntax/AST.hs",
"../deps/language-c/src/Language/C/Syntax/Constants.hs",
"../deps/language-c/src/Language/C/Syntax/Ops.hs",
"../deps/language-c/src/Language/C/Syntax/Utils.hs",
"../deps/language-c/src/Language/C/Syntax.hs",
"../deps/language-c/src/Language/C/System/GCC.hs",
"../deps/language-c/src/Language/C/System/Preprocess.hs",
"../parser-c/gen/Lexer.hs",
"../parser-c/gen/Parser.hs",
];
for path in a {
let mut file = File::open(path).unwrap();
let mut contents = String::new();
file.read_to_string(&mut contents).unwrap();
if path.ends_with(".lhs") {
contents = strip_lhs(&contents);
}
let contents = parser_haskell::preprocess(&contents);
// Do not output preprocessed data temp.txt
println!("{:?}", path);
// use ::std::io::Write;
// let mut a = ::std::fs::File::create("temp.txt").unwrap();
// a.write_all(contents.as_bytes());
let mut errors = Vec::new();
match parser_haskell::parse(&mut errors, &contents) {
Ok(_) => {
// OK
}
Err(e) => {
//TODO print_parse_error return string, feed to panic
print_parse_error(&contents, &simplify_parse_error(e));
panic!("cannot convert file {:?}", path);
}
}
}
}
fn strip_lhs(s: &str) -> String {
let re = Regex::new(r"([ \t]*)```haskell([\s\S]*?)```").unwrap();
let mut out = vec![];
for cap in re.captures_iter(&s) {
let indent = cap[1].to_string().len();
let group = cap[2].to_string()
.lines()
.map(|x| {
x.chars().skip(indent).collect::<String>()
})
.collect::<Vec<_>>()
.join("\n");
out.push(group);
}
out.join("\n\n")
}
/// Converts a Haskell file by its path into a Rust module.
fn | (input: &str, p: &Path, inline_mod: bool, dump_ast: bool) -> Result<(String, String)> {
let mut contents = input.to_string();
let mut file_out = String::new();
let mut rust_out = String::new();
// Parse out HASKELL /HASKELL RUST /RUST sections.
let re = Regex::new(r#"HASKELL[\s\S]*?/HASKELL"#).unwrap();
contents = re.replace(&contents, "").to_string();
let re = Regex::new(r#"RUST([\s\S]*?)/RUST"#).unwrap();
if let Some(cap) = re.captures(&contents) {
rust_out.push_str(&cap.get(1).unwrap().as_str().to_string());
}
contents = re.replace(&contents, "").to_string();
// Preprocess the file.
let contents = parser_haskell::preprocess(&contents);
// errln!("{}", contents);
// Parse the file.
let mut errors = Vec::new();
match parser_haskell::parse(&mut errors, &contents) {
Ok(v) => {
// errln!("{:?}", v);
if dump_ast {
println!("{}", format!("{:#?}", v).replace(" ", " "));
} else {
writeln!(file_out, "// Original file: {:?}", p.file_name().unwrap())?;
writeln!(file_out, "// File auto-generated using Corollary.")?;
writeln!(file_out, "")?;
if inline_mod {
writeln!(file_out, "pub mod {} {{", v.name.0.replace(".", "_"))?;
writeln!(file_out, " use haskell_support::*;")?;
writeln!(file_out, "")?;
let state = PrintState::new();
writeln!(file_out, "{}", print_item_list(state.tab(), &v.items, true))?;
writeln!(file_out, "}}\n")?;
} else {
writeln!(file_out, "#[macro_use] use corollary_support::*;")?;
writeln!(file_out, "")?;
let state = PrintState::new();
writeln!(file_out, "{}", print_item_list(state, &v.items, true))?;
}
}
}
Err(e) => {
errln!("/* ERROR: cannot convert file {:?}" ,p);
// TODO have this write to Format
print_parse_error(&contents, &simplify_parse_error(e));
errln!("*/");
panic!("COULDN'T PARSE");
}
}
Ok((file_out, rust_out))
}
quick_main!(run);
fn run() -> Result<()> {
use std::io::Write;
let matches = App::new("corollary")
.version("0.1")
.about("Converts Haskell to Rust")
.arg(Arg::with_name("run")
.short("r")
.long("run")
.help("Runs the file"))
.arg(Arg::with_name("out")
.short("o")
.long("out")
.help("Output path")
.takes_value(true))
.arg(Arg::with_name("ast")
.short("a")
.long("ast")
.help("Dump AST"))
.arg(Arg::with_name("INPUT")
.help("Sets the input file to use")
.required(true)
.index(1))
.get_matches();
let arg_input = matches.value_of("INPUT").unwrap();
let arg_run = matches.is_present("run");
let arg_out: Option<_> = matches.value_of("out");
let arg_ast = matches.is_present("ast");
if arg_run && arg_out.is_some() {
bail!("Cannot use --out and --run at the same time.");
}
if (arg_run || arg_out.is_some()) && arg_ast {
bail!("Cannot use --ast and (--run or --out) at the same time.");
}
// Starting message.
if arg_run {
errln!("running {:?}...", arg_input);
} else {
errln!("cross-compiling {:?}...", arg_input);
}
// Read file contents.
let mut file = File::open(arg_input)
.chain_err(|| format!("Could not open {:?}", arg_input))?;
let mut contents = String::new();
file.read_to_string(&mut contents)?;
// Preprocess the file. Translate .lhs.
if arg_input.ends_with(".lhs") {
contents = strip_lhs(&contents);
}
let (mut file_section, rust_section) = convert_file(&contents, &PathBuf::from(arg_input), false, arg_ast)?;
if arg_ast {
return Ok(());
}
// Add Rust segments RUST ... /RUST and Haskell support code.
let _ = writeln!(file_section, "");
let _ = writeln!(file_section, "");
if rust_section.len() > 0 {
let _ = writeln!(file_section, "/* RUST ... /RUST */");
let _ = writeln!(file_section, "{}", rust_section);
}
if let Some(out_path) = arg_out {
// Create directory.
let _ = create_dir_all(&Path::new(&arg_out.unwrap()).parent().unwrap());
// Write file to path.
errln!("... outputting to {:?}", out_path);
let mut f = File::create(&out_path)?;
let _ = f.write_all(file_section.as_bytes());
} else if !arg_run {
// Print file to stdout.
print!("{}", file_section);
} else if arg_run {
// Run the file.
let dir = TempDir::new("corollary")?;
let file_path = dir.path().join("script.rs");
let mut f = File::create(&file_path)?;
let _ = f.write_all(b"// cargo-deps: corollary-support={path=\"/Users/trim/Desktop/corrode-but-in-rust/corollary-support\"}\n\nextern crate corollary_support;\n\n");
let _ = f.write_all(file_section.as_bytes());
if rust_section.len() == 0 {
let _ = f.write_all(b"\n\nfn main() { let _ = __main(); }\n");
}
drop(f);
let output = Command::new("cargo")
.args(&["script", &file_path.display().to_string()])
.output()
.expect("failed to execute process");
if !output.status.success() {
err!("{}", String::from_utf8_lossy(&output.stderr));
}
err!("{}", String::from_utf8_lossy(&output.stdout));
::std::process::exit(output.status.code().unwrap());
}
Ok(())
}
| convert_file | identifier_name |
main.rs | #[macro_use] extern crate errln;
#[macro_use] extern crate error_chain;
extern crate clap;
extern crate hex;
extern crate lalrpop_util;
extern crate parser_haskell;
extern crate regex;
extern crate tempdir;
extern crate walkdir;
extern crate corollary;
extern crate inflector;
use parser_haskell::util::{print_parse_error, simplify_parse_error};
use clap::{Arg, App};
use regex::Regex;
use std::fmt::Write;
use std::fs::{File, create_dir_all};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::Command;
use tempdir::TempDir;
use corollary::print_item_list;
use corollary::ir::PrintState;
// Define error chain.
mod errors {
error_chain! {
foreign_links {
Walkdir(::walkdir::Error);
Io(::std::io::Error);
Fmt(::std::fmt::Error);
}
}
}
use errors::*;
#[test] #[ignore]
fn test_single_file() {
let a = "./corrode/src/Language/Rust/Corrode/C.lhs";
// let a = "./corrode/src/Language/Rust/Corrode/C.hs";
// let a = "./test/input.hs";
println!("file: {}", a);
let mut file = File::open(a).unwrap();
let mut contents = String::new();
file.read_to_string(&mut contents).unwrap();
if a.ends_with(".lhs") {
contents = strip_lhs(&contents);
}
let contents = parser_haskell::preprocess(&contents);
// let mut a = ::std::fs::File::create("temp.txt").unwrap();
// a.write_all(contents.as_bytes());
let mut errors = Vec::new();
match parser_haskell::parse(&mut errors, &contents) {
Ok(okay) => println!("{:#?}", okay),
Err(e) => {
let e = simplify_parse_error(e);
print_parse_error(&contents, &e);
panic!(e);
}
}
}
#[test]
fn test_no_regressions() {
let a = vec![
"../deps/corrode/src/Language/Rust/AST.hs",
"../deps/corrode/src/Language/Rust/Corrode/C.lhs",
"../deps/corrode/src/Language/Rust/Corrode/CFG.lhs",
"../deps/corrode/src/Language/Rust/Corrode/CrateMap.hs",
"../deps/corrode/src/Language/Rust/Idiomatic.hs",
"../deps/corrode/src/Language/Rust.hs",
"../deps/language-c/src/Language/C/Analysis/AstAnalysis.hs",
"../deps/language-c/src/Language/C/Analysis/Builtins.hs",
"../deps/language-c/src/Language/C/Analysis/ConstEval.hs",
"../deps/language-c/src/Language/C/Analysis/Debug.hs",
"../deps/language-c/src/Language/C/Analysis/DeclAnalysis.hs",
"../deps/language-c/src/Language/C/Analysis/DefTable.hs",
"../deps/language-c/src/Language/C/Analysis/Export.hs",
"../deps/language-c/src/Language/C/Analysis/NameSpaceMap.hs",
"../deps/language-c/src/Language/C/Analysis/SemError.hs",
"../deps/language-c/src/Language/C/Analysis/SemRep.hs",
"../deps/language-c/src/Language/C/Analysis/TravMonad.hs",
"../deps/language-c/src/Language/C/Analysis/TypeCheck.hs",
"../deps/language-c/src/Language/C/Analysis/TypeConversions.hs",
"../deps/language-c/src/Language/C/Analysis/TypeUtils.hs",
"../deps/language-c/src/Language/C/Analysis.hs",
"../deps/language-c/src/Language/C/Data/Error.hs",
"../deps/language-c/src/Language/C/Data/Ident.hs",
"../deps/language-c/src/Language/C/Data/InputStream.hs",
"../deps/language-c/src/Language/C/Data/Name.hs",
"../deps/language-c/src/Language/C/Data/Node.hs",
"../deps/language-c/src/Language/C/Data/Position.hs",
"../deps/language-c/src/Language/C/Data/RList.hs",
"../deps/language-c/src/Language/C/Data.hs",
"../deps/language-c/src/Language/C/Parser/Builtin.hs",
"../deps/language-c/src/Language/C/Parser/ParserMonad.hs",
"../deps/language-c/src/Language/C/Parser/Tokens.hs",
"../deps/language-c/src/Language/C/Parser.hs",
"../deps/language-c/src/Language/C/Pretty.hs",
"../deps/language-c/src/Language/C/Syntax/AST.hs",
"../deps/language-c/src/Language/C/Syntax/Constants.hs",
"../deps/language-c/src/Language/C/Syntax/Ops.hs",
"../deps/language-c/src/Language/C/Syntax/Utils.hs",
"../deps/language-c/src/Language/C/Syntax.hs",
"../deps/language-c/src/Language/C/System/GCC.hs",
"../deps/language-c/src/Language/C/System/Preprocess.hs",
"../parser-c/gen/Lexer.hs",
"../parser-c/gen/Parser.hs",
];
for path in a {
let mut file = File::open(path).unwrap();
let mut contents = String::new();
file.read_to_string(&mut contents).unwrap();
if path.ends_with(".lhs") {
contents = strip_lhs(&contents);
}
let contents = parser_haskell::preprocess(&contents);
// Do not output preprocessed data temp.txt
println!("{:?}", path);
// use ::std::io::Write;
// let mut a = ::std::fs::File::create("temp.txt").unwrap();
// a.write_all(contents.as_bytes());
let mut errors = Vec::new();
match parser_haskell::parse(&mut errors, &contents) {
Ok(_) => {
// OK
}
Err(e) => {
//TODO print_parse_error return string, feed to panic
print_parse_error(&contents, &simplify_parse_error(e));
panic!("cannot convert file {:?}", path);
}
}
}
}
fn strip_lhs(s: &str) -> String {
let re = Regex::new(r"([ \t]*)```haskell([\s\S]*?)```").unwrap();
let mut out = vec![];
for cap in re.captures_iter(&s) {
let indent = cap[1].to_string().len();
let group = cap[2].to_string()
.lines()
.map(|x| {
x.chars().skip(indent).collect::<String>()
})
.collect::<Vec<_>>()
.join("\n");
out.push(group);
}
out.join("\n\n")
}
/// Converts a Haskell file by its path into a Rust module.
fn convert_file(input: &str, p: &Path, inline_mod: bool, dump_ast: bool) -> Result<(String, String)> {
let mut contents = input.to_string();
let mut file_out = String::new();
let mut rust_out = String::new();
// Parse out HASKELL /HASKELL RUST /RUST sections.
let re = Regex::new(r#"HASKELL[\s\S]*?/HASKELL"#).unwrap();
contents = re.replace(&contents, "").to_string();
let re = Regex::new(r#"RUST([\s\S]*?)/RUST"#).unwrap();
if let Some(cap) = re.captures(&contents) {
rust_out.push_str(&cap.get(1).unwrap().as_str().to_string());
}
contents = re.replace(&contents, "").to_string();
// Preprocess the file.
let contents = parser_haskell::preprocess(&contents);
// errln!("{}", contents);
// Parse the file.
let mut errors = Vec::new();
match parser_haskell::parse(&mut errors, &contents) {
Ok(v) => {
// errln!("{:?}", v);
if dump_ast {
println!("{}", format!("{:#?}", v).replace(" ", " "));
} else {
writeln!(file_out, "// Original file: {:?}", p.file_name().unwrap())?;
writeln!(file_out, "// File auto-generated using Corollary.")?;
writeln!(file_out, "")?;
if inline_mod | else {
writeln!(file_out, "#[macro_use] use corollary_support::*;")?;
writeln!(file_out, "")?;
let state = PrintState::new();
writeln!(file_out, "{}", print_item_list(state, &v.items, true))?;
}
}
}
Err(e) => {
errln!("/* ERROR: cannot convert file {:?}" ,p);
// TODO have this write to Format
print_parse_error(&contents, &simplify_parse_error(e));
errln!("*/");
panic!("COULDN'T PARSE");
}
}
Ok((file_out, rust_out))
}
quick_main!(run);
/// CLI entry point: parses arguments, then converts, dumps, writes, or
/// runs the input Haskell file depending on the flags given.
///
/// Modes (mutually exclusive): `--ast` dumps the parse tree, `--out PATH`
/// writes the generated Rust to PATH, `--run` executes it via cargo-script,
/// and the default prints the generated Rust to stdout.
fn run() -> Result<()> {
    use std::io::Write;

    let matches = App::new("corollary")
        .version("0.1")
        .about("Converts Haskell to Rust")
        .arg(Arg::with_name("run")
            .short("r")
            .long("run")
            .help("Runs the file"))
        .arg(Arg::with_name("out")
            .short("o")
            .long("out")
            .help("Output path")
            .takes_value(true))
        .arg(Arg::with_name("ast")
            .short("a")
            .long("ast")
            .help("Dump AST"))
        .arg(Arg::with_name("INPUT")
            .help("Sets the input file to use")
            .required(true)
            .index(1))
        .get_matches();

    let arg_input = matches.value_of("INPUT").unwrap();
    let arg_run = matches.is_present("run");
    let arg_out: Option<_> = matches.value_of("out");
    let arg_ast = matches.is_present("ast");

    // The output modes are mutually exclusive.
    if arg_run && arg_out.is_some() {
        bail!("Cannot use --out and --run at the same time.");
    }
    if (arg_run || arg_out.is_some()) && arg_ast {
        bail!("Cannot use --ast and (--run or --out) at the same time.");
    }

    // Starting message.
    if arg_run {
        errln!("running {:?}...", arg_input);
    } else {
        errln!("cross-compiling {:?}...", arg_input);
    }

    // Read file contents.
    let mut file = File::open(arg_input)
        .chain_err(|| format!("Could not open {:?}", arg_input))?;
    let mut contents = String::new();
    file.read_to_string(&mut contents)?;

    // Preprocess: translate literate Haskell (.lhs) to plain Haskell.
    if arg_input.ends_with(".lhs") {
        contents = strip_lhs(&contents);
    }

    let (mut file_section, rust_section) =
        convert_file(&contents, &PathBuf::from(arg_input), false, arg_ast)?;
    if arg_ast {
        // --ast only dumps the parse tree; nothing further to emit.
        return Ok(());
    }

    // Add Rust segments RUST ... /RUST and Haskell support code.
    let _ = writeln!(file_section, "");
    let _ = writeln!(file_section, "");
    if !rust_section.is_empty() {
        let _ = writeln!(file_section, "/* RUST ... /RUST */");
        let _ = writeln!(file_section, "{}", rust_section);
    }

    if let Some(out_path) = arg_out {
        // Create the parent directory. (Was `arg_out.unwrap()`; `out_path`
        // is already the unwrapped value from the `if let`.)
        let _ = create_dir_all(&Path::new(out_path).parent().unwrap());
        // Write file to path.
        errln!("... outputting to {:?}", out_path);
        let mut f = File::create(&out_path)?;
        let _ = f.write_all(file_section.as_bytes());
    } else if !arg_run {
        // Print file to stdout.
        print!("{}", file_section);
    } else {
        // Run the file via cargo-script from a temporary directory.
        let dir = TempDir::new("corollary")?;
        let file_path = dir.path().join("script.rs");
        let mut f = File::create(&file_path)?;
        let _ = f.write_all(b"// cargo-deps: corollary-support={path=\"/Users/trim/Desktop/corrode-but-in-rust/corollary-support\"}\n\nextern crate corollary_support;\n\n");
        let _ = f.write_all(file_section.as_bytes());
        if rust_section.is_empty() {
            // No user-supplied Rust section, so no main(): synthesize one
            // that invokes the translated Haskell entry point.
            let _ = f.write_all(b"\n\nfn main() { let _ = __main(); }\n");
        }
        drop(f);

        let output = Command::new("cargo")
            .args(&["script", &file_path.display().to_string()])
            .output()
            .expect("failed to execute process");
        if !output.status.success() {
            err!("{}", String::from_utf8_lossy(&output.stderr));
        }
        err!("{}", String::from_utf8_lossy(&output.stdout));
        // NOTE(review): `code()` is None if the child was killed by a
        // signal; this unwrap would panic in that case — confirm acceptable.
        ::std::process::exit(output.status.code().unwrap());
    }
    Ok(())
}
| {
writeln!(file_out, "pub mod {} {{", v.name.0.replace(".", "_"))?;
writeln!(file_out, " use haskell_support::*;")?;
writeln!(file_out, "")?;
let state = PrintState::new();
writeln!(file_out, "{}", print_item_list(state.tab(), &v.items, true))?;
writeln!(file_out, "}}\n")?;
} | conditional_block |
bgs-utils.ts | import { CardIds, GameType, Race, ReferenceCard } from '../public-api';
// Every tribe that can be part of a Battlegrounds lobby.
export const ALL_BG_RACES = [
	Race.BEAST,
	Race.DEMON,
	Race.DRAGON,
	Race.MECH,
	Race.MURLOC,
	Race.PIRATE,
	Race.ELEMENTAL,
	Race.QUILBOAR,
	Race.NAGA,
	Race.UNDEAD,
];
// Number of tribes active in a single game.
export const TOTAL_RACES_IN_GAME = 5;
// Minion card ids that can never be bought from the shop: tokens spawned by
// other cards, hero-power skins, and generated minions. Entries are grouped
// by the patch that introduced them (see inline version comments).
export const NON_BUYABLE_MINION_IDS = [
	CardIds.Cuddlgam_TB_BaconShop_HP_033t_SKIN_A,
	CardIds.Cuddlgam_TB_BaconShop_HP_033t_SKIN_A_G,
	CardIds.AbominableAmalgam_TB_BaconShop_HP_033t_SKIN_D,
	CardIds.AbominableAmalgam_TB_BaconShop_HP_033t_SKIN_D_G,
	CardIds.BookEatingAmalgam_TB_BaconShop_HP_033t_SKIN_B,
	CardIds.BookEatingAmalgam_TB_BaconShop_HP_033t_SKIN_B_G,
	CardIds.ArgentBraggart_BG_SCH_149,
	CardIds.ArgentBraggart_TB_BaconUps_308,
	CardIds.AvatarOfNzoth_FishOfNzothToken,
	CardIds.FishOfNzoth,
	CardIds.CattlecarpOfNzoth_TB_BaconShop_HP_105t_SKIN_A,
	CardIds.CattlecarpOfNzoth_TB_BaconShop_HP_105t_SKIN_A_G,
	CardIds.SnakeTrap_SnakeLegacyToken,
	CardIds.SnakeTrap_SnakeVanillaToken,
	CardIds.ImprovedSnakeTrap_SnakeToken,
	CardIds.ElementEarth_StoneElementalToken,
	CardIds.BabyKrush_DevilsaurToken,
	CardIds.Devilsaur,
	// To remove once the cards list is properly updated
	CardIds.FriendOfAFriend_BG22_404,
	CardIds.FriendOfAFriend_BG22_404_G,
	CardIds.Onyxia_OnyxianWhelpToken,
	CardIds.MurlocWarleaderCore,
	CardIds.MurlocWarleaderVanilla,
	// 23.4, probably not needed since they are already tokens
	CardIds.Tentacular_OzumatsTentacleToken_BG23_HERO_201pt,
	CardIds.Tentacular_OzumatsTentacleToken_BG23_HERO_201pt2,
	CardIds.Tentacular_OzumatsTentacleToken_BG23_HERO_201pt3,
	CardIds.Tentacular_OzumatsTentacleToken_BG23_HERO_201pt4,
	CardIds.Tentacular_OzumatsTentacleToken_BG23_HERO_201pt5,
	CardIds.Tentacular_OzumatsTentacleToken_BG23_HERO_201pt6,
	// 24.0, same
	CardIds.EmperorCobraLegacy_BG_EX1_170,
	CardIds.EmperorCobraLegacy_BG_EX1_170_G,
	CardIds.EmperorCobraLegacy_EX1_170,
	CardIds.SnakeLegacyToken,
	CardIds.SnakeLegacy,
	CardIds.ElementEarth_StoneElementalToken,
	CardIds.BolvarFireblood_CORE_ICC_858,
	CardIds.BolvarFireblood_ICC_858,
	// 25.2
	CardIds.HandlessForsaken_HelpingHandToken_BG25_010t,
	CardIds.HandlessForsaken_HelpingHandToken_BG25_010_Gt,
	CardIds.GeneralDrakkisath_SmolderwingToken_BG25_309t,
	CardIds.GeneralDrakkisath_SmolderwingToken_BG25_309_Gt,
	CardIds.MechaJaraxxus_RustedReggieToken,
	CardIds.MechaJaraxxus_MagtheridonPrimeToken,
	CardIds.MechaJaraxxus_BaltharakToken,
	CardIds.RustedReggie,
	CardIds.MagtheridonPrime,
	CardIds.Baltharak,
	CardIds.Shudderskull_TB_BaconShop_HP_022t_SKIN_C,
	CardIds.Shudderskull_TB_BaconShop_HP_022t_SKIN_C_G,
	CardIds.OzumatsTentacle_BG23_HERO_201pt_SKIN_A,
	CardIds.OzumatsTentacle_BG23_HERO_201pt_SKIN_A_G,
	// 26.2
	CardIds.Manasaber_CublingToken_BG26_800t,
	CardIds.Manasaber_CublingToken_BG26_800_Gt,
	CardIds.TentacleOfOctosariToken_BG26_803t,
	CardIds.TentacleOfOctosariToken_BG26_803_Gt,
];
| CardIds.SpiritRaptor_BG22_HERO_001_Buddy,
CardIds.RagingContender_TB_BaconShop_HERO_67_Buddy,
CardIds.CaptainFairmount_BG21_HERO_000_Buddy,
CardIds.FlightTrainer_BG20_HERO_283_Buddy,
CardIds.JandicesApprentice_TB_BaconShop_HERO_71_Buddy,
CardIds.CrimsonHandCenturion_TB_BaconShop_HERO_60_Buddy,
CardIds.CrazyMonkey_TB_BaconShop_HERO_38_Buddy,
CardIds.ShadowWarden_TB_BaconShop_HERO_62_Buddy,
CardIds.NexusLord_TB_BaconShop_HERO_58_Buddy,
CardIds.LeiFlamepaw_BG20_HERO_202_Buddy,
CardIds.Watfin_BG23_HERO_303_Buddy,
CardIds.NightmareEctoplasm_BG20_HERO_301_Buddy,
CardIds.ManyWhelps_BG22_HERO_305_Buddy,
CardIds.DranoshSaurfang_BG20_HERO_102_Buddy,
CardIds.Tamuzo_BG23_HERO_201_Buddy,
CardIds.MaxwellMightySteed_TB_BaconShop_HERO_40_Buddy,
CardIds.Crabby_BG22_HERO_000_Buddy,
CardIds.Mishmash_TB_BaconShop_HERO_33_Buddy,
CardIds.PigeonLord_TB_BaconShop_HERO_12_Buddy,
CardIds.VardensAquarrior_BG22_HERO_004_Buddy,
CardIds.AcolyteOfYoggSaron_TB_BaconShop_HERO_35_Buddy,
CardIds.SubmersibleChef_BG22_HERO_201_Buddy,
];
// Hero buddies paired with the tribe they rely on — presumably used to
// filter buddies out when their tribe is not in the lobby (confirm at call
// sites; the usage is outside this file).
export const BUDDIES_TRIBE_REQUIREMENTS = [
	{
		buddy: CardIds.Vaelastrasz_TB_BaconShop_HERO_56_Buddy,
		tribe: Race.DRAGON,
	},
	{
		buddy: CardIds.DeathsHeadSage_BG20_HERO_103_Buddy,
		tribe: Race.QUILBOAR,
	},
	{
		buddy: CardIds.SparkfinSoothsayer_TB_BaconShop_HERO_55_Buddy,
		tribe: Race.MURLOC,
	},
	{
		buddy: CardIds.SubScrubber_BG22_HERO_200_Buddy,
		tribe: Race.MECH,
	},
	{
		buddy: CardIds.CoilfangElite_BG23_HERO_304_Buddy,
		tribe: Race.NAGA,
	},
	{
		buddy: CardIds.Kilrek_TB_BaconShop_HERO_37_Buddy,
		tribe: Race.DEMON,
	},
	{
		buddy: CardIds.ElementiumSquirrelBomb_TB_BaconShop_HERO_17_Buddy,
		tribe: Race.MECH,
	},
	{
		buddy: CardIds.TuskarrRaider_TB_BaconShop_HERO_18_Buddy,
		tribe: Race.PIRATE,
	},
	{
		buddy: CardIds.Festergut_BG25_HERO_100_Buddy,
		tribe: Race.UNDEAD,
	},
	{
		buddy: CardIds.ImperialDefender_BG22_HERO_007_Buddy,
		tribe: Race.NAGA,
	},
	{
		buddy: CardIds.ValithriaDreamwalker_TB_BaconShop_HERO_53_Buddy,
		tribe: Race.DRAGON,
	},
];
/**
 * Default starting health for a hero: in any Battlegrounds mode, the hero
 * card's own health (falling back to 30 when missing); 30 in every other
 * game mode.
 */
export const defaultStartingHp = (
	gameType: GameType,
	heroCardId: string,
	allCards: { getCard: (cardId: string | number) => ReferenceCard },
): number => {
	if (!isBattlegrounds(gameType)) {
		return 30;
	}
	const canonicalHeroId = normalizeHeroCardId(heroCardId, allCards);
	return allCards.getCard(canonicalHeroId).health || 30;
};
/** Whether the given game type is any of the Battlegrounds modes. */
export const isBattlegrounds = (gameType: GameType): boolean => {
	switch (gameType) {
		case GameType.GT_BATTLEGROUNDS:
		case GameType.GT_BATTLEGROUNDS_FRIENDLY:
		case GameType.GT_BATTLEGROUNDS_AI_VS_AI:
		case GameType.GT_BATTLEGROUNDS_PLAYER_VS_AI:
			return true;
		default:
			return false;
	}
};
export const normalizeHeroCardId = (
heroCardId: string,
allCards: { getCard: (cardId: string | number) => ReferenceCard },
): string => {
if (!heroCardId) {
return heroCardId;
}
const normalizedAfterSkin = normalizeHeroCardIdAfterSkin(heroCardId, allCards);
switch (normalizedAfterSkin) {
case 'TB_BaconShop_HERO_59t':
return 'TB_BaconShop_HERO_59';
case CardIds.QueenAzshara_NagaQueenAzsharaToken:
return CardIds.QueenAzshara_BG22_HERO_007;
default:
return normalizedAfterSkin;
}
};
/**
 * Resolves a (possibly skinned) hero card id to its base hero id, using
 * the card database's parent link when present and a `_SKIN_` suffix
 * regex as a fallback.
 */
const normalizeHeroCardIdAfterSkin = (
	heroCardId: string,
	allCards: { getCard: (cardId: string | number) => ReferenceCard },
): string => {
	// Preferred path: the card DB links a skin to its parent hero.
	const parentDbfId = allCards.getCard(heroCardId)?.battlegroundsHeroParentDbfId;
	if (parentDbfId) {
		const parentCard = allCards.getCard(parentDbfId);
		if (parentCard) {
			return parentCard.id;
		}
	}
	// Fallback: strip a trailing _SKIN_ suffix by regex.
	const skinMatch = /(.*)_SKIN_.*/.exec(heroCardId);
	return skinMatch ? skinMatch[1] : heroCardId;
};
/** Localized display name for a tribe, via the i18n translation service. */
export const getTribeName = (tribe: Race, i18n: { translateString: (input: string) => string }): string => {
	const translationKey = `app.battlegrounds.tribes.${Race[tribe]?.toLowerCase()}`;
	return i18n.translateString(translationKey);
};
/** URL of the card art image used to represent a tribe. */
export const getTribeIcon = (tribe: string | Race): string => {
	const referenceCardId = getReferenceTribeCardId(tribe);
	return 'https://static.zerotoheroes.com/hearthstone/cardart/256x/' + referenceCardId + '.jpg';
};
/**
 * Card id of the representative minion used as the reference art for a
 * tribe. Accepts either a lowercase tribe name ('mech', 'beast', ...) or a
 * Race enum value; anything unrecognized falls back to PatientScout.
 */
export const getReferenceTribeCardId = (tribe: string | Race): string => {
	let referenceCardId: string;
	// Duck-type check: only strings have padStart, so string inputs are
	// lowercased while Race enum (number) values pass through untouched.
	tribe = (tribe as string)?.padStart ? (tribe as string).toLowerCase() : tribe;
	switch (tribe) {
		case 'mech':
		case Race.MECH:
			referenceCardId = CardIds.DeflectOBot_BGS_071;
			break;
		case 'beast':
		case Race.BEAST:
			referenceCardId = CardIds.MamaBear_BGS_021;
			break;
		case 'demon':
		case Race.DEMON:
			referenceCardId = CardIds.ImpulsiveTrickster_BG21_006;
			break;
		case 'dragon':
		case Race.DRAGON:
			referenceCardId = CardIds.KalecgosArcaneAspect_BGS_041;
			break;
		case 'murloc':
		case Race.MURLOC:
			referenceCardId = CardIds.RockpoolHunter_BG_UNG_073;
			break;
		case 'pirate':
		case Race.PIRATE:
			referenceCardId = CardIds.Scallywag_BGS_061;
			break;
		case 'elemental':
		case Race.ELEMENTAL:
			referenceCardId = CardIds.Sellemental_BGS_115;
			break;
		case 'naga':
		case Race.NAGA:
			referenceCardId = CardIds.MiniMyrmidon_BG23_000;
			break;
		case 'quilboar':
		case Race.QUILBOAR:
			referenceCardId = CardIds.SunBaconRelaxer_BG20_301;
			break;
		case 'undead':
		case Race.UNDEAD:
			referenceCardId = CardIds.EternalKnight_BG25_008;
			break;
		case 'all':
		case Race.ALL:
			referenceCardId = CardIds.Amalgadon_BGS_069;
			break;
		default:
			// Unknown / missing tribe: generic neutral minion art.
			referenceCardId = CardIds.PatientScout_BG24_715;
			break;
	}
	return referenceCardId;
};
/**
 * Hero power card id for a Battlegrounds hero card id. The input is first
 * normalized (skins / transformed tokens mapped to the base hero), then
 * looked up in this hand-maintained mapping. Older heroes use raw string
 * ids, newer ones CardIds constants.
 *
 * NOTE(review): ids that match no case fall off the switch and return
 * `undefined` despite the declared `string` return type, while `''`
 * explicitly returns `null` — confirm callers treat both the same.
 */
export const getHeroPower = (
	heroCardId: string,
	allCards: { getCard: (cardId: string | number) => ReferenceCard },
): string => {
	const normalized = normalizeHeroCardId(heroCardId, allCards);
	switch (normalized) {
		case 'TB_BaconShop_HERO_01':
			return 'TB_BaconShop_HP_001';
		case 'TB_BaconShop_HERO_02':
			return 'TB_BaconShop_HP_011';
		case 'TB_BaconShop_HERO_08':
			return 'TB_BaconShop_HP_069';
		case CardIds.RagnarosTheFirelord_TB_BaconShop_HERO_11:
			return CardIds.DieInsects_TB_BaconShop_HP_087;
		case 'TB_BaconShop_HERO_12':
			return 'TB_BaconShop_HP_041';
		case CardIds.QueenWagtoggle_TB_BaconShop_HERO_14:
			return CardIds.WaxWarband;
		case 'TB_BaconShop_HERO_15':
			return 'TB_BaconShop_HP_010';
		case 'TB_BaconShop_HERO_16':
			return 'TB_BaconShop_HP_044';
		case 'TB_BaconShop_HERO_17':
			return 'TB_BaconShop_HP_015';
		case 'TB_BaconShop_HERO_18':
			return 'TB_BaconShop_HP_072';
		case 'TB_BaconShop_HERO_20':
			return 'TB_BaconShop_HP_018';
		case 'TB_BaconShop_HERO_21':
			return 'TB_BaconShop_HP_020';
		case 'TB_BaconShop_HERO_22':
			return 'TB_BaconShop_HP_024';
		case CardIds.Shudderwock_TB_BaconShop_HERO_23:
			return CardIds.SnickerSnack;
		case 'TB_BaconShop_HERO_25':
			return 'TB_BaconShop_HP_049';
		case 'TB_BaconShop_HERO_27':
			return 'TB_BaconShop_HP_014';
		case 'TB_BaconShop_HERO_28':
			return 'TB_BaconShop_HP_028';
		case 'TB_BaconShop_HERO_30':
			return 'TB_BaconShop_HP_043';
		case 'TB_BaconShop_HERO_31':
			return 'TB_BaconShop_HP_009';
		case 'TB_BaconShop_HERO_33':
			return 'TB_BaconShop_HP_033';
		case 'TB_BaconShop_HERO_34':
			return 'TB_BaconShop_HP_035';
		case 'TB_BaconShop_HERO_35':
			return 'TB_BaconShop_HP_039';
		case 'TB_BaconShop_HERO_36':
			return 'TB_BaconShop_HP_042';
		case 'TB_BaconShop_HERO_37':
			return 'TB_BaconShop_HP_036';
		case 'TB_BaconShop_HERO_38':
			return 'TB_BaconShop_HP_038';
		case 'TB_BaconShop_HERO_39':
			return 'TB_BaconShop_HP_040';
		case 'TB_BaconShop_HERO_40':
			return 'TB_BaconShop_HP_057';
		case 'TB_BaconShop_HERO_41':
			return 'TB_BaconShop_HP_046';
		case 'TB_BaconShop_HERO_42':
			return 'TB_BaconShop_HP_047';
		case 'TB_BaconShop_HERO_43':
			return 'TB_BaconShop_HP_048';
		// case 'TB_BaconShop_HERO_44':
		// 	return 'TB_BaconShop_HP_050';
		case 'TB_BaconShop_HERO_45':
			return 'TB_BaconShop_HP_053';
		case 'TB_BaconShop_HERO_47':
			return 'TB_BaconShop_HP_051';
		case 'TB_BaconShop_HERO_49':
			return 'TB_BaconShop_HP_054';
		case 'TB_BaconShop_HERO_50':
			return 'TB_BaconShop_HP_077';
		case 'TB_BaconShop_HERO_52':
			return 'TB_BaconShop_HP_061';
		case 'TB_BaconShop_HERO_53':
			return 'TB_BaconShop_HP_062';
		case 'TB_BaconShop_HERO_55':
			return 'TB_BaconShop_HP_056';
		case 'TB_BaconShop_HERO_56':
			return 'TB_BaconShop_HP_064';
		case 'TB_BaconShop_HERO_57':
			return 'TB_BaconShop_HP_063';
		case 'TB_BaconShop_HERO_58':
			return 'TB_BaconShop_HP_052';
		// Transformed variant must be matched before the base hero id.
		case 'TB_BaconShop_HERO_59t':
			return 'TB_BaconShop_HP_065t2';
		case 'TB_BaconShop_HERO_59':
			return 'TB_BaconShop_HP_065';
		case 'TB_BaconShop_HERO_60':
			return 'TB_BaconShop_HP_066';
		case 'TB_BaconShop_HERO_61':
			return 'TB_BaconShop_HP_067';
		case 'TB_BaconShop_HERO_62':
			return 'TB_BaconShop_HP_068';
		case 'TB_BaconShop_HERO_64':
			return 'TB_BaconShop_HP_074';
		case CardIds.CaptainHooktusk_TB_BaconShop_HERO_67:
			return CardIds.TrashForTreasure;
		case 'TB_BaconShop_HERO_68':
			return 'TB_BaconShop_HP_076';
		case 'TB_BaconShop_HERO_70':
			return 'TB_BaconShop_HP_080';
		case 'TB_BaconShop_HERO_71':
			return 'TB_BaconShop_HP_084';
		case 'TB_BaconShop_HERO_72':
			return 'TB_BaconShop_HP_081';
		case 'TB_BaconShop_HERO_74':
			return 'TB_BaconShop_HP_082';
		case CardIds.Chenvaala_TB_BaconShop_HERO_78:
			return CardIds.Avalanche_TB_BaconShop_HP_088;
		case CardIds.Rakanishu_TB_BaconShop_HERO_75:
			return CardIds.TavernLighting;
		case CardIds.Alakir:
			return CardIds.SwattingInsects;
		case CardIds.ZephrysTheGreat_TB_BaconShop_HERO_91:
			return CardIds.ThreeWishes;
		case CardIds.SilasDarkmoon_TB_BaconShop_HERO_90:
			return CardIds.ComeOneComeAll;
		case CardIds.Cthun_TB_BaconShop_HERO_29:
			return CardIds.SaturdayCthuns;
		case CardIds.Nzoth:
			return CardIds.AvatarOfNzoth;
		case CardIds.Yshaarj:
			return CardIds.EmbraceYourRage;
		case CardIds.Tickatus_TB_BaconShop_HERO_94:
			return CardIds.PrizeWall;
		case CardIds.Greybough_TB_BaconShop_HERO_95:
			return CardIds.SproutItOut;
		case CardIds.OverlordSaurfang_BG20_HERO_102:
			return CardIds.OverlordSaurfang_ForTheHorde;
		case CardIds.DeathSpeakerBlackthorn_BG20_HERO_103:
			return CardIds.DeathSpeakerBlackthorn_Bloodbound;
		case CardIds.Voljin_BG20_HERO_201:
			return CardIds.Voljin_SpiritSwap_BG20_HERO_201p;
		case CardIds.Xyrella_BG20_HERO_101:
			return CardIds.Xyrella_SeeTheLight;
		case CardIds.MutanusTheDevourer_BG20_HERO_301:
			return CardIds.MutanusTheDevourer_Devour;
		case CardIds.GuffRunetotem_BG20_HERO_242:
			return CardIds.GuffRunetotem_NaturalBalance;
		case CardIds.KurtrusAshfallen_BG20_HERO_280:
			return CardIds.KurtrusAshfallen_FinalShowdown;
		case CardIds.Galewing:
			return CardIds.Galewing_DungarsGryphon;
		case CardIds.TradePrinceGallywix_TB_BaconShop_HERO_10:
			return CardIds.SmartSavings;
		case CardIds.MasterNguyen:
			return CardIds.MasterNguyen_PowerOfTheStorm;
		case CardIds.CarielRoame_BG21_HERO_000:
			return CardIds.CarielRoame_Conviction;
		case CardIds.Sneed_BG21_HERO_030:
			return CardIds.Sneed_SneedsReplicator;
		case CardIds.CookieTheCook_BG21_HERO_020:
			return CardIds.CookieTheCook_StirThePot;
		case CardIds.TamsinRoame_BG20_HERO_282:
			return CardIds.TamsinRoame_FragrantPhylactery;
		case CardIds.ScabbsCutterbutter_BG21_HERO_010:
			return CardIds.ScabbsCutterbutter_ISpy;
		case CardIds.Brukan_BG22_HERO_001:
			return CardIds.Brukan_EmbraceTheElements;
		case CardIds.Drekthar_BG22_HERO_002:
			return CardIds.Drekthar_LeadTheFrostwolves;
		case CardIds.VanndarStormpike_BG22_HERO_003:
			return CardIds.VanndarStormpike_LeadTheStormpikes;
		case CardIds.TavishStormpike_BG22_HERO_000:
			return CardIds.TavishStormpike_Deadeye;
		case CardIds.VardenDawngrasp_BG22_HERO_004:
			return CardIds.VardenDawngrasp_TwiceAsNice;
		case CardIds.Rokara_BG20_HERO_100:
			return CardIds.Rokara_GloryOfCombat;
		case CardIds.Onyxia_BG22_HERO_305:
			return CardIds.Onyxia_Broodmother;
		case CardIds.AmbassadorFaelin_BG22_HERO_201:
			return CardIds.AmbassadorFaelin_ExpeditionPlans;
		case CardIds.IniStormcoil_BG22_HERO_200:
			return CardIds.IniStormcoil_Mechgyver;
		case CardIds.QueenAzshara_BG22_HERO_007:
			return CardIds.QueenAzshara_AzsharasAmbition;
		case CardIds.QueenAzshara_NagaQueenAzsharaToken:
			return CardIds.QueenAzshara_NagaConquest;
		case CardIds.Ozumat_BG23_HERO_201:
			return CardIds.Ozumat_Tentacular;
		case CardIds.LadyVashj_BG23_HERO_304:
			return CardIds.LadyVashj_RelicsOfTheDeep;
		case CardIds.HeistbaronTogwaggle_BG23_HERO_305:
			return CardIds.HeistbaronTogwaggle_ThePerfectCrime;
		case CardIds.MurlocHolmes_BG23_HERO_303:
			return CardIds.MurlocHolmes_DetectiveForHire;
		case CardIds.SireDenathrius_BG24_HERO_100:
			return CardIds.SireDenathrius_Whodunitquestion;
		case CardIds.SylvanasWindrunner_BG23_HERO_306:
			return CardIds.SylvanasWindrunner_ReclaimedSouls;
		case CardIds.TheJailer_TB_BaconShop_HERO_702:
			return CardIds.RunicEmpowerment;
		case CardIds.EnhanceOMechano_BG24_HERO_204:
			return CardIds.EnhanceOMechano_Enhancification;
		case CardIds.ProfessorPutricide_BG25_HERO_100:
			return CardIds.ProfessorPutricide_BuildAnUndead;
		case CardIds.TeronGorefiend_BG25_HERO_103:
			return CardIds.TeronGorefiend_RapidReanimation;
		case CardIds.ETCBandManager_BG25_HERO_105:
			return CardIds.ETCBandManager_SignANewArtist;
		case CardIds.RockMasterVoone_BG26_HERO_104:
			return CardIds.RockMasterVoone_UpbeatHarmony;
		case CardIds.IngeTheIronHymn:
			return CardIds.IngeTheIronHymn_MajorHymn;
		case CardIds.CapnHoggarr_BG26_HERO_101:
			return CardIds.CapnHoggarr_ImTheCapnNow;
		case CardIds.Diablo:
			return CardIds.Diablo_RealmOfTerror;
		case '':
			return null; // new heroes
	}
};
}; | export const NON_DISCOVERABLE_BUDDIES = [ | random_line_split |
make_final_plot.py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.patches import Rectangle
from matplotlib.colors import LinearSegmentedColormap
import matplotlib
from biaxread import *
def load_event_properties(experiment):
    """
    Load event property file picks for a given experiment number and return
    that data as an array
    """
    # Reconstructed: the original body was elided here (the identical
    # function appears complete further down in this file).
    return np.loadtxt('../Slip_Property_Data/%s_event_properties.txt'%experiment,delimiter=',',skiprows=1)
def load_blacklist(experiment):
    """Return the array of blacklisted event numbers for an experiment."""
    blacklist_path = '../Slip_Property_Data/%s_blacklist.txt' % experiment
    return np.loadtxt(blacklist_path)
def load_events(experiment):
    """
    Return the event-property rows for an experiment with every event on
    that experiment's blacklist removed.
    """
    properties = load_event_properties(experiment)
    bad_rows = load_blacklist(experiment)
    return np.delete(properties, bad_rows, axis=0)
def filter(data, col, low, high):
    """
    Return only the rows of `data` whose value in column `col` lies in the
    inclusive range [low, high].

    Note: this shadows the built-in `filter`; the name is kept for backward
    compatibility with existing callers in this script.
    """
    # Single boolean mask instead of the original two chained
    # np.where passes (same result, one pass over the data).
    mask = (data[:, col] >= low) & (data[:, col] <= high)
    return data[mask]
def get_kc(disp):
    """
    Critical stiffness k_c as a function of load-point displacement [mm]:
    constant (7e-4) at or above 16 mm, linear ramp below.

    NOTE(review): the ramp's intercept (-0.0004) does not place the line
    exactly through (16, 7e-4) — confirm the constants are intentional.
    """
    if disp >= 16:
        return 7e-4
    gradient = (7.e-4 - 2.6e-6) / (16. - 6.)
    return gradient * disp - 0.0004
def get_aminusb(experiment,steps):
    """
    Compute the rate-dependence parameter (a-b) for every velocity step of
    an experiment, from the picked friction values before/after each step.
    Returns an array with columns [displacement, a-b, velocity change],
    one row per step. `steps` is the number of velocity steps picked.
    """
    # NOTE(review): `data` is loaded but never used below — confirm the
    # ReadAscii call can be dropped.
    data = ReadAscii('/Users/jleeman/Dropbox/PennState/BiaxExperiments/%s/%s_data.txt'%(experiment,experiment))
    # Picks come in before/after pairs per step: col 0 displacement, col 1 friction.
    picks = np.loadtxt('%s_picks.txt'%experiment,delimiter=',')
    V0,V1,row= np.loadtxt('%s_step_rows.csv'%experiment,delimiter=',',skiprows=1,unpack=True)
    dv = V1-V0
    friction = picks[:,1].reshape((steps,2))
    temp = picks[:,0].reshape((steps,2))
    # Displacement at the start of each step, converted to mm.
    disp = temp[:,0]/1000
    d_mu = friction[:,1]-friction[:,0]
    # (a-b) = delta_mu / ln(V1/V0)
    amb = d_mu/np.log(V1/V0)
    res = np.array([disp,amb,dv])
    return np.transpose(res)
def bin_steps(steps,bin_width):
    """
    Average velocity-step results in displacement bins of width `bin_width`.
    `steps` is the (displacement, a-b, dv) array from get_aminusb().
    Returns two parallel lists: per-bin mean displacement and mean (a-b).
    """
    # NOTE(review): Python 2 print statements throughout; min_disp is
    # computed but never used.
    min_disp = np.min(steps[:,0])
    max_disp = np.max(steps[:,0])
    print "min, max", min_disp,max_disp
    print np.shape(steps)
    # Steps with these velocity changes are excluded from the averages.
    exclude_dv = [-7]
    for dv in exclude_dv:
        steps = steps[steps[:,2]!=dv]
    disp_means = []
    amb_means = []
    for i in range(int(max_disp/bin_width)+1):
        bin_bottom = i * bin_width
        bin_top = i * bin_width + bin_width
        print "Bin bot,top", bin_bottom, bin_top
        #print steps[:,0] > bin_bottom
        #print steps[:,0] < bin_top
        # Keep only steps strictly inside (bin_bottom, bin_top).
        bin_steps = steps[(steps[:,0] > bin_bottom)]
        bin_steps = bin_steps[(bin_steps[:,0] < bin_top)]
        print "Steps:", np.shape(bin_steps)
        # Empty bins contribute nothing to the output lists.
        if len(bin_steps)!= 0:
            disp_means.append(np.mean(bin_steps[:,0]))
            amb_means.append(np.mean(bin_steps[:,1]))
            #amb_means.append(np.median(bin_steps[:,1]))
            print bin_steps[:,2]
    return disp_means,amb_means
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
             (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
             (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
             (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
             (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
    r, g, b = tableau20[i]
    tableau20[i] = (r / 255., g / 255., b / 255.)
# Tuple of experiments we'll consider for plotting even data from
# Removed p4342 due to data quality issues 2/16/15
experiments_with_event_data = ('p4343','p4344','p4345','p4346',
                               'p4347','p4348','p4350','p4351')
# Tuple of experiments we'll plot unload/reload stiffness from
experiments_with_unload_reload = ('p4267','p4268','p4269','p4270','p4271',
                                  'p4272','p4273','p4309','p4310','p4311',
                                  'p4312','p4313','p4314','p4316','p4317',
                                  'p4327','p4328','p4329','p4330','p4338',
                                  'p4339')
# Read those experiments into a dictionary of event data
# (maps experiment name -> blacklist-filtered event-property array).
experiment_event_data = dict()
for experiment in experiments_with_event_data:
    experiment_event_data[experiment] = load_events(experiment)
# Make the plot
# Setup figure and axes
# Generally plots is ~1.33x width to height (10,7.5 or 12,9)
fig = plt.figure(figsize=(12,13))
#ax1 = plt.subplot(311)
#ax2 = plt.subplot(312)
#ax3 = plt.subplot(313)
ax1 = plt.subplot2grid((5,1), (0,0), rowspan=1)
ax2 = plt.subplot2grid((5,1), (1,0), rowspan=2)
ax3 = plt.subplot2grid((5,1), (3,0), rowspan=2)
ax1.set_position([0.125,0.745,0.775,0.2])
ax3.set_position([0.125,0.1,0.775,0.28])
#
# Plot A top (a-b)
#
p4309_a,p4309_b,p4309_Dc,p4309_amb,step_row = np.loadtxt('p4309_ruina_fits.csv',usecols=[0,2,4,6,9],delimiter=',',skiprows=1,unpack=True)
p4309_data = ReadAscii('/Users/jleeman/Dropbox/PennState/BiaxExperiments/p4309/p4309_data.txt')
step_row = step_row.astype(int)
step_disp = p4309_data['LP_Disp'][step_row]
p4309_step_disp = step_disp/1000.
ax1.set_ylabel(r'(a-b)',fontsize=16)
ax1.tick_params(axis='both', which='major', labelsize=14)
ax1.text(-0.1,0.9,'A',transform = ax1.transAxes,fontsize=24)
ax1.set_xticklabels([])
ax1.get_yaxis().set_ticks([-0.004,-0.002,0.,0.002,0.004])
ax1.scatter(p4309_step_disp,p4309_amb,color='k',
s=70,marker='.',label='p4309')
ax1.axhline(y=0,color='k',linewidth='2',linestyle='--')
# Label velocity regions
ax1.text(35,0.001,'Velocity Strengthening',fontsize=12)
ax1.text(35,-0.003,'Velocity Weakening',fontsize=12)
ax1.set_xlim(0, 55)
ax1.set_ylim(-0.005 ,0.004)
#ax1.text(48,0.003,'p4309',fontsize=12)
p4381_steps = get_aminusb('p4381',83)
p4382_steps = get_aminusb('p4382',84)
p4381_d,p4381_amb = bin_steps(p4381_steps,5)
p4382_d,p4382_amb = bin_steps(p4382_steps,5)
ax1.scatter(p4381_d,p4381_amb,color='k',marker='v',s=70,label='P4381')
ax1.scatter(p4382_d,p4382_amb,color='k',marker='*',s=70,label='P4382')
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles, labels, scatterpoints=1, frameon=False)
#
# Plot A
#
exps = ['p4267','p4268','p4269','p4270','p4271','p4272','p4273',
'p4309','p4310','p4311','p4312','p4313','p4314','p4316','p4317',
'p4327','p4328','p4329','p4330']
# Set labels and tick sizes
#ax2.set_xlabel(r'Average LP Displacement [mm]',fontsize=18)
ax2.set_ylabel(r"""Stiffness, $k$' [1/$\mu$m]x1000""",fontsize=18)
ax2.tick_params(axis='both', which='major', labelsize=16)
ax2.get_yaxis().set_ticks([0,0.5,1,1.5,2,2.5,3,3.5])
ax2.text(-0.1,0.9,'B',transform = ax2.transAxes,fontsize=24)
# Turns off chart clutter
# Turn off top and right tick marks
#ax2.get_xaxis().tick_bottom()
#ax2.get_yaxis().tick_left()
# Turn off top and right splines
#ax2.spines["top"].set_visible(False)
#ax2.spines["right"].set_visible(False)
# Plotting
for exp in experiments_with_unload_reload:
df = pd.read_csv('/Users/jleeman/Dropbox/PennState/BiaxExperiments/%s/%s_stiffness_cycles.txt'%(exp,exp))
temp = df[df['Behavior']=='stable']
ax2.scatter(temp['AvgDisp']/1000.,temp['Slope']*1000,color='k',s=50,alpha=0.6,zorder=50,edgecolor='k')
#temp = df[df['Behavior']=='slow']
#ax2.scatter(temp['AvgDisp']/1000.,temp['Slope'],color='r',s=50,alpha=0.6)
#temp = df[df['Behavior']=='fast']
#ax2.scatter(temp['AvgDisp']/1000.,temp['Slope'],color='r',s=50,alpha=0.6)
# Add rectangle for where figure B comes from
# rect_x1 = 10.
# rect_x2 = 50.
# rect_y1 = 0.
# rect_y2 = 0.0009*1000
# rect_width = rect_x2-rect_x1
# rect_height = rect_y2-rect_y1
# ax2.add_patch(Rectangle((rect_x1,rect_y1),rect_width,rect_height,alpha=0.2, zorder=0,facecolor="k"))
# Set limits
ax2.set_xlim(0,52)
ax2.set_ylim(0,0.004*1000)
low_color = 10./1000.
high_color = 4600./1000.
marker_size = 40
marker_alpha=0.7
color_col=11
for key in experiment_event_data:
event_data = experiment_event_data[key]
sc = ax2.scatter(event_data[:,9]/1000.,event_data[:,5]*1000,s=40,alpha=marker_alpha,color='r',edgecolor='r')
print key,np.min(event_data[:,color_col]), np.max(event_data[:,color_col])
# Plot line for kc definition
ax2.plot([6,16,52],[2.6e-6*1000,7e-4*1000,7e-4*1000],color='k',linewidth=4)
# Add text
ax2.text(35,0.95,'Stable',fontsize=22)
ax2.text(35,0.15,'Unstable',fontsize=22,color='r')
ax2.text(46,0.88,r'$k_c$',fontsize=26,color='k')
# # Plot Kc
# df = pd.read_excel('/Users/jleeman/Dropbox/PennState/BiaxExperiments/p4309/p4309_rsf_fits.xlsx')
#
#
#
#
# for i,fit in df.iterrows():
#
# if fit['Grade'] == 'A':
# #color='#000066'
# #color='#FFFFFF'
# color='#0000FF'
# elif fit['Grade'] == 'B':
# color='#0066CC'
# color='#0000FF'
# #color='#FFFFFF'
# elif fit['Grade'] == 'C':
# #color='#00CCFF'
# color='#FFFFFF'
# continue
# elif fit['Grade'] == 'D':
# #color='#00FFFF'
# color='#FFFFFF'
# continue
#
# if fit['Type']=='Down' and fit['Law']=='r' and fit['k']==0.0055:
# #ax2.scatter(fit['LP_Disp']/1000.,fit['Kc']*1000,c=color,s=60,marker='v',zorder=50)
# print fit['LP_Disp']/1000.,fit['Kc']
#
# elif fit['Type']=='Up' and fit['Law']=='r' and fit['k']==0.0055:
# #ax2.scatter(fit['LP_Disp']/1000.,fit['Kc']*1000,c=color,s=60,marker='^',zorder=50)
# print fit['LP_Disp']/1000.,fit['Kc']
# else:
# pass
#
# Plot B
#
# Set labels and tick sizes
ax3.set_xlabel(r'Load Point Displacement [mm]',fontsize=18,labelpad=15)
ax3.set_ylabel(r'$\kappa = k/k_c$',fontsize=25)
ax3.tick_params(axis='both', which='major', labelsize=16)
# Turns off chart clutter
# Turn off top and right tick marks
#ax3.get_xaxis().tick_bottom()
#ax3.get_yaxis().tick_left()
ax3.get_yaxis().set_ticks([0,0.2,0.4,0.6,0.8,1.0,1.2])
# Turn off top and right splines
#ax3.spines["top"].set_visible(False)
#ax3.spines["right"].set_visible(False)
# Plotting
# Make panel A of displacement/stiffness
ax3.text(-0.1,0.9,'C',transform = ax3.transAxes,fontsize=24)
low_color = 10./1000.
low_color = 0
high_color = 4000./1000.
cmap = plt.get_cmap('rainbow_r')
start=0.15
stop = 1.
colors = cmap(np.linspace(start, stop, cmap.N))
# Create a new colormap from those colors
color_map = LinearSegmentedColormap.from_list('Upper Half', colors)
marker_size = 40
marker_alpha=0.5
color_col=11
for key in experiment_event_data:
event_data = experiment_event_data[key]
k_kc_ratio = []
for k,disp in zip(event_data[:,5],event_data[:,9]/1000.):
k_kc_ratio.append(k/get_kc(disp))
sc = ax3.scatter(event_data[:,9]/1000.,k_kc_ratio,c=event_data[:,color_col]/1000.,s=marker_size,alpha=marker_alpha,vmin=low_color,vmax=high_color,cmap=color_map)
print key,np.min(event_data[:,color_col]), np.max(event_data[:,color_col])
# cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
# plt.colorbar(sc,cax=cbar_ax)
# for experiment in experiments_with_unload_reload:
# df = pd.read_csv('/Users/jleeman/Dropbox/PennState/BiaxExperiments/%s/%s_stiffness_cycles.txt'%(experiment,experiment))
#
# ax3.scatter(df['AvgDisp']/1000.,df['Slope'],color='g',s=50,alpha=0.6)
position=fig.add_axes([0.37,0.16,0.5,0.02]) ## the parameters are the specified position you set [left, bottom, width, height]
cb = fig.colorbar(sc,cax=position,orientation='horizontal', drawedges=False)
cb.solids.set_edgecolor("face")
cb.set_label(r'Peak Slip Velocity [$mm/s$]',fontsize=14)
cb.set_alpha(1)
cb.draw_all()
#position.set_xlim(0,4)
ax3.set_ylim(0,1.4)
ax3.set_xlim(16,50)
ax3.axvspan(40, 50, alpha=0.2, color='k', zorder=0)
# Add call out lines between plots
transFigure = fig.transFigure.inverted()
### LEFT
coord1 = transFigure.transform(ax2.transData.transform([16,0]))
coord2 = transFigure.transform(ax2.transData.transform([16,-0.3]))
line1 = matplotlib.lines.Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
transform=fig.transFigure,color='k')
coord3 = transFigure.transform(ax3.transData.transform([16,1.4]))
line2 = matplotlib.lines.Line2D((coord2[0],coord3[0]),(coord2[1],coord3[1]),
transform=fig.transFigure,color='k')
### RIGHT
coord1 = transFigure.transform(ax2.transData.transform([50,0]))
coord2 = transFigure.transform(ax2.transData.transform([50,-0.3]))
line3 = matplotlib.lines.Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
transform=fig.transFigure,color='k')
coord3 = transFigure.transform(ax3.transData.transform([50,1.4]))
line4 = matplotlib.lines.Line2D((coord2[0],coord3[0]),(coord2[1],coord3[1]),
transform=fig.transFigure,color='k')
fig.lines = line1,line2,line3,line4
# coord1 = transFigure.transform(ax2.transData.transform([16,0]))
# coord2 = transFigure.transform(ax3.transData.transform([16,1.4]))
#
# line = matplotlib.lines.Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
# transform=fig.transFigure,color='k')
# fig.lines = line,
plt.savefig('figure.png', bbox_inches="tight");
| """
Load event property file picks for a given experiment number and return
that data as an array
"""
return np.loadtxt('../Slip_Property_Data/%s_event_properties.txt'%experiment,delimiter=',',skiprows=1) | identifier_body |
make_final_plot.py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.patches import Rectangle
from matplotlib.colors import LinearSegmentedColormap
import matplotlib
from biaxread import *
def load_event_properties(experiment):
"""
Load event property file picks for a given experiment number and return
that data as an array
"""
return np.loadtxt('../Slip_Property_Data/%s_event_properties.txt'%experiment,delimiter=',',skiprows=1)
def load_blacklist(experiment):
"""
Load event numbers from the blacklist file for each experiment and
return them as an array
"""
blacklist = np.loadtxt('../Slip_Property_Data/%s_blacklist.txt'%experiment)
return blacklist
def load_events(experiment):
"""
Loads all events from a given experiment that are not on the blacklist
file for that experiment. Returns array of event properties.
"""
event_properties = load_event_properties(experiment)
blacklist = load_blacklist(experiment)
return np.delete(event_properties,blacklist,axis=0)
def filter(data,col,low,high):
"""
Take array, filter out rows in which the element in the given column
is not in the range low-high (inclusive)
"""
inds = np.where(data[:,col]>=low)
data_trim = data[inds]
inds = np.where(data_trim[:,col]<=high)
data_trim = data_trim[inds]
return data_trim
def get_kc(disp):
slope = (7.e-4-2.6e-6)/(16.-6.)
if disp >= 16:
return 7e-4
else:
return slope*disp - 0.0004
def get_aminusb(experiment,steps):
data = ReadAscii('/Users/jleeman/Dropbox/PennState/BiaxExperiments/%s/%s_data.txt'%(experiment,experiment))
picks = np.loadtxt('%s_picks.txt'%experiment,delimiter=',')
V0,V1,row= np.loadtxt('%s_step_rows.csv'%experiment,delimiter=',',skiprows=1,unpack=True)
dv = V1-V0
friction = picks[:,1].reshape((steps,2))
temp = picks[:,0].reshape((steps,2))
disp = temp[:,0]/1000
d_mu = friction[:,1]-friction[:,0]
amb = d_mu/np.log(V1/V0)
res = np.array([disp,amb,dv])
return np.transpose(res)
def bin_steps(steps,bin_width):
min_disp = np.min(steps[:,0])
max_disp = np.max(steps[:,0])
print "min, max", min_disp,max_disp
print np.shape(steps)
exclude_dv = [-7]
for dv in exclude_dv:
steps = steps[steps[:,2]!=dv]
disp_means = []
amb_means = []
for i in range(int(max_disp/bin_width)+1):
bin_bottom = i * bin_width
bin_top = i * bin_width + bin_width
print "Bin bot,top", bin_bottom, bin_top
#print steps[:,0] > bin_bottom
#print steps[:,0] < bin_top
bin_steps = steps[(steps[:,0] > bin_bottom)]
bin_steps = bin_steps[(bin_steps[:,0] < bin_top)]
print "Steps:", np.shape(bin_steps)
if len(bin_steps)!= 0:
disp_means.append(np.mean(bin_steps[:,0]))
amb_means.append(np.mean(bin_steps[:,1]))
#amb_means.append(np.median(bin_steps[:,1]))
print bin_steps[:,2]
return disp_means,amb_means
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
# Tuple of experiments we'll consider for plotting even data from
# Removed p4342 due to data quality issues 2/16/15
experiments_with_event_data = ('p4343','p4344','p4345','p4346',
'p4347','p4348','p4350','p4351')
# Tuple of experiments we'll plot unload/reload stiffness from
experiments_with_unload_reload = ('p4267','p4268','p4269','p4270','p4271',
'p4272','p4273','p4309','p4310','p4311',
'p4312','p4313','p4314','p4316','p4317',
'p4327','p4328','p4329','p4330','p4338',
'p4339')
# Read those experiments into a dictionary of event data
experiment_event_data = dict()
for experiment in experiments_with_event_data:
experiment_event_data[experiment] = load_events(experiment)
# Make the plot
# Setup figure and axes
# Generally plots is ~1.33x width to height (10,7.5 or 12,9)
fig = plt.figure(figsize=(12,13))
#ax1 = plt.subplot(311)
#ax2 = plt.subplot(312)
#ax3 = plt.subplot(313)
ax1 = plt.subplot2grid((5,1), (0,0), rowspan=1)
ax2 = plt.subplot2grid((5,1), (1,0), rowspan=2)
ax3 = plt.subplot2grid((5,1), (3,0), rowspan=2)
ax1.set_position([0.125,0.745,0.775,0.2])
ax3.set_position([0.125,0.1,0.775,0.28])
#
# Plot A top (a-b)
#
p4309_a,p4309_b,p4309_Dc,p4309_amb,step_row = np.loadtxt('p4309_ruina_fits.csv',usecols=[0,2,4,6,9],delimiter=',',skiprows=1,unpack=True)
p4309_data = ReadAscii('/Users/jleeman/Dropbox/PennState/BiaxExperiments/p4309/p4309_data.txt')
step_row = step_row.astype(int)
step_disp = p4309_data['LP_Disp'][step_row]
p4309_step_disp = step_disp/1000.
ax1.set_ylabel(r'(a-b)',fontsize=16)
ax1.tick_params(axis='both', which='major', labelsize=14)
ax1.text(-0.1,0.9,'A',transform = ax1.transAxes,fontsize=24)
ax1.set_xticklabels([])
ax1.get_yaxis().set_ticks([-0.004,-0.002,0.,0.002,0.004])
ax1.scatter(p4309_step_disp,p4309_amb,color='k',
s=70,marker='.',label='p4309')
ax1.axhline(y=0,color='k',linewidth='2',linestyle='--')
# Label velocity regions
ax1.text(35,0.001,'Velocity Strengthening',fontsize=12)
ax1.text(35,-0.003,'Velocity Weakening',fontsize=12)
ax1.set_xlim(0, 55)
ax1.set_ylim(-0.005 ,0.004)
#ax1.text(48,0.003,'p4309',fontsize=12)
p4381_steps = get_aminusb('p4381',83)
p4382_steps = get_aminusb('p4382',84)
p4381_d,p4381_amb = bin_steps(p4381_steps,5)
p4382_d,p4382_amb = bin_steps(p4382_steps,5)
ax1.scatter(p4381_d,p4381_amb,color='k',marker='v',s=70,label='P4381')
ax1.scatter(p4382_d,p4382_amb,color='k',marker='*',s=70,label='P4382')
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles, labels, scatterpoints=1, frameon=False)
#
# Plot A
#
exps = ['p4267','p4268','p4269','p4270','p4271','p4272','p4273',
'p4309','p4310','p4311','p4312','p4313','p4314','p4316','p4317',
'p4327','p4328','p4329','p4330']
# Set labels and tick sizes
#ax2.set_xlabel(r'Average LP Displacement [mm]',fontsize=18)
ax2.set_ylabel(r"""Stiffness, $k$' [1/$\mu$m]x1000""",fontsize=18)
ax2.tick_params(axis='both', which='major', labelsize=16)
ax2.get_yaxis().set_ticks([0,0.5,1,1.5,2,2.5,3,3.5])
ax2.text(-0.1,0.9,'B',transform = ax2.transAxes,fontsize=24)
# Turns off chart clutter
# Turn off top and right tick marks
#ax2.get_xaxis().tick_bottom()
#ax2.get_yaxis().tick_left()
# Turn off top and right splines
#ax2.spines["top"].set_visible(False)
#ax2.spines["right"].set_visible(False)
# Plotting
for exp in experiments_with_unload_reload:
df = pd.read_csv('/Users/jleeman/Dropbox/PennState/BiaxExperiments/%s/%s_stiffness_cycles.txt'%(exp,exp))
temp = df[df['Behavior']=='stable']
ax2.scatter(temp['AvgDisp']/1000.,temp['Slope']*1000,color='k',s=50,alpha=0.6,zorder=50,edgecolor='k')
#temp = df[df['Behavior']=='slow']
#ax2.scatter(temp['AvgDisp']/1000.,temp['Slope'],color='r',s=50,alpha=0.6)
#temp = df[df['Behavior']=='fast']
#ax2.scatter(temp['AvgDisp']/1000.,temp['Slope'],color='r',s=50,alpha=0.6)
# Add rectangle for where figure B comes from
# rect_x1 = 10.
# rect_x2 = 50.
# rect_y1 = 0.
# rect_y2 = 0.0009*1000
# rect_width = rect_x2-rect_x1
# rect_height = rect_y2-rect_y1
# ax2.add_patch(Rectangle((rect_x1,rect_y1),rect_width,rect_height,alpha=0.2, zorder=0,facecolor="k"))
# Set limits
ax2.set_xlim(0,52)
ax2.set_ylim(0,0.004*1000)
low_color = 10./1000.
high_color = 4600./1000.
marker_size = 40
marker_alpha=0.7
color_col=11
for key in experiment_event_data:
event_data = experiment_event_data[key]
sc = ax2.scatter(event_data[:,9]/1000.,event_data[:,5]*1000,s=40,alpha=marker_alpha,color='r',edgecolor='r')
print key,np.min(event_data[:,color_col]), np.max(event_data[:,color_col])
# Plot line for kc definition
ax2.plot([6,16,52],[2.6e-6*1000,7e-4*1000,7e-4*1000],color='k',linewidth=4)
# Add text
ax2.text(35,0.95,'Stable',fontsize=22)
ax2.text(35,0.15,'Unstable',fontsize=22,color='r')
ax2.text(46,0.88,r'$k_c$',fontsize=26,color='k')
# # Plot Kc
# df = pd.read_excel('/Users/jleeman/Dropbox/PennState/BiaxExperiments/p4309/p4309_rsf_fits.xlsx')
#
#
#
#
# for i,fit in df.iterrows():
#
# if fit['Grade'] == 'A':
# #color='#000066'
# #color='#FFFFFF'
# color='#0000FF'
# elif fit['Grade'] == 'B':
# color='#0066CC'
# color='#0000FF'
# #color='#FFFFFF'
# elif fit['Grade'] == 'C':
# #color='#00CCFF'
# color='#FFFFFF'
# continue
# elif fit['Grade'] == 'D':
# #color='#00FFFF'
# color='#FFFFFF'
# continue
#
# if fit['Type']=='Down' and fit['Law']=='r' and fit['k']==0.0055:
# #ax2.scatter(fit['LP_Disp']/1000.,fit['Kc']*1000,c=color,s=60,marker='v',zorder=50)
# print fit['LP_Disp']/1000.,fit['Kc']
#
# elif fit['Type']=='Up' and fit['Law']=='r' and fit['k']==0.0055:
# #ax2.scatter(fit['LP_Disp']/1000.,fit['Kc']*1000,c=color,s=60,marker='^',zorder=50)
# print fit['LP_Disp']/1000.,fit['Kc']
# else:
# pass
#
# Plot B
#
# Set labels and tick sizes
ax3.set_xlabel(r'Load Point Displacement [mm]',fontsize=18,labelpad=15)
ax3.set_ylabel(r'$\kappa = k/k_c$',fontsize=25)
ax3.tick_params(axis='both', which='major', labelsize=16)
# Turns off chart clutter
# Turn off top and right tick marks
#ax3.get_xaxis().tick_bottom()
#ax3.get_yaxis().tick_left()
ax3.get_yaxis().set_ticks([0,0.2,0.4,0.6,0.8,1.0,1.2])
# Turn off top and right splines
#ax3.spines["top"].set_visible(False)
#ax3.spines["right"].set_visible(False)
# Plotting
# Make panel A of displacement/stiffness
ax3.text(-0.1,0.9,'C',transform = ax3.transAxes,fontsize=24)
low_color = 10./1000.
low_color = 0
high_color = 4000./1000.
cmap = plt.get_cmap('rainbow_r')
start=0.15
stop = 1.
colors = cmap(np.linspace(start, stop, cmap.N))
# Create a new colormap from those colors
color_map = LinearSegmentedColormap.from_list('Upper Half', colors)
marker_size = 40
marker_alpha=0.5
color_col=11
for key in experiment_event_data:
event_data = experiment_event_data[key]
k_kc_ratio = []
for k,disp in zip(event_data[:,5],event_data[:,9]/1000.):
k_kc_ratio.append(k/get_kc(disp))
sc = ax3.scatter(event_data[:,9]/1000.,k_kc_ratio,c=event_data[:,color_col]/1000.,s=marker_size,alpha=marker_alpha,vmin=low_color,vmax=high_color,cmap=color_map)
print key,np.min(event_data[:,color_col]), np.max(event_data[:,color_col])
# cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
# plt.colorbar(sc,cax=cbar_ax)
# for experiment in experiments_with_unload_reload:
# df = pd.read_csv('/Users/jleeman/Dropbox/PennState/BiaxExperiments/%s/%s_stiffness_cycles.txt'%(experiment,experiment))
#
# ax3.scatter(df['AvgDisp']/1000.,df['Slope'],color='g',s=50,alpha=0.6)
position=fig.add_axes([0.37,0.16,0.5,0.02]) ## the parameters are the specified position you set [left, bottom, width, height]
cb = fig.colorbar(sc,cax=position,orientation='horizontal', drawedges=False)
cb.solids.set_edgecolor("face")
cb.set_label(r'Peak Slip Velocity [$mm/s$]',fontsize=14)
cb.set_alpha(1)
cb.draw_all()
#position.set_xlim(0,4)
ax3.set_ylim(0,1.4)
ax3.set_xlim(16,50)
ax3.axvspan(40, 50, alpha=0.2, color='k', zorder=0)
# Add call out lines between plots
transFigure = fig.transFigure.inverted()
### LEFT
coord1 = transFigure.transform(ax2.transData.transform([16,0]))
coord2 = transFigure.transform(ax2.transData.transform([16,-0.3]))
line1 = matplotlib.lines.Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
transform=fig.transFigure,color='k')
coord3 = transFigure.transform(ax3.transData.transform([16,1.4]))
line2 = matplotlib.lines.Line2D((coord2[0],coord3[0]),(coord2[1],coord3[1]),
transform=fig.transFigure,color='k')
### RIGHT | coord1 = transFigure.transform(ax2.transData.transform([50,0]))
coord2 = transFigure.transform(ax2.transData.transform([50,-0.3]))
line3 = matplotlib.lines.Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
transform=fig.transFigure,color='k')
coord3 = transFigure.transform(ax3.transData.transform([50,1.4]))
line4 = matplotlib.lines.Line2D((coord2[0],coord3[0]),(coord2[1],coord3[1]),
transform=fig.transFigure,color='k')
fig.lines = line1,line2,line3,line4
# coord1 = transFigure.transform(ax2.transData.transform([16,0]))
# coord2 = transFigure.transform(ax3.transData.transform([16,1.4]))
#
# line = matplotlib.lines.Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
# transform=fig.transFigure,color='k')
# fig.lines = line,
plt.savefig('figure.png', bbox_inches="tight"); | random_line_split | |
make_final_plot.py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.patches import Rectangle
from matplotlib.colors import LinearSegmentedColormap
import matplotlib
from biaxread import *
def | (experiment):
"""
Load event property file picks for a given experiment number and return
that data as an array
"""
return np.loadtxt('../Slip_Property_Data/%s_event_properties.txt'%experiment,delimiter=',',skiprows=1)
def load_blacklist(experiment):
"""
Load event numbers from the blacklist file for each experiment and
return them as an array
"""
blacklist = np.loadtxt('../Slip_Property_Data/%s_blacklist.txt'%experiment)
return blacklist
def load_events(experiment):
"""
Loads all events from a given experiment that are not on the blacklist
file for that experiment. Returns array of event properties.
"""
event_properties = load_event_properties(experiment)
blacklist = load_blacklist(experiment)
return np.delete(event_properties,blacklist,axis=0)
def filter(data,col,low,high):
"""
Take array, filter out rows in which the element in the given column
is not in the range low-high (inclusive)
"""
inds = np.where(data[:,col]>=low)
data_trim = data[inds]
inds = np.where(data_trim[:,col]<=high)
data_trim = data_trim[inds]
return data_trim
def get_kc(disp):
slope = (7.e-4-2.6e-6)/(16.-6.)
if disp >= 16:
return 7e-4
else:
return slope*disp - 0.0004
def get_aminusb(experiment,steps):
data = ReadAscii('/Users/jleeman/Dropbox/PennState/BiaxExperiments/%s/%s_data.txt'%(experiment,experiment))
picks = np.loadtxt('%s_picks.txt'%experiment,delimiter=',')
V0,V1,row= np.loadtxt('%s_step_rows.csv'%experiment,delimiter=',',skiprows=1,unpack=True)
dv = V1-V0
friction = picks[:,1].reshape((steps,2))
temp = picks[:,0].reshape((steps,2))
disp = temp[:,0]/1000
d_mu = friction[:,1]-friction[:,0]
amb = d_mu/np.log(V1/V0)
res = np.array([disp,amb,dv])
return np.transpose(res)
def bin_steps(steps,bin_width):
min_disp = np.min(steps[:,0])
max_disp = np.max(steps[:,0])
print "min, max", min_disp,max_disp
print np.shape(steps)
exclude_dv = [-7]
for dv in exclude_dv:
steps = steps[steps[:,2]!=dv]
disp_means = []
amb_means = []
for i in range(int(max_disp/bin_width)+1):
bin_bottom = i * bin_width
bin_top = i * bin_width + bin_width
print "Bin bot,top", bin_bottom, bin_top
#print steps[:,0] > bin_bottom
#print steps[:,0] < bin_top
bin_steps = steps[(steps[:,0] > bin_bottom)]
bin_steps = bin_steps[(bin_steps[:,0] < bin_top)]
print "Steps:", np.shape(bin_steps)
if len(bin_steps)!= 0:
disp_means.append(np.mean(bin_steps[:,0]))
amb_means.append(np.mean(bin_steps[:,1]))
#amb_means.append(np.median(bin_steps[:,1]))
print bin_steps[:,2]
return disp_means,amb_means
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
# Tuple of experiments we'll consider for plotting even data from
# Removed p4342 due to data quality issues 2/16/15
experiments_with_event_data = ('p4343','p4344','p4345','p4346',
'p4347','p4348','p4350','p4351')
# Tuple of experiments we'll plot unload/reload stiffness from
experiments_with_unload_reload = ('p4267','p4268','p4269','p4270','p4271',
'p4272','p4273','p4309','p4310','p4311',
'p4312','p4313','p4314','p4316','p4317',
'p4327','p4328','p4329','p4330','p4338',
'p4339')
# Read those experiments into a dictionary of event data
experiment_event_data = dict()
for experiment in experiments_with_event_data:
experiment_event_data[experiment] = load_events(experiment)
# Make the plot
# Setup figure and axes
# Generally plots is ~1.33x width to height (10,7.5 or 12,9)
fig = plt.figure(figsize=(12,13))
#ax1 = plt.subplot(311)
#ax2 = plt.subplot(312)
#ax3 = plt.subplot(313)
ax1 = plt.subplot2grid((5,1), (0,0), rowspan=1)
ax2 = plt.subplot2grid((5,1), (1,0), rowspan=2)
ax3 = plt.subplot2grid((5,1), (3,0), rowspan=2)
ax1.set_position([0.125,0.745,0.775,0.2])
ax3.set_position([0.125,0.1,0.775,0.28])
#
# Plot A top (a-b)
#
p4309_a,p4309_b,p4309_Dc,p4309_amb,step_row = np.loadtxt('p4309_ruina_fits.csv',usecols=[0,2,4,6,9],delimiter=',',skiprows=1,unpack=True)
p4309_data = ReadAscii('/Users/jleeman/Dropbox/PennState/BiaxExperiments/p4309/p4309_data.txt')
step_row = step_row.astype(int)
step_disp = p4309_data['LP_Disp'][step_row]
p4309_step_disp = step_disp/1000.
ax1.set_ylabel(r'(a-b)',fontsize=16)
ax1.tick_params(axis='both', which='major', labelsize=14)
ax1.text(-0.1,0.9,'A',transform = ax1.transAxes,fontsize=24)
ax1.set_xticklabels([])
ax1.get_yaxis().set_ticks([-0.004,-0.002,0.,0.002,0.004])
ax1.scatter(p4309_step_disp,p4309_amb,color='k',
s=70,marker='.',label='p4309')
ax1.axhline(y=0,color='k',linewidth='2',linestyle='--')
# Label velocity regions
ax1.text(35,0.001,'Velocity Strengthening',fontsize=12)
ax1.text(35,-0.003,'Velocity Weakening',fontsize=12)
ax1.set_xlim(0, 55)
ax1.set_ylim(-0.005 ,0.004)
#ax1.text(48,0.003,'p4309',fontsize=12)
p4381_steps = get_aminusb('p4381',83)
p4382_steps = get_aminusb('p4382',84)
p4381_d,p4381_amb = bin_steps(p4381_steps,5)
p4382_d,p4382_amb = bin_steps(p4382_steps,5)
ax1.scatter(p4381_d,p4381_amb,color='k',marker='v',s=70,label='P4381')
ax1.scatter(p4382_d,p4382_amb,color='k',marker='*',s=70,label='P4382')
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles, labels, scatterpoints=1, frameon=False)
#
# Plot A
#
exps = ['p4267','p4268','p4269','p4270','p4271','p4272','p4273',
'p4309','p4310','p4311','p4312','p4313','p4314','p4316','p4317',
'p4327','p4328','p4329','p4330']
# Set labels and tick sizes
#ax2.set_xlabel(r'Average LP Displacement [mm]',fontsize=18)
ax2.set_ylabel(r"""Stiffness, $k$' [1/$\mu$m]x1000""",fontsize=18)
ax2.tick_params(axis='both', which='major', labelsize=16)
ax2.get_yaxis().set_ticks([0,0.5,1,1.5,2,2.5,3,3.5])
ax2.text(-0.1,0.9,'B',transform = ax2.transAxes,fontsize=24)
# Turns off chart clutter
# Turn off top and right tick marks
#ax2.get_xaxis().tick_bottom()
#ax2.get_yaxis().tick_left()
# Turn off top and right splines
#ax2.spines["top"].set_visible(False)
#ax2.spines["right"].set_visible(False)
# Plotting
for exp in experiments_with_unload_reload:
df = pd.read_csv('/Users/jleeman/Dropbox/PennState/BiaxExperiments/%s/%s_stiffness_cycles.txt'%(exp,exp))
temp = df[df['Behavior']=='stable']
ax2.scatter(temp['AvgDisp']/1000.,temp['Slope']*1000,color='k',s=50,alpha=0.6,zorder=50,edgecolor='k')
#temp = df[df['Behavior']=='slow']
#ax2.scatter(temp['AvgDisp']/1000.,temp['Slope'],color='r',s=50,alpha=0.6)
#temp = df[df['Behavior']=='fast']
#ax2.scatter(temp['AvgDisp']/1000.,temp['Slope'],color='r',s=50,alpha=0.6)
# Add rectangle for where figure B comes from
# rect_x1 = 10.
# rect_x2 = 50.
# rect_y1 = 0.
# rect_y2 = 0.0009*1000
# rect_width = rect_x2-rect_x1
# rect_height = rect_y2-rect_y1
# ax2.add_patch(Rectangle((rect_x1,rect_y1),rect_width,rect_height,alpha=0.2, zorder=0,facecolor="k"))
# Set limits
ax2.set_xlim(0,52)
ax2.set_ylim(0,0.004*1000)
low_color = 10./1000.
high_color = 4600./1000.
marker_size = 40
marker_alpha=0.7
color_col=11
for key in experiment_event_data:
event_data = experiment_event_data[key]
sc = ax2.scatter(event_data[:,9]/1000.,event_data[:,5]*1000,s=40,alpha=marker_alpha,color='r',edgecolor='r')
print key,np.min(event_data[:,color_col]), np.max(event_data[:,color_col])
# Plot line for kc definition
ax2.plot([6,16,52],[2.6e-6*1000,7e-4*1000,7e-4*1000],color='k',linewidth=4)
# Add text
ax2.text(35,0.95,'Stable',fontsize=22)
ax2.text(35,0.15,'Unstable',fontsize=22,color='r')
ax2.text(46,0.88,r'$k_c$',fontsize=26,color='k')
# # Plot Kc
# df = pd.read_excel('/Users/jleeman/Dropbox/PennState/BiaxExperiments/p4309/p4309_rsf_fits.xlsx')
#
#
#
#
# for i,fit in df.iterrows():
#
# if fit['Grade'] == 'A':
# #color='#000066'
# #color='#FFFFFF'
# color='#0000FF'
# elif fit['Grade'] == 'B':
# color='#0066CC'
# color='#0000FF'
# #color='#FFFFFF'
# elif fit['Grade'] == 'C':
# #color='#00CCFF'
# color='#FFFFFF'
# continue
# elif fit['Grade'] == 'D':
# #color='#00FFFF'
# color='#FFFFFF'
# continue
#
# if fit['Type']=='Down' and fit['Law']=='r' and fit['k']==0.0055:
# #ax2.scatter(fit['LP_Disp']/1000.,fit['Kc']*1000,c=color,s=60,marker='v',zorder=50)
# print fit['LP_Disp']/1000.,fit['Kc']
#
# elif fit['Type']=='Up' and fit['Law']=='r' and fit['k']==0.0055:
# #ax2.scatter(fit['LP_Disp']/1000.,fit['Kc']*1000,c=color,s=60,marker='^',zorder=50)
# print fit['LP_Disp']/1000.,fit['Kc']
# else:
# pass
#
# Plot B
#
# Set labels and tick sizes
ax3.set_xlabel(r'Load Point Displacement [mm]',fontsize=18,labelpad=15)
ax3.set_ylabel(r'$\kappa = k/k_c$',fontsize=25)
ax3.tick_params(axis='both', which='major', labelsize=16)
# Turns off chart clutter
# Turn off top and right tick marks
#ax3.get_xaxis().tick_bottom()
#ax3.get_yaxis().tick_left()
ax3.get_yaxis().set_ticks([0,0.2,0.4,0.6,0.8,1.0,1.2])
# Turn off top and right splines
#ax3.spines["top"].set_visible(False)
#ax3.spines["right"].set_visible(False)
# Plotting
# Make panel A of displacement/stiffness
ax3.text(-0.1,0.9,'C',transform = ax3.transAxes,fontsize=24)
low_color = 10./1000.
low_color = 0
high_color = 4000./1000.
cmap = plt.get_cmap('rainbow_r')
start=0.15
stop = 1.
colors = cmap(np.linspace(start, stop, cmap.N))
# Create a new colormap from those colors
color_map = LinearSegmentedColormap.from_list('Upper Half', colors)
marker_size = 40
marker_alpha=0.5
color_col=11
for key in experiment_event_data:
event_data = experiment_event_data[key]
k_kc_ratio = []
for k,disp in zip(event_data[:,5],event_data[:,9]/1000.):
k_kc_ratio.append(k/get_kc(disp))
sc = ax3.scatter(event_data[:,9]/1000.,k_kc_ratio,c=event_data[:,color_col]/1000.,s=marker_size,alpha=marker_alpha,vmin=low_color,vmax=high_color,cmap=color_map)
print key,np.min(event_data[:,color_col]), np.max(event_data[:,color_col])
# cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
# plt.colorbar(sc,cax=cbar_ax)
# for experiment in experiments_with_unload_reload:
# df = pd.read_csv('/Users/jleeman/Dropbox/PennState/BiaxExperiments/%s/%s_stiffness_cycles.txt'%(experiment,experiment))
#
# ax3.scatter(df['AvgDisp']/1000.,df['Slope'],color='g',s=50,alpha=0.6)
position=fig.add_axes([0.37,0.16,0.5,0.02]) ## the parameters are the specified position you set [left, bottom, width, height]
cb = fig.colorbar(sc,cax=position,orientation='horizontal', drawedges=False)
cb.solids.set_edgecolor("face")
cb.set_label(r'Peak Slip Velocity [$mm/s$]',fontsize=14)
cb.set_alpha(1)
cb.draw_all()
#position.set_xlim(0,4)
ax3.set_ylim(0,1.4)
ax3.set_xlim(16,50)
ax3.axvspan(40, 50, alpha=0.2, color='k', zorder=0)
# Add call out lines between plots
transFigure = fig.transFigure.inverted()
### LEFT
coord1 = transFigure.transform(ax2.transData.transform([16,0]))
coord2 = transFigure.transform(ax2.transData.transform([16,-0.3]))
line1 = matplotlib.lines.Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
transform=fig.transFigure,color='k')
coord3 = transFigure.transform(ax3.transData.transform([16,1.4]))
line2 = matplotlib.lines.Line2D((coord2[0],coord3[0]),(coord2[1],coord3[1]),
transform=fig.transFigure,color='k')
### RIGHT
coord1 = transFigure.transform(ax2.transData.transform([50,0]))
coord2 = transFigure.transform(ax2.transData.transform([50,-0.3]))
line3 = matplotlib.lines.Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
transform=fig.transFigure,color='k')
coord3 = transFigure.transform(ax3.transData.transform([50,1.4]))
line4 = matplotlib.lines.Line2D((coord2[0],coord3[0]),(coord2[1],coord3[1]),
transform=fig.transFigure,color='k')
fig.lines = line1,line2,line3,line4
# coord1 = transFigure.transform(ax2.transData.transform([16,0]))
# coord2 = transFigure.transform(ax3.transData.transform([16,1.4]))
#
# line = matplotlib.lines.Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
# transform=fig.transFigure,color='k')
# fig.lines = line,
plt.savefig('figure.png', bbox_inches="tight");
| load_event_properties | identifier_name |
make_final_plot.py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.patches import Rectangle
from matplotlib.colors import LinearSegmentedColormap
import matplotlib
from biaxread import *
def load_event_properties(experiment):
"""
Load event property file picks for a given experiment number and return
that data as an array
"""
return np.loadtxt('../Slip_Property_Data/%s_event_properties.txt'%experiment,delimiter=',',skiprows=1)
def load_blacklist(experiment):
"""
Load event numbers from the blacklist file for each experiment and
return them as an array
"""
blacklist = np.loadtxt('../Slip_Property_Data/%s_blacklist.txt'%experiment)
return blacklist
def load_events(experiment):
"""
Loads all events from a given experiment that are not on the blacklist
file for that experiment. Returns array of event properties.
"""
event_properties = load_event_properties(experiment)
blacklist = load_blacklist(experiment)
return np.delete(event_properties,blacklist,axis=0)
def filter(data,col,low,high):
"""
Take array, filter out rows in which the element in the given column
is not in the range low-high (inclusive)
"""
inds = np.where(data[:,col]>=low)
data_trim = data[inds]
inds = np.where(data_trim[:,col]<=high)
data_trim = data_trim[inds]
return data_trim
def get_kc(disp):
slope = (7.e-4-2.6e-6)/(16.-6.)
if disp >= 16:
return 7e-4
else:
return slope*disp - 0.0004
def get_aminusb(experiment,steps):
data = ReadAscii('/Users/jleeman/Dropbox/PennState/BiaxExperiments/%s/%s_data.txt'%(experiment,experiment))
picks = np.loadtxt('%s_picks.txt'%experiment,delimiter=',')
V0,V1,row= np.loadtxt('%s_step_rows.csv'%experiment,delimiter=',',skiprows=1,unpack=True)
dv = V1-V0
friction = picks[:,1].reshape((steps,2))
temp = picks[:,0].reshape((steps,2))
disp = temp[:,0]/1000
d_mu = friction[:,1]-friction[:,0]
amb = d_mu/np.log(V1/V0)
res = np.array([disp,amb,dv])
return np.transpose(res)
def bin_steps(steps,bin_width):
min_disp = np.min(steps[:,0])
max_disp = np.max(steps[:,0])
print "min, max", min_disp,max_disp
print np.shape(steps)
exclude_dv = [-7]
for dv in exclude_dv:
steps = steps[steps[:,2]!=dv]
disp_means = []
amb_means = []
for i in range(int(max_disp/bin_width)+1):
bin_bottom = i * bin_width
bin_top = i * bin_width + bin_width
print "Bin bot,top", bin_bottom, bin_top
#print steps[:,0] > bin_bottom
#print steps[:,0] < bin_top
bin_steps = steps[(steps[:,0] > bin_bottom)]
bin_steps = bin_steps[(bin_steps[:,0] < bin_top)]
print "Steps:", np.shape(bin_steps)
if len(bin_steps)!= 0:
|
print bin_steps[:,2]
return disp_means,amb_means
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
# Tuple of experiments we'll consider for plotting even data from
# Removed p4342 due to data quality issues 2/16/15
experiments_with_event_data = ('p4343','p4344','p4345','p4346',
'p4347','p4348','p4350','p4351')
# Tuple of experiments we'll plot unload/reload stiffness from
experiments_with_unload_reload = ('p4267','p4268','p4269','p4270','p4271',
'p4272','p4273','p4309','p4310','p4311',
'p4312','p4313','p4314','p4316','p4317',
'p4327','p4328','p4329','p4330','p4338',
'p4339')
# Read those experiments into a dictionary of event data
experiment_event_data = dict()
for experiment in experiments_with_event_data:
experiment_event_data[experiment] = load_events(experiment)
# Make the plot
# Setup figure and axes
# Generally plots is ~1.33x width to height (10,7.5 or 12,9)
fig = plt.figure(figsize=(12,13))
#ax1 = plt.subplot(311)
#ax2 = plt.subplot(312)
#ax3 = plt.subplot(313)
ax1 = plt.subplot2grid((5,1), (0,0), rowspan=1)
ax2 = plt.subplot2grid((5,1), (1,0), rowspan=2)
ax3 = plt.subplot2grid((5,1), (3,0), rowspan=2)
ax1.set_position([0.125,0.745,0.775,0.2])
ax3.set_position([0.125,0.1,0.775,0.28])
#
# Plot A top (a-b)
#
p4309_a,p4309_b,p4309_Dc,p4309_amb,step_row = np.loadtxt('p4309_ruina_fits.csv',usecols=[0,2,4,6,9],delimiter=',',skiprows=1,unpack=True)
p4309_data = ReadAscii('/Users/jleeman/Dropbox/PennState/BiaxExperiments/p4309/p4309_data.txt')
step_row = step_row.astype(int)
step_disp = p4309_data['LP_Disp'][step_row]
p4309_step_disp = step_disp/1000.
ax1.set_ylabel(r'(a-b)',fontsize=16)
ax1.tick_params(axis='both', which='major', labelsize=14)
ax1.text(-0.1,0.9,'A',transform = ax1.transAxes,fontsize=24)
ax1.set_xticklabels([])
ax1.get_yaxis().set_ticks([-0.004,-0.002,0.,0.002,0.004])
ax1.scatter(p4309_step_disp,p4309_amb,color='k',
s=70,marker='.',label='p4309')
ax1.axhline(y=0,color='k',linewidth='2',linestyle='--')
# Label velocity regions
ax1.text(35,0.001,'Velocity Strengthening',fontsize=12)
ax1.text(35,-0.003,'Velocity Weakening',fontsize=12)
ax1.set_xlim(0, 55)
ax1.set_ylim(-0.005 ,0.004)
#ax1.text(48,0.003,'p4309',fontsize=12)
p4381_steps = get_aminusb('p4381',83)
p4382_steps = get_aminusb('p4382',84)
p4381_d,p4381_amb = bin_steps(p4381_steps,5)
p4382_d,p4382_amb = bin_steps(p4382_steps,5)
ax1.scatter(p4381_d,p4381_amb,color='k',marker='v',s=70,label='P4381')
ax1.scatter(p4382_d,p4382_amb,color='k',marker='*',s=70,label='P4382')
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles, labels, scatterpoints=1, frameon=False)
#
# Plot A
#
exps = ['p4267','p4268','p4269','p4270','p4271','p4272','p4273',
'p4309','p4310','p4311','p4312','p4313','p4314','p4316','p4317',
'p4327','p4328','p4329','p4330']
# Set labels and tick sizes
#ax2.set_xlabel(r'Average LP Displacement [mm]',fontsize=18)
ax2.set_ylabel(r"""Stiffness, $k$' [1/$\mu$m]x1000""",fontsize=18)
ax2.tick_params(axis='both', which='major', labelsize=16)
ax2.get_yaxis().set_ticks([0,0.5,1,1.5,2,2.5,3,3.5])
ax2.text(-0.1,0.9,'B',transform = ax2.transAxes,fontsize=24)
# Turns off chart clutter
# Turn off top and right tick marks
#ax2.get_xaxis().tick_bottom()
#ax2.get_yaxis().tick_left()
# Turn off top and right splines
#ax2.spines["top"].set_visible(False)
#ax2.spines["right"].set_visible(False)
# Plotting
for exp in experiments_with_unload_reload:
df = pd.read_csv('/Users/jleeman/Dropbox/PennState/BiaxExperiments/%s/%s_stiffness_cycles.txt'%(exp,exp))
temp = df[df['Behavior']=='stable']
ax2.scatter(temp['AvgDisp']/1000.,temp['Slope']*1000,color='k',s=50,alpha=0.6,zorder=50,edgecolor='k')
#temp = df[df['Behavior']=='slow']
#ax2.scatter(temp['AvgDisp']/1000.,temp['Slope'],color='r',s=50,alpha=0.6)
#temp = df[df['Behavior']=='fast']
#ax2.scatter(temp['AvgDisp']/1000.,temp['Slope'],color='r',s=50,alpha=0.6)
# Add rectangle for where figure B comes from
# rect_x1 = 10.
# rect_x2 = 50.
# rect_y1 = 0.
# rect_y2 = 0.0009*1000
# rect_width = rect_x2-rect_x1
# rect_height = rect_y2-rect_y1
# ax2.add_patch(Rectangle((rect_x1,rect_y1),rect_width,rect_height,alpha=0.2, zorder=0,facecolor="k"))
# Set limits
ax2.set_xlim(0,52)
ax2.set_ylim(0,0.004*1000)
low_color = 10./1000.
high_color = 4600./1000.
marker_size = 40
marker_alpha=0.7
color_col=11
for key in experiment_event_data:
event_data = experiment_event_data[key]
sc = ax2.scatter(event_data[:,9]/1000.,event_data[:,5]*1000,s=40,alpha=marker_alpha,color='r',edgecolor='r')
print key,np.min(event_data[:,color_col]), np.max(event_data[:,color_col])
# Plot line for kc definition
ax2.plot([6,16,52],[2.6e-6*1000,7e-4*1000,7e-4*1000],color='k',linewidth=4)
# Add text
ax2.text(35,0.95,'Stable',fontsize=22)
ax2.text(35,0.15,'Unstable',fontsize=22,color='r')
ax2.text(46,0.88,r'$k_c$',fontsize=26,color='k')
# # Plot Kc
# df = pd.read_excel('/Users/jleeman/Dropbox/PennState/BiaxExperiments/p4309/p4309_rsf_fits.xlsx')
#
#
#
#
# for i,fit in df.iterrows():
#
# if fit['Grade'] == 'A':
# #color='#000066'
# #color='#FFFFFF'
# color='#0000FF'
# elif fit['Grade'] == 'B':
# color='#0066CC'
# color='#0000FF'
# #color='#FFFFFF'
# elif fit['Grade'] == 'C':
# #color='#00CCFF'
# color='#FFFFFF'
# continue
# elif fit['Grade'] == 'D':
# #color='#00FFFF'
# color='#FFFFFF'
# continue
#
# if fit['Type']=='Down' and fit['Law']=='r' and fit['k']==0.0055:
# #ax2.scatter(fit['LP_Disp']/1000.,fit['Kc']*1000,c=color,s=60,marker='v',zorder=50)
# print fit['LP_Disp']/1000.,fit['Kc']
#
# elif fit['Type']=='Up' and fit['Law']=='r' and fit['k']==0.0055:
# #ax2.scatter(fit['LP_Disp']/1000.,fit['Kc']*1000,c=color,s=60,marker='^',zorder=50)
# print fit['LP_Disp']/1000.,fit['Kc']
# else:
# pass
#
# Plot B
#
# Set labels and tick sizes
ax3.set_xlabel(r'Load Point Displacement [mm]',fontsize=18,labelpad=15)
ax3.set_ylabel(r'$\kappa = k/k_c$',fontsize=25)
ax3.tick_params(axis='both', which='major', labelsize=16)
# Turns off chart clutter
# Turn off top and right tick marks
#ax3.get_xaxis().tick_bottom()
#ax3.get_yaxis().tick_left()
ax3.get_yaxis().set_ticks([0,0.2,0.4,0.6,0.8,1.0,1.2])
# Turn off top and right splines
#ax3.spines["top"].set_visible(False)
#ax3.spines["right"].set_visible(False)
# Plotting
# Make panel A of displacement/stiffness
ax3.text(-0.1,0.9,'C',transform = ax3.transAxes,fontsize=24)
low_color = 10./1000.
low_color = 0
high_color = 4000./1000.
cmap = plt.get_cmap('rainbow_r')
start=0.15
stop = 1.
colors = cmap(np.linspace(start, stop, cmap.N))
# Create a new colormap from those colors
color_map = LinearSegmentedColormap.from_list('Upper Half', colors)
marker_size = 40
marker_alpha=0.5
color_col=11
for key in experiment_event_data:
event_data = experiment_event_data[key]
k_kc_ratio = []
for k,disp in zip(event_data[:,5],event_data[:,9]/1000.):
k_kc_ratio.append(k/get_kc(disp))
sc = ax3.scatter(event_data[:,9]/1000.,k_kc_ratio,c=event_data[:,color_col]/1000.,s=marker_size,alpha=marker_alpha,vmin=low_color,vmax=high_color,cmap=color_map)
print key,np.min(event_data[:,color_col]), np.max(event_data[:,color_col])
# cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
# plt.colorbar(sc,cax=cbar_ax)
# for experiment in experiments_with_unload_reload:
# df = pd.read_csv('/Users/jleeman/Dropbox/PennState/BiaxExperiments/%s/%s_stiffness_cycles.txt'%(experiment,experiment))
#
# ax3.scatter(df['AvgDisp']/1000.,df['Slope'],color='g',s=50,alpha=0.6)
position=fig.add_axes([0.37,0.16,0.5,0.02]) ## the parameters are the specified position you set [left, bottom, width, height]
cb = fig.colorbar(sc,cax=position,orientation='horizontal', drawedges=False)
cb.solids.set_edgecolor("face")
cb.set_label(r'Peak Slip Velocity [$mm/s$]',fontsize=14)
cb.set_alpha(1)
cb.draw_all()
#position.set_xlim(0,4)
ax3.set_ylim(0,1.4)
ax3.set_xlim(16,50)
ax3.axvspan(40, 50, alpha=0.2, color='k', zorder=0)
# Add call out lines between plots
transFigure = fig.transFigure.inverted()
### LEFT
coord1 = transFigure.transform(ax2.transData.transform([16,0]))
coord2 = transFigure.transform(ax2.transData.transform([16,-0.3]))
line1 = matplotlib.lines.Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
transform=fig.transFigure,color='k')
coord3 = transFigure.transform(ax3.transData.transform([16,1.4]))
line2 = matplotlib.lines.Line2D((coord2[0],coord3[0]),(coord2[1],coord3[1]),
transform=fig.transFigure,color='k')
### RIGHT
coord1 = transFigure.transform(ax2.transData.transform([50,0]))
coord2 = transFigure.transform(ax2.transData.transform([50,-0.3]))
line3 = matplotlib.lines.Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
transform=fig.transFigure,color='k')
coord3 = transFigure.transform(ax3.transData.transform([50,1.4]))
line4 = matplotlib.lines.Line2D((coord2[0],coord3[0]),(coord2[1],coord3[1]),
transform=fig.transFigure,color='k')
fig.lines = line1,line2,line3,line4
# coord1 = transFigure.transform(ax2.transData.transform([16,0]))
# coord2 = transFigure.transform(ax3.transData.transform([16,1.4]))
#
# line = matplotlib.lines.Line2D((coord1[0],coord2[0]),(coord1[1],coord2[1]),
# transform=fig.transFigure,color='k')
# fig.lines = line,
plt.savefig('figure.png', bbox_inches="tight");
| disp_means.append(np.mean(bin_steps[:,0]))
amb_means.append(np.mean(bin_steps[:,1]))
#amb_means.append(np.median(bin_steps[:,1])) | conditional_block |
process.go | // Copyright 2020-2021 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plan
import (
"fmt"
"github.com/dolthub/go-mysql-server/sql/transform"
"github.com/dolthub/go-mysql-server/sql"
)
// QueryProcess represents a running query process node. It will use a callback
// to notify when it has finished running.
// TODO: QueryProcess -> trackedRowIter is required to dispose certain iter caches.
// Make a proper scheduler interface to perform lifecycle management, caching, and
// scan attaching
type QueryProcess struct {
UnaryNode
Notify NotifyFunc
}
var _ sql.Node = (*QueryProcess)(nil)
var _ sql.CollationCoercible = (*QueryProcess)(nil)
// NotifyFunc is a function to notify about some event.
type NotifyFunc func()
// NewQueryProcess creates a new QueryProcess node.
func NewQueryProcess(node sql.Node, notify NotifyFunc) *QueryProcess {
return &QueryProcess{UnaryNode{Child: node}, notify}
}
func (p *QueryProcess) Child() sql.Node {
return p.UnaryNode.Child
}
func (p *QueryProcess) IsReadOnly() bool {
return p.Child().IsReadOnly()
}
// WithChildren implements the Node interface.
func (p *QueryProcess) WithChildren(children ...sql.Node) (sql.Node, error) {
if len(children) != 1 {
return nil, sql.ErrInvalidChildrenNumber.New(p, len(children), 1)
}
return NewQueryProcess(children[0], p.Notify), nil
}
// CheckPrivileges implements the interface sql.Node.
func (p *QueryProcess) CheckPrivileges(ctx *sql.Context, opChecker sql.PrivilegedOperationChecker) bool {
return p.Child().CheckPrivileges(ctx, opChecker)
}
// CollationCoercibility implements the interface sql.CollationCoercible.
func (p *QueryProcess) CollationCoercibility(ctx *sql.Context) (collation sql.CollationID, coercibility byte) {
return sql.GetCoercibility(ctx, p.Child())
}
func (p *QueryProcess) String() string { return p.Child().String() }
func (p *QueryProcess) DebugString() string {
tp := sql.NewTreePrinter()
_ = tp.WriteNode("QueryProcess")
_ = tp.WriteChildren(sql.DebugString(p.Child()))
return tp.String()
}
// ShouldSetFoundRows returns whether the query process should set the FOUND_ROWS query variable. It should do this for
// any select except a Limit with a SQL_CALC_FOUND_ROWS modifier, which is handled in the Limit node itself.
func (p *QueryProcess) ShouldSetFoundRows() bool {
var fromLimit *bool
var fromTopN *bool
transform.Inspect(p.Child(), func(n sql.Node) bool {
switch n := n.(type) {
case *StartTransaction:
return true
case *Limit:
fromLimit = &n.CalcFoundRows
return true
case *TopN:
fromTopN = &n.CalcFoundRows
return true
default:
return true
}
})
if fromLimit == nil && fromTopN == nil {
return true
}
if fromTopN != nil {
return !*fromTopN
}
return !*fromLimit
}
// ProcessIndexableTable is a wrapper for sql.Tables inside a query process
// that support indexing.
// It notifies the process manager about the status of a query when a
// partition is processed.
type ProcessIndexableTable struct {
sql.DriverIndexableTable
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
func (t *ProcessIndexableTable) DebugString() string {
tp := sql.NewTreePrinter()
_ = tp.WriteNode("ProcessIndexableTable")
_ = tp.WriteChildren(sql.DebugString(t.Underlying()))
return tp.String()
}
// NewProcessIndexableTable returns a new ProcessIndexableTable.
func NewProcessIndexableTable(t sql.DriverIndexableTable, onPartitionDone, onPartitionStart, OnRowNext NamedNotifyFunc) *ProcessIndexableTable {
return &ProcessIndexableTable{t, onPartitionDone, onPartitionStart, OnRowNext}
}
// Underlying implements sql.TableWrapper interface.
func (t *ProcessIndexableTable) Underlying() sql.Table {
return t.DriverIndexableTable
}
// IndexKeyValues implements the sql.IndexableTable interface.
func (t *ProcessIndexableTable) IndexKeyValues(
ctx *sql.Context,
columns []string,
) (sql.PartitionIndexKeyValueIter, error) {
iter, err := t.DriverIndexableTable.IndexKeyValues(ctx, columns)
if err != nil {
return nil, err
}
return &trackedPartitionIndexKeyValueIter{iter, t.OnPartitionDone, t.OnPartitionStart, t.OnRowNext}, nil
}
// PartitionRows implements the sql.Table interface.
func (t *ProcessIndexableTable) PartitionRows(ctx *sql.Context, p sql.Partition) (sql.RowIter, error) {
iter, err := t.DriverIndexableTable.PartitionRows(ctx, p)
if err != nil {
return nil, err
}
return t.newPartIter(p, iter)
}
func (t *ProcessIndexableTable) newPartIter(p sql.Partition, iter sql.RowIter) (sql.RowIter, error) {
partitionName := partitionName(p)
if t.OnPartitionStart != nil {
t.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if t.OnPartitionDone != nil {
onDone = func() {
t.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if t.OnRowNext != nil {
onNext = func() {
t.OnRowNext(partitionName)
}
}
return NewTrackedRowIter(nil, iter, onNext, onDone), nil
}
var _ sql.DriverIndexableTable = (*ProcessIndexableTable)(nil)
// NamedNotifyFunc is a function to notify about some event with a string argument.
type NamedNotifyFunc func(name string)
// ProcessTable is a wrapper for sql.Tables inside a query process. It
// notifies the process manager about the status of a query when a partition
// is processed.
type ProcessTable struct {
sql.Table
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
// NewProcessTable returns a new ProcessTable.
func NewProcessTable(t sql.Table, onPartitionDone, onPartitionStart, OnRowNext NamedNotifyFunc) *ProcessTable {
return &ProcessTable{t, onPartitionDone, onPartitionStart, OnRowNext}
}
// Underlying implements sql.TableWrapper interface.
func (t *ProcessTable) Underlying() sql.Table {
return t.Table
}
// PartitionRows implements the sql.Table interface.
func (t *ProcessTable) PartitionRows(ctx *sql.Context, p sql.Partition) (sql.RowIter, error) {
iter, err := t.Table.PartitionRows(ctx, p)
if err != nil {
return nil, err
}
onDone, onNext := t.notifyFuncsForPartition(p)
return NewTrackedRowIter(nil, iter, onNext, onDone), nil
}
// notifyFuncsForPartition returns the OnDone and OnNext NotifyFuncs for the partition given
func (t *ProcessTable) notifyFuncsForPartition(p sql.Partition) (NotifyFunc, NotifyFunc) {
partitionName := partitionName(p)
if t.OnPartitionStart != nil {
t.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if t.OnPartitionDone != nil {
onDone = func() {
t.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if t.OnRowNext != nil {
onNext = func() {
t.OnRowNext(partitionName)
}
}
return onDone, onNext
}
func GetQueryType(child sql.Node) queryType {
// TODO: behavior of CALL is not specified in the docs. Needs investigation
var queryType queryType = QueryTypeSelect
transform.Inspect(child, func(node sql.Node) bool {
if IsNoRowNode(node) {
queryType = QueryTypeDdl
return false
}
switch node.(type) {
case *Signal:
queryType = QueryTypeDdl
return false
case nil:
return false
case *TriggerExecutor, *InsertInto, *Update, *DeleteFrom, *LoadData:
// TODO: AlterTable belongs here too, but we don't keep track of updated rows there so we can't return an
// accurate ROW_COUNT() anyway.
queryType = QueryTypeUpdate
return false
}
return true
})
return queryType
}
type queryType byte
const (
QueryTypeSelect = iota
QueryTypeDdl
QueryTypeUpdate
)
type trackedRowIter struct {
node sql.Node
iter sql.RowIter
numRows int64
QueryType queryType
ShouldSetFoundRows bool
onDone NotifyFunc
onNext NotifyFunc
}
func NewTrackedRowIter(
node sql.Node,
iter sql.RowIter,
onNext NotifyFunc,
onDone NotifyFunc,
) *trackedRowIter {
return &trackedRowIter{node: node, iter: iter, onDone: onDone, onNext: onNext}
}
func (i *trackedRowIter) | () {
if i.onDone != nil {
i.onDone()
i.onDone = nil
}
if i.node != nil {
i.Dispose()
i.node = nil
}
}
func disposeNode(n sql.Node) {
transform.Inspect(n, func(node sql.Node) bool {
sql.Dispose(node)
return true
})
transform.InspectExpressions(n, func(e sql.Expression) bool {
sql.Dispose(e)
return true
})
}
func (i *trackedRowIter) Dispose() {
if i.node != nil {
disposeNode(i.node)
}
}
func (i *trackedRowIter) Next(ctx *sql.Context) (sql.Row, error) {
row, err := i.iter.Next(ctx)
if err != nil {
return nil, err
}
i.numRows++
if i.onNext != nil {
i.onNext()
}
return row, nil
}
func (i *trackedRowIter) Close(ctx *sql.Context) error {
err := i.iter.Close(ctx)
i.updateSessionVars(ctx)
i.done()
return err
}
func (i *trackedRowIter) updateSessionVars(ctx *sql.Context) {
switch i.QueryType {
case QueryTypeSelect:
ctx.SetLastQueryInfo(sql.RowCount, -1)
case QueryTypeDdl:
ctx.SetLastQueryInfo(sql.RowCount, 0)
case QueryTypeUpdate:
// This is handled by RowUpdateAccumulator
default:
panic(fmt.Sprintf("Unexpected query type %v", i.QueryType))
}
if i.ShouldSetFoundRows {
ctx.SetLastQueryInfo(sql.FoundRows, i.numRows)
}
}
type trackedPartitionIndexKeyValueIter struct {
sql.PartitionIndexKeyValueIter
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
func (i *trackedPartitionIndexKeyValueIter) Next(ctx *sql.Context) (sql.Partition, sql.IndexKeyValueIter, error) {
p, iter, err := i.PartitionIndexKeyValueIter.Next(ctx)
if err != nil {
return nil, nil, err
}
partitionName := partitionName(p)
if i.OnPartitionStart != nil {
i.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if i.OnPartitionDone != nil {
onDone = func() {
i.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if i.OnRowNext != nil {
onNext = func() {
i.OnRowNext(partitionName)
}
}
return p, &trackedIndexKeyValueIter{iter, onDone, onNext}, nil
}
type trackedIndexKeyValueIter struct {
iter sql.IndexKeyValueIter
onDone NotifyFunc
onNext NotifyFunc
}
func (i *trackedIndexKeyValueIter) done() {
if i.onDone != nil {
i.onDone()
i.onDone = nil
}
}
func (i *trackedIndexKeyValueIter) Close(ctx *sql.Context) (err error) {
if i.iter != nil {
err = i.iter.Close(ctx)
}
i.done()
return err
}
func (i *trackedIndexKeyValueIter) Next(ctx *sql.Context) ([]interface{}, []byte, error) {
v, k, err := i.iter.Next(ctx)
if err != nil {
return nil, nil, err
}
if i.onNext != nil {
i.onNext()
}
return v, k, nil
}
func partitionName(p sql.Partition) string {
if n, ok := p.(sql.Nameable); ok {
return n.Name()
}
return string(p.Key())
}
func IsDDLNode(node sql.Node) bool {
switch node.(type) {
case *CreateTable, *DropTable, *Truncate,
*AddColumn, *ModifyColumn, *DropColumn,
*CreateDB, *DropDB, *AlterDB,
*RenameTable, *RenameColumn,
*CreateView, *DropView,
*CreateIndex, *AlterIndex, *DropIndex,
*CreateProcedure, *DropProcedure,
*CreateEvent, *DropEvent,
*CreateForeignKey, *DropForeignKey,
*CreateCheck, *DropCheck,
*CreateTrigger, *DropTrigger, *AlterPK,
*Block: // Block as a top level node wraps a set of ALTER TABLE statements
return true
default:
return false
}
}
func IsShowNode(node sql.Node) bool {
switch node.(type) {
case *ShowTables, *ShowCreateTable,
*ShowTriggers, *ShowCreateTrigger,
*ShowDatabases, *ShowCreateDatabase,
*ShowColumns, *ShowIndexes,
*ShowProcessList, *ShowTableStatus,
*ShowVariables, ShowWarnings,
*ShowEvents, *ShowCreateEvent:
return true
default:
return false
}
}
// IsNoRowNode returns whether this are node interacts only with schema and the catalog, not with any table
// rows.
func IsNoRowNode(node sql.Node) bool {
return IsDDLNode(node) || IsShowNode(node)
}
func IsReadOnly(node sql.Node) bool {
return node.IsReadOnly()
}
| done | identifier_name |
process.go | // Copyright 2020-2021 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plan
import (
"fmt"
"github.com/dolthub/go-mysql-server/sql/transform"
"github.com/dolthub/go-mysql-server/sql"
)
// QueryProcess represents a running query process node. It will use a callback
// to notify when it has finished running.
// TODO: QueryProcess -> trackedRowIter is required to dispose certain iter caches.
// Make a proper scheduler interface to perform lifecycle management, caching, and
// scan attaching
type QueryProcess struct {
UnaryNode
Notify NotifyFunc
}
var _ sql.Node = (*QueryProcess)(nil)
var _ sql.CollationCoercible = (*QueryProcess)(nil)
// NotifyFunc is a function to notify about some event.
type NotifyFunc func()
// NewQueryProcess creates a new QueryProcess node.
func NewQueryProcess(node sql.Node, notify NotifyFunc) *QueryProcess {
return &QueryProcess{UnaryNode{Child: node}, notify}
}
func (p *QueryProcess) Child() sql.Node {
return p.UnaryNode.Child |
func (p *QueryProcess) IsReadOnly() bool {
return p.Child().IsReadOnly()
}
// WithChildren implements the Node interface.
func (p *QueryProcess) WithChildren(children ...sql.Node) (sql.Node, error) {
if len(children) != 1 {
return nil, sql.ErrInvalidChildrenNumber.New(p, len(children), 1)
}
return NewQueryProcess(children[0], p.Notify), nil
}
// CheckPrivileges implements the interface sql.Node.
func (p *QueryProcess) CheckPrivileges(ctx *sql.Context, opChecker sql.PrivilegedOperationChecker) bool {
return p.Child().CheckPrivileges(ctx, opChecker)
}
// CollationCoercibility implements the interface sql.CollationCoercible.
func (p *QueryProcess) CollationCoercibility(ctx *sql.Context) (collation sql.CollationID, coercibility byte) {
return sql.GetCoercibility(ctx, p.Child())
}
func (p *QueryProcess) String() string { return p.Child().String() }
func (p *QueryProcess) DebugString() string {
tp := sql.NewTreePrinter()
_ = tp.WriteNode("QueryProcess")
_ = tp.WriteChildren(sql.DebugString(p.Child()))
return tp.String()
}
// ShouldSetFoundRows returns whether the query process should set the FOUND_ROWS query variable. It should do this for
// any select except a Limit with a SQL_CALC_FOUND_ROWS modifier, which is handled in the Limit node itself.
func (p *QueryProcess) ShouldSetFoundRows() bool {
var fromLimit *bool
var fromTopN *bool
transform.Inspect(p.Child(), func(n sql.Node) bool {
switch n := n.(type) {
case *StartTransaction:
return true
case *Limit:
fromLimit = &n.CalcFoundRows
return true
case *TopN:
fromTopN = &n.CalcFoundRows
return true
default:
return true
}
})
if fromLimit == nil && fromTopN == nil {
return true
}
if fromTopN != nil {
return !*fromTopN
}
return !*fromLimit
}
// ProcessIndexableTable is a wrapper for sql.Tables inside a query process
// that support indexing.
// It notifies the process manager about the status of a query when a
// partition is processed.
type ProcessIndexableTable struct {
sql.DriverIndexableTable
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
func (t *ProcessIndexableTable) DebugString() string {
tp := sql.NewTreePrinter()
_ = tp.WriteNode("ProcessIndexableTable")
_ = tp.WriteChildren(sql.DebugString(t.Underlying()))
return tp.String()
}
// NewProcessIndexableTable returns a new ProcessIndexableTable.
func NewProcessIndexableTable(t sql.DriverIndexableTable, onPartitionDone, onPartitionStart, OnRowNext NamedNotifyFunc) *ProcessIndexableTable {
return &ProcessIndexableTable{t, onPartitionDone, onPartitionStart, OnRowNext}
}
// Underlying implements sql.TableWrapper interface.
func (t *ProcessIndexableTable) Underlying() sql.Table {
return t.DriverIndexableTable
}
// IndexKeyValues implements the sql.IndexableTable interface.
func (t *ProcessIndexableTable) IndexKeyValues(
ctx *sql.Context,
columns []string,
) (sql.PartitionIndexKeyValueIter, error) {
iter, err := t.DriverIndexableTable.IndexKeyValues(ctx, columns)
if err != nil {
return nil, err
}
return &trackedPartitionIndexKeyValueIter{iter, t.OnPartitionDone, t.OnPartitionStart, t.OnRowNext}, nil
}
// PartitionRows implements the sql.Table interface.
func (t *ProcessIndexableTable) PartitionRows(ctx *sql.Context, p sql.Partition) (sql.RowIter, error) {
iter, err := t.DriverIndexableTable.PartitionRows(ctx, p)
if err != nil {
return nil, err
}
return t.newPartIter(p, iter)
}
func (t *ProcessIndexableTable) newPartIter(p sql.Partition, iter sql.RowIter) (sql.RowIter, error) {
partitionName := partitionName(p)
if t.OnPartitionStart != nil {
t.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if t.OnPartitionDone != nil {
onDone = func() {
t.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if t.OnRowNext != nil {
onNext = func() {
t.OnRowNext(partitionName)
}
}
return NewTrackedRowIter(nil, iter, onNext, onDone), nil
}
var _ sql.DriverIndexableTable = (*ProcessIndexableTable)(nil)
// NamedNotifyFunc is a function to notify about some event with a string argument.
type NamedNotifyFunc func(name string)
// ProcessTable is a wrapper for sql.Tables inside a query process. It
// notifies the process manager about the status of a query when a partition
// is processed.
type ProcessTable struct {
sql.Table
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
// NewProcessTable returns a new ProcessTable.
func NewProcessTable(t sql.Table, onPartitionDone, onPartitionStart, OnRowNext NamedNotifyFunc) *ProcessTable {
return &ProcessTable{t, onPartitionDone, onPartitionStart, OnRowNext}
}
// Underlying implements sql.TableWrapper interface.
func (t *ProcessTable) Underlying() sql.Table {
return t.Table
}
// PartitionRows implements the sql.Table interface.
func (t *ProcessTable) PartitionRows(ctx *sql.Context, p sql.Partition) (sql.RowIter, error) {
iter, err := t.Table.PartitionRows(ctx, p)
if err != nil {
return nil, err
}
onDone, onNext := t.notifyFuncsForPartition(p)
return NewTrackedRowIter(nil, iter, onNext, onDone), nil
}
// notifyFuncsForPartition returns the OnDone and OnNext NotifyFuncs for the partition given
func (t *ProcessTable) notifyFuncsForPartition(p sql.Partition) (NotifyFunc, NotifyFunc) {
partitionName := partitionName(p)
if t.OnPartitionStart != nil {
t.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if t.OnPartitionDone != nil {
onDone = func() {
t.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if t.OnRowNext != nil {
onNext = func() {
t.OnRowNext(partitionName)
}
}
return onDone, onNext
}
func GetQueryType(child sql.Node) queryType {
// TODO: behavior of CALL is not specified in the docs. Needs investigation
var queryType queryType = QueryTypeSelect
transform.Inspect(child, func(node sql.Node) bool {
if IsNoRowNode(node) {
queryType = QueryTypeDdl
return false
}
switch node.(type) {
case *Signal:
queryType = QueryTypeDdl
return false
case nil:
return false
case *TriggerExecutor, *InsertInto, *Update, *DeleteFrom, *LoadData:
// TODO: AlterTable belongs here too, but we don't keep track of updated rows there so we can't return an
// accurate ROW_COUNT() anyway.
queryType = QueryTypeUpdate
return false
}
return true
})
return queryType
}
type queryType byte
const (
QueryTypeSelect = iota
QueryTypeDdl
QueryTypeUpdate
)
type trackedRowIter struct {
node sql.Node
iter sql.RowIter
numRows int64
QueryType queryType
ShouldSetFoundRows bool
onDone NotifyFunc
onNext NotifyFunc
}
func NewTrackedRowIter(
node sql.Node,
iter sql.RowIter,
onNext NotifyFunc,
onDone NotifyFunc,
) *trackedRowIter {
return &trackedRowIter{node: node, iter: iter, onDone: onDone, onNext: onNext}
}
func (i *trackedRowIter) done() {
if i.onDone != nil {
i.onDone()
i.onDone = nil
}
if i.node != nil {
i.Dispose()
i.node = nil
}
}
func disposeNode(n sql.Node) {
transform.Inspect(n, func(node sql.Node) bool {
sql.Dispose(node)
return true
})
transform.InspectExpressions(n, func(e sql.Expression) bool {
sql.Dispose(e)
return true
})
}
func (i *trackedRowIter) Dispose() {
if i.node != nil {
disposeNode(i.node)
}
}
func (i *trackedRowIter) Next(ctx *sql.Context) (sql.Row, error) {
row, err := i.iter.Next(ctx)
if err != nil {
return nil, err
}
i.numRows++
if i.onNext != nil {
i.onNext()
}
return row, nil
}
func (i *trackedRowIter) Close(ctx *sql.Context) error {
err := i.iter.Close(ctx)
i.updateSessionVars(ctx)
i.done()
return err
}
func (i *trackedRowIter) updateSessionVars(ctx *sql.Context) {
switch i.QueryType {
case QueryTypeSelect:
ctx.SetLastQueryInfo(sql.RowCount, -1)
case QueryTypeDdl:
ctx.SetLastQueryInfo(sql.RowCount, 0)
case QueryTypeUpdate:
// This is handled by RowUpdateAccumulator
default:
panic(fmt.Sprintf("Unexpected query type %v", i.QueryType))
}
if i.ShouldSetFoundRows {
ctx.SetLastQueryInfo(sql.FoundRows, i.numRows)
}
}
type trackedPartitionIndexKeyValueIter struct {
sql.PartitionIndexKeyValueIter
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
func (i *trackedPartitionIndexKeyValueIter) Next(ctx *sql.Context) (sql.Partition, sql.IndexKeyValueIter, error) {
p, iter, err := i.PartitionIndexKeyValueIter.Next(ctx)
if err != nil {
return nil, nil, err
}
partitionName := partitionName(p)
if i.OnPartitionStart != nil {
i.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if i.OnPartitionDone != nil {
onDone = func() {
i.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if i.OnRowNext != nil {
onNext = func() {
i.OnRowNext(partitionName)
}
}
return p, &trackedIndexKeyValueIter{iter, onDone, onNext}, nil
}
type trackedIndexKeyValueIter struct {
iter sql.IndexKeyValueIter
onDone NotifyFunc
onNext NotifyFunc
}
func (i *trackedIndexKeyValueIter) done() {
if i.onDone != nil {
i.onDone()
i.onDone = nil
}
}
func (i *trackedIndexKeyValueIter) Close(ctx *sql.Context) (err error) {
if i.iter != nil {
err = i.iter.Close(ctx)
}
i.done()
return err
}
func (i *trackedIndexKeyValueIter) Next(ctx *sql.Context) ([]interface{}, []byte, error) {
v, k, err := i.iter.Next(ctx)
if err != nil {
return nil, nil, err
}
if i.onNext != nil {
i.onNext()
}
return v, k, nil
}
func partitionName(p sql.Partition) string {
if n, ok := p.(sql.Nameable); ok {
return n.Name()
}
return string(p.Key())
}
func IsDDLNode(node sql.Node) bool {
switch node.(type) {
case *CreateTable, *DropTable, *Truncate,
*AddColumn, *ModifyColumn, *DropColumn,
*CreateDB, *DropDB, *AlterDB,
*RenameTable, *RenameColumn,
*CreateView, *DropView,
*CreateIndex, *AlterIndex, *DropIndex,
*CreateProcedure, *DropProcedure,
*CreateEvent, *DropEvent,
*CreateForeignKey, *DropForeignKey,
*CreateCheck, *DropCheck,
*CreateTrigger, *DropTrigger, *AlterPK,
*Block: // Block as a top level node wraps a set of ALTER TABLE statements
return true
default:
return false
}
}
func IsShowNode(node sql.Node) bool {
switch node.(type) {
case *ShowTables, *ShowCreateTable,
*ShowTriggers, *ShowCreateTrigger,
*ShowDatabases, *ShowCreateDatabase,
*ShowColumns, *ShowIndexes,
*ShowProcessList, *ShowTableStatus,
*ShowVariables, ShowWarnings,
*ShowEvents, *ShowCreateEvent:
return true
default:
return false
}
}
// IsNoRowNode returns whether this are node interacts only with schema and the catalog, not with any table
// rows.
func IsNoRowNode(node sql.Node) bool {
return IsDDLNode(node) || IsShowNode(node)
}
func IsReadOnly(node sql.Node) bool {
return node.IsReadOnly()
} | } | random_line_split |
process.go | // Copyright 2020-2021 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plan
import (
"fmt"
"github.com/dolthub/go-mysql-server/sql/transform"
"github.com/dolthub/go-mysql-server/sql"
)
// QueryProcess represents a running query process node. It will use a callback
// to notify when it has finished running.
// TODO: QueryProcess -> trackedRowIter is required to dispose certain iter caches.
// Make a proper scheduler interface to perform lifecycle management, caching, and
// scan attaching
type QueryProcess struct {
UnaryNode
Notify NotifyFunc
}
var _ sql.Node = (*QueryProcess)(nil)
var _ sql.CollationCoercible = (*QueryProcess)(nil)
// NotifyFunc is a function to notify about some event.
type NotifyFunc func()
// NewQueryProcess creates a new QueryProcess node.
func NewQueryProcess(node sql.Node, notify NotifyFunc) *QueryProcess {
return &QueryProcess{UnaryNode{Child: node}, notify}
}
func (p *QueryProcess) Child() sql.Node {
return p.UnaryNode.Child
}
func (p *QueryProcess) IsReadOnly() bool {
return p.Child().IsReadOnly()
}
// WithChildren implements the Node interface.
func (p *QueryProcess) WithChildren(children ...sql.Node) (sql.Node, error) {
if len(children) != 1 {
return nil, sql.ErrInvalidChildrenNumber.New(p, len(children), 1)
}
return NewQueryProcess(children[0], p.Notify), nil
}
// CheckPrivileges implements the interface sql.Node.
func (p *QueryProcess) CheckPrivileges(ctx *sql.Context, opChecker sql.PrivilegedOperationChecker) bool |
// CollationCoercibility implements the interface sql.CollationCoercible.
func (p *QueryProcess) CollationCoercibility(ctx *sql.Context) (collation sql.CollationID, coercibility byte) {
return sql.GetCoercibility(ctx, p.Child())
}
func (p *QueryProcess) String() string { return p.Child().String() }
func (p *QueryProcess) DebugString() string {
tp := sql.NewTreePrinter()
_ = tp.WriteNode("QueryProcess")
_ = tp.WriteChildren(sql.DebugString(p.Child()))
return tp.String()
}
// ShouldSetFoundRows returns whether the query process should set the FOUND_ROWS query variable. It should do this for
// any select except a Limit with a SQL_CALC_FOUND_ROWS modifier, which is handled in the Limit node itself.
func (p *QueryProcess) ShouldSetFoundRows() bool {
var fromLimit *bool
var fromTopN *bool
transform.Inspect(p.Child(), func(n sql.Node) bool {
switch n := n.(type) {
case *StartTransaction:
return true
case *Limit:
fromLimit = &n.CalcFoundRows
return true
case *TopN:
fromTopN = &n.CalcFoundRows
return true
default:
return true
}
})
if fromLimit == nil && fromTopN == nil {
return true
}
if fromTopN != nil {
return !*fromTopN
}
return !*fromLimit
}
// ProcessIndexableTable is a wrapper for sql.Tables inside a query process
// that support indexing.
// It notifies the process manager about the status of a query when a
// partition is processed.
type ProcessIndexableTable struct {
sql.DriverIndexableTable
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
func (t *ProcessIndexableTable) DebugString() string {
tp := sql.NewTreePrinter()
_ = tp.WriteNode("ProcessIndexableTable")
_ = tp.WriteChildren(sql.DebugString(t.Underlying()))
return tp.String()
}
// NewProcessIndexableTable returns a new ProcessIndexableTable.
func NewProcessIndexableTable(t sql.DriverIndexableTable, onPartitionDone, onPartitionStart, OnRowNext NamedNotifyFunc) *ProcessIndexableTable {
return &ProcessIndexableTable{t, onPartitionDone, onPartitionStart, OnRowNext}
}
// Underlying implements sql.TableWrapper interface.
func (t *ProcessIndexableTable) Underlying() sql.Table {
return t.DriverIndexableTable
}
// IndexKeyValues implements the sql.IndexableTable interface.
func (t *ProcessIndexableTable) IndexKeyValues(
ctx *sql.Context,
columns []string,
) (sql.PartitionIndexKeyValueIter, error) {
iter, err := t.DriverIndexableTable.IndexKeyValues(ctx, columns)
if err != nil {
return nil, err
}
return &trackedPartitionIndexKeyValueIter{iter, t.OnPartitionDone, t.OnPartitionStart, t.OnRowNext}, nil
}
// PartitionRows implements the sql.Table interface.
func (t *ProcessIndexableTable) PartitionRows(ctx *sql.Context, p sql.Partition) (sql.RowIter, error) {
iter, err := t.DriverIndexableTable.PartitionRows(ctx, p)
if err != nil {
return nil, err
}
return t.newPartIter(p, iter)
}
func (t *ProcessIndexableTable) newPartIter(p sql.Partition, iter sql.RowIter) (sql.RowIter, error) {
partitionName := partitionName(p)
if t.OnPartitionStart != nil {
t.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if t.OnPartitionDone != nil {
onDone = func() {
t.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if t.OnRowNext != nil {
onNext = func() {
t.OnRowNext(partitionName)
}
}
return NewTrackedRowIter(nil, iter, onNext, onDone), nil
}
var _ sql.DriverIndexableTable = (*ProcessIndexableTable)(nil)
// NamedNotifyFunc is a function to notify about some event with a string argument.
type NamedNotifyFunc func(name string)
// ProcessTable is a wrapper for sql.Tables inside a query process. It
// notifies the process manager about the status of a query when a partition
// is processed.
type ProcessTable struct {
sql.Table
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
// NewProcessTable returns a new ProcessTable.
func NewProcessTable(t sql.Table, onPartitionDone, onPartitionStart, OnRowNext NamedNotifyFunc) *ProcessTable {
return &ProcessTable{t, onPartitionDone, onPartitionStart, OnRowNext}
}
// Underlying implements sql.TableWrapper interface.
func (t *ProcessTable) Underlying() sql.Table {
return t.Table
}
// PartitionRows implements the sql.Table interface.
func (t *ProcessTable) PartitionRows(ctx *sql.Context, p sql.Partition) (sql.RowIter, error) {
iter, err := t.Table.PartitionRows(ctx, p)
if err != nil {
return nil, err
}
onDone, onNext := t.notifyFuncsForPartition(p)
return NewTrackedRowIter(nil, iter, onNext, onDone), nil
}
// notifyFuncsForPartition returns the OnDone and OnNext NotifyFuncs for the partition given
func (t *ProcessTable) notifyFuncsForPartition(p sql.Partition) (NotifyFunc, NotifyFunc) {
partitionName := partitionName(p)
if t.OnPartitionStart != nil {
t.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if t.OnPartitionDone != nil {
onDone = func() {
t.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if t.OnRowNext != nil {
onNext = func() {
t.OnRowNext(partitionName)
}
}
return onDone, onNext
}
func GetQueryType(child sql.Node) queryType {
// TODO: behavior of CALL is not specified in the docs. Needs investigation
var queryType queryType = QueryTypeSelect
transform.Inspect(child, func(node sql.Node) bool {
if IsNoRowNode(node) {
queryType = QueryTypeDdl
return false
}
switch node.(type) {
case *Signal:
queryType = QueryTypeDdl
return false
case nil:
return false
case *TriggerExecutor, *InsertInto, *Update, *DeleteFrom, *LoadData:
// TODO: AlterTable belongs here too, but we don't keep track of updated rows there so we can't return an
// accurate ROW_COUNT() anyway.
queryType = QueryTypeUpdate
return false
}
return true
})
return queryType
}
type queryType byte
const (
QueryTypeSelect = iota
QueryTypeDdl
QueryTypeUpdate
)
type trackedRowIter struct {
node sql.Node
iter sql.RowIter
numRows int64
QueryType queryType
ShouldSetFoundRows bool
onDone NotifyFunc
onNext NotifyFunc
}
func NewTrackedRowIter(
node sql.Node,
iter sql.RowIter,
onNext NotifyFunc,
onDone NotifyFunc,
) *trackedRowIter {
return &trackedRowIter{node: node, iter: iter, onDone: onDone, onNext: onNext}
}
func (i *trackedRowIter) done() {
if i.onDone != nil {
i.onDone()
i.onDone = nil
}
if i.node != nil {
i.Dispose()
i.node = nil
}
}
func disposeNode(n sql.Node) {
transform.Inspect(n, func(node sql.Node) bool {
sql.Dispose(node)
return true
})
transform.InspectExpressions(n, func(e sql.Expression) bool {
sql.Dispose(e)
return true
})
}
func (i *trackedRowIter) Dispose() {
if i.node != nil {
disposeNode(i.node)
}
}
func (i *trackedRowIter) Next(ctx *sql.Context) (sql.Row, error) {
row, err := i.iter.Next(ctx)
if err != nil {
return nil, err
}
i.numRows++
if i.onNext != nil {
i.onNext()
}
return row, nil
}
func (i *trackedRowIter) Close(ctx *sql.Context) error {
err := i.iter.Close(ctx)
i.updateSessionVars(ctx)
i.done()
return err
}
func (i *trackedRowIter) updateSessionVars(ctx *sql.Context) {
switch i.QueryType {
case QueryTypeSelect:
ctx.SetLastQueryInfo(sql.RowCount, -1)
case QueryTypeDdl:
ctx.SetLastQueryInfo(sql.RowCount, 0)
case QueryTypeUpdate:
// This is handled by RowUpdateAccumulator
default:
panic(fmt.Sprintf("Unexpected query type %v", i.QueryType))
}
if i.ShouldSetFoundRows {
ctx.SetLastQueryInfo(sql.FoundRows, i.numRows)
}
}
type trackedPartitionIndexKeyValueIter struct {
sql.PartitionIndexKeyValueIter
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
func (i *trackedPartitionIndexKeyValueIter) Next(ctx *sql.Context) (sql.Partition, sql.IndexKeyValueIter, error) {
p, iter, err := i.PartitionIndexKeyValueIter.Next(ctx)
if err != nil {
return nil, nil, err
}
partitionName := partitionName(p)
if i.OnPartitionStart != nil {
i.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if i.OnPartitionDone != nil {
onDone = func() {
i.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if i.OnRowNext != nil {
onNext = func() {
i.OnRowNext(partitionName)
}
}
return p, &trackedIndexKeyValueIter{iter, onDone, onNext}, nil
}
type trackedIndexKeyValueIter struct {
iter sql.IndexKeyValueIter
onDone NotifyFunc
onNext NotifyFunc
}
func (i *trackedIndexKeyValueIter) done() {
if i.onDone != nil {
i.onDone()
i.onDone = nil
}
}
func (i *trackedIndexKeyValueIter) Close(ctx *sql.Context) (err error) {
if i.iter != nil {
err = i.iter.Close(ctx)
}
i.done()
return err
}
func (i *trackedIndexKeyValueIter) Next(ctx *sql.Context) ([]interface{}, []byte, error) {
v, k, err := i.iter.Next(ctx)
if err != nil {
return nil, nil, err
}
if i.onNext != nil {
i.onNext()
}
return v, k, nil
}
func partitionName(p sql.Partition) string {
if n, ok := p.(sql.Nameable); ok {
return n.Name()
}
return string(p.Key())
}
func IsDDLNode(node sql.Node) bool {
switch node.(type) {
case *CreateTable, *DropTable, *Truncate,
*AddColumn, *ModifyColumn, *DropColumn,
*CreateDB, *DropDB, *AlterDB,
*RenameTable, *RenameColumn,
*CreateView, *DropView,
*CreateIndex, *AlterIndex, *DropIndex,
*CreateProcedure, *DropProcedure,
*CreateEvent, *DropEvent,
*CreateForeignKey, *DropForeignKey,
*CreateCheck, *DropCheck,
*CreateTrigger, *DropTrigger, *AlterPK,
*Block: // Block as a top level node wraps a set of ALTER TABLE statements
return true
default:
return false
}
}
func IsShowNode(node sql.Node) bool {
switch node.(type) {
case *ShowTables, *ShowCreateTable,
*ShowTriggers, *ShowCreateTrigger,
*ShowDatabases, *ShowCreateDatabase,
*ShowColumns, *ShowIndexes,
*ShowProcessList, *ShowTableStatus,
*ShowVariables, ShowWarnings,
*ShowEvents, *ShowCreateEvent:
return true
default:
return false
}
}
// IsNoRowNode returns whether this are node interacts only with schema and the catalog, not with any table
// rows.
func IsNoRowNode(node sql.Node) bool {
return IsDDLNode(node) || IsShowNode(node)
}
func IsReadOnly(node sql.Node) bool {
return node.IsReadOnly()
}
| {
return p.Child().CheckPrivileges(ctx, opChecker)
} | identifier_body |
process.go | // Copyright 2020-2021 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plan
import (
"fmt"
"github.com/dolthub/go-mysql-server/sql/transform"
"github.com/dolthub/go-mysql-server/sql"
)
// QueryProcess represents a running query process node. It will use a callback
// to notify when it has finished running.
// TODO: QueryProcess -> trackedRowIter is required to dispose certain iter caches.
// Make a proper scheduler interface to perform lifecycle management, caching, and
// scan attaching
type QueryProcess struct {
UnaryNode
Notify NotifyFunc
}
var _ sql.Node = (*QueryProcess)(nil)
var _ sql.CollationCoercible = (*QueryProcess)(nil)
// NotifyFunc is a function to notify about some event.
type NotifyFunc func()
// NewQueryProcess creates a new QueryProcess node.
func NewQueryProcess(node sql.Node, notify NotifyFunc) *QueryProcess {
return &QueryProcess{UnaryNode{Child: node}, notify}
}
func (p *QueryProcess) Child() sql.Node {
return p.UnaryNode.Child
}
func (p *QueryProcess) IsReadOnly() bool {
return p.Child().IsReadOnly()
}
// WithChildren implements the Node interface.
func (p *QueryProcess) WithChildren(children ...sql.Node) (sql.Node, error) {
if len(children) != 1 {
return nil, sql.ErrInvalidChildrenNumber.New(p, len(children), 1)
}
return NewQueryProcess(children[0], p.Notify), nil
}
// CheckPrivileges implements the interface sql.Node.
func (p *QueryProcess) CheckPrivileges(ctx *sql.Context, opChecker sql.PrivilegedOperationChecker) bool {
return p.Child().CheckPrivileges(ctx, opChecker)
}
// CollationCoercibility implements the interface sql.CollationCoercible.
func (p *QueryProcess) CollationCoercibility(ctx *sql.Context) (collation sql.CollationID, coercibility byte) {
return sql.GetCoercibility(ctx, p.Child())
}
func (p *QueryProcess) String() string { return p.Child().String() }
func (p *QueryProcess) DebugString() string {
tp := sql.NewTreePrinter()
_ = tp.WriteNode("QueryProcess")
_ = tp.WriteChildren(sql.DebugString(p.Child()))
return tp.String()
}
// ShouldSetFoundRows returns whether the query process should set the FOUND_ROWS query variable. It should do this for
// any select except a Limit with a SQL_CALC_FOUND_ROWS modifier, which is handled in the Limit node itself.
func (p *QueryProcess) ShouldSetFoundRows() bool {
var fromLimit *bool
var fromTopN *bool
transform.Inspect(p.Child(), func(n sql.Node) bool {
switch n := n.(type) {
case *StartTransaction:
return true
case *Limit:
fromLimit = &n.CalcFoundRows
return true
case *TopN:
fromTopN = &n.CalcFoundRows
return true
default:
return true
}
})
if fromLimit == nil && fromTopN == nil {
return true
}
if fromTopN != nil {
return !*fromTopN
}
return !*fromLimit
}
// ProcessIndexableTable is a wrapper for sql.Tables inside a query process
// that support indexing.
// It notifies the process manager about the status of a query when a
// partition is processed.
type ProcessIndexableTable struct {
sql.DriverIndexableTable
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
func (t *ProcessIndexableTable) DebugString() string {
tp := sql.NewTreePrinter()
_ = tp.WriteNode("ProcessIndexableTable")
_ = tp.WriteChildren(sql.DebugString(t.Underlying()))
return tp.String()
}
// NewProcessIndexableTable returns a new ProcessIndexableTable.
func NewProcessIndexableTable(t sql.DriverIndexableTable, onPartitionDone, onPartitionStart, OnRowNext NamedNotifyFunc) *ProcessIndexableTable {
return &ProcessIndexableTable{t, onPartitionDone, onPartitionStart, OnRowNext}
}
// Underlying implements sql.TableWrapper interface.
func (t *ProcessIndexableTable) Underlying() sql.Table {
return t.DriverIndexableTable
}
// IndexKeyValues implements the sql.IndexableTable interface.
func (t *ProcessIndexableTable) IndexKeyValues(
ctx *sql.Context,
columns []string,
) (sql.PartitionIndexKeyValueIter, error) {
iter, err := t.DriverIndexableTable.IndexKeyValues(ctx, columns)
if err != nil {
return nil, err
}
return &trackedPartitionIndexKeyValueIter{iter, t.OnPartitionDone, t.OnPartitionStart, t.OnRowNext}, nil
}
// PartitionRows implements the sql.Table interface.
func (t *ProcessIndexableTable) PartitionRows(ctx *sql.Context, p sql.Partition) (sql.RowIter, error) {
iter, err := t.DriverIndexableTable.PartitionRows(ctx, p)
if err != nil {
return nil, err
}
return t.newPartIter(p, iter)
}
func (t *ProcessIndexableTable) newPartIter(p sql.Partition, iter sql.RowIter) (sql.RowIter, error) {
partitionName := partitionName(p)
if t.OnPartitionStart != nil {
t.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if t.OnPartitionDone != nil {
onDone = func() {
t.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if t.OnRowNext != nil {
onNext = func() {
t.OnRowNext(partitionName)
}
}
return NewTrackedRowIter(nil, iter, onNext, onDone), nil
}
var _ sql.DriverIndexableTable = (*ProcessIndexableTable)(nil)
// NamedNotifyFunc is a function to notify about some event with a string argument.
type NamedNotifyFunc func(name string)
// ProcessTable is a wrapper for sql.Tables inside a query process. It
// notifies the process manager about the status of a query when a partition
// is processed.
type ProcessTable struct {
sql.Table
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
// NewProcessTable returns a new ProcessTable.
func NewProcessTable(t sql.Table, onPartitionDone, onPartitionStart, OnRowNext NamedNotifyFunc) *ProcessTable {
return &ProcessTable{t, onPartitionDone, onPartitionStart, OnRowNext}
}
// Underlying implements sql.TableWrapper interface.
func (t *ProcessTable) Underlying() sql.Table {
return t.Table
}
// PartitionRows implements the sql.Table interface.
func (t *ProcessTable) PartitionRows(ctx *sql.Context, p sql.Partition) (sql.RowIter, error) {
iter, err := t.Table.PartitionRows(ctx, p)
if err != nil {
return nil, err
}
onDone, onNext := t.notifyFuncsForPartition(p)
return NewTrackedRowIter(nil, iter, onNext, onDone), nil
}
// notifyFuncsForPartition returns the OnDone and OnNext NotifyFuncs for the partition given
func (t *ProcessTable) notifyFuncsForPartition(p sql.Partition) (NotifyFunc, NotifyFunc) {
partitionName := partitionName(p)
if t.OnPartitionStart != nil {
t.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if t.OnPartitionDone != nil {
onDone = func() {
t.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if t.OnRowNext != nil {
onNext = func() {
t.OnRowNext(partitionName)
}
}
return onDone, onNext
}
func GetQueryType(child sql.Node) queryType {
// TODO: behavior of CALL is not specified in the docs. Needs investigation
var queryType queryType = QueryTypeSelect
transform.Inspect(child, func(node sql.Node) bool {
if IsNoRowNode(node) {
queryType = QueryTypeDdl
return false
}
switch node.(type) {
case *Signal:
queryType = QueryTypeDdl
return false
case nil:
return false
case *TriggerExecutor, *InsertInto, *Update, *DeleteFrom, *LoadData:
// TODO: AlterTable belongs here too, but we don't keep track of updated rows there so we can't return an
// accurate ROW_COUNT() anyway.
queryType = QueryTypeUpdate
return false
}
return true
})
return queryType
}
type queryType byte
const (
QueryTypeSelect = iota
QueryTypeDdl
QueryTypeUpdate
)
type trackedRowIter struct {
node sql.Node
iter sql.RowIter
numRows int64
QueryType queryType
ShouldSetFoundRows bool
onDone NotifyFunc
onNext NotifyFunc
}
func NewTrackedRowIter(
node sql.Node,
iter sql.RowIter,
onNext NotifyFunc,
onDone NotifyFunc,
) *trackedRowIter {
return &trackedRowIter{node: node, iter: iter, onDone: onDone, onNext: onNext}
}
func (i *trackedRowIter) done() {
if i.onDone != nil |
if i.node != nil {
i.Dispose()
i.node = nil
}
}
func disposeNode(n sql.Node) {
transform.Inspect(n, func(node sql.Node) bool {
sql.Dispose(node)
return true
})
transform.InspectExpressions(n, func(e sql.Expression) bool {
sql.Dispose(e)
return true
})
}
func (i *trackedRowIter) Dispose() {
if i.node != nil {
disposeNode(i.node)
}
}
func (i *trackedRowIter) Next(ctx *sql.Context) (sql.Row, error) {
row, err := i.iter.Next(ctx)
if err != nil {
return nil, err
}
i.numRows++
if i.onNext != nil {
i.onNext()
}
return row, nil
}
func (i *trackedRowIter) Close(ctx *sql.Context) error {
err := i.iter.Close(ctx)
i.updateSessionVars(ctx)
i.done()
return err
}
func (i *trackedRowIter) updateSessionVars(ctx *sql.Context) {
switch i.QueryType {
case QueryTypeSelect:
ctx.SetLastQueryInfo(sql.RowCount, -1)
case QueryTypeDdl:
ctx.SetLastQueryInfo(sql.RowCount, 0)
case QueryTypeUpdate:
// This is handled by RowUpdateAccumulator
default:
panic(fmt.Sprintf("Unexpected query type %v", i.QueryType))
}
if i.ShouldSetFoundRows {
ctx.SetLastQueryInfo(sql.FoundRows, i.numRows)
}
}
type trackedPartitionIndexKeyValueIter struct {
sql.PartitionIndexKeyValueIter
OnPartitionDone NamedNotifyFunc
OnPartitionStart NamedNotifyFunc
OnRowNext NamedNotifyFunc
}
func (i *trackedPartitionIndexKeyValueIter) Next(ctx *sql.Context) (sql.Partition, sql.IndexKeyValueIter, error) {
p, iter, err := i.PartitionIndexKeyValueIter.Next(ctx)
if err != nil {
return nil, nil, err
}
partitionName := partitionName(p)
if i.OnPartitionStart != nil {
i.OnPartitionStart(partitionName)
}
var onDone NotifyFunc
if i.OnPartitionDone != nil {
onDone = func() {
i.OnPartitionDone(partitionName)
}
}
var onNext NotifyFunc
if i.OnRowNext != nil {
onNext = func() {
i.OnRowNext(partitionName)
}
}
return p, &trackedIndexKeyValueIter{iter, onDone, onNext}, nil
}
type trackedIndexKeyValueIter struct {
iter sql.IndexKeyValueIter
onDone NotifyFunc
onNext NotifyFunc
}
func (i *trackedIndexKeyValueIter) done() {
if i.onDone != nil {
i.onDone()
i.onDone = nil
}
}
func (i *trackedIndexKeyValueIter) Close(ctx *sql.Context) (err error) {
if i.iter != nil {
err = i.iter.Close(ctx)
}
i.done()
return err
}
func (i *trackedIndexKeyValueIter) Next(ctx *sql.Context) ([]interface{}, []byte, error) {
v, k, err := i.iter.Next(ctx)
if err != nil {
return nil, nil, err
}
if i.onNext != nil {
i.onNext()
}
return v, k, nil
}
func partitionName(p sql.Partition) string {
if n, ok := p.(sql.Nameable); ok {
return n.Name()
}
return string(p.Key())
}
func IsDDLNode(node sql.Node) bool {
switch node.(type) {
case *CreateTable, *DropTable, *Truncate,
*AddColumn, *ModifyColumn, *DropColumn,
*CreateDB, *DropDB, *AlterDB,
*RenameTable, *RenameColumn,
*CreateView, *DropView,
*CreateIndex, *AlterIndex, *DropIndex,
*CreateProcedure, *DropProcedure,
*CreateEvent, *DropEvent,
*CreateForeignKey, *DropForeignKey,
*CreateCheck, *DropCheck,
*CreateTrigger, *DropTrigger, *AlterPK,
*Block: // Block as a top level node wraps a set of ALTER TABLE statements
return true
default:
return false
}
}
func IsShowNode(node sql.Node) bool {
switch node.(type) {
case *ShowTables, *ShowCreateTable,
*ShowTriggers, *ShowCreateTrigger,
*ShowDatabases, *ShowCreateDatabase,
*ShowColumns, *ShowIndexes,
*ShowProcessList, *ShowTableStatus,
*ShowVariables, ShowWarnings,
*ShowEvents, *ShowCreateEvent:
return true
default:
return false
}
}
// IsNoRowNode returns whether this are node interacts only with schema and the catalog, not with any table
// rows.
func IsNoRowNode(node sql.Node) bool {
return IsDDLNode(node) || IsShowNode(node)
}
func IsReadOnly(node sql.Node) bool {
return node.IsReadOnly()
}
| {
i.onDone()
i.onDone = nil
} | conditional_block |
peer.go | package fluidbackup
import "sync"
import "fmt"
import "io/ioutil"
import "time"
import "math/rand"
import "strings"
import "os"
const (
STATUS_ONLINE = 0
STATUS_OFFLINE = 1
)
type PeerId struct {
Address string
Port int
}
func (this *PeerId) String() string |
func strToPeerId(str string) PeerId {
parts := strings.Split(str, ":")
return PeerId{
Address: parts[0],
Port: strToInt(parts[1]),
}
}
/*
* Represents another peer, storing information about
* the other peer as necessary, and handling requests/actions
* involving that other peer (storeShard, etc.)
*
* Note: Does not represent the local peer. The local peer
* is perhaps best represented by a combination of Protocol,
* and PeerList, and FileStorage, which comprise a system.
*/
type Peer struct {
mu sync.Mutex
protocol *Protocol
fluidBackup *FluidBackup
peerList *PeerList
id PeerId
status int
localBytes int // how many bytes we've agreed to store for this peer
remoteBytes int // how many bytes peer is willing to store for us
// cached values
localUsedBytes int
remoteUsedBytes int
lastVerifyTime time.Time // last time we performed a shard storage verification
lastVerifySuccessTime time.Time
verifyFailCount int // count of successive verification failures
// set of shards that we have accounted for in the cached remoteUsedBytes
// false if not replicated yet, true otherwise
shardsAccounted map[BlockShardId]bool
}
func MakePeer(id PeerId, fluidBackup *FluidBackup, protocol *Protocol, peerList *PeerList) *Peer {
this := new(Peer)
this.fluidBackup = fluidBackup
this.protocol = protocol
this.id = id
this.status = STATUS_ONLINE
// save peerList for operations on our local peer in
// response to simulations
this.peerList = peerList
this.shardsAccounted = make(map[BlockShardId]bool)
this.accountLocalUsedBytes()
go func() {
/* keep updating until eternity */
for !fluidBackup.Stopping() {
this.update()
if Debug {
time.Sleep(time.Duration(rand.Intn(3000))*time.Millisecond + 3*time.Second)
} else {
time.Sleep(time.Duration(rand.Intn(60000))*time.Millisecond + 30*time.Second)
}
}
}()
return this
}
/*
* Our local peer wants to propose agreement
* with the represented remote peer.
* currently ONE Agreement per shard (TODO: change?)
*/
func (this *Peer) proposeAgreement(localBytes int, remoteBytes int) bool {
if this.protocol.proposeAgreement(this.id, localBytes, remoteBytes) {
this.eventAgreement(localBytes, remoteBytes)
return true
}
return false
}
func (this *Peer) eventAgreement(localBytes int, remoteBytes int) {
this.mu.Lock()
this.localBytes += localBytes
this.remoteBytes += remoteBytes
Log.Debug.Printf("New agreement with %s (%d to %d; total %d/%d to %d/%d)", this.id.String(), localBytes, remoteBytes, this.localUsedBytes, this.localBytes, this.remoteUsedBytes, this.remoteBytes)
this.mu.Unlock()
}
/*
* Recomputes the number of bytes we are storing for this peer by searching filesystem.
* Assumes caller has the lock.
*/
func (this *Peer) accountLocalUsedBytes() {
oldUsedBytes := this.localUsedBytes
this.localUsedBytes = 0
files, _ := ioutil.ReadDir("store/")
for _, f := range files {
if strings.HasSuffix(f.Name(), ".obj") && strings.HasSuffix(f.Name(), this.id.String()+"_") {
fi, err := os.Stat("store/" + f.Name())
if err == nil {
this.localUsedBytes += int(fi.Size())
}
}
}
Log.Debug.Printf("Re-accounted stored bytes from %d to %d", oldUsedBytes, this.localUsedBytes)
}
/*
* Replicates a shard that the local peer wants to store on this peer.
*/
func (this *Peer) storeShard(shard *BlockShard) bool {
this.mu.Lock()
defer this.mu.Unlock()
_, ok := this.shardsAccounted[shard.Id]
if !ok {
// this is bad, blockstore is trying to store a shard that hasn't been reserved yet?
Log.Error.Printf("Peer handler %s received unaccounted shard %d!", this.id.String(), shard.Id)
return false
}
this.shardsAccounted[shard.Id] = true
result := this.protocol.storeShard(this.id, int64(shard.Id), shard.Contents)
if result == 0 {
return true
} else if result == -2 {
// peer actively refused the shard!
// probably the agreement is not synchronized or peer terminated agreement
go this.terminateAgreement()
}
return false
}
func (this *Peer) deleteShard(shardId BlockShardId) bool {
this.mu.Lock()
defer this.mu.Unlock()
replicated, ok := this.shardsAccounted[shardId]
if !ok || !replicated {
// this is bad, blockstore is trying to delete a shard that hasn't been replicated yet?
Log.Error.Printf("Peer handler %s received deletion request for unaccounted shard %d!", this.id.String(), shardId)
return false
}
shard := this.fluidBackup.blockStore.GetShard(shardId)
if shard == nil {
Log.Error.Printf("Peer handler %s received deletion request for non-existent shard %d!", this.id.String(), shardId)
return false
}
delete(this.shardsAccounted, shardId)
this.protocol.deleteShard(this.id, int64(shardId))
this.remoteUsedBytes -= shard.Length
return true
}
func (this *Peer) retrieveShard(shardId BlockShardId) ([]byte, bool) {
return this.protocol.retrieveShard(this.id, int64(shardId))
}
/*
* Attempts to reserve a number of bytes for storage on this peer.
* Returns true if the bytes have been reserved for use by caller, or false if reservation failed.
*
* Note that this is also used on startup to register reservations that were made earlier.
*/
func (this *Peer) reserveBytes(bytes int, shardId BlockShardId, alreadyReplicated bool) bool {
this.mu.Lock()
defer this.mu.Unlock()
_, ok := this.shardsAccounted[shardId]
if ok {
// this should never happen: we should only make a reservation once
// to try and handle this, we (asynchronously) notify remote end that this shard
// should be removed from their storage; we fail the reservation until this deletion
// is completed
// it is possible but even more unlikely that this is triggered on startup when we
// are registering past reservations; in that case this is still handled correctly
// since the caller will delete the reservation detail and re-replicate
go this.deleteShard(shardId)
return false
}
if this.remoteBytes-this.remoteUsedBytes >= bytes {
this.remoteUsedBytes += bytes
this.shardsAccounted[shardId] = alreadyReplicated
return true
} else {
return false
}
}
func (this *Peer) getShardPath(label int64) string {
// todo: make the store directory automatically
return fmt.Sprintf("store/%s_%d.obj", this.id.String(), label)
}
/*
* called on a representation
* to say that the peer it represents is trying to
* store data on our peer
*/
func (this *Peer) eventStoreShard(label int64, bytes []byte) bool {
this.mu.Lock()
defer this.mu.Unlock()
// confirm the peer still has space on our storage to reserve
if this.localBytes < this.localUsedBytes+len(bytes) {
return false
}
// okay, store it in the file and update cached usage
err := ioutil.WriteFile(this.getShardPath(label), bytes, 0644)
if err != nil {
Log.Warn.Printf("Failed to write peer shard (%s #%d): %s", this.id.String(), label, err.Error())
return false
}
Log.Debug.Printf("Stored shard %d for peer %s (len=%d)", label, this.id.String(), len(bytes))
this.remoteUsedBytes += len(bytes)
return true
}
func (this *Peer) eventDeleteShard(label int64) {
this.mu.Lock()
defer this.mu.Unlock()
os.Remove(this.getShardPath(label))
this.accountLocalUsedBytes()
}
func (this *Peer) eventRetrieveShard(label int64) []byte {
this.mu.Lock()
defer this.mu.Unlock()
shardBytes, err := ioutil.ReadFile(this.getShardPath(label))
if err != nil {
Log.Warn.Printf("Failed to handle shard retrieval request (%s #%d): %s", this.id.String(), label, err.Error())
return nil
} else {
return shardBytes
}
}
func (this *Peer) isOnline() bool {
this.mu.Lock()
defer this.mu.Unlock()
return this.status == STATUS_ONLINE
}
/*
* Call as often as reasonable.
* Syncs this peer representation with the actual remote
* peer.
*/
func (this *Peer) update() {
// ping the peer
// we do this outside the lock to avoid communication latency in the lock
online := this.protocol.ping(this.id)
this.mu.Lock()
if online && this.status == STATUS_OFFLINE {
Log.Info.Printf("Peer %s came online", this.id.String())
this.status = STATUS_ONLINE
} else if !online && this.status == STATUS_ONLINE {
Log.Info.Printf("Peer %s went offline", this.id.String())
this.status = STATUS_OFFLINE
}
verificationDelay := 300 * time.Second
if Debug {
verificationDelay = time.Second
}
var randomShard *BlockShardId
if time.Now().After(this.lastVerifyTime.Add(verificationDelay)) {
this.lastVerifyTime = time.Now()
// pick a random shard to verify
availableShards := make([]BlockShardId, 0)
for shardId, available := range this.shardsAccounted {
if available {
availableShards = append(availableShards, shardId)
}
}
if len(availableShards) > 0 {
randomShard = &availableShards[rand.Intn(len(availableShards))]
}
}
this.mu.Unlock()
if randomShard != nil {
Log.Debug.Printf("Verifying shard %d on peer %s", *randomShard, this.id.String())
bytes, success := this.retrieveShard(*randomShard)
if !success || !this.fluidBackup.blockStore.VerifyShard(this, *randomShard, bytes) {
// either we failed to communicate with the peer (if !success),
// or the peer sent us corrupted shard data (if success)
failReason := "invalid shard data"
if !success {
failReason = "peer communication failed"
}
Log.Info.Printf("Failed verification of shard %d on peer %s: %s", *randomShard, this.id.String(), failReason)
this.mu.Lock()
if success {
// shard is invalid, delete from remote end
// block store will re-assign it already from the verifyshard call, so don't need to do anything else about that
delete(this.shardsAccounted, *randomShard)
go this.deleteShard(*randomShard)
}
// we also check if this peer is failed per our one-day policy, in which case we would want to clear all accounted shards
this.verifyFailCount++
if this.verifyFailCount > 5 && time.Now().After(this.lastVerifySuccessTime.Add(time.Second * VERIFY_FAIL_WAIT_INTERVAL)) {
go this.terminateAgreement()
}
this.mu.Unlock()
// Decrease trust
this.peerList.UpdateTrustPostVerification(this.id, false)
} else {
this.peerList.UpdateTrustPostVerification(this.id, true)
this.mu.Lock()
this.verifyFailCount = 0
this.lastVerifySuccessTime = time.Now()
this.mu.Unlock()
}
}
}
func (this *Peer) terminateAgreement() {
this.mu.Lock()
Log.Info.Printf("Terminating agreement with peer %s", this.id.String())
this.peerList.UpdateTrustPostVerification(this.id, false)
for shardId := range this.shardsAccounted {
this.fluidBackup.blockStore.VerifyShard(this, shardId, nil)
delete(this.shardsAccounted, shardId)
}
this.localBytes = 0
this.localUsedBytes = 0
this.remoteBytes = 0
this.remoteUsedBytes = 0
// clear this peer's files from filesystem
files, _ := ioutil.ReadDir("store/")
for _, f := range files {
if strings.HasSuffix(f.Name(), ".obj") && strings.HasSuffix(f.Name(), this.id.String()+"_") {
os.Remove("store/" + f.Name())
}
}
this.mu.Unlock()
}
/* ============== *
* Peer Discovery *
* ============== */
// Ask this remote peer for the
// specified number of peers
func (this *Peer) askForPeers(num int) []PeerId {
sharedPeerIds := this.protocol.askForPeers(this.id, num)
// Insert peer handler here if necesssay.
return sharedPeerIds
}
| {
return fmt.Sprintf("%s:%d", this.Address, this.Port)
} | identifier_body |
peer.go | package fluidbackup
import "sync"
import "fmt"
import "io/ioutil"
import "time"
import "math/rand"
import "strings"
import "os"
const (
STATUS_ONLINE = 0
STATUS_OFFLINE = 1
)
type PeerId struct {
Address string
Port int
}
func (this *PeerId) String() string {
return fmt.Sprintf("%s:%d", this.Address, this.Port)
}
func strToPeerId(str string) PeerId {
parts := strings.Split(str, ":")
return PeerId{
Address: parts[0],
Port: strToInt(parts[1]),
}
}
/*
* Represents another peer, storing information about
* the other peer as necessary, and handling requests/actions
* involving that other peer (storeShard, etc.)
*
* Note: Does not represent the local peer. The local peer
* is perhaps best represented by a combination of Protocol,
* and PeerList, and FileStorage, which comprise a system.
*/
type Peer struct {
mu sync.Mutex
protocol *Protocol
fluidBackup *FluidBackup
peerList *PeerList
id PeerId
status int
localBytes int // how many bytes we've agreed to store for this peer
remoteBytes int // how many bytes peer is willing to store for us
// cached values
localUsedBytes int
remoteUsedBytes int
lastVerifyTime time.Time // last time we performed a shard storage verification
lastVerifySuccessTime time.Time
verifyFailCount int // count of successive verification failures
// set of shards that we have accounted for in the cached remoteUsedBytes
// false if not replicated yet, true otherwise
shardsAccounted map[BlockShardId]bool
}
func MakePeer(id PeerId, fluidBackup *FluidBackup, protocol *Protocol, peerList *PeerList) *Peer {
this := new(Peer)
this.fluidBackup = fluidBackup
this.protocol = protocol
this.id = id
this.status = STATUS_ONLINE
// save peerList for operations on our local peer in
// response to simulations
this.peerList = peerList
this.shardsAccounted = make(map[BlockShardId]bool)
this.accountLocalUsedBytes()
go func() {
/* keep updating until eternity */
for !fluidBackup.Stopping() {
this.update()
if Debug {
time.Sleep(time.Duration(rand.Intn(3000))*time.Millisecond + 3*time.Second)
} else {
time.Sleep(time.Duration(rand.Intn(60000))*time.Millisecond + 30*time.Second)
}
}
}()
return this
}
/*
* Our local peer wants to propose agreement
* with the represented remote peer.
* currently ONE Agreement per shard (TODO: change?)
*/
func (this *Peer) proposeAgreement(localBytes int, remoteBytes int) bool {
if this.protocol.proposeAgreement(this.id, localBytes, remoteBytes) {
this.eventAgreement(localBytes, remoteBytes)
return true
}
return false
}
func (this *Peer) eventAgreement(localBytes int, remoteBytes int) {
this.mu.Lock()
this.localBytes += localBytes
this.remoteBytes += remoteBytes
Log.Debug.Printf("New agreement with %s (%d to %d; total %d/%d to %d/%d)", this.id.String(), localBytes, remoteBytes, this.localUsedBytes, this.localBytes, this.remoteUsedBytes, this.remoteBytes)
this.mu.Unlock()
}
/*
* Recomputes the number of bytes we are storing for this peer by searching filesystem.
* Assumes caller has the lock.
*/
func (this *Peer) accountLocalUsedBytes() {
oldUsedBytes := this.localUsedBytes
this.localUsedBytes = 0
files, _ := ioutil.ReadDir("store/")
for _, f := range files {
if strings.HasSuffix(f.Name(), ".obj") && strings.HasSuffix(f.Name(), this.id.String()+"_") {
fi, err := os.Stat("store/" + f.Name())
if err == nil {
this.localUsedBytes += int(fi.Size())
}
}
}
Log.Debug.Printf("Re-accounted stored bytes from %d to %d", oldUsedBytes, this.localUsedBytes)
}
/*
* Replicates a shard that the local peer wants to store on this peer.
*/
func (this *Peer) storeShard(shard *BlockShard) bool {
this.mu.Lock()
defer this.mu.Unlock()
_, ok := this.shardsAccounted[shard.Id]
if !ok {
// this is bad, blockstore is trying to store a shard that hasn't been reserved yet?
Log.Error.Printf("Peer handler %s received unaccounted shard %d!", this.id.String(), shard.Id)
return false
}
this.shardsAccounted[shard.Id] = true
result := this.protocol.storeShard(this.id, int64(shard.Id), shard.Contents)
if result == 0 {
return true
} else if result == -2 {
// peer actively refused the shard!
// probably the agreement is not synchronized or peer terminated agreement
go this.terminateAgreement()
}
return false
}
func (this *Peer) deleteShard(shardId BlockShardId) bool {
this.mu.Lock()
defer this.mu.Unlock()
replicated, ok := this.shardsAccounted[shardId]
if !ok || !replicated {
// this is bad, blockstore is trying to delete a shard that hasn't been replicated yet?
Log.Error.Printf("Peer handler %s received deletion request for unaccounted shard %d!", this.id.String(), shardId)
return false
}
shard := this.fluidBackup.blockStore.GetShard(shardId)
if shard == nil {
Log.Error.Printf("Peer handler %s received deletion request for non-existent shard %d!", this.id.String(), shardId)
return false
}
delete(this.shardsAccounted, shardId)
this.protocol.deleteShard(this.id, int64(shardId))
this.remoteUsedBytes -= shard.Length
return true
}
func (this *Peer) retrieveShard(shardId BlockShardId) ([]byte, bool) {
return this.protocol.retrieveShard(this.id, int64(shardId))
}
/*
* Attempts to reserve a number of bytes for storage on this peer.
* Returns true if the bytes have been reserved for use by caller, or false if reservation failed.
*
* Note that this is also used on startup to register reservations that were made earlier.
*/
func (this *Peer) reserveBytes(bytes int, shardId BlockShardId, alreadyReplicated bool) bool {
this.mu.Lock()
defer this.mu.Unlock()
_, ok := this.shardsAccounted[shardId]
if ok {
// this should never happen: we should only make a reservation once
// to try and handle this, we (asynchronously) notify remote end that this shard
// should be removed from their storage; we fail the reservation until this deletion
// is completed
// it is possible but even more unlikely that this is triggered on startup when we
// are registering past reservations; in that case this is still handled correctly
// since the caller will delete the reservation detail and re-replicate
go this.deleteShard(shardId)
return false
}
if this.remoteBytes-this.remoteUsedBytes >= bytes {
this.remoteUsedBytes += bytes
this.shardsAccounted[shardId] = alreadyReplicated
return true
} else { | }
}
func (this *Peer) getShardPath(label int64) string {
// todo: make the store directory automatically
return fmt.Sprintf("store/%s_%d.obj", this.id.String(), label)
}
/*
* called on a representation
* to say that the peer it represents is trying to
* store data on our peer
*/
func (this *Peer) eventStoreShard(label int64, bytes []byte) bool {
this.mu.Lock()
defer this.mu.Unlock()
// confirm the peer still has space on our storage to reserve
if this.localBytes < this.localUsedBytes+len(bytes) {
return false
}
// okay, store it in the file and update cached usage
err := ioutil.WriteFile(this.getShardPath(label), bytes, 0644)
if err != nil {
Log.Warn.Printf("Failed to write peer shard (%s #%d): %s", this.id.String(), label, err.Error())
return false
}
Log.Debug.Printf("Stored shard %d for peer %s (len=%d)", label, this.id.String(), len(bytes))
this.remoteUsedBytes += len(bytes)
return true
}
func (this *Peer) eventDeleteShard(label int64) {
this.mu.Lock()
defer this.mu.Unlock()
os.Remove(this.getShardPath(label))
this.accountLocalUsedBytes()
}
func (this *Peer) eventRetrieveShard(label int64) []byte {
this.mu.Lock()
defer this.mu.Unlock()
shardBytes, err := ioutil.ReadFile(this.getShardPath(label))
if err != nil {
Log.Warn.Printf("Failed to handle shard retrieval request (%s #%d): %s", this.id.String(), label, err.Error())
return nil
} else {
return shardBytes
}
}
func (this *Peer) isOnline() bool {
this.mu.Lock()
defer this.mu.Unlock()
return this.status == STATUS_ONLINE
}
/*
* Call as often as reasonable.
* Syncs this peer representation with the actual remote
* peer.
*/
func (this *Peer) update() {
// ping the peer
// we do this outside the lock to avoid communication latency in the lock
online := this.protocol.ping(this.id)
this.mu.Lock()
if online && this.status == STATUS_OFFLINE {
Log.Info.Printf("Peer %s came online", this.id.String())
this.status = STATUS_ONLINE
} else if !online && this.status == STATUS_ONLINE {
Log.Info.Printf("Peer %s went offline", this.id.String())
this.status = STATUS_OFFLINE
}
verificationDelay := 300 * time.Second
if Debug {
verificationDelay = time.Second
}
var randomShard *BlockShardId
if time.Now().After(this.lastVerifyTime.Add(verificationDelay)) {
this.lastVerifyTime = time.Now()
// pick a random shard to verify
availableShards := make([]BlockShardId, 0)
for shardId, available := range this.shardsAccounted {
if available {
availableShards = append(availableShards, shardId)
}
}
if len(availableShards) > 0 {
randomShard = &availableShards[rand.Intn(len(availableShards))]
}
}
this.mu.Unlock()
if randomShard != nil {
Log.Debug.Printf("Verifying shard %d on peer %s", *randomShard, this.id.String())
bytes, success := this.retrieveShard(*randomShard)
if !success || !this.fluidBackup.blockStore.VerifyShard(this, *randomShard, bytes) {
// either we failed to communicate with the peer (if !success),
// or the peer sent us corrupted shard data (if success)
failReason := "invalid shard data"
if !success {
failReason = "peer communication failed"
}
Log.Info.Printf("Failed verification of shard %d on peer %s: %s", *randomShard, this.id.String(), failReason)
this.mu.Lock()
if success {
// shard is invalid, delete from remote end
// block store will re-assign it already from the verifyshard call, so don't need to do anything else about that
delete(this.shardsAccounted, *randomShard)
go this.deleteShard(*randomShard)
}
// we also check if this peer is failed per our one-day policy, in which case we would want to clear all accounted shards
this.verifyFailCount++
if this.verifyFailCount > 5 && time.Now().After(this.lastVerifySuccessTime.Add(time.Second * VERIFY_FAIL_WAIT_INTERVAL)) {
go this.terminateAgreement()
}
this.mu.Unlock()
// Decrease trust
this.peerList.UpdateTrustPostVerification(this.id, false)
} else {
this.peerList.UpdateTrustPostVerification(this.id, true)
this.mu.Lock()
this.verifyFailCount = 0
this.lastVerifySuccessTime = time.Now()
this.mu.Unlock()
}
}
}
func (this *Peer) terminateAgreement() {
this.mu.Lock()
Log.Info.Printf("Terminating agreement with peer %s", this.id.String())
this.peerList.UpdateTrustPostVerification(this.id, false)
for shardId := range this.shardsAccounted {
this.fluidBackup.blockStore.VerifyShard(this, shardId, nil)
delete(this.shardsAccounted, shardId)
}
this.localBytes = 0
this.localUsedBytes = 0
this.remoteBytes = 0
this.remoteUsedBytes = 0
// clear this peer's files from filesystem
files, _ := ioutil.ReadDir("store/")
for _, f := range files {
if strings.HasSuffix(f.Name(), ".obj") && strings.HasSuffix(f.Name(), this.id.String()+"_") {
os.Remove("store/" + f.Name())
}
}
this.mu.Unlock()
}
/* ============== *
* Peer Discovery *
* ============== */
// Ask this remote peer for the
// specified number of peers
func (this *Peer) askForPeers(num int) []PeerId {
sharedPeerIds := this.protocol.askForPeers(this.id, num)
// Insert peer handler here if necesssay.
return sharedPeerIds
} | return false | random_line_split |
peer.go | package fluidbackup
import "sync"
import "fmt"
import "io/ioutil"
import "time"
import "math/rand"
import "strings"
import "os"
const (
STATUS_ONLINE = 0
STATUS_OFFLINE = 1
)
type PeerId struct {
Address string
Port int
}
func (this *PeerId) String() string {
return fmt.Sprintf("%s:%d", this.Address, this.Port)
}
func strToPeerId(str string) PeerId {
parts := strings.Split(str, ":")
return PeerId{
Address: parts[0],
Port: strToInt(parts[1]),
}
}
/*
* Represents another peer, storing information about
* the other peer as necessary, and handling requests/actions
* involving that other peer (storeShard, etc.)
*
* Note: Does not represent the local peer. The local peer
* is perhaps best represented by a combination of Protocol,
* and PeerList, and FileStorage, which comprise a system.
*/
type Peer struct {
mu sync.Mutex
protocol *Protocol
fluidBackup *FluidBackup
peerList *PeerList
id PeerId
status int
localBytes int // how many bytes we've agreed to store for this peer
remoteBytes int // how many bytes peer is willing to store for us
// cached values
localUsedBytes int
remoteUsedBytes int
lastVerifyTime time.Time // last time we performed a shard storage verification
lastVerifySuccessTime time.Time
verifyFailCount int // count of successive verification failures
// set of shards that we have accounted for in the cached remoteUsedBytes
// false if not replicated yet, true otherwise
shardsAccounted map[BlockShardId]bool
}
func MakePeer(id PeerId, fluidBackup *FluidBackup, protocol *Protocol, peerList *PeerList) *Peer {
this := new(Peer)
this.fluidBackup = fluidBackup
this.protocol = protocol
this.id = id
this.status = STATUS_ONLINE
// save peerList for operations on our local peer in
// response to simulations
this.peerList = peerList
this.shardsAccounted = make(map[BlockShardId]bool)
this.accountLocalUsedBytes()
go func() {
/* keep updating until eternity */
for !fluidBackup.Stopping() {
this.update()
if Debug {
time.Sleep(time.Duration(rand.Intn(3000))*time.Millisecond + 3*time.Second)
} else {
time.Sleep(time.Duration(rand.Intn(60000))*time.Millisecond + 30*time.Second)
}
}
}()
return this
}
/*
* Our local peer wants to propose agreement
* with the represented remote peer.
* currently ONE Agreement per shard (TODO: change?)
*/
func (this *Peer) proposeAgreement(localBytes int, remoteBytes int) bool {
if this.protocol.proposeAgreement(this.id, localBytes, remoteBytes) {
this.eventAgreement(localBytes, remoteBytes)
return true
}
return false
}
func (this *Peer) eventAgreement(localBytes int, remoteBytes int) {
this.mu.Lock()
this.localBytes += localBytes
this.remoteBytes += remoteBytes
Log.Debug.Printf("New agreement with %s (%d to %d; total %d/%d to %d/%d)", this.id.String(), localBytes, remoteBytes, this.localUsedBytes, this.localBytes, this.remoteUsedBytes, this.remoteBytes)
this.mu.Unlock()
}
/*
* Recomputes the number of bytes we are storing for this peer by searching filesystem.
* Assumes caller has the lock.
*/
func (this *Peer) accountLocalUsedBytes() {
oldUsedBytes := this.localUsedBytes
this.localUsedBytes = 0
files, _ := ioutil.ReadDir("store/")
for _, f := range files {
if strings.HasSuffix(f.Name(), ".obj") && strings.HasSuffix(f.Name(), this.id.String()+"_") {
fi, err := os.Stat("store/" + f.Name())
if err == nil {
this.localUsedBytes += int(fi.Size())
}
}
}
Log.Debug.Printf("Re-accounted stored bytes from %d to %d", oldUsedBytes, this.localUsedBytes)
}
/*
* Replicates a shard that the local peer wants to store on this peer.
*/
func (this *Peer) storeShard(shard *BlockShard) bool {
this.mu.Lock()
defer this.mu.Unlock()
_, ok := this.shardsAccounted[shard.Id]
if !ok |
this.shardsAccounted[shard.Id] = true
result := this.protocol.storeShard(this.id, int64(shard.Id), shard.Contents)
if result == 0 {
return true
} else if result == -2 {
// peer actively refused the shard!
// probably the agreement is not synchronized or peer terminated agreement
go this.terminateAgreement()
}
return false
}
func (this *Peer) deleteShard(shardId BlockShardId) bool {
this.mu.Lock()
defer this.mu.Unlock()
replicated, ok := this.shardsAccounted[shardId]
if !ok || !replicated {
// this is bad, blockstore is trying to delete a shard that hasn't been replicated yet?
Log.Error.Printf("Peer handler %s received deletion request for unaccounted shard %d!", this.id.String(), shardId)
return false
}
shard := this.fluidBackup.blockStore.GetShard(shardId)
if shard == nil {
Log.Error.Printf("Peer handler %s received deletion request for non-existent shard %d!", this.id.String(), shardId)
return false
}
delete(this.shardsAccounted, shardId)
this.protocol.deleteShard(this.id, int64(shardId))
this.remoteUsedBytes -= shard.Length
return true
}
func (this *Peer) retrieveShard(shardId BlockShardId) ([]byte, bool) {
return this.protocol.retrieveShard(this.id, int64(shardId))
}
/*
* Attempts to reserve a number of bytes for storage on this peer.
* Returns true if the bytes have been reserved for use by caller, or false if reservation failed.
*
* Note that this is also used on startup to register reservations that were made earlier.
*/
func (this *Peer) reserveBytes(bytes int, shardId BlockShardId, alreadyReplicated bool) bool {
this.mu.Lock()
defer this.mu.Unlock()
_, ok := this.shardsAccounted[shardId]
if ok {
// this should never happen: we should only make a reservation once
// to try and handle this, we (asynchronously) notify remote end that this shard
// should be removed from their storage; we fail the reservation until this deletion
// is completed
// it is possible but even more unlikely that this is triggered on startup when we
// are registering past reservations; in that case this is still handled correctly
// since the caller will delete the reservation detail and re-replicate
go this.deleteShard(shardId)
return false
}
if this.remoteBytes-this.remoteUsedBytes >= bytes {
this.remoteUsedBytes += bytes
this.shardsAccounted[shardId] = alreadyReplicated
return true
} else {
return false
}
}
func (this *Peer) getShardPath(label int64) string {
// todo: make the store directory automatically
return fmt.Sprintf("store/%s_%d.obj", this.id.String(), label)
}
/*
* called on a representation
* to say that the peer it represents is trying to
* store data on our peer
*/
func (this *Peer) eventStoreShard(label int64, bytes []byte) bool {
this.mu.Lock()
defer this.mu.Unlock()
// confirm the peer still has space on our storage to reserve
if this.localBytes < this.localUsedBytes+len(bytes) {
return false
}
// okay, store it in the file and update cached usage
err := ioutil.WriteFile(this.getShardPath(label), bytes, 0644)
if err != nil {
Log.Warn.Printf("Failed to write peer shard (%s #%d): %s", this.id.String(), label, err.Error())
return false
}
Log.Debug.Printf("Stored shard %d for peer %s (len=%d)", label, this.id.String(), len(bytes))
this.remoteUsedBytes += len(bytes)
return true
}
func (this *Peer) eventDeleteShard(label int64) {
this.mu.Lock()
defer this.mu.Unlock()
os.Remove(this.getShardPath(label))
this.accountLocalUsedBytes()
}
func (this *Peer) eventRetrieveShard(label int64) []byte {
this.mu.Lock()
defer this.mu.Unlock()
shardBytes, err := ioutil.ReadFile(this.getShardPath(label))
if err != nil {
Log.Warn.Printf("Failed to handle shard retrieval request (%s #%d): %s", this.id.String(), label, err.Error())
return nil
} else {
return shardBytes
}
}
func (this *Peer) isOnline() bool {
this.mu.Lock()
defer this.mu.Unlock()
return this.status == STATUS_ONLINE
}
/*
* Call as often as reasonable.
* Syncs this peer representation with the actual remote
* peer.
*/
func (this *Peer) update() {
// ping the peer
// we do this outside the lock to avoid communication latency in the lock
online := this.protocol.ping(this.id)
this.mu.Lock()
if online && this.status == STATUS_OFFLINE {
Log.Info.Printf("Peer %s came online", this.id.String())
this.status = STATUS_ONLINE
} else if !online && this.status == STATUS_ONLINE {
Log.Info.Printf("Peer %s went offline", this.id.String())
this.status = STATUS_OFFLINE
}
verificationDelay := 300 * time.Second
if Debug {
verificationDelay = time.Second
}
var randomShard *BlockShardId
if time.Now().After(this.lastVerifyTime.Add(verificationDelay)) {
this.lastVerifyTime = time.Now()
// pick a random shard to verify
availableShards := make([]BlockShardId, 0)
for shardId, available := range this.shardsAccounted {
if available {
availableShards = append(availableShards, shardId)
}
}
if len(availableShards) > 0 {
randomShard = &availableShards[rand.Intn(len(availableShards))]
}
}
this.mu.Unlock()
if randomShard != nil {
Log.Debug.Printf("Verifying shard %d on peer %s", *randomShard, this.id.String())
bytes, success := this.retrieveShard(*randomShard)
if !success || !this.fluidBackup.blockStore.VerifyShard(this, *randomShard, bytes) {
// either we failed to communicate with the peer (if !success),
// or the peer sent us corrupted shard data (if success)
failReason := "invalid shard data"
if !success {
failReason = "peer communication failed"
}
Log.Info.Printf("Failed verification of shard %d on peer %s: %s", *randomShard, this.id.String(), failReason)
this.mu.Lock()
if success {
// shard is invalid, delete from remote end
// block store will re-assign it already from the verifyshard call, so don't need to do anything else about that
delete(this.shardsAccounted, *randomShard)
go this.deleteShard(*randomShard)
}
// we also check if this peer is failed per our one-day policy, in which case we would want to clear all accounted shards
this.verifyFailCount++
if this.verifyFailCount > 5 && time.Now().After(this.lastVerifySuccessTime.Add(time.Second * VERIFY_FAIL_WAIT_INTERVAL)) {
go this.terminateAgreement()
}
this.mu.Unlock()
// Decrease trust
this.peerList.UpdateTrustPostVerification(this.id, false)
} else {
this.peerList.UpdateTrustPostVerification(this.id, true)
this.mu.Lock()
this.verifyFailCount = 0
this.lastVerifySuccessTime = time.Now()
this.mu.Unlock()
}
}
}
func (this *Peer) terminateAgreement() {
this.mu.Lock()
Log.Info.Printf("Terminating agreement with peer %s", this.id.String())
this.peerList.UpdateTrustPostVerification(this.id, false)
for shardId := range this.shardsAccounted {
this.fluidBackup.blockStore.VerifyShard(this, shardId, nil)
delete(this.shardsAccounted, shardId)
}
this.localBytes = 0
this.localUsedBytes = 0
this.remoteBytes = 0
this.remoteUsedBytes = 0
// clear this peer's files from filesystem
files, _ := ioutil.ReadDir("store/")
for _, f := range files {
if strings.HasSuffix(f.Name(), ".obj") && strings.HasSuffix(f.Name(), this.id.String()+"_") {
os.Remove("store/" + f.Name())
}
}
this.mu.Unlock()
}
/* ============== *
* Peer Discovery *
* ============== */
// Ask this remote peer for the
// specified number of peers
func (this *Peer) askForPeers(num int) []PeerId {
sharedPeerIds := this.protocol.askForPeers(this.id, num)
// Insert peer handler here if necesssay.
return sharedPeerIds
}
| {
// this is bad, blockstore is trying to store a shard that hasn't been reserved yet?
Log.Error.Printf("Peer handler %s received unaccounted shard %d!", this.id.String(), shard.Id)
return false
} | conditional_block |
peer.go | package fluidbackup
import "sync"
import "fmt"
import "io/ioutil"
import "time"
import "math/rand"
import "strings"
import "os"
const (
STATUS_ONLINE = 0
STATUS_OFFLINE = 1
)
type PeerId struct {
Address string
Port int
}
func (this *PeerId) String() string {
return fmt.Sprintf("%s:%d", this.Address, this.Port)
}
func strToPeerId(str string) PeerId {
parts := strings.Split(str, ":")
return PeerId{
Address: parts[0],
Port: strToInt(parts[1]),
}
}
/*
* Represents another peer, storing information about
* the other peer as necessary, and handling requests/actions
* involving that other peer (storeShard, etc.)
*
* Note: Does not represent the local peer. The local peer
* is perhaps best represented by a combination of Protocol,
* and PeerList, and FileStorage, which comprise a system.
*/
type Peer struct {
mu sync.Mutex
protocol *Protocol
fluidBackup *FluidBackup
peerList *PeerList
id PeerId
status int
localBytes int // how many bytes we've agreed to store for this peer
remoteBytes int // how many bytes peer is willing to store for us
// cached values
localUsedBytes int
remoteUsedBytes int
lastVerifyTime time.Time // last time we performed a shard storage verification
lastVerifySuccessTime time.Time
verifyFailCount int // count of successive verification failures
// set of shards that we have accounted for in the cached remoteUsedBytes
// false if not replicated yet, true otherwise
shardsAccounted map[BlockShardId]bool
}
func MakePeer(id PeerId, fluidBackup *FluidBackup, protocol *Protocol, peerList *PeerList) *Peer {
this := new(Peer)
this.fluidBackup = fluidBackup
this.protocol = protocol
this.id = id
this.status = STATUS_ONLINE
// save peerList for operations on our local peer in
// response to simulations
this.peerList = peerList
this.shardsAccounted = make(map[BlockShardId]bool)
this.accountLocalUsedBytes()
go func() {
/* keep updating until eternity */
for !fluidBackup.Stopping() {
this.update()
if Debug {
time.Sleep(time.Duration(rand.Intn(3000))*time.Millisecond + 3*time.Second)
} else {
time.Sleep(time.Duration(rand.Intn(60000))*time.Millisecond + 30*time.Second)
}
}
}()
return this
}
/*
* Our local peer wants to propose agreement
* with the represented remote peer.
* currently ONE Agreement per shard (TODO: change?)
*/
func (this *Peer) proposeAgreement(localBytes int, remoteBytes int) bool {
if this.protocol.proposeAgreement(this.id, localBytes, remoteBytes) {
this.eventAgreement(localBytes, remoteBytes)
return true
}
return false
}
func (this *Peer) | (localBytes int, remoteBytes int) {
this.mu.Lock()
this.localBytes += localBytes
this.remoteBytes += remoteBytes
Log.Debug.Printf("New agreement with %s (%d to %d; total %d/%d to %d/%d)", this.id.String(), localBytes, remoteBytes, this.localUsedBytes, this.localBytes, this.remoteUsedBytes, this.remoteBytes)
this.mu.Unlock()
}
/*
* Recomputes the number of bytes we are storing for this peer by searching filesystem.
* Assumes caller has the lock.
*/
func (this *Peer) accountLocalUsedBytes() {
oldUsedBytes := this.localUsedBytes
this.localUsedBytes = 0
files, _ := ioutil.ReadDir("store/")
for _, f := range files {
if strings.HasSuffix(f.Name(), ".obj") && strings.HasSuffix(f.Name(), this.id.String()+"_") {
fi, err := os.Stat("store/" + f.Name())
if err == nil {
this.localUsedBytes += int(fi.Size())
}
}
}
Log.Debug.Printf("Re-accounted stored bytes from %d to %d", oldUsedBytes, this.localUsedBytes)
}
/*
* Replicates a shard that the local peer wants to store on this peer.
*/
func (this *Peer) storeShard(shard *BlockShard) bool {
this.mu.Lock()
defer this.mu.Unlock()
_, ok := this.shardsAccounted[shard.Id]
if !ok {
// this is bad, blockstore is trying to store a shard that hasn't been reserved yet?
Log.Error.Printf("Peer handler %s received unaccounted shard %d!", this.id.String(), shard.Id)
return false
}
this.shardsAccounted[shard.Id] = true
result := this.protocol.storeShard(this.id, int64(shard.Id), shard.Contents)
if result == 0 {
return true
} else if result == -2 {
// peer actively refused the shard!
// probably the agreement is not synchronized or peer terminated agreement
go this.terminateAgreement()
}
return false
}
func (this *Peer) deleteShard(shardId BlockShardId) bool {
this.mu.Lock()
defer this.mu.Unlock()
replicated, ok := this.shardsAccounted[shardId]
if !ok || !replicated {
// this is bad, blockstore is trying to delete a shard that hasn't been replicated yet?
Log.Error.Printf("Peer handler %s received deletion request for unaccounted shard %d!", this.id.String(), shardId)
return false
}
shard := this.fluidBackup.blockStore.GetShard(shardId)
if shard == nil {
Log.Error.Printf("Peer handler %s received deletion request for non-existent shard %d!", this.id.String(), shardId)
return false
}
delete(this.shardsAccounted, shardId)
this.protocol.deleteShard(this.id, int64(shardId))
this.remoteUsedBytes -= shard.Length
return true
}
func (this *Peer) retrieveShard(shardId BlockShardId) ([]byte, bool) {
return this.protocol.retrieveShard(this.id, int64(shardId))
}
/*
* Attempts to reserve a number of bytes for storage on this peer.
* Returns true if the bytes have been reserved for use by caller, or false if reservation failed.
*
* Note that this is also used on startup to register reservations that were made earlier.
*/
func (this *Peer) reserveBytes(bytes int, shardId BlockShardId, alreadyReplicated bool) bool {
this.mu.Lock()
defer this.mu.Unlock()
_, ok := this.shardsAccounted[shardId]
if ok {
// this should never happen: we should only make a reservation once
// to try and handle this, we (asynchronously) notify remote end that this shard
// should be removed from their storage; we fail the reservation until this deletion
// is completed
// it is possible but even more unlikely that this is triggered on startup when we
// are registering past reservations; in that case this is still handled correctly
// since the caller will delete the reservation detail and re-replicate
go this.deleteShard(shardId)
return false
}
if this.remoteBytes-this.remoteUsedBytes >= bytes {
this.remoteUsedBytes += bytes
this.shardsAccounted[shardId] = alreadyReplicated
return true
} else {
return false
}
}
func (this *Peer) getShardPath(label int64) string {
// todo: make the store directory automatically
return fmt.Sprintf("store/%s_%d.obj", this.id.String(), label)
}
/*
* called on a representation
* to say that the peer it represents is trying to
* store data on our peer
*/
func (this *Peer) eventStoreShard(label int64, bytes []byte) bool {
this.mu.Lock()
defer this.mu.Unlock()
// confirm the peer still has space on our storage to reserve
if this.localBytes < this.localUsedBytes+len(bytes) {
return false
}
// okay, store it in the file and update cached usage
err := ioutil.WriteFile(this.getShardPath(label), bytes, 0644)
if err != nil {
Log.Warn.Printf("Failed to write peer shard (%s #%d): %s", this.id.String(), label, err.Error())
return false
}
Log.Debug.Printf("Stored shard %d for peer %s (len=%d)", label, this.id.String(), len(bytes))
this.remoteUsedBytes += len(bytes)
return true
}
func (this *Peer) eventDeleteShard(label int64) {
this.mu.Lock()
defer this.mu.Unlock()
os.Remove(this.getShardPath(label))
this.accountLocalUsedBytes()
}
func (this *Peer) eventRetrieveShard(label int64) []byte {
this.mu.Lock()
defer this.mu.Unlock()
shardBytes, err := ioutil.ReadFile(this.getShardPath(label))
if err != nil {
Log.Warn.Printf("Failed to handle shard retrieval request (%s #%d): %s", this.id.String(), label, err.Error())
return nil
} else {
return shardBytes
}
}
func (this *Peer) isOnline() bool {
this.mu.Lock()
defer this.mu.Unlock()
return this.status == STATUS_ONLINE
}
/*
* Call as often as reasonable.
* Syncs this peer representation with the actual remote
* peer.
*/
func (this *Peer) update() {
// ping the peer
// we do this outside the lock to avoid communication latency in the lock
online := this.protocol.ping(this.id)
this.mu.Lock()
if online && this.status == STATUS_OFFLINE {
Log.Info.Printf("Peer %s came online", this.id.String())
this.status = STATUS_ONLINE
} else if !online && this.status == STATUS_ONLINE {
Log.Info.Printf("Peer %s went offline", this.id.String())
this.status = STATUS_OFFLINE
}
verificationDelay := 300 * time.Second
if Debug {
verificationDelay = time.Second
}
var randomShard *BlockShardId
if time.Now().After(this.lastVerifyTime.Add(verificationDelay)) {
this.lastVerifyTime = time.Now()
// pick a random shard to verify
availableShards := make([]BlockShardId, 0)
for shardId, available := range this.shardsAccounted {
if available {
availableShards = append(availableShards, shardId)
}
}
if len(availableShards) > 0 {
randomShard = &availableShards[rand.Intn(len(availableShards))]
}
}
this.mu.Unlock()
if randomShard != nil {
Log.Debug.Printf("Verifying shard %d on peer %s", *randomShard, this.id.String())
bytes, success := this.retrieveShard(*randomShard)
if !success || !this.fluidBackup.blockStore.VerifyShard(this, *randomShard, bytes) {
// either we failed to communicate with the peer (if !success),
// or the peer sent us corrupted shard data (if success)
failReason := "invalid shard data"
if !success {
failReason = "peer communication failed"
}
Log.Info.Printf("Failed verification of shard %d on peer %s: %s", *randomShard, this.id.String(), failReason)
this.mu.Lock()
if success {
// shard is invalid, delete from remote end
// block store will re-assign it already from the verifyshard call, so don't need to do anything else about that
delete(this.shardsAccounted, *randomShard)
go this.deleteShard(*randomShard)
}
// we also check if this peer is failed per our one-day policy, in which case we would want to clear all accounted shards
this.verifyFailCount++
if this.verifyFailCount > 5 && time.Now().After(this.lastVerifySuccessTime.Add(time.Second * VERIFY_FAIL_WAIT_INTERVAL)) {
go this.terminateAgreement()
}
this.mu.Unlock()
// Decrease trust
this.peerList.UpdateTrustPostVerification(this.id, false)
} else {
this.peerList.UpdateTrustPostVerification(this.id, true)
this.mu.Lock()
this.verifyFailCount = 0
this.lastVerifySuccessTime = time.Now()
this.mu.Unlock()
}
}
}
func (this *Peer) terminateAgreement() {
this.mu.Lock()
Log.Info.Printf("Terminating agreement with peer %s", this.id.String())
this.peerList.UpdateTrustPostVerification(this.id, false)
for shardId := range this.shardsAccounted {
this.fluidBackup.blockStore.VerifyShard(this, shardId, nil)
delete(this.shardsAccounted, shardId)
}
this.localBytes = 0
this.localUsedBytes = 0
this.remoteBytes = 0
this.remoteUsedBytes = 0
// clear this peer's files from filesystem
files, _ := ioutil.ReadDir("store/")
for _, f := range files {
if strings.HasSuffix(f.Name(), ".obj") && strings.HasSuffix(f.Name(), this.id.String()+"_") {
os.Remove("store/" + f.Name())
}
}
this.mu.Unlock()
}
/* ============== *
* Peer Discovery *
* ============== */
// Ask this remote peer for the
// specified number of peers
func (this *Peer) askForPeers(num int) []PeerId {
sharedPeerIds := this.protocol.askForPeers(this.id, num)
// Insert peer handler here if necesssay.
return sharedPeerIds
}
| eventAgreement | identifier_name |
TelephonyBaseTest.py | #!/usr/bin/env python3.4
#
# Copyright 2016 - Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base Class for Defining Common Telephony Test Functionality
"""
import os
import time
import inspect
import traceback
import acts.controllers.diag_logger
from acts.base_test import BaseTestClass
from acts.keys import Config
from acts.signals import TestSignal
from acts.signals import TestAbortClass
from acts.signals import TestAbortAll
from acts import utils
from acts.test_utils.tel.tel_subscription_utils import \
initial_set_up_for_subid_infomation
from acts.test_utils.tel.tel_test_utils import abort_all_tests
from acts.test_utils.tel.tel_test_utils import check_qxdm_logger_always_on
from acts.test_utils.tel.tel_test_utils import ensure_phones_default_state
from acts.test_utils.tel.tel_test_utils import ensure_phones_idle
from acts.test_utils.tel.tel_test_utils import refresh_droid_config
from acts.test_utils.tel.tel_test_utils import setup_droid_properties
from acts.test_utils.tel.tel_test_utils import set_phone_screen_on
from acts.test_utils.tel.tel_test_utils import set_phone_silent_mode
from acts.test_utils.tel.tel_test_utils import set_qxdm_logger_always_on
from acts.test_utils.tel.tel_defines import PRECISE_CALL_STATE_LISTEN_LEVEL_BACKGROUND
from acts.test_utils.tel.tel_defines import PRECISE_CALL_STATE_LISTEN_LEVEL_FOREGROUND
from acts.test_utils.tel.tel_defines import PRECISE_CALL_STATE_LISTEN_LEVEL_RINGING
from acts.test_utils.tel.tel_defines import WIFI_VERBOSE_LOGGING_ENABLED
from acts.test_utils.tel.tel_defines import WIFI_VERBOSE_LOGGING_DISABLED
from acts.utils import force_airplane_mode
QXDM_LOG_PATH = "/data/vendor/radio/diag_logs/logs/"
class TelephonyBaseTest(BaseTestClass):
def __init__(self, controllers):
BaseTestClass.__init__(self, controllers)
self.logger_sessions = []
for ad in self.android_devices:
if getattr(ad, "qxdm_always_on", False):
#this is only supported on 2017 devices
ad.log.info("qxdm_always_on is set in config file")
mask = getattr(ad, "qxdm_mask", "Radio-general.cfg")
if not check_qxdm_logger_always_on(ad, mask):
ad.log.info("qxdm always on is not set, turn it on")
set_qxdm_logger_always_on(ad, mask)
else:
ad.log.info("qxdm always on is already set")
#The puk and pin should be provided in testbed config file.
#"AndroidDevice": [{"serial": "84B5T15A29018214",
# "adb_logcat_param": "-b all",
# "puk": "12345678",
# "puk_pin": "1234"}]
if hasattr(ad, 'puk'):
if not hasattr(ad, 'puk_pin'):
|
ad.log.info("Enter PUK code and pin")
if not ad.droid.telephonySupplyPuk(ad.puk, ad.puk_pin):
abort_all_tests(
ad.log,
"Puk and puk_pin provided in testbed config do NOT work"
)
self.skip_reset_between_cases = self.user_params.get(
"skip_reset_between_cases", True)
# Use for logging in the test cases to facilitate
# faster log lookup and reduce ambiguity in logging.
@staticmethod
def tel_test_wrap(fn):
def _safe_wrap_test_case(self, *args, **kwargs):
test_id = "%s:%s:%s" % (self.__class__.__name__, self.test_name,
self.begin_time.replace(' ', '-'))
self.test_id = test_id
log_string = "[Test ID] %s" % test_id
self.log.info(log_string)
try:
for ad in self.android_devices:
ad.droid.logI("Started %s" % log_string)
# TODO: b/19002120 start QXDM Logging
result = fn(self, *args, **kwargs)
for ad in self.android_devices:
ad.droid.logI("Finished %s" % log_string)
new_crash = ad.check_crash_report(self.test_name,
self.begin_time, result)
if new_crash:
ad.log.error("Find new crash reports %s", new_crash)
if not result and self.user_params.get("telephony_auto_rerun"):
self.teardown_test()
# re-run only once, if re-run pass, mark as pass
log_string = "[Rerun Test ID] %s. 1st run failed." % test_id
self.log.info(log_string)
self.setup_test()
for ad in self.android_devices:
ad.droid.logI("Rerun Started %s" % log_string)
result = fn(self, *args, **kwargs)
if result is True:
self.log.info("Rerun passed.")
elif result is False:
self.log.info("Rerun failed.")
else:
# In the event that we have a non-bool or null
# retval, we want to clearly distinguish this in the
# logs from an explicit failure, though the test will
# still be considered a failure for reporting purposes.
self.log.info("Rerun indeterminate.")
result = False
return result
except (TestSignal, TestAbortClass, TestAbortAll):
raise
except Exception as e:
self.log.error(str(e))
return False
finally:
# TODO: b/19002120 stop QXDM Logging
for ad in self.android_devices:
try:
ad.adb.wait_for_device()
except Exception as e:
self.log.error(str(e))
return _safe_wrap_test_case
def setup_class(self):
sim_conf_file = self.user_params.get("sim_conf_file")
if not sim_conf_file:
self.log.info("\"sim_conf_file\" is not provided test bed config!")
else:
# If the sim_conf_file is not a full path, attempt to find it
# relative to the config file.
if not os.path.isfile(sim_conf_file):
sim_conf_file = os.path.join(
self.user_params[Config.key_config_path], sim_conf_file)
if not os.path.isfile(sim_conf_file):
self.log.error("Unable to load user config %s ",
sim_conf_file)
return False
setattr(self, "diag_logger",
self.register_controller(
acts.controllers.diag_logger, required=False))
if not self.user_params.get("Attenuator"):
ensure_phones_default_state(self.log, self.android_devices)
else:
ensure_phones_idle(self.log, self.android_devices)
for ad in self.android_devices:
setup_droid_properties(self.log, ad, sim_conf_file)
# Setup VoWiFi MDN for Verizon. b/33187374
build_id = ad.build_info["build_id"]
if "vzw" in [
sub["operator"] for sub in ad.cfg["subscription"].values()
] and ad.is_apk_installed("com.google.android.wfcactivation"):
ad.log.info("setup VoWiFi MDN per b/33187374")
ad.adb.shell("setprop dbg.vzw.force_wfc_nv_enabled true")
ad.adb.shell("am start --ei EXTRA_LAUNCH_CARRIER_APP 0 -n "
"\"com.google.android.wfcactivation/"
".VzwEmergencyAddressActivity\"")
# Start telephony monitor
if not ad.is_apk_running("com.google.telephonymonitor"):
ad.log.info("TelephonyMonitor is not running, start it now")
ad.adb.shell(
'am broadcast -a '
'com.google.gservices.intent.action.GSERVICES_OVERRIDE -e '
'"ce.telephony_monitor_enable" "true"')
# Sub ID setup
initial_set_up_for_subid_infomation(self.log, ad)
if "enable_wifi_verbose_logging" in self.user_params:
ad.droid.wifiEnableVerboseLogging(WIFI_VERBOSE_LOGGING_ENABLED)
# If device is setup already, skip the following setup procedures
if getattr(ad, "telephony_test_setup", None):
continue
# Disable Emergency alerts
# Set chrome browser start with no-first-run verification and
# disable-fre. Give permission to read from and write to storage.
for cmd in (
"am start -n com.google.android.setupwizard/."
"SetupWizardExitActivity",
"pm disable com.android.cellbroadcastreceiver",
"pm grant com.android.chrome "
"android.permission.READ_EXTERNAL_STORAGE",
"pm grant com.android.chrome "
"android.permission.WRITE_EXTERNAL_STORAGE",
"rm /data/local/chrome-command-line",
"am set-debug-app --persistent com.android.chrome",
'echo "chrome --no-default-browser-check --no-first-run '
'--disable-fre" > /data/local/tmp/chrome-command-line'):
ad.adb.shell(cmd)
# Ensure that a test class starts from a consistent state that
# improves chances of valid network selection and facilitates
# logging.
try:
if not set_phone_screen_on(self.log, ad):
self.log.error("Failed to set phone screen-on time.")
return False
if not set_phone_silent_mode(self.log, ad):
self.log.error("Failed to set phone silent mode.")
return False
ad.droid.telephonyAdjustPreciseCallStateListenLevel(
PRECISE_CALL_STATE_LISTEN_LEVEL_FOREGROUND, True)
ad.droid.telephonyAdjustPreciseCallStateListenLevel(
PRECISE_CALL_STATE_LISTEN_LEVEL_RINGING, True)
ad.droid.telephonyAdjustPreciseCallStateListenLevel(
PRECISE_CALL_STATE_LISTEN_LEVEL_BACKGROUND, True)
except Exception as e:
self.log.error("Failure with %s", e)
setattr(ad, "telephony_test_setup", True)
return True
def teardown_class(self):
try:
for ad in self.android_devices:
if "enable_wifi_verbose_logging" in self.user_params:
ad.droid.wifiEnableVerboseLogging(
WIFI_VERBOSE_LOGGING_DISABLED)
return True
except Exception as e:
self.log.error("Failure with %s", e)
def setup_test(self):
if getattr(self, "diag_logger", None):
for logger in self.diag_logger:
self.log.info("Starting a diagnostic session %s", logger)
self.logger_sessions.append((logger, logger.start()))
if self.skip_reset_between_cases:
ensure_phones_idle(self.log, self.android_devices)
ensure_phones_default_state(self.log, self.android_devices)
def teardown_test(self):
return True
def on_exception(self, test_name, begin_time):
self._pull_diag_logs(test_name, begin_time)
self._take_bug_report(test_name, begin_time)
self._cleanup_logger_sessions()
def on_fail(self, test_name, begin_time):
self._pull_diag_logs(test_name, begin_time)
self._take_bug_report(test_name, begin_time)
self._cleanup_logger_sessions()
def on_pass(self, test_name, begin_time):
self._cleanup_logger_sessions()
def get_stress_test_number(self):
"""Gets the stress_test_number param from user params.
Gets the stress_test_number param. If absent, returns default 100.
"""
return int(self.user_params.get("stress_test_number", 100))
| abort_all_tests(ad.log, "puk_pin is not provided") | conditional_block |
TelephonyBaseTest.py | #!/usr/bin/env python3.4
#
# Copyright 2016 - Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base Class for Defining Common Telephony Test Functionality
"""
import os
import time
import inspect
import traceback
import acts.controllers.diag_logger
from acts.base_test import BaseTestClass
from acts.keys import Config
from acts.signals import TestSignal
from acts.signals import TestAbortClass
from acts.signals import TestAbortAll
from acts import utils
from acts.test_utils.tel.tel_subscription_utils import \
initial_set_up_for_subid_infomation
from acts.test_utils.tel.tel_test_utils import abort_all_tests
from acts.test_utils.tel.tel_test_utils import check_qxdm_logger_always_on
from acts.test_utils.tel.tel_test_utils import ensure_phones_default_state
from acts.test_utils.tel.tel_test_utils import ensure_phones_idle
from acts.test_utils.tel.tel_test_utils import refresh_droid_config
from acts.test_utils.tel.tel_test_utils import setup_droid_properties
from acts.test_utils.tel.tel_test_utils import set_phone_screen_on
from acts.test_utils.tel.tel_test_utils import set_phone_silent_mode
from acts.test_utils.tel.tel_test_utils import set_qxdm_logger_always_on
from acts.test_utils.tel.tel_defines import PRECISE_CALL_STATE_LISTEN_LEVEL_BACKGROUND
from acts.test_utils.tel.tel_defines import PRECISE_CALL_STATE_LISTEN_LEVEL_FOREGROUND
from acts.test_utils.tel.tel_defines import PRECISE_CALL_STATE_LISTEN_LEVEL_RINGING
from acts.test_utils.tel.tel_defines import WIFI_VERBOSE_LOGGING_ENABLED
from acts.test_utils.tel.tel_defines import WIFI_VERBOSE_LOGGING_DISABLED
from acts.utils import force_airplane_mode
QXDM_LOG_PATH = "/data/vendor/radio/diag_logs/logs/"
class TelephonyBaseTest(BaseTestClass):
def __init__(self, controllers):
BaseTestClass.__init__(self, controllers)
self.logger_sessions = []
for ad in self.android_devices:
if getattr(ad, "qxdm_always_on", False):
#this is only supported on 2017 devices
ad.log.info("qxdm_always_on is set in config file")
mask = getattr(ad, "qxdm_mask", "Radio-general.cfg")
if not check_qxdm_logger_always_on(ad, mask):
ad.log.info("qxdm always on is not set, turn it on")
set_qxdm_logger_always_on(ad, mask)
else:
ad.log.info("qxdm always on is already set")
#The puk and pin should be provided in testbed config file.
#"AndroidDevice": [{"serial": "84B5T15A29018214",
# "adb_logcat_param": "-b all",
# "puk": "12345678",
# "puk_pin": "1234"}]
if hasattr(ad, 'puk'):
if not hasattr(ad, 'puk_pin'):
abort_all_tests(ad.log, "puk_pin is not provided")
ad.log.info("Enter PUK code and pin")
if not ad.droid.telephonySupplyPuk(ad.puk, ad.puk_pin):
abort_all_tests(
ad.log, |
# Use for logging in the test cases to facilitate
# faster log lookup and reduce ambiguity in logging.
@staticmethod
def tel_test_wrap(fn):
def _safe_wrap_test_case(self, *args, **kwargs):
test_id = "%s:%s:%s" % (self.__class__.__name__, self.test_name,
self.begin_time.replace(' ', '-'))
self.test_id = test_id
log_string = "[Test ID] %s" % test_id
self.log.info(log_string)
try:
for ad in self.android_devices:
ad.droid.logI("Started %s" % log_string)
# TODO: b/19002120 start QXDM Logging
result = fn(self, *args, **kwargs)
for ad in self.android_devices:
ad.droid.logI("Finished %s" % log_string)
new_crash = ad.check_crash_report(self.test_name,
self.begin_time, result)
if new_crash:
ad.log.error("Find new crash reports %s", new_crash)
if not result and self.user_params.get("telephony_auto_rerun"):
self.teardown_test()
# re-run only once, if re-run pass, mark as pass
log_string = "[Rerun Test ID] %s. 1st run failed." % test_id
self.log.info(log_string)
self.setup_test()
for ad in self.android_devices:
ad.droid.logI("Rerun Started %s" % log_string)
result = fn(self, *args, **kwargs)
if result is True:
self.log.info("Rerun passed.")
elif result is False:
self.log.info("Rerun failed.")
else:
# In the event that we have a non-bool or null
# retval, we want to clearly distinguish this in the
# logs from an explicit failure, though the test will
# still be considered a failure for reporting purposes.
self.log.info("Rerun indeterminate.")
result = False
return result
except (TestSignal, TestAbortClass, TestAbortAll):
raise
except Exception as e:
self.log.error(str(e))
return False
finally:
# TODO: b/19002120 stop QXDM Logging
for ad in self.android_devices:
try:
ad.adb.wait_for_device()
except Exception as e:
self.log.error(str(e))
return _safe_wrap_test_case
def setup_class(self):
sim_conf_file = self.user_params.get("sim_conf_file")
if not sim_conf_file:
self.log.info("\"sim_conf_file\" is not provided test bed config!")
else:
# If the sim_conf_file is not a full path, attempt to find it
# relative to the config file.
if not os.path.isfile(sim_conf_file):
sim_conf_file = os.path.join(
self.user_params[Config.key_config_path], sim_conf_file)
if not os.path.isfile(sim_conf_file):
self.log.error("Unable to load user config %s ",
sim_conf_file)
return False
setattr(self, "diag_logger",
self.register_controller(
acts.controllers.diag_logger, required=False))
if not self.user_params.get("Attenuator"):
ensure_phones_default_state(self.log, self.android_devices)
else:
ensure_phones_idle(self.log, self.android_devices)
for ad in self.android_devices:
setup_droid_properties(self.log, ad, sim_conf_file)
# Setup VoWiFi MDN for Verizon. b/33187374
build_id = ad.build_info["build_id"]
if "vzw" in [
sub["operator"] for sub in ad.cfg["subscription"].values()
] and ad.is_apk_installed("com.google.android.wfcactivation"):
ad.log.info("setup VoWiFi MDN per b/33187374")
ad.adb.shell("setprop dbg.vzw.force_wfc_nv_enabled true")
ad.adb.shell("am start --ei EXTRA_LAUNCH_CARRIER_APP 0 -n "
"\"com.google.android.wfcactivation/"
".VzwEmergencyAddressActivity\"")
# Start telephony monitor
if not ad.is_apk_running("com.google.telephonymonitor"):
ad.log.info("TelephonyMonitor is not running, start it now")
ad.adb.shell(
'am broadcast -a '
'com.google.gservices.intent.action.GSERVICES_OVERRIDE -e '
'"ce.telephony_monitor_enable" "true"')
# Sub ID setup
initial_set_up_for_subid_infomation(self.log, ad)
if "enable_wifi_verbose_logging" in self.user_params:
ad.droid.wifiEnableVerboseLogging(WIFI_VERBOSE_LOGGING_ENABLED)
# If device is setup already, skip the following setup procedures
if getattr(ad, "telephony_test_setup", None):
continue
# Disable Emergency alerts
# Set chrome browser start with no-first-run verification and
# disable-fre. Give permission to read from and write to storage.
for cmd in (
"am start -n com.google.android.setupwizard/."
"SetupWizardExitActivity",
"pm disable com.android.cellbroadcastreceiver",
"pm grant com.android.chrome "
"android.permission.READ_EXTERNAL_STORAGE",
"pm grant com.android.chrome "
"android.permission.WRITE_EXTERNAL_STORAGE",
"rm /data/local/chrome-command-line",
"am set-debug-app --persistent com.android.chrome",
'echo "chrome --no-default-browser-check --no-first-run '
'--disable-fre" > /data/local/tmp/chrome-command-line'):
ad.adb.shell(cmd)
# Ensure that a test class starts from a consistent state that
# improves chances of valid network selection and facilitates
# logging.
try:
if not set_phone_screen_on(self.log, ad):
self.log.error("Failed to set phone screen-on time.")
return False
if not set_phone_silent_mode(self.log, ad):
self.log.error("Failed to set phone silent mode.")
return False
ad.droid.telephonyAdjustPreciseCallStateListenLevel(
PRECISE_CALL_STATE_LISTEN_LEVEL_FOREGROUND, True)
ad.droid.telephonyAdjustPreciseCallStateListenLevel(
PRECISE_CALL_STATE_LISTEN_LEVEL_RINGING, True)
ad.droid.telephonyAdjustPreciseCallStateListenLevel(
PRECISE_CALL_STATE_LISTEN_LEVEL_BACKGROUND, True)
except Exception as e:
self.log.error("Failure with %s", e)
setattr(ad, "telephony_test_setup", True)
return True
def teardown_class(self):
try:
for ad in self.android_devices:
if "enable_wifi_verbose_logging" in self.user_params:
ad.droid.wifiEnableVerboseLogging(
WIFI_VERBOSE_LOGGING_DISABLED)
return True
except Exception as e:
self.log.error("Failure with %s", e)
def setup_test(self):
if getattr(self, "diag_logger", None):
for logger in self.diag_logger:
self.log.info("Starting a diagnostic session %s", logger)
self.logger_sessions.append((logger, logger.start()))
if self.skip_reset_between_cases:
ensure_phones_idle(self.log, self.android_devices)
ensure_phones_default_state(self.log, self.android_devices)
def teardown_test(self):
return True
def on_exception(self, test_name, begin_time):
self._pull_diag_logs(test_name, begin_time)
self._take_bug_report(test_name, begin_time)
self._cleanup_logger_sessions()
def on_fail(self, test_name, begin_time):
self._pull_diag_logs(test_name, begin_time)
self._take_bug_report(test_name, begin_time)
self._cleanup_logger_sessions()
def on_pass(self, test_name, begin_time):
self._cleanup_logger_sessions()
def get_stress_test_number(self):
"""Gets the stress_test_number param from user params.
Gets the stress_test_number param. If absent, returns default 100.
"""
return int(self.user_params.get("stress_test_number", 100)) | "Puk and puk_pin provided in testbed config do NOT work"
)
self.skip_reset_between_cases = self.user_params.get(
"skip_reset_between_cases", True) | random_line_split |
TelephonyBaseTest.py | #!/usr/bin/env python3.4
#
# Copyright 2016 - Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base Class for Defining Common Telephony Test Functionality
"""
import os
import time
import inspect
import traceback
import acts.controllers.diag_logger
from acts.base_test import BaseTestClass
from acts.keys import Config
from acts.signals import TestSignal
from acts.signals import TestAbortClass
from acts.signals import TestAbortAll
from acts import utils
from acts.test_utils.tel.tel_subscription_utils import \
initial_set_up_for_subid_infomation
from acts.test_utils.tel.tel_test_utils import abort_all_tests
from acts.test_utils.tel.tel_test_utils import check_qxdm_logger_always_on
from acts.test_utils.tel.tel_test_utils import ensure_phones_default_state
from acts.test_utils.tel.tel_test_utils import ensure_phones_idle
from acts.test_utils.tel.tel_test_utils import refresh_droid_config
from acts.test_utils.tel.tel_test_utils import setup_droid_properties
from acts.test_utils.tel.tel_test_utils import set_phone_screen_on
from acts.test_utils.tel.tel_test_utils import set_phone_silent_mode
from acts.test_utils.tel.tel_test_utils import set_qxdm_logger_always_on
from acts.test_utils.tel.tel_defines import PRECISE_CALL_STATE_LISTEN_LEVEL_BACKGROUND
from acts.test_utils.tel.tel_defines import PRECISE_CALL_STATE_LISTEN_LEVEL_FOREGROUND
from acts.test_utils.tel.tel_defines import PRECISE_CALL_STATE_LISTEN_LEVEL_RINGING
from acts.test_utils.tel.tel_defines import WIFI_VERBOSE_LOGGING_ENABLED
from acts.test_utils.tel.tel_defines import WIFI_VERBOSE_LOGGING_DISABLED
from acts.utils import force_airplane_mode
QXDM_LOG_PATH = "/data/vendor/radio/diag_logs/logs/"
class TelephonyBaseTest(BaseTestClass):
def __init__(self, controllers):
BaseTestClass.__init__(self, controllers)
self.logger_sessions = []
for ad in self.android_devices:
if getattr(ad, "qxdm_always_on", False):
#this is only supported on 2017 devices
ad.log.info("qxdm_always_on is set in config file")
mask = getattr(ad, "qxdm_mask", "Radio-general.cfg")
if not check_qxdm_logger_always_on(ad, mask):
ad.log.info("qxdm always on is not set, turn it on")
set_qxdm_logger_always_on(ad, mask)
else:
ad.log.info("qxdm always on is already set")
#The puk and pin should be provided in testbed config file.
#"AndroidDevice": [{"serial": "84B5T15A29018214",
# "adb_logcat_param": "-b all",
# "puk": "12345678",
# "puk_pin": "1234"}]
if hasattr(ad, 'puk'):
if not hasattr(ad, 'puk_pin'):
abort_all_tests(ad.log, "puk_pin is not provided")
ad.log.info("Enter PUK code and pin")
if not ad.droid.telephonySupplyPuk(ad.puk, ad.puk_pin):
abort_all_tests(
ad.log,
"Puk and puk_pin provided in testbed config do NOT work"
)
self.skip_reset_between_cases = self.user_params.get(
"skip_reset_between_cases", True)
# Use for logging in the test cases to facilitate
# faster log lookup and reduce ambiguity in logging.
@staticmethod
def tel_test_wrap(fn):
def _safe_wrap_test_case(self, *args, **kwargs):
test_id = "%s:%s:%s" % (self.__class__.__name__, self.test_name,
self.begin_time.replace(' ', '-'))
self.test_id = test_id
log_string = "[Test ID] %s" % test_id
self.log.info(log_string)
try:
for ad in self.android_devices:
ad.droid.logI("Started %s" % log_string)
# TODO: b/19002120 start QXDM Logging
result = fn(self, *args, **kwargs)
for ad in self.android_devices:
ad.droid.logI("Finished %s" % log_string)
new_crash = ad.check_crash_report(self.test_name,
self.begin_time, result)
if new_crash:
ad.log.error("Find new crash reports %s", new_crash)
if not result and self.user_params.get("telephony_auto_rerun"):
self.teardown_test()
# re-run only once, if re-run pass, mark as pass
log_string = "[Rerun Test ID] %s. 1st run failed." % test_id
self.log.info(log_string)
self.setup_test()
for ad in self.android_devices:
ad.droid.logI("Rerun Started %s" % log_string)
result = fn(self, *args, **kwargs)
if result is True:
self.log.info("Rerun passed.")
elif result is False:
self.log.info("Rerun failed.")
else:
# In the event that we have a non-bool or null
# retval, we want to clearly distinguish this in the
# logs from an explicit failure, though the test will
# still be considered a failure for reporting purposes.
self.log.info("Rerun indeterminate.")
result = False
return result
except (TestSignal, TestAbortClass, TestAbortAll):
raise
except Exception as e:
self.log.error(str(e))
return False
finally:
# TODO: b/19002120 stop QXDM Logging
for ad in self.android_devices:
try:
ad.adb.wait_for_device()
except Exception as e:
self.log.error(str(e))
return _safe_wrap_test_case
def setup_class(self):
sim_conf_file = self.user_params.get("sim_conf_file")
if not sim_conf_file:
self.log.info("\"sim_conf_file\" is not provided test bed config!")
else:
# If the sim_conf_file is not a full path, attempt to find it
# relative to the config file.
if not os.path.isfile(sim_conf_file):
sim_conf_file = os.path.join(
self.user_params[Config.key_config_path], sim_conf_file)
if not os.path.isfile(sim_conf_file):
self.log.error("Unable to load user config %s ",
sim_conf_file)
return False
setattr(self, "diag_logger",
self.register_controller(
acts.controllers.diag_logger, required=False))
if not self.user_params.get("Attenuator"):
ensure_phones_default_state(self.log, self.android_devices)
else:
ensure_phones_idle(self.log, self.android_devices)
for ad in self.android_devices:
setup_droid_properties(self.log, ad, sim_conf_file)
# Setup VoWiFi MDN for Verizon. b/33187374
build_id = ad.build_info["build_id"]
if "vzw" in [
sub["operator"] for sub in ad.cfg["subscription"].values()
] and ad.is_apk_installed("com.google.android.wfcactivation"):
ad.log.info("setup VoWiFi MDN per b/33187374")
ad.adb.shell("setprop dbg.vzw.force_wfc_nv_enabled true")
ad.adb.shell("am start --ei EXTRA_LAUNCH_CARRIER_APP 0 -n "
"\"com.google.android.wfcactivation/"
".VzwEmergencyAddressActivity\"")
# Start telephony monitor
if not ad.is_apk_running("com.google.telephonymonitor"):
ad.log.info("TelephonyMonitor is not running, start it now")
ad.adb.shell(
'am broadcast -a '
'com.google.gservices.intent.action.GSERVICES_OVERRIDE -e '
'"ce.telephony_monitor_enable" "true"')
# Sub ID setup
initial_set_up_for_subid_infomation(self.log, ad)
if "enable_wifi_verbose_logging" in self.user_params:
ad.droid.wifiEnableVerboseLogging(WIFI_VERBOSE_LOGGING_ENABLED)
# If device is setup already, skip the following setup procedures
if getattr(ad, "telephony_test_setup", None):
continue
# Disable Emergency alerts
# Set chrome browser start with no-first-run verification and
# disable-fre. Give permission to read from and write to storage.
for cmd in (
"am start -n com.google.android.setupwizard/."
"SetupWizardExitActivity",
"pm disable com.android.cellbroadcastreceiver",
"pm grant com.android.chrome "
"android.permission.READ_EXTERNAL_STORAGE",
"pm grant com.android.chrome "
"android.permission.WRITE_EXTERNAL_STORAGE",
"rm /data/local/chrome-command-line",
"am set-debug-app --persistent com.android.chrome",
'echo "chrome --no-default-browser-check --no-first-run '
'--disable-fre" > /data/local/tmp/chrome-command-line'):
ad.adb.shell(cmd)
# Ensure that a test class starts from a consistent state that
# improves chances of valid network selection and facilitates
# logging.
try:
if not set_phone_screen_on(self.log, ad):
self.log.error("Failed to set phone screen-on time.")
return False
if not set_phone_silent_mode(self.log, ad):
self.log.error("Failed to set phone silent mode.")
return False
ad.droid.telephonyAdjustPreciseCallStateListenLevel(
PRECISE_CALL_STATE_LISTEN_LEVEL_FOREGROUND, True)
ad.droid.telephonyAdjustPreciseCallStateListenLevel(
PRECISE_CALL_STATE_LISTEN_LEVEL_RINGING, True)
ad.droid.telephonyAdjustPreciseCallStateListenLevel(
PRECISE_CALL_STATE_LISTEN_LEVEL_BACKGROUND, True)
except Exception as e:
self.log.error("Failure with %s", e)
setattr(ad, "telephony_test_setup", True)
return True
def teardown_class(self):
try:
for ad in self.android_devices:
if "enable_wifi_verbose_logging" in self.user_params:
ad.droid.wifiEnableVerboseLogging(
WIFI_VERBOSE_LOGGING_DISABLED)
return True
except Exception as e:
self.log.error("Failure with %s", e)
def setup_test(self):
|
def teardown_test(self):
return True
def on_exception(self, test_name, begin_time):
self._pull_diag_logs(test_name, begin_time)
self._take_bug_report(test_name, begin_time)
self._cleanup_logger_sessions()
def on_fail(self, test_name, begin_time):
self._pull_diag_logs(test_name, begin_time)
self._take_bug_report(test_name, begin_time)
self._cleanup_logger_sessions()
def on_pass(self, test_name, begin_time):
self._cleanup_logger_sessions()
def get_stress_test_number(self):
"""Gets the stress_test_number param from user params.
Gets the stress_test_number param. If absent, returns default 100.
"""
return int(self.user_params.get("stress_test_number", 100))
| if getattr(self, "diag_logger", None):
for logger in self.diag_logger:
self.log.info("Starting a diagnostic session %s", logger)
self.logger_sessions.append((logger, logger.start()))
if self.skip_reset_between_cases:
ensure_phones_idle(self.log, self.android_devices)
ensure_phones_default_state(self.log, self.android_devices) | identifier_body |
TelephonyBaseTest.py | #!/usr/bin/env python3.4
#
# Copyright 2016 - Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base Class for Defining Common Telephony Test Functionality
"""
import os
import time
import inspect
import traceback
import acts.controllers.diag_logger
from acts.base_test import BaseTestClass
from acts.keys import Config
from acts.signals import TestSignal
from acts.signals import TestAbortClass
from acts.signals import TestAbortAll
from acts import utils
from acts.test_utils.tel.tel_subscription_utils import \
initial_set_up_for_subid_infomation
from acts.test_utils.tel.tel_test_utils import abort_all_tests
from acts.test_utils.tel.tel_test_utils import check_qxdm_logger_always_on
from acts.test_utils.tel.tel_test_utils import ensure_phones_default_state
from acts.test_utils.tel.tel_test_utils import ensure_phones_idle
from acts.test_utils.tel.tel_test_utils import refresh_droid_config
from acts.test_utils.tel.tel_test_utils import setup_droid_properties
from acts.test_utils.tel.tel_test_utils import set_phone_screen_on
from acts.test_utils.tel.tel_test_utils import set_phone_silent_mode
from acts.test_utils.tel.tel_test_utils import set_qxdm_logger_always_on
from acts.test_utils.tel.tel_defines import PRECISE_CALL_STATE_LISTEN_LEVEL_BACKGROUND
from acts.test_utils.tel.tel_defines import PRECISE_CALL_STATE_LISTEN_LEVEL_FOREGROUND
from acts.test_utils.tel.tel_defines import PRECISE_CALL_STATE_LISTEN_LEVEL_RINGING
from acts.test_utils.tel.tel_defines import WIFI_VERBOSE_LOGGING_ENABLED
from acts.test_utils.tel.tel_defines import WIFI_VERBOSE_LOGGING_DISABLED
from acts.utils import force_airplane_mode
QXDM_LOG_PATH = "/data/vendor/radio/diag_logs/logs/"
class TelephonyBaseTest(BaseTestClass):
def __init__(self, controllers):
BaseTestClass.__init__(self, controllers)
self.logger_sessions = []
for ad in self.android_devices:
if getattr(ad, "qxdm_always_on", False):
#this is only supported on 2017 devices
ad.log.info("qxdm_always_on is set in config file")
mask = getattr(ad, "qxdm_mask", "Radio-general.cfg")
if not check_qxdm_logger_always_on(ad, mask):
ad.log.info("qxdm always on is not set, turn it on")
set_qxdm_logger_always_on(ad, mask)
else:
ad.log.info("qxdm always on is already set")
#The puk and pin should be provided in testbed config file.
#"AndroidDevice": [{"serial": "84B5T15A29018214",
# "adb_logcat_param": "-b all",
# "puk": "12345678",
# "puk_pin": "1234"}]
if hasattr(ad, 'puk'):
if not hasattr(ad, 'puk_pin'):
abort_all_tests(ad.log, "puk_pin is not provided")
ad.log.info("Enter PUK code and pin")
if not ad.droid.telephonySupplyPuk(ad.puk, ad.puk_pin):
abort_all_tests(
ad.log,
"Puk and puk_pin provided in testbed config do NOT work"
)
self.skip_reset_between_cases = self.user_params.get(
"skip_reset_between_cases", True)
# Use for logging in the test cases to facilitate
# faster log lookup and reduce ambiguity in logging.
@staticmethod
def tel_test_wrap(fn):
def | (self, *args, **kwargs):
test_id = "%s:%s:%s" % (self.__class__.__name__, self.test_name,
self.begin_time.replace(' ', '-'))
self.test_id = test_id
log_string = "[Test ID] %s" % test_id
self.log.info(log_string)
try:
for ad in self.android_devices:
ad.droid.logI("Started %s" % log_string)
# TODO: b/19002120 start QXDM Logging
result = fn(self, *args, **kwargs)
for ad in self.android_devices:
ad.droid.logI("Finished %s" % log_string)
new_crash = ad.check_crash_report(self.test_name,
self.begin_time, result)
if new_crash:
ad.log.error("Find new crash reports %s", new_crash)
if not result and self.user_params.get("telephony_auto_rerun"):
self.teardown_test()
# re-run only once, if re-run pass, mark as pass
log_string = "[Rerun Test ID] %s. 1st run failed." % test_id
self.log.info(log_string)
self.setup_test()
for ad in self.android_devices:
ad.droid.logI("Rerun Started %s" % log_string)
result = fn(self, *args, **kwargs)
if result is True:
self.log.info("Rerun passed.")
elif result is False:
self.log.info("Rerun failed.")
else:
# In the event that we have a non-bool or null
# retval, we want to clearly distinguish this in the
# logs from an explicit failure, though the test will
# still be considered a failure for reporting purposes.
self.log.info("Rerun indeterminate.")
result = False
return result
except (TestSignal, TestAbortClass, TestAbortAll):
raise
except Exception as e:
self.log.error(str(e))
return False
finally:
# TODO: b/19002120 stop QXDM Logging
for ad in self.android_devices:
try:
ad.adb.wait_for_device()
except Exception as e:
self.log.error(str(e))
return _safe_wrap_test_case
def setup_class(self):
sim_conf_file = self.user_params.get("sim_conf_file")
if not sim_conf_file:
self.log.info("\"sim_conf_file\" is not provided test bed config!")
else:
# If the sim_conf_file is not a full path, attempt to find it
# relative to the config file.
if not os.path.isfile(sim_conf_file):
sim_conf_file = os.path.join(
self.user_params[Config.key_config_path], sim_conf_file)
if not os.path.isfile(sim_conf_file):
self.log.error("Unable to load user config %s ",
sim_conf_file)
return False
setattr(self, "diag_logger",
self.register_controller(
acts.controllers.diag_logger, required=False))
if not self.user_params.get("Attenuator"):
ensure_phones_default_state(self.log, self.android_devices)
else:
ensure_phones_idle(self.log, self.android_devices)
for ad in self.android_devices:
setup_droid_properties(self.log, ad, sim_conf_file)
# Setup VoWiFi MDN for Verizon. b/33187374
build_id = ad.build_info["build_id"]
if "vzw" in [
sub["operator"] for sub in ad.cfg["subscription"].values()
] and ad.is_apk_installed("com.google.android.wfcactivation"):
ad.log.info("setup VoWiFi MDN per b/33187374")
ad.adb.shell("setprop dbg.vzw.force_wfc_nv_enabled true")
ad.adb.shell("am start --ei EXTRA_LAUNCH_CARRIER_APP 0 -n "
"\"com.google.android.wfcactivation/"
".VzwEmergencyAddressActivity\"")
# Start telephony monitor
if not ad.is_apk_running("com.google.telephonymonitor"):
ad.log.info("TelephonyMonitor is not running, start it now")
ad.adb.shell(
'am broadcast -a '
'com.google.gservices.intent.action.GSERVICES_OVERRIDE -e '
'"ce.telephony_monitor_enable" "true"')
# Sub ID setup
initial_set_up_for_subid_infomation(self.log, ad)
if "enable_wifi_verbose_logging" in self.user_params:
ad.droid.wifiEnableVerboseLogging(WIFI_VERBOSE_LOGGING_ENABLED)
# If device is setup already, skip the following setup procedures
if getattr(ad, "telephony_test_setup", None):
continue
# Disable Emergency alerts
# Set chrome browser start with no-first-run verification and
# disable-fre. Give permission to read from and write to storage.
for cmd in (
"am start -n com.google.android.setupwizard/."
"SetupWizardExitActivity",
"pm disable com.android.cellbroadcastreceiver",
"pm grant com.android.chrome "
"android.permission.READ_EXTERNAL_STORAGE",
"pm grant com.android.chrome "
"android.permission.WRITE_EXTERNAL_STORAGE",
"rm /data/local/chrome-command-line",
"am set-debug-app --persistent com.android.chrome",
'echo "chrome --no-default-browser-check --no-first-run '
'--disable-fre" > /data/local/tmp/chrome-command-line'):
ad.adb.shell(cmd)
# Ensure that a test class starts from a consistent state that
# improves chances of valid network selection and facilitates
# logging.
try:
if not set_phone_screen_on(self.log, ad):
self.log.error("Failed to set phone screen-on time.")
return False
if not set_phone_silent_mode(self.log, ad):
self.log.error("Failed to set phone silent mode.")
return False
ad.droid.telephonyAdjustPreciseCallStateListenLevel(
PRECISE_CALL_STATE_LISTEN_LEVEL_FOREGROUND, True)
ad.droid.telephonyAdjustPreciseCallStateListenLevel(
PRECISE_CALL_STATE_LISTEN_LEVEL_RINGING, True)
ad.droid.telephonyAdjustPreciseCallStateListenLevel(
PRECISE_CALL_STATE_LISTEN_LEVEL_BACKGROUND, True)
except Exception as e:
self.log.error("Failure with %s", e)
setattr(ad, "telephony_test_setup", True)
return True
def teardown_class(self):
try:
for ad in self.android_devices:
if "enable_wifi_verbose_logging" in self.user_params:
ad.droid.wifiEnableVerboseLogging(
WIFI_VERBOSE_LOGGING_DISABLED)
return True
except Exception as e:
self.log.error("Failure with %s", e)
def setup_test(self):
if getattr(self, "diag_logger", None):
for logger in self.diag_logger:
self.log.info("Starting a diagnostic session %s", logger)
self.logger_sessions.append((logger, logger.start()))
if self.skip_reset_between_cases:
ensure_phones_idle(self.log, self.android_devices)
ensure_phones_default_state(self.log, self.android_devices)
def teardown_test(self):
return True
def on_exception(self, test_name, begin_time):
self._pull_diag_logs(test_name, begin_time)
self._take_bug_report(test_name, begin_time)
self._cleanup_logger_sessions()
def on_fail(self, test_name, begin_time):
self._pull_diag_logs(test_name, begin_time)
self._take_bug_report(test_name, begin_time)
self._cleanup_logger_sessions()
def on_pass(self, test_name, begin_time):
self._cleanup_logger_sessions()
def get_stress_test_number(self):
"""Gets the stress_test_number param from user params.
Gets the stress_test_number param. If absent, returns default 100.
"""
return int(self.user_params.get("stress_test_number", 100))
| _safe_wrap_test_case | identifier_name |
reader.go | package gospelmaria
import (
"context"
"database/sql"
"errors"
"fmt"
"strings"
"time"
"github.com/VividCortex/ewma"
"github.com/jmalloc/gospel/src/gospel"
"github.com/jmalloc/gospel/src/internal/metrics"
"github.com/jmalloc/gospel/src/internal/options"
"github.com/jmalloc/twelf/src/twelf"
"golang.org/x/time/rate"
)
const (
// averageLatencyAge is average age of samples to keep when computing the
// average latency. A sample is taken after each poll.
//
// Averages are computed using an exponentially-weighted moving average.
// See https://github.com/VividCortex/ewma for more information.
averageLatencyAge = 20.0
)
// Reader is an interface for reading facts from a stream stored in MariaDB.
type Reader struct {
// stmt is a prepared statement used to query for facts.
// It accepts the stream offset as a parameter.
stmt *sql.Stmt
// logger is the target for debug logging. Readers do not perform general
// activity logging.
logger twelf.Logger
// facts is a channel on which facts are delivered to the caller of Next().
// A worker goroutine polls the database and delivers the facts to this
// channel.
facts chan gospel.Fact
// current is the fact returned by Get() until Next() is called again.
current *gospel.Fact
// next is the fact that will become "current" when Next() is called.
// If it is nil, no additional facts were available in the buffer on the
// previous call to Next().
next *gospel.Fact
// end is a signaling channel that is closed when the database polling
// goroutine fetches 0 facts.
end chan struct{}
// done is a signaling channel which is closed when the database polling
// goroutine returns. The error that caused the closure, if any, is sent to
// the channel before it closed. This means a pending call to Next() will
// return the error when it first occurs, but subsequent calls will return
// a more generic "reader is closed" error.
done chan error
// ctx is a context that is canceled when Close() is called, or when the
// database polling goroutine returns. It is used to abort any in-progress
// database queries or rate-limit pauses when the reader is closed.
//
// Context cancellation errors are not sent to the 'done' channel, so any
// pending Next() call will receive a generic "reader is closed" error.
ctx context.Context
cancel func()
// addr is the starting address for the next database poll.
addr gospel.Address
// globalLimit is a rate-limiter that limits the number of polling queries
// that can be performed each second. It is shared by all readers, and hence
// provides a global cap of the number of read queries per second.
globalLimit *rate.Limiter
// adaptiveLimit is a rate-limiter that is adjusted on-the-fly in an attempt
// to balance the number of database polls against the latency of facts.
// It is not shared by other readers.
adaptiveLimit *rate.Limiter
// acceptableLatency is the amount of latency that is generally acceptable
// for the purposes of this reader. The reader will attempt to maintain this
// latency by adjusting its polling rate.
acceptableLatency time.Duration
// starvationLatency is the amount of latency that is acceptable once the
// reader has reached the end of the stream and is "starving" for facts.
// This setting informs the minimum poll rate.
starvationLatency time.Duration
// instantaneousLatency is the latency computed from the facts returend by
// the most recent database poll. If there are no facts the latency is 0.
instantaneousLatency time.Duration
// averageLatency tracks the average latency of the last 10 database polls.
// The average latency is weighed against the acceptableLatency and
// starvationLatency values to decide how the poll rate is adjusted.
averageLatency ewma.MovingAverage
// debug contains several properties that are only relevant when the reader
// is using a debug logger.
debug *readerDebug
}
// readerDebug contains several properties that are only relevant when the
// reader is using a debug logger.
type readerDebug struct {
// opts is the options specified when opening the reader.
opts *options.ReaderOptions
// averagePollRate keeps track of the average polling rate, which can be
// substantially lower than the adaptive limit for slow readers.
averagePollRate *metrics.RateCounter
// averageFactRate keeps track of the average rate of delivery of facts.
averageFactRate *metrics.RateCounter
// previousPollRate is compared to the poll rate after each poll to
// determine whether a log message should be displayed.
previousPollRate rate.Limit
// muteEmptyPolls is true if the previous database poll did not return any
// facts. It is only used to mute repeated debug messages if there is no new
// information to report.
muteEmptyPolls bool
}
// errReaderClosed is an error returned by Next() when it is called on a closed
// reader, or when the reader is closed while a call to Next() is pending.
var errReaderClosed = errors.New("reader is closed")
// openReader returns a new reader that begins at addr.
func openReader(
ctx context.Context,
db *sql.DB,
storeID uint64,
addr gospel.Address,
limit *rate.Limiter,
logger twelf.Logger,
opts *options.ReaderOptions,
) (*Reader, error) {
// Note that runCtx is NOT derived from ctx, which is only used for the
// opening of the reader itself.
runCtx, cancel := context.WithCancel(context.Background())
accetableLatency := getAcceptableLatency(opts) | done: make(chan error, 1),
ctx: runCtx,
cancel: cancel,
addr: addr,
globalLimit: limit,
adaptiveLimit: rate.NewLimiter(rate.Every(accetableLatency), 1),
acceptableLatency: accetableLatency,
starvationLatency: getStarvationLatency(opts),
averageLatency: ewma.NewMovingAverage(averageLatencyAge),
}
if logger.IsDebug() {
r.debug = &readerDebug{
opts: opts,
averagePollRate: metrics.NewRateCounter(),
averageFactRate: metrics.NewRateCounter(),
}
}
if err := r.prepareStatement(ctx, db, storeID, opts); err != nil {
return nil, err
}
r.logInitialization()
go r.run()
return r, nil
}
// Next blocks until a fact is available for reading or ctx is canceled.
//
// If err is nil, the "current" fact is ready to be returned by Get().
//
// nx is the offset within the stream that the reader has reached. It can be
// used to efficiently resume reading in a future call to EventStore.Open().
//
// Note that nx is not always the address immediately following the fact
// returned by Get() - it may be "further ahead" in the stream, this skipping
// over any facts that the reader is not interested in.
func (r *Reader) Next(ctx context.Context) (nx gospel.Address, err error) {
nx, _, err = r.tryNext(ctx, nil)
return nx, err
}
// TryNext blocks until the next fact is available for reading, the end of
// stream is reached, or ctx is canceled.
//
// If ok is true, a new fact is available and is ready to be returned by
// Get(). ok is false if the current fact is the last known fact in the
// stream.
//
// nx is the offset within the stream that the reader has reached. It can be
// used to efficiently resume reading in a future call to EventStore.Open().
// nx is invalid if ok is false.
func (r *Reader) TryNext(ctx context.Context) (nx gospel.Address, ok bool, err error) {
return r.tryNext(ctx, r.end)
}
func (r *Reader) tryNext(ctx context.Context, end <-chan struct{}) (nx gospel.Address, ok bool, err error) {
if r.next == nil {
select {
case f := <-r.facts:
r.current = &f
ok = true
case <-end:
// no fact is available, return with ok == false
return
case <-ctx.Done():
err = ctx.Err()
return
case err = <-r.done:
if err == nil {
err = errReaderClosed
}
return
}
} else {
r.current = r.next
r.next = nil
ok = true
}
// Perform a non-blocking lookahead to see if we have the next fact already.
select {
case f := <-r.facts:
r.next = &f
nx = r.next.Addr
default:
// assume next is literally the next fact on the stream
nx = r.current.Addr.Next()
}
return
}
// Get returns the "current" fact.
//
// It panics if Next() has not been called.
// Get() returns the same Fact until Next() is called again.
func (r *Reader) Get() gospel.Fact {
if r.current == nil {
panic("Next() must be called before calling Get()")
}
return *r.current
}
// Close closes the reader.
func (r *Reader) Close() error {
select {
case err := <-r.done:
return err
default:
r.cancel()
return <-r.done
}
}
// prepareStatement creates r.stmt, an SQL prepared statement used to poll
// for new facts.
func (r *Reader) prepareStatement(
ctx context.Context,
db *sql.DB,
storeID uint64,
opts *options.ReaderOptions,
) error {
filter := ""
if opts.FilterByEventType {
types := strings.Join(escapeStrings(opts.EventTypes), `, `)
filter = `AND e.event_type IN (` + types + `)`
}
query := fmt.Sprintf(
`SELECT
f.offset,
f.time,
e.event_type,
e.content_type,
e.body,
CURRENT_TIMESTAMP(6)
FROM fact AS f
INNER JOIN event AS e
ON e.id = f.event_id
%s
WHERE f.store_id = %d
AND f.stream = %s
AND f.offset >= ?
ORDER BY offset
LIMIT %d`,
filter,
storeID,
escapeString(r.addr.Stream),
cap(r.facts),
)
stmt, err := db.PrepareContext(ctx, query)
if err != nil {
return err
}
r.stmt = stmt
return nil
}
// run polls the database for facts and sends them to r.facts until r.ctx is
// canceled or an error occurs.
func (r *Reader) run() {
defer r.cancel()
defer close(r.done)
defer r.stmt.Close()
var err error
for err == nil {
err = r.tick()
}
if err != context.Canceled {
r.done <- err
}
}
// tick executes one pass of the worker goroutine.
func (r *Reader) tick() error {
if err := r.globalLimit.Wait(r.ctx); err != nil {
return err
}
if err := r.adaptiveLimit.Wait(r.ctx); err != nil {
return err
}
count, err := r.poll()
if err != nil {
return err
}
r.adjustRate()
r.logPoll(count)
return nil
}
// fetch queries the database for facts beginning at r.addr.
func (r *Reader) poll() (int, error) {
rows, err := r.stmt.QueryContext(
r.ctx,
r.addr.Offset,
)
if err != nil {
return 0, err
}
defer rows.Close()
f := gospel.Fact{
Addr: r.addr,
}
count := 0
var first, now time.Time
for rows.Next() {
if err := rows.Scan(
&f.Addr.Offset,
&f.Time,
&f.Event.EventType,
&f.Event.ContentType,
&f.Event.Body,
&now,
); err != nil {
return count, err
}
select {
case r.facts <- f:
case <-r.ctx.Done():
return count, r.ctx.Err()
}
r.addr = f.Addr.Next()
// keep the time of the first fact in the result to compute the maximum
// instantaneous latency for this poll.
if count == 0 {
first = f.Time
}
count++
if r.debug != nil {
r.debug.averageFactRate.Tick()
}
}
// TODO: this doesn't account for the time spent waiting to write to r.facts.
r.instantaneousLatency = now.Sub(first)
r.averageLatency.Add(r.instantaneousLatency.Seconds())
if count == 0 {
select {
case r.end <- struct{}{}:
default:
}
}
return count, nil
}
// setRate sets the adaptive polling rate, capped between the mininum (set by
// r.starvationLatency) and the maximum (set by the global rate limit).
func (r *Reader) setRate(lim rate.Limit) bool {
min := rate.Every(r.starvationLatency)
max := r.globalLimit.Limit()
if lim < min {
lim = min
} else if lim > max {
lim = max
}
prev := r.adaptiveLimit.Limit()
if lim != prev {
r.adaptiveLimit.SetLimit(lim)
return true
}
return false
}
// adjustRate updates the adaptive poll rate in an attempt to balance database
// poll frequency with latency.
func (r *Reader) adjustRate() bool {
latency := r.effectiveLatency()
// headroom is the difference between the acceptable latency and the
// effective latency. If the headroom is positive, we're doing 'better' than
// the acceptable latency and can backoff the poll rate.
headroom := r.acceptableLatency - latency
// don't back off if our headroom is less than 25%
// if headroom > 0 && headroom < r.acceptableLatency/25 {
// return false
// }
// Get the current rate in terms of an interval.
currentInterval := metrics.RateToDuration(
r.adaptiveLimit.Limit(),
)
return r.setRate(
rate.Every(currentInterval + headroom),
)
}
// effectiveLatency returns the latency used to adjust the poll rate.
//
// The rolling average needs to be primed with several samples before the
// average is available, until then it reports zero, in which case the
// instantaneousLatency value is used instead.
func (r *Reader) effectiveLatency() time.Duration {
latency := r.averageLatency.Value()
if latency == 0 {
return r.instantaneousLatency
}
return time.Duration(
latency * float64(time.Second),
)
}
// logInitialization logs a debug message describing the reader settings.
func (r *Reader) logInitialization() {
if !r.logger.IsDebug() {
return
}
filter := "*"
if r.debug.opts.FilterByEventType {
filter = strings.Join(r.debug.opts.EventTypes, ", ")
}
r.logger.Debug(
"[reader %p] %s | global poll limit: %s | acceptable latency: %s | starvation latency: %s | read-buffer: %d | filter: %s",
r,
r.addr,
formatRate(r.globalLimit.Limit()),
formatDuration(r.acceptableLatency),
formatDuration(r.starvationLatency),
getReadBufferSize(r.debug.opts),
filter,
)
}
// logPoll logs a debug message containing metrics for the previous poll and
// adjustments to the adaptive poll rate.
func (r *Reader) logPoll(count int) {
if r.debug == nil {
return
}
r.debug.averagePollRate.Tick()
pollRate := r.adaptiveLimit.Limit()
if pollRate == r.debug.previousPollRate &&
count == 0 && r.debug.muteEmptyPolls {
return
}
r.debug.muteEmptyPolls = count == 0
r.logger.Debug(
"[reader %p] %s | fetch: %3d %s | queue: %3d/%3d | adaptive poll: %s | avg poll: %s | latency: %s",
r,
r.addr,
count,
formatRate(rate.Limit(r.debug.averageFactRate.Rate())),
len(r.facts),
cap(r.facts),
formatRate(r.adaptiveLimit.Limit()),
formatRate(rate.Limit(r.debug.averagePollRate.Rate())),
formatDuration(r.effectiveLatency()),
)
r.debug.previousPollRate = pollRate
}
// formatRate formats a rate limit for display in reader debug logs.
func formatRate(r rate.Limit) string {
if r == 0 {
// "500.00/s 2.00ms"
return " ?.??/s ?.??µs"
}
d := metrics.RateToDuration(r)
return fmt.Sprintf(
"%6.02f/s %s",
r,
formatDuration(d),
)
}
// formatDuration formats a duration for display in reader debug logs.
func formatDuration(d time.Duration) string {
if d >= time.Hour {
return fmt.Sprintf("%6.02fh ", d.Seconds()/3600)
} else if d >= time.Minute {
return fmt.Sprintf("%6.02fm ", d.Seconds()/60)
} else if d >= time.Second {
return fmt.Sprintf("%6.02fs ", d.Seconds())
} else if d >= time.Millisecond {
return fmt.Sprintf("%6.02fms", d.Seconds()/time.Millisecond.Seconds())
}
return fmt.Sprintf("%6.02fµs", d.Seconds()/time.Microsecond.Seconds())
} |
r := &Reader{
logger: logger,
facts: make(chan gospel.Fact, getReadBufferSize(opts)),
end: make(chan struct{}), | random_line_split |
reader.go | package gospelmaria
import (
"context"
"database/sql"
"errors"
"fmt"
"strings"
"time"
"github.com/VividCortex/ewma"
"github.com/jmalloc/gospel/src/gospel"
"github.com/jmalloc/gospel/src/internal/metrics"
"github.com/jmalloc/gospel/src/internal/options"
"github.com/jmalloc/twelf/src/twelf"
"golang.org/x/time/rate"
)
const (
// averageLatencyAge is average age of samples to keep when computing the
// average latency. A sample is taken after each poll.
//
// Averages are computed using an exponentially-weighted moving average.
// See https://github.com/VividCortex/ewma for more information.
averageLatencyAge = 20.0
)
// Reader is an interface for reading facts from a stream stored in MariaDB.
type Reader struct {
// stmt is a prepared statement used to query for facts.
// It accepts the stream offset as a parameter.
stmt *sql.Stmt
// logger is the target for debug logging. Readers do not perform general
// activity logging.
logger twelf.Logger
// facts is a channel on which facts are delivered to the caller of Next().
// A worker goroutine polls the database and delivers the facts to this
// channel.
facts chan gospel.Fact
// current is the fact returned by Get() until Next() is called again.
current *gospel.Fact
// next is the fact that will become "current" when Next() is called.
// If it is nil, no additional facts were available in the buffer on the
// previous call to Next().
next *gospel.Fact
// end is a signaling channel that is closed when the database polling
// goroutine fetches 0 facts.
end chan struct{}
// done is a signaling channel which is closed when the database polling
// goroutine returns. The error that caused the closure, if any, is sent to
// the channel before it closed. This means a pending call to Next() will
// return the error when it first occurs, but subsequent calls will return
// a more generic "reader is closed" error.
done chan error
// ctx is a context that is canceled when Close() is called, or when the
// database polling goroutine returns. It is used to abort any in-progress
// database queries or rate-limit pauses when the reader is closed.
//
// Context cancellation errors are not sent to the 'done' channel, so any
// pending Next() call will receive a generic "reader is closed" error.
ctx context.Context
cancel func()
// addr is the starting address for the next database poll.
addr gospel.Address
// globalLimit is a rate-limiter that limits the number of polling queries
// that can be performed each second. It is shared by all readers, and hence
// provides a global cap of the number of read queries per second.
globalLimit *rate.Limiter
// adaptiveLimit is a rate-limiter that is adjusted on-the-fly in an attempt
// to balance the number of database polls against the latency of facts.
// It is not shared by other readers.
adaptiveLimit *rate.Limiter
// acceptableLatency is the amount of latency that is generally acceptable
// for the purposes of this reader. The reader will attempt to maintain this
// latency by adjusting its polling rate.
acceptableLatency time.Duration
// starvationLatency is the amount of latency that is acceptable once the
// reader has reached the end of the stream and is "starving" for facts.
// This setting informs the minimum poll rate.
starvationLatency time.Duration
// instantaneousLatency is the latency computed from the facts returend by
// the most recent database poll. If there are no facts the latency is 0.
instantaneousLatency time.Duration
// averageLatency tracks the average latency of the last 10 database polls.
// The average latency is weighed against the acceptableLatency and
// starvationLatency values to decide how the poll rate is adjusted.
averageLatency ewma.MovingAverage
// debug contains several properties that are only relevant when the reader
// is using a debug logger.
debug *readerDebug
}
// readerDebug contains several properties that are only relevant when the
// reader is using a debug logger.
type readerDebug struct {
// opts is the options specified when opening the reader.
opts *options.ReaderOptions
// averagePollRate keeps track of the average polling rate, which can be
// substantially lower than the adaptive limit for slow readers.
averagePollRate *metrics.RateCounter
// averageFactRate keeps track of the average rate of delivery of facts.
averageFactRate *metrics.RateCounter
// previousPollRate is compared to the poll rate after each poll to
// determine whether a log message should be displayed.
previousPollRate rate.Limit
// muteEmptyPolls is true if the previous database poll did not return any
// facts. It is only used to mute repeated debug messages if there is no new
// information to report.
muteEmptyPolls bool
}
// errReaderClosed is an error returned by Next() when it is called on a closed
// reader, or when the reader is closed while a call to Next() is pending.
var errReaderClosed = errors.New("reader is closed")
// openReader returns a new reader that begins at addr.
func openReader(
ctx context.Context,
db *sql.DB,
storeID uint64,
addr gospel.Address,
limit *rate.Limiter,
logger twelf.Logger,
opts *options.ReaderOptions,
) (*Reader, error) {
// Note that runCtx is NOT derived from ctx, which is only used for the
// opening of the reader itself.
runCtx, cancel := context.WithCancel(context.Background())
accetableLatency := getAcceptableLatency(opts)
r := &Reader{
logger: logger,
facts: make(chan gospel.Fact, getReadBufferSize(opts)),
end: make(chan struct{}),
done: make(chan error, 1),
ctx: runCtx,
cancel: cancel,
addr: addr,
globalLimit: limit,
adaptiveLimit: rate.NewLimiter(rate.Every(accetableLatency), 1),
acceptableLatency: accetableLatency,
starvationLatency: getStarvationLatency(opts),
averageLatency: ewma.NewMovingAverage(averageLatencyAge),
}
if logger.IsDebug() {
r.debug = &readerDebug{
opts: opts,
averagePollRate: metrics.NewRateCounter(),
averageFactRate: metrics.NewRateCounter(),
}
}
if err := r.prepareStatement(ctx, db, storeID, opts); err != nil {
return nil, err
}
r.logInitialization()
go r.run()
return r, nil
}
// Next blocks until a fact is available for reading or ctx is canceled.
//
// If err is nil, the "current" fact is ready to be returned by Get().
//
// nx is the offset within the stream that the reader has reached. It can be
// used to efficiently resume reading in a future call to EventStore.Open().
//
// Note that nx is not always the address immediately following the fact
// returned by Get() - it may be "further ahead" in the stream, this skipping
// over any facts that the reader is not interested in.
func (r *Reader) Next(ctx context.Context) (nx gospel.Address, err error) {
nx, _, err = r.tryNext(ctx, nil)
return nx, err
}
// TryNext blocks until the next fact is available for reading, the end of
// stream is reached, or ctx is canceled.
//
// If ok is true, a new fact is available and is ready to be returned by
// Get(). ok is false if the current fact is the last known fact in the
// stream.
//
// nx is the offset within the stream that the reader has reached. It can be
// used to efficiently resume reading in a future call to EventStore.Open().
// nx is invalid if ok is false.
func (r *Reader) TryNext(ctx context.Context) (nx gospel.Address, ok bool, err error) {
return r.tryNext(ctx, r.end)
}
func (r *Reader) tryNext(ctx context.Context, end <-chan struct{}) (nx gospel.Address, ok bool, err error) {
if r.next == nil {
select {
case f := <-r.facts:
r.current = &f
ok = true
case <-end:
// no fact is available, return with ok == false
return
case <-ctx.Done():
err = ctx.Err()
return
case err = <-r.done:
if err == nil {
err = errReaderClosed
}
return
}
} else {
r.current = r.next
r.next = nil
ok = true
}
// Perform a non-blocking lookahead to see if we have the next fact already.
select {
case f := <-r.facts:
r.next = &f
nx = r.next.Addr
default:
// assume next is literally the next fact on the stream
nx = r.current.Addr.Next()
}
return
}
// Get returns the "current" fact.
//
// It panics if Next() has not been called.
// Get() returns the same Fact until Next() is called again.
func (r *Reader) Get() gospel.Fact {
if r.current == nil {
panic("Next() must be called before calling Get()")
}
return *r.current
}
// Close closes the reader.
func (r *Reader) Close() error {
select {
case err := <-r.done:
return err
default:
r.cancel()
return <-r.done
}
}
// prepareStatement creates r.stmt, an SQL prepared statement used to poll
// for new facts.
func (r *Reader) prepareStatement(
ctx context.Context,
db *sql.DB,
storeID uint64,
opts *options.ReaderOptions,
) error {
filter := ""
if opts.FilterByEventType {
types := strings.Join(escapeStrings(opts.EventTypes), `, `)
filter = `AND e.event_type IN (` + types + `)`
}
query := fmt.Sprintf(
`SELECT
f.offset,
f.time,
e.event_type,
e.content_type,
e.body,
CURRENT_TIMESTAMP(6)
FROM fact AS f
INNER JOIN event AS e
ON e.id = f.event_id
%s
WHERE f.store_id = %d
AND f.stream = %s
AND f.offset >= ?
ORDER BY offset
LIMIT %d`,
filter,
storeID,
escapeString(r.addr.Stream),
cap(r.facts),
)
stmt, err := db.PrepareContext(ctx, query)
if err != nil {
return err
}
r.stmt = stmt
return nil
}
// run polls the database for facts and sends them to r.facts until r.ctx is
// canceled or an error occurs.
func (r *Reader) run() {
defer r.cancel()
defer close(r.done)
defer r.stmt.Close()
var err error
for err == nil {
err = r.tick()
}
if err != context.Canceled {
r.done <- err
}
}
// tick executes one pass of the worker goroutine.
func (r *Reader) tick() error {
if err := r.globalLimit.Wait(r.ctx); err != nil {
return err
}
if err := r.adaptiveLimit.Wait(r.ctx); err != nil {
return err
}
count, err := r.poll()
if err != nil {
return err
}
r.adjustRate()
r.logPoll(count)
return nil
}
// fetch queries the database for facts beginning at r.addr.
func (r *Reader) | () (int, error) {
rows, err := r.stmt.QueryContext(
r.ctx,
r.addr.Offset,
)
if err != nil {
return 0, err
}
defer rows.Close()
f := gospel.Fact{
Addr: r.addr,
}
count := 0
var first, now time.Time
for rows.Next() {
if err := rows.Scan(
&f.Addr.Offset,
&f.Time,
&f.Event.EventType,
&f.Event.ContentType,
&f.Event.Body,
&now,
); err != nil {
return count, err
}
select {
case r.facts <- f:
case <-r.ctx.Done():
return count, r.ctx.Err()
}
r.addr = f.Addr.Next()
// keep the time of the first fact in the result to compute the maximum
// instantaneous latency for this poll.
if count == 0 {
first = f.Time
}
count++
if r.debug != nil {
r.debug.averageFactRate.Tick()
}
}
// TODO: this doesn't account for the time spent waiting to write to r.facts.
r.instantaneousLatency = now.Sub(first)
r.averageLatency.Add(r.instantaneousLatency.Seconds())
if count == 0 {
select {
case r.end <- struct{}{}:
default:
}
}
return count, nil
}
// setRate sets the adaptive polling rate, capped between the mininum (set by
// r.starvationLatency) and the maximum (set by the global rate limit).
func (r *Reader) setRate(lim rate.Limit) bool {
min := rate.Every(r.starvationLatency)
max := r.globalLimit.Limit()
if lim < min {
lim = min
} else if lim > max {
lim = max
}
prev := r.adaptiveLimit.Limit()
if lim != prev {
r.adaptiveLimit.SetLimit(lim)
return true
}
return false
}
// adjustRate updates the adaptive poll rate in an attempt to balance database
// poll frequency with latency.
func (r *Reader) adjustRate() bool {
latency := r.effectiveLatency()
// headroom is the difference between the acceptable latency and the
// effective latency. If the headroom is positive, we're doing 'better' than
// the acceptable latency and can backoff the poll rate.
headroom := r.acceptableLatency - latency
// don't back off if our headroom is less than 25%
// if headroom > 0 && headroom < r.acceptableLatency/25 {
// return false
// }
// Get the current rate in terms of an interval.
currentInterval := metrics.RateToDuration(
r.adaptiveLimit.Limit(),
)
return r.setRate(
rate.Every(currentInterval + headroom),
)
}
// effectiveLatency returns the latency used to adjust the poll rate.
//
// The rolling average needs to be primed with several samples before the
// average is available, until then it reports zero, in which case the
// instantaneousLatency value is used instead.
func (r *Reader) effectiveLatency() time.Duration {
latency := r.averageLatency.Value()
if latency == 0 {
return r.instantaneousLatency
}
return time.Duration(
latency * float64(time.Second),
)
}
// logInitialization logs a debug message describing the reader settings.
func (r *Reader) logInitialization() {
if !r.logger.IsDebug() {
return
}
filter := "*"
if r.debug.opts.FilterByEventType {
filter = strings.Join(r.debug.opts.EventTypes, ", ")
}
r.logger.Debug(
"[reader %p] %s | global poll limit: %s | acceptable latency: %s | starvation latency: %s | read-buffer: %d | filter: %s",
r,
r.addr,
formatRate(r.globalLimit.Limit()),
formatDuration(r.acceptableLatency),
formatDuration(r.starvationLatency),
getReadBufferSize(r.debug.opts),
filter,
)
}
// logPoll logs a debug message containing metrics for the previous poll and
// adjustments to the adaptive poll rate.
func (r *Reader) logPoll(count int) {
if r.debug == nil {
return
}
r.debug.averagePollRate.Tick()
pollRate := r.adaptiveLimit.Limit()
if pollRate == r.debug.previousPollRate &&
count == 0 && r.debug.muteEmptyPolls {
return
}
r.debug.muteEmptyPolls = count == 0
r.logger.Debug(
"[reader %p] %s | fetch: %3d %s | queue: %3d/%3d | adaptive poll: %s | avg poll: %s | latency: %s",
r,
r.addr,
count,
formatRate(rate.Limit(r.debug.averageFactRate.Rate())),
len(r.facts),
cap(r.facts),
formatRate(r.adaptiveLimit.Limit()),
formatRate(rate.Limit(r.debug.averagePollRate.Rate())),
formatDuration(r.effectiveLatency()),
)
r.debug.previousPollRate = pollRate
}
// formatRate formats a rate limit for display in reader debug logs.
func formatRate(r rate.Limit) string {
if r == 0 {
// "500.00/s 2.00ms"
return " ?.??/s ?.??µs"
}
d := metrics.RateToDuration(r)
return fmt.Sprintf(
"%6.02f/s %s",
r,
formatDuration(d),
)
}
// formatDuration formats a duration for display in reader debug logs.
func formatDuration(d time.Duration) string {
if d >= time.Hour {
return fmt.Sprintf("%6.02fh ", d.Seconds()/3600)
} else if d >= time.Minute {
return fmt.Sprintf("%6.02fm ", d.Seconds()/60)
} else if d >= time.Second {
return fmt.Sprintf("%6.02fs ", d.Seconds())
} else if d >= time.Millisecond {
return fmt.Sprintf("%6.02fms", d.Seconds()/time.Millisecond.Seconds())
}
return fmt.Sprintf("%6.02fµs", d.Seconds()/time.Microsecond.Seconds())
}
| poll | identifier_name |
reader.go | package gospelmaria
import (
"context"
"database/sql"
"errors"
"fmt"
"strings"
"time"
"github.com/VividCortex/ewma"
"github.com/jmalloc/gospel/src/gospel"
"github.com/jmalloc/gospel/src/internal/metrics"
"github.com/jmalloc/gospel/src/internal/options"
"github.com/jmalloc/twelf/src/twelf"
"golang.org/x/time/rate"
)
const (
// averageLatencyAge is average age of samples to keep when computing the
// average latency. A sample is taken after each poll.
//
// Averages are computed using an exponentially-weighted moving average.
// See https://github.com/VividCortex/ewma for more information.
averageLatencyAge = 20.0
)
// Reader is an interface for reading facts from a stream stored in MariaDB.
type Reader struct {
// stmt is a prepared statement used to query for facts.
// It accepts the stream offset as a parameter.
stmt *sql.Stmt
// logger is the target for debug logging. Readers do not perform general
// activity logging.
logger twelf.Logger
// facts is a channel on which facts are delivered to the caller of Next().
// A worker goroutine polls the database and delivers the facts to this
// channel.
facts chan gospel.Fact
// current is the fact returned by Get() until Next() is called again.
current *gospel.Fact
// next is the fact that will become "current" when Next() is called.
// If it is nil, no additional facts were available in the buffer on the
// previous call to Next().
next *gospel.Fact
// end is a signaling channel that is closed when the database polling
// goroutine fetches 0 facts.
end chan struct{}
// done is a signaling channel which is closed when the database polling
// goroutine returns. The error that caused the closure, if any, is sent to
// the channel before it closed. This means a pending call to Next() will
// return the error when it first occurs, but subsequent calls will return
// a more generic "reader is closed" error.
done chan error
// ctx is a context that is canceled when Close() is called, or when the
// database polling goroutine returns. It is used to abort any in-progress
// database queries or rate-limit pauses when the reader is closed.
//
// Context cancellation errors are not sent to the 'done' channel, so any
// pending Next() call will receive a generic "reader is closed" error.
ctx context.Context
cancel func()
// addr is the starting address for the next database poll.
addr gospel.Address
// globalLimit is a rate-limiter that limits the number of polling queries
// that can be performed each second. It is shared by all readers, and hence
// provides a global cap of the number of read queries per second.
globalLimit *rate.Limiter
// adaptiveLimit is a rate-limiter that is adjusted on-the-fly in an attempt
// to balance the number of database polls against the latency of facts.
// It is not shared by other readers.
adaptiveLimit *rate.Limiter
// acceptableLatency is the amount of latency that is generally acceptable
// for the purposes of this reader. The reader will attempt to maintain this
// latency by adjusting its polling rate.
acceptableLatency time.Duration
// starvationLatency is the amount of latency that is acceptable once the
// reader has reached the end of the stream and is "starving" for facts.
// This setting informs the minimum poll rate.
starvationLatency time.Duration
// instantaneousLatency is the latency computed from the facts returend by
// the most recent database poll. If there are no facts the latency is 0.
instantaneousLatency time.Duration
// averageLatency tracks the average latency of the last 10 database polls.
// The average latency is weighed against the acceptableLatency and
// starvationLatency values to decide how the poll rate is adjusted.
averageLatency ewma.MovingAverage
// debug contains several properties that are only relevant when the reader
// is using a debug logger.
debug *readerDebug
}
// readerDebug contains several properties that are only relevant when the
// reader is using a debug logger.
type readerDebug struct {
// opts is the options specified when opening the reader.
opts *options.ReaderOptions
// averagePollRate keeps track of the average polling rate, which can be
// substantially lower than the adaptive limit for slow readers.
averagePollRate *metrics.RateCounter
// averageFactRate keeps track of the average rate of delivery of facts.
averageFactRate *metrics.RateCounter
// previousPollRate is compared to the poll rate after each poll to
// determine whether a log message should be displayed.
previousPollRate rate.Limit
// muteEmptyPolls is true if the previous database poll did not return any
// facts. It is only used to mute repeated debug messages if there is no new
// information to report.
muteEmptyPolls bool
}
// errReaderClosed is an error returned by Next() when it is called on a closed
// reader, or when the reader is closed while a call to Next() is pending.
var errReaderClosed = errors.New("reader is closed")
// openReader returns a new reader that begins at addr.
func openReader(
ctx context.Context,
db *sql.DB,
storeID uint64,
addr gospel.Address,
limit *rate.Limiter,
logger twelf.Logger,
opts *options.ReaderOptions,
) (*Reader, error) |
// Next blocks until a fact is available for reading or ctx is canceled.
//
// If err is nil, the "current" fact is ready to be returned by Get().
//
// nx is the offset within the stream that the reader has reached. It can be
// used to efficiently resume reading in a future call to EventStore.Open().
//
// Note that nx is not always the address immediately following the fact
// returned by Get() - it may be "further ahead" in the stream, this skipping
// over any facts that the reader is not interested in.
func (r *Reader) Next(ctx context.Context) (nx gospel.Address, err error) {
nx, _, err = r.tryNext(ctx, nil)
return nx, err
}
// TryNext blocks until the next fact is available for reading, the end of
// stream is reached, or ctx is canceled.
//
// If ok is true, a new fact is available and is ready to be returned by
// Get(). ok is false if the current fact is the last known fact in the
// stream.
//
// nx is the offset within the stream that the reader has reached. It can be
// used to efficiently resume reading in a future call to EventStore.Open().
// nx is invalid if ok is false.
func (r *Reader) TryNext(ctx context.Context) (nx gospel.Address, ok bool, err error) {
return r.tryNext(ctx, r.end)
}
func (r *Reader) tryNext(ctx context.Context, end <-chan struct{}) (nx gospel.Address, ok bool, err error) {
if r.next == nil {
select {
case f := <-r.facts:
r.current = &f
ok = true
case <-end:
// no fact is available, return with ok == false
return
case <-ctx.Done():
err = ctx.Err()
return
case err = <-r.done:
if err == nil {
err = errReaderClosed
}
return
}
} else {
r.current = r.next
r.next = nil
ok = true
}
// Perform a non-blocking lookahead to see if we have the next fact already.
select {
case f := <-r.facts:
r.next = &f
nx = r.next.Addr
default:
// assume next is literally the next fact on the stream
nx = r.current.Addr.Next()
}
return
}
// Get returns the "current" fact.
//
// It panics if Next() has not been called.
// Get() returns the same Fact until Next() is called again.
func (r *Reader) Get() gospel.Fact {
if r.current == nil {
panic("Next() must be called before calling Get()")
}
return *r.current
}
// Close closes the reader.
func (r *Reader) Close() error {
select {
case err := <-r.done:
return err
default:
r.cancel()
return <-r.done
}
}
// prepareStatement creates r.stmt, an SQL prepared statement used to poll
// for new facts.
func (r *Reader) prepareStatement(
ctx context.Context,
db *sql.DB,
storeID uint64,
opts *options.ReaderOptions,
) error {
filter := ""
if opts.FilterByEventType {
types := strings.Join(escapeStrings(opts.EventTypes), `, `)
filter = `AND e.event_type IN (` + types + `)`
}
query := fmt.Sprintf(
`SELECT
f.offset,
f.time,
e.event_type,
e.content_type,
e.body,
CURRENT_TIMESTAMP(6)
FROM fact AS f
INNER JOIN event AS e
ON e.id = f.event_id
%s
WHERE f.store_id = %d
AND f.stream = %s
AND f.offset >= ?
ORDER BY offset
LIMIT %d`,
filter,
storeID,
escapeString(r.addr.Stream),
cap(r.facts),
)
stmt, err := db.PrepareContext(ctx, query)
if err != nil {
return err
}
r.stmt = stmt
return nil
}
// run polls the database for facts and sends them to r.facts until r.ctx is
// canceled or an error occurs.
func (r *Reader) run() {
defer r.cancel()
defer close(r.done)
defer r.stmt.Close()
var err error
for err == nil {
err = r.tick()
}
if err != context.Canceled {
r.done <- err
}
}
// tick executes one pass of the worker goroutine.
func (r *Reader) tick() error {
if err := r.globalLimit.Wait(r.ctx); err != nil {
return err
}
if err := r.adaptiveLimit.Wait(r.ctx); err != nil {
return err
}
count, err := r.poll()
if err != nil {
return err
}
r.adjustRate()
r.logPoll(count)
return nil
}
// fetch queries the database for facts beginning at r.addr.
func (r *Reader) poll() (int, error) {
rows, err := r.stmt.QueryContext(
r.ctx,
r.addr.Offset,
)
if err != nil {
return 0, err
}
defer rows.Close()
f := gospel.Fact{
Addr: r.addr,
}
count := 0
var first, now time.Time
for rows.Next() {
if err := rows.Scan(
&f.Addr.Offset,
&f.Time,
&f.Event.EventType,
&f.Event.ContentType,
&f.Event.Body,
&now,
); err != nil {
return count, err
}
select {
case r.facts <- f:
case <-r.ctx.Done():
return count, r.ctx.Err()
}
r.addr = f.Addr.Next()
// keep the time of the first fact in the result to compute the maximum
// instantaneous latency for this poll.
if count == 0 {
first = f.Time
}
count++
if r.debug != nil {
r.debug.averageFactRate.Tick()
}
}
// TODO: this doesn't account for the time spent waiting to write to r.facts.
r.instantaneousLatency = now.Sub(first)
r.averageLatency.Add(r.instantaneousLatency.Seconds())
if count == 0 {
select {
case r.end <- struct{}{}:
default:
}
}
return count, nil
}
// setRate sets the adaptive polling rate, capped between the mininum (set by
// r.starvationLatency) and the maximum (set by the global rate limit).
func (r *Reader) setRate(lim rate.Limit) bool {
min := rate.Every(r.starvationLatency)
max := r.globalLimit.Limit()
if lim < min {
lim = min
} else if lim > max {
lim = max
}
prev := r.adaptiveLimit.Limit()
if lim != prev {
r.adaptiveLimit.SetLimit(lim)
return true
}
return false
}
// adjustRate updates the adaptive poll rate in an attempt to balance database
// poll frequency with latency.
func (r *Reader) adjustRate() bool {
latency := r.effectiveLatency()
// headroom is the difference between the acceptable latency and the
// effective latency. If the headroom is positive, we're doing 'better' than
// the acceptable latency and can backoff the poll rate.
headroom := r.acceptableLatency - latency
// don't back off if our headroom is less than 25%
// if headroom > 0 && headroom < r.acceptableLatency/25 {
// return false
// }
// Get the current rate in terms of an interval.
currentInterval := metrics.RateToDuration(
r.adaptiveLimit.Limit(),
)
return r.setRate(
rate.Every(currentInterval + headroom),
)
}
// effectiveLatency returns the latency used to adjust the poll rate.
//
// The rolling average needs to be primed with several samples before the
// average is available, until then it reports zero, in which case the
// instantaneousLatency value is used instead.
func (r *Reader) effectiveLatency() time.Duration {
latency := r.averageLatency.Value()
if latency == 0 {
return r.instantaneousLatency
}
return time.Duration(
latency * float64(time.Second),
)
}
// logInitialization logs a debug message describing the reader settings.
func (r *Reader) logInitialization() {
if !r.logger.IsDebug() {
return
}
filter := "*"
if r.debug.opts.FilterByEventType {
filter = strings.Join(r.debug.opts.EventTypes, ", ")
}
r.logger.Debug(
"[reader %p] %s | global poll limit: %s | acceptable latency: %s | starvation latency: %s | read-buffer: %d | filter: %s",
r,
r.addr,
formatRate(r.globalLimit.Limit()),
formatDuration(r.acceptableLatency),
formatDuration(r.starvationLatency),
getReadBufferSize(r.debug.opts),
filter,
)
}
// logPoll logs a debug message containing metrics for the previous poll and
// adjustments to the adaptive poll rate.
func (r *Reader) logPoll(count int) {
if r.debug == nil {
return
}
r.debug.averagePollRate.Tick()
pollRate := r.adaptiveLimit.Limit()
if pollRate == r.debug.previousPollRate &&
count == 0 && r.debug.muteEmptyPolls {
return
}
r.debug.muteEmptyPolls = count == 0
r.logger.Debug(
"[reader %p] %s | fetch: %3d %s | queue: %3d/%3d | adaptive poll: %s | avg poll: %s | latency: %s",
r,
r.addr,
count,
formatRate(rate.Limit(r.debug.averageFactRate.Rate())),
len(r.facts),
cap(r.facts),
formatRate(r.adaptiveLimit.Limit()),
formatRate(rate.Limit(r.debug.averagePollRate.Rate())),
formatDuration(r.effectiveLatency()),
)
r.debug.previousPollRate = pollRate
}
// formatRate formats a rate limit for display in reader debug logs.
func formatRate(r rate.Limit) string {
if r == 0 {
// "500.00/s 2.00ms"
return " ?.??/s ?.??µs"
}
d := metrics.RateToDuration(r)
return fmt.Sprintf(
"%6.02f/s %s",
r,
formatDuration(d),
)
}
// formatDuration formats a duration for display in reader debug logs.
func formatDuration(d time.Duration) string {
if d >= time.Hour {
return fmt.Sprintf("%6.02fh ", d.Seconds()/3600)
} else if d >= time.Minute {
return fmt.Sprintf("%6.02fm ", d.Seconds()/60)
} else if d >= time.Second {
return fmt.Sprintf("%6.02fs ", d.Seconds())
} else if d >= time.Millisecond {
return fmt.Sprintf("%6.02fms", d.Seconds()/time.Millisecond.Seconds())
}
return fmt.Sprintf("%6.02fµs", d.Seconds()/time.Microsecond.Seconds())
}
| {
// Note that runCtx is NOT derived from ctx, which is only used for the
// opening of the reader itself.
runCtx, cancel := context.WithCancel(context.Background())
accetableLatency := getAcceptableLatency(opts)
r := &Reader{
logger: logger,
facts: make(chan gospel.Fact, getReadBufferSize(opts)),
end: make(chan struct{}),
done: make(chan error, 1),
ctx: runCtx,
cancel: cancel,
addr: addr,
globalLimit: limit,
adaptiveLimit: rate.NewLimiter(rate.Every(accetableLatency), 1),
acceptableLatency: accetableLatency,
starvationLatency: getStarvationLatency(opts),
averageLatency: ewma.NewMovingAverage(averageLatencyAge),
}
if logger.IsDebug() {
r.debug = &readerDebug{
opts: opts,
averagePollRate: metrics.NewRateCounter(),
averageFactRate: metrics.NewRateCounter(),
}
}
if err := r.prepareStatement(ctx, db, storeID, opts); err != nil {
return nil, err
}
r.logInitialization()
go r.run()
return r, nil
} | identifier_body |
reader.go | package gospelmaria
import (
"context"
"database/sql"
"errors"
"fmt"
"strings"
"time"
"github.com/VividCortex/ewma"
"github.com/jmalloc/gospel/src/gospel"
"github.com/jmalloc/gospel/src/internal/metrics"
"github.com/jmalloc/gospel/src/internal/options"
"github.com/jmalloc/twelf/src/twelf"
"golang.org/x/time/rate"
)
const (
// averageLatencyAge is average age of samples to keep when computing the
// average latency. A sample is taken after each poll.
//
// Averages are computed using an exponentially-weighted moving average.
// See https://github.com/VividCortex/ewma for more information.
averageLatencyAge = 20.0
)
// Reader is an interface for reading facts from a stream stored in MariaDB.
type Reader struct {
// stmt is a prepared statement used to query for facts.
// It accepts the stream offset as a parameter.
stmt *sql.Stmt
// logger is the target for debug logging. Readers do not perform general
// activity logging.
logger twelf.Logger
// facts is a channel on which facts are delivered to the caller of Next().
// A worker goroutine polls the database and delivers the facts to this
// channel.
facts chan gospel.Fact
// current is the fact returned by Get() until Next() is called again.
current *gospel.Fact
// next is the fact that will become "current" when Next() is called.
// If it is nil, no additional facts were available in the buffer on the
// previous call to Next().
next *gospel.Fact
// end is a signaling channel that is closed when the database polling
// goroutine fetches 0 facts.
end chan struct{}
// done is a signaling channel which is closed when the database polling
// goroutine returns. The error that caused the closure, if any, is sent to
// the channel before it closed. This means a pending call to Next() will
// return the error when it first occurs, but subsequent calls will return
// a more generic "reader is closed" error.
done chan error
// ctx is a context that is canceled when Close() is called, or when the
// database polling goroutine returns. It is used to abort any in-progress
// database queries or rate-limit pauses when the reader is closed.
//
// Context cancellation errors are not sent to the 'done' channel, so any
// pending Next() call will receive a generic "reader is closed" error.
ctx context.Context
cancel func()
// addr is the starting address for the next database poll.
addr gospel.Address
// globalLimit is a rate-limiter that limits the number of polling queries
// that can be performed each second. It is shared by all readers, and hence
// provides a global cap of the number of read queries per second.
globalLimit *rate.Limiter
// adaptiveLimit is a rate-limiter that is adjusted on-the-fly in an attempt
// to balance the number of database polls against the latency of facts.
// It is not shared by other readers.
adaptiveLimit *rate.Limiter
// acceptableLatency is the amount of latency that is generally acceptable
// for the purposes of this reader. The reader will attempt to maintain this
// latency by adjusting its polling rate.
acceptableLatency time.Duration
// starvationLatency is the amount of latency that is acceptable once the
// reader has reached the end of the stream and is "starving" for facts.
// This setting informs the minimum poll rate.
starvationLatency time.Duration
// instantaneousLatency is the latency computed from the facts returend by
// the most recent database poll. If there are no facts the latency is 0.
instantaneousLatency time.Duration
// averageLatency tracks the average latency of the last 10 database polls.
// The average latency is weighed against the acceptableLatency and
// starvationLatency values to decide how the poll rate is adjusted.
averageLatency ewma.MovingAverage
// debug contains several properties that are only relevant when the reader
// is using a debug logger.
debug *readerDebug
}
// readerDebug contains several properties that are only relevant when the
// reader is using a debug logger.
type readerDebug struct {
// opts is the options specified when opening the reader.
opts *options.ReaderOptions
// averagePollRate keeps track of the average polling rate, which can be
// substantially lower than the adaptive limit for slow readers.
averagePollRate *metrics.RateCounter
// averageFactRate keeps track of the average rate of delivery of facts.
averageFactRate *metrics.RateCounter
// previousPollRate is compared to the poll rate after each poll to
// determine whether a log message should be displayed.
previousPollRate rate.Limit
// muteEmptyPolls is true if the previous database poll did not return any
// facts. It is only used to mute repeated debug messages if there is no new
// information to report.
muteEmptyPolls bool
}
// errReaderClosed is an error returned by Next() when it is called on a closed
// reader, or when the reader is closed while a call to Next() is pending.
var errReaderClosed = errors.New("reader is closed")
// openReader returns a new reader that begins at addr.
func openReader(
ctx context.Context,
db *sql.DB,
storeID uint64,
addr gospel.Address,
limit *rate.Limiter,
logger twelf.Logger,
opts *options.ReaderOptions,
) (*Reader, error) {
// Note that runCtx is NOT derived from ctx, which is only used for the
// opening of the reader itself.
runCtx, cancel := context.WithCancel(context.Background())
accetableLatency := getAcceptableLatency(opts)
r := &Reader{
logger: logger,
facts: make(chan gospel.Fact, getReadBufferSize(opts)),
end: make(chan struct{}),
done: make(chan error, 1),
ctx: runCtx,
cancel: cancel,
addr: addr,
globalLimit: limit,
adaptiveLimit: rate.NewLimiter(rate.Every(accetableLatency), 1),
acceptableLatency: accetableLatency,
starvationLatency: getStarvationLatency(opts),
averageLatency: ewma.NewMovingAverage(averageLatencyAge),
}
if logger.IsDebug() {
r.debug = &readerDebug{
opts: opts,
averagePollRate: metrics.NewRateCounter(),
averageFactRate: metrics.NewRateCounter(),
}
}
if err := r.prepareStatement(ctx, db, storeID, opts); err != nil {
return nil, err
}
r.logInitialization()
go r.run()
return r, nil
}
// Next blocks until a fact is available for reading or ctx is canceled.
//
// If err is nil, the "current" fact is ready to be returned by Get().
//
// nx is the offset within the stream that the reader has reached. It can be
// used to efficiently resume reading in a future call to EventStore.Open().
//
// Note that nx is not always the address immediately following the fact
// returned by Get() - it may be "further ahead" in the stream, this skipping
// over any facts that the reader is not interested in.
func (r *Reader) Next(ctx context.Context) (nx gospel.Address, err error) {
nx, _, err = r.tryNext(ctx, nil)
return nx, err
}
// TryNext blocks until the next fact is available for reading, the end of
// stream is reached, or ctx is canceled.
//
// If ok is true, a new fact is available and is ready to be returned by
// Get(). ok is false if the current fact is the last known fact in the
// stream.
//
// nx is the offset within the stream that the reader has reached. It can be
// used to efficiently resume reading in a future call to EventStore.Open().
// nx is invalid if ok is false.
func (r *Reader) TryNext(ctx context.Context) (nx gospel.Address, ok bool, err error) {
return r.tryNext(ctx, r.end)
}
func (r *Reader) tryNext(ctx context.Context, end <-chan struct{}) (nx gospel.Address, ok bool, err error) {
if r.next == nil {
select {
case f := <-r.facts:
r.current = &f
ok = true
case <-end:
// no fact is available, return with ok == false
return
case <-ctx.Done():
err = ctx.Err()
return
case err = <-r.done:
if err == nil {
err = errReaderClosed
}
return
}
} else {
r.current = r.next
r.next = nil
ok = true
}
// Perform a non-blocking lookahead to see if we have the next fact already.
select {
case f := <-r.facts:
r.next = &f
nx = r.next.Addr
default:
// assume next is literally the next fact on the stream
nx = r.current.Addr.Next()
}
return
}
// Get returns the "current" fact.
//
// It panics if Next() has not been called.
// Get() returns the same Fact until Next() is called again.
func (r *Reader) Get() gospel.Fact {
if r.current == nil {
panic("Next() must be called before calling Get()")
}
return *r.current
}
// Close closes the reader.
func (r *Reader) Close() error {
select {
case err := <-r.done:
return err
default:
r.cancel()
return <-r.done
}
}
// prepareStatement creates r.stmt, an SQL prepared statement used to poll
// for new facts.
func (r *Reader) prepareStatement(
ctx context.Context,
db *sql.DB,
storeID uint64,
opts *options.ReaderOptions,
) error {
filter := ""
if opts.FilterByEventType {
types := strings.Join(escapeStrings(opts.EventTypes), `, `)
filter = `AND e.event_type IN (` + types + `)`
}
query := fmt.Sprintf(
`SELECT
f.offset,
f.time,
e.event_type,
e.content_type,
e.body,
CURRENT_TIMESTAMP(6)
FROM fact AS f
INNER JOIN event AS e
ON e.id = f.event_id
%s
WHERE f.store_id = %d
AND f.stream = %s
AND f.offset >= ?
ORDER BY offset
LIMIT %d`,
filter,
storeID,
escapeString(r.addr.Stream),
cap(r.facts),
)
stmt, err := db.PrepareContext(ctx, query)
if err != nil {
return err
}
r.stmt = stmt
return nil
}
// run polls the database for facts and sends them to r.facts until r.ctx is
// canceled or an error occurs.
func (r *Reader) run() {
defer r.cancel()
defer close(r.done)
defer r.stmt.Close()
var err error
for err == nil {
err = r.tick()
}
if err != context.Canceled {
r.done <- err
}
}
// tick executes one pass of the worker goroutine.
func (r *Reader) tick() error {
if err := r.globalLimit.Wait(r.ctx); err != nil {
return err
}
if err := r.adaptiveLimit.Wait(r.ctx); err != nil {
return err
}
count, err := r.poll()
if err != nil {
return err
}
r.adjustRate()
r.logPoll(count)
return nil
}
// fetch queries the database for facts beginning at r.addr.
func (r *Reader) poll() (int, error) {
rows, err := r.stmt.QueryContext(
r.ctx,
r.addr.Offset,
)
if err != nil {
return 0, err
}
defer rows.Close()
f := gospel.Fact{
Addr: r.addr,
}
count := 0
var first, now time.Time
for rows.Next() {
if err := rows.Scan(
&f.Addr.Offset,
&f.Time,
&f.Event.EventType,
&f.Event.ContentType,
&f.Event.Body,
&now,
); err != nil {
return count, err
}
select {
case r.facts <- f:
case <-r.ctx.Done():
return count, r.ctx.Err()
}
r.addr = f.Addr.Next()
// keep the time of the first fact in the result to compute the maximum
// instantaneous latency for this poll.
if count == 0 {
first = f.Time
}
count++
if r.debug != nil {
r.debug.averageFactRate.Tick()
}
}
// TODO: this doesn't account for the time spent waiting to write to r.facts.
r.instantaneousLatency = now.Sub(first)
r.averageLatency.Add(r.instantaneousLatency.Seconds())
if count == 0 {
select {
case r.end <- struct{}{}:
default:
}
}
return count, nil
}
// setRate sets the adaptive polling rate, capped between the mininum (set by
// r.starvationLatency) and the maximum (set by the global rate limit).
func (r *Reader) setRate(lim rate.Limit) bool {
min := rate.Every(r.starvationLatency)
max := r.globalLimit.Limit()
if lim < min {
lim = min
} else if lim > max {
lim = max
}
prev := r.adaptiveLimit.Limit()
if lim != prev {
r.adaptiveLimit.SetLimit(lim)
return true
}
return false
}
// adjustRate updates the adaptive poll rate in an attempt to balance database
// poll frequency with latency.
func (r *Reader) adjustRate() bool {
latency := r.effectiveLatency()
// headroom is the difference between the acceptable latency and the
// effective latency. If the headroom is positive, we're doing 'better' than
// the acceptable latency and can backoff the poll rate.
headroom := r.acceptableLatency - latency
// don't back off if our headroom is less than 25%
// if headroom > 0 && headroom < r.acceptableLatency/25 {
// return false
// }
// Get the current rate in terms of an interval.
currentInterval := metrics.RateToDuration(
r.adaptiveLimit.Limit(),
)
return r.setRate(
rate.Every(currentInterval + headroom),
)
}
// effectiveLatency returns the latency used to adjust the poll rate.
//
// The rolling average needs to be primed with several samples before the
// average is available, until then it reports zero, in which case the
// instantaneousLatency value is used instead.
func (r *Reader) effectiveLatency() time.Duration {
latency := r.averageLatency.Value()
if latency == 0 {
return r.instantaneousLatency
}
return time.Duration(
latency * float64(time.Second),
)
}
// logInitialization logs a debug message describing the reader settings.
func (r *Reader) logInitialization() {
if !r.logger.IsDebug() {
return
}
filter := "*"
if r.debug.opts.FilterByEventType {
filter = strings.Join(r.debug.opts.EventTypes, ", ")
}
r.logger.Debug(
"[reader %p] %s | global poll limit: %s | acceptable latency: %s | starvation latency: %s | read-buffer: %d | filter: %s",
r,
r.addr,
formatRate(r.globalLimit.Limit()),
formatDuration(r.acceptableLatency),
formatDuration(r.starvationLatency),
getReadBufferSize(r.debug.opts),
filter,
)
}
// logPoll logs a debug message containing metrics for the previous poll and
// adjustments to the adaptive poll rate.
func (r *Reader) logPoll(count int) {
if r.debug == nil {
return
}
r.debug.averagePollRate.Tick()
pollRate := r.adaptiveLimit.Limit()
if pollRate == r.debug.previousPollRate &&
count == 0 && r.debug.muteEmptyPolls {
return
}
r.debug.muteEmptyPolls = count == 0
r.logger.Debug(
"[reader %p] %s | fetch: %3d %s | queue: %3d/%3d | adaptive poll: %s | avg poll: %s | latency: %s",
r,
r.addr,
count,
formatRate(rate.Limit(r.debug.averageFactRate.Rate())),
len(r.facts),
cap(r.facts),
formatRate(r.adaptiveLimit.Limit()),
formatRate(rate.Limit(r.debug.averagePollRate.Rate())),
formatDuration(r.effectiveLatency()),
)
r.debug.previousPollRate = pollRate
}
// formatRate formats a rate limit for display in reader debug logs.
func formatRate(r rate.Limit) string {
if r == 0 {
// "500.00/s 2.00ms"
return " ?.??/s ?.??µs"
}
d := metrics.RateToDuration(r)
return fmt.Sprintf(
"%6.02f/s %s",
r,
formatDuration(d),
)
}
// formatDuration formats a duration for display in reader debug logs.
func formatDuration(d time.Duration) string {
if d >= time.Hour {
return fmt.Sprintf("%6.02fh ", d.Seconds()/3600)
} else if d >= time.Minute { | else if d >= time.Second {
return fmt.Sprintf("%6.02fs ", d.Seconds())
} else if d >= time.Millisecond {
return fmt.Sprintf("%6.02fms", d.Seconds()/time.Millisecond.Seconds())
}
return fmt.Sprintf("%6.02fµs", d.Seconds()/time.Microsecond.Seconds())
}
|
return fmt.Sprintf("%6.02fm ", d.Seconds()/60)
} | conditional_block |
CaptureGridView.js | var CaptureGridView = CustomGridView.extend({
className: 'CaptureGridView',
columnSelectView: undefined,
colPreferences: undefined,
collection: undefined, //SimpleDocuments
ro: undefined,
captureGridItemViews: undefined,
editingView: undefined,
resizeMe: true,
resizeOnDocumentViewResize: true,
events: {
'click .columnSelector': 'chooseColumns'
},
close: function () {
this.closeItemViews();
this.remove(); //Removes this from the DOM, and calls stopListening to remove any bound events that has been listenTo'd.
},
initialize: function (options) {
this.compiledTemplate = doT.template(Templates.get('capturegridviewlayout'));
this.initializeGrid({ slowClickEdit: true });
this.captureGridItemViews = [];
this.colPreferences = Utility.tryParseJSON(Utility.GetUserPreference('capture_col_Pref'));
this.listenTo(this.collection, 'remove', this.collectionRemovedFrom);
this.listenTo(this.collection, 'add', this.collectionAddedTo);
this.listenTo(this.collection, 'reset', this.render);
return this;
},
render: function () {
this.ro = this.getRenderObject();
this.$el.html(this.compiledTemplate(this.ro));
this.renderGrid();
this.renderItemViews();
return this;
},
getRenderObject: function () {
var ro = {
headers: []
};
var defColPrefs = this.defaultColumnPreferences();
if (!this.colPreferences) {
this.colPreferences = this.defaultColumnPreferences();
}
var length = Object.keys(this.colPreferences).length;
var cp;
var i = 0;
for (cp in this.colPreferences) {
if (this.colPreferences.hasOwnProperty(cp)) {
var name = Constants.c[cp];
var idx = this.colPreferences[cp].order;
if (idx === undefined) {
idx = i++; //If you just resize and don't reorder you will have an undefined order.
}
var w = this.getWidthFromPreference(cp, this.colPreferences, defColPrefs);
var isEditable = cp !== 'fileSizeMB';
ro.headers[idx] = { value: name, colId: cp, hasColumnEdit: isEditable, style: 'width: ' + w };
}
}
if (this.collection.hasAnyErrors()) {
ro.headers.push({ value: Constants.c.exception, colId: '', hasColumnEdit: false, style: 'width: 10%;' });
}
return ro;
},
renderItemViews: function () {
this.closeItemViews();
var $container = this.$el.find('.customGridTable tbody');
$container.empty(); //Remove any other rows left over after the item views are closed.
var that = this;
var cc = function (e, cae) { that.itemViewCellClick(e, cae); };
var eem = function (e) { that.itemViewEnterEditMode(e); };
var exem = function () { that.itemViewExitEditMode(); };
var i = 0;
var length = this.collection.length;
for (i; i < length; i++) {
var itemView = new CaptureGridItemView({
model: this.collection.at(i),
headers: this.ro.headers,
cellClickFunc: cc,
enterEditModeFunc: eem,
exitEditModeFunc: exem
});
$container.append(itemView.render().$el);
this.captureGridItemViews.push(itemView);
}
//Append an empty row to the end of the list, this will be used to fill the remaining space.
var tr = document.createElement('tr');
tr.setAttribute('class', 'emptyGridRow');
var td = document.createElement('td');
td.setAttribute('colspan', this.ro.headers.length + 2);
tr.appendChild(td);
$container.append(tr);
},
itemViewCellClick: function (e, cae) {
var event = new $.Event();
event.currentTarget = e;
this.cellClick(event, cae);
},
itemViewEnterEditMode: function (e) {
var event = new $.Event();
event.currentTarget = e;
this.enterEditMode(event);
},
itemViewExitEditMode: function () {
this.exitEditMode();
},
closeItemViews: function () {
var itemView = this.captureGridItemViews.pop();
while (itemView) {
itemView.close();
itemView = null;
itemView = this.captureGridItemViews.pop();
}
},
defaultColumnPreferences: function () {
var p = {
titleName: { order: 0, width: 15 },
keywords: { order: 1, width: 10 },
contentType: { order: 2, width: 15 },
securityClass: { order: 3, width: 10 },
workflow: { order: 4, width: 10 },
inbox: { order: 5, width: 10 },
folder: { order: 6, width: 10 },
fileSizeMB: { order: 7, width: 10 }
};
if (window.versioningLicensed) {
p.createAsDraft = { order: 8, width: 10 };
}
return p;
},
applyColumnEdit: function (cleanupFunc, selected, columnId, $editElements) {
var values = [];
if ($editElements[0].tagName === 'SELECT') {
$editElements = $editElements.find('option:selected');
}
var i = 0;
var length = $editElements.length;
for (i; i < length; i++) {
if (columnId === 'folder') {
values.push({ Id: $editElements.eq(i).attr("Id"), Name: $editElements.eq(i).attr("Name") });
}
else {
values.push($editElements.eq(i).val());
}
}
i = 0;
length = selected.length;
for (i; i < length; i++) {
selected[i].setValueByColumnName(columnId, values);
}
Utility.executeCallback(cleanupFunc);
},
collectionRemovedFrom: function (model, collection, options) {
var i = 0;
var length = this.captureGridItemViews.length;
for (i; i < length; i++) {
if (this.captureGridItemViews[i].model === model) {
this.captureGridItemViews[i].close();
this.captureGridItemViews.splice(i, 1);
break;
}
}
},
collectionAddedTo: function (model, collection, options) {
this.render(); //Need to rerender as the collection is sorted once it is added to.
},
chooseColumns: function () {
var sourceFields = {};
var selectedFields = {};
var allColumns = this.defaultColumnPreferences();
var i = 0;
if (!this.colPreferences) {
this.colPreferences = this.defaultColumnPreferences();
}
var cp;
for (cp in allColumns) {
if (allColumns.hasOwnProperty(cp)) {
sourceFields[cp] = Constants.c[cp];
if (this.colPreferences[cp]) {
selectedFields[cp] = this.colPreferences[cp].order === undefined ? i : this.colPreferences[cp].order;
}
i++;
}
}
var that = this;
if (this.columnSelectView) {
this.columnSelectView.close();
this.columnSelectView = null;
}
this.columnSelectView = new ColumnSelectView({
sourceFields: sourceFields,
selectedFields: selectedFields,
dialogCallbacks: {
saveCallback: function (preference) {
that.columnSelectView.close();
that.columnSelectView = null;
that.onColumnsChanged(preference, true);
},
cancelCallback: function () {
that.columnSelectView.close();
that.columnSelectView = null;
}
}
});
this.columnSelectView.render();
},
onColumnEdit: function (colId, $th) {
var selected = this.collection.getSelected();
if (selected.length > 0) {
var columnId = colId;
var name = Constants.c[columnId];
var length = this.captureGridItemViews.length;
var i = 0;
var editString = '<div class="searchColumnEdit">';
for (i; i < length; i++) {
| editString += '</div>';
var $dlg;
var that = this;
var okFunc = function (cleanupFunc) {
var $editElements = $dlg.find('.searchColumnEdit input, .searchColumnEdit select');
var value = that.applyColumnEdit(cleanupFunc, selected, columnId, $editElements);
};
var setupFolderPicker = function () {
var $input = $dlg.find('input[data-colname="folder"]');
$input.off('click').on('click', function (ev) {
var callback = function (btnText, uiState, foldId, foldTitle, foldPath) {
var targ = $(ev.currentTarget);
if (btnText && btnText !== Constants.c.cancel) {
targ.attr('Id', foldId);
targ.attr('Name', foldPath);
targ.val(foldPath);
}
};
DialogsUtil.folderSelection(false, false, '', callback, this, { singleSelect: true });
});
};
var options = {
autoOpen: false,
title: Constants.c.editColumn + ' ' + name,
height: 110,
resizable: false,
modal: true,
open: function () {
if (columnId === "folder") {
setupFolderPicker();
}
},
html: editString
};
$dlg = DialogsUtil.generalPromptDialog('', okFunc, null, options);
$dlg.dialog('open');
}
},
//#region CustomGridView virtual functions
onColumnsChanged: function (preference, isColumnSelection) {
var colPreferences = Utility.tryParseJSON(Utility.GetUserPreference('capture_col_Pref')) || {};
//Loop preference adding back values from exising preferences that are not specified while still dropping columns no longer displayed. (no extend).
var id;
var totWidth = 0;
for (id in preference) {
if (preference.hasOwnProperty(id)) {
if (colPreferences[id]) {
if (preference[id].width === undefined) { //if width is not specified copy from existing preference.
preference[id].width = colPreferences[id].width;
}
if (preference[id].order === undefined) { //if order is not specified copy from existing preference.
preference[id].order = colPreferences[id].order;
}
totWidth += colPreferences[id].width;
}
// If there is no width specified, set it to 100 (eg. Columns made to be visible in the column chooser don't have a specified width)
if (preference[id].width === undefined) {
preference[id].width = 100;
}
}
}
if (isColumnSelection && totWidth !== 0 && totWidth < 100) {
var keys = Object.keys(preference);
var length = keys.length;
var average = (100 - totWidth) / length;
var cp;
for (cp in preference) {
if (preference.hasOwnProperty(cp)) {
preference[cp].width += average;
}
}
}
Utility.SetSingleUserPreference('capture_col_Pref', JSON.stringify(preference));
this.colPreferences = preference;
this.render();
},
onRowSelect: function (rowId, $td, ev) {
this.onGridRowSelect(rowId, $td, ev);
},
onSortGrid: function (cellId, $th) {
this.collection.sortByColumn(cellId);
},
onEdit: function (rowId, $td) {
var i = 0;
var length = this.captureGridItemViews.length;
var cellId = $td.data('cellid');
for (i; i < length; i++) {
var iv = this.captureGridItemViews[i];
if (iv.modelId === rowId) {
this.editingView = iv;
iv.model.storeOriginalValues();
iv.render(true);
this.focusElement(cellId, iv.$el);
break;
}
}
},
onExitEdit: function (rowId) {
if (!this.editingView) {
return;
}
var pref = Utility.GetUserPreference('rowEditChange') || 'restoreRow';
if (pref === 'restoreRow') {
this.editingView.model.revertChanges();
}
this.editingView.render();
},
getGridCollection: function () {
return this.collection;
}
//#endregion
}); | if (this.captureGridItemViews[i].model === selected[0]) {
editString += this.captureGridItemViews[i].getFieldEditObject(columnId, ['']);
break;
}
}
| conditional_block |
CaptureGridView.js | var CaptureGridView = CustomGridView.extend({
className: 'CaptureGridView',
columnSelectView: undefined,
colPreferences: undefined,
collection: undefined, //SimpleDocuments
ro: undefined,
captureGridItemViews: undefined,
editingView: undefined,
resizeMe: true,
resizeOnDocumentViewResize: true,
events: {
'click .columnSelector': 'chooseColumns'
},
close: function () {
this.closeItemViews();
this.remove(); //Removes this from the DOM, and calls stopListening to remove any bound events that has been listenTo'd.
},
initialize: function (options) {
this.compiledTemplate = doT.template(Templates.get('capturegridviewlayout'));
this.initializeGrid({ slowClickEdit: true });
this.captureGridItemViews = [];
this.colPreferences = Utility.tryParseJSON(Utility.GetUserPreference('capture_col_Pref'));
this.listenTo(this.collection, 'remove', this.collectionRemovedFrom);
this.listenTo(this.collection, 'add', this.collectionAddedTo);
this.listenTo(this.collection, 'reset', this.render);
return this;
},
render: function () {
this.ro = this.getRenderObject();
this.$el.html(this.compiledTemplate(this.ro));
this.renderGrid();
this.renderItemViews();
return this;
},
getRenderObject: function () {
var ro = {
headers: []
};
var defColPrefs = this.defaultColumnPreferences();
if (!this.colPreferences) {
this.colPreferences = this.defaultColumnPreferences();
}
var length = Object.keys(this.colPreferences).length;
var cp;
var i = 0;
for (cp in this.colPreferences) {
if (this.colPreferences.hasOwnProperty(cp)) {
var name = Constants.c[cp];
var idx = this.colPreferences[cp].order;
if (idx === undefined) {
idx = i++; //If you just resize and don't reorder you will have an undefined order.
}
var w = this.getWidthFromPreference(cp, this.colPreferences, defColPrefs);
var isEditable = cp !== 'fileSizeMB';
ro.headers[idx] = { value: name, colId: cp, hasColumnEdit: isEditable, style: 'width: ' + w };
}
}
if (this.collection.hasAnyErrors()) {
ro.headers.push({ value: Constants.c.exception, colId: '', hasColumnEdit: false, style: 'width: 10%;' });
}
return ro;
},
renderItemViews: function () {
this.closeItemViews();
var $container = this.$el.find('.customGridTable tbody');
$container.empty(); //Remove any other rows left over after the item views are closed.
var that = this;
var cc = function (e, cae) { that.itemViewCellClick(e, cae); };
var eem = function (e) { that.itemViewEnterEditMode(e); };
var exem = function () { that.itemViewExitEditMode(); };
var i = 0;
var length = this.collection.length;
for (i; i < length; i++) {
var itemView = new CaptureGridItemView({
model: this.collection.at(i),
headers: this.ro.headers,
cellClickFunc: cc,
enterEditModeFunc: eem,
exitEditModeFunc: exem
});
$container.append(itemView.render().$el);
this.captureGridItemViews.push(itemView);
}
//Append an empty row to the end of the list, this will be used to fill the remaining space.
var tr = document.createElement('tr');
tr.setAttribute('class', 'emptyGridRow');
var td = document.createElement('td');
td.setAttribute('colspan', this.ro.headers.length + 2);
tr.appendChild(td);
$container.append(tr);
},
itemViewCellClick: function (e, cae) {
var event = new $.Event();
event.currentTarget = e;
this.cellClick(event, cae);
},
itemViewEnterEditMode: function (e) {
var event = new $.Event();
event.currentTarget = e;
this.enterEditMode(event);
},
itemViewExitEditMode: function () {
this.exitEditMode();
},
closeItemViews: function () {
var itemView = this.captureGridItemViews.pop();
while (itemView) {
itemView.close();
itemView = null;
itemView = this.captureGridItemViews.pop();
}
},
defaultColumnPreferences: function () {
var p = {
titleName: { order: 0, width: 15 },
keywords: { order: 1, width: 10 },
contentType: { order: 2, width: 15 },
securityClass: { order: 3, width: 10 },
workflow: { order: 4, width: 10 },
inbox: { order: 5, width: 10 },
folder: { order: 6, width: 10 },
fileSizeMB: { order: 7, width: 10 }
};
if (window.versioningLicensed) {
p.createAsDraft = { order: 8, width: 10 };
}
return p;
},
applyColumnEdit: function (cleanupFunc, selected, columnId, $editElements) {
var values = [];
if ($editElements[0].tagName === 'SELECT') {
$editElements = $editElements.find('option:selected');
}
var i = 0;
var length = $editElements.length;
for (i; i < length; i++) {
if (columnId === 'folder') {
values.push({ Id: $editElements.eq(i).attr("Id"), Name: $editElements.eq(i).attr("Name") });
}
else {
values.push($editElements.eq(i).val());
}
}
i = 0;
length = selected.length;
for (i; i < length; i++) {
selected[i].setValueByColumnName(columnId, values);
}
Utility.executeCallback(cleanupFunc);
},
collectionRemovedFrom: function (model, collection, options) {
var i = 0;
var length = this.captureGridItemViews.length;
for (i; i < length; i++) {
if (this.captureGridItemViews[i].model === model) {
this.captureGridItemViews[i].close();
this.captureGridItemViews.splice(i, 1);
break;
}
}
},
collectionAddedTo: function (model, collection, options) {
this.render(); //Need to rerender as the collection is sorted once it is added to.
},
chooseColumns: function () {
var sourceFields = {};
var selectedFields = {};
var allColumns = this.defaultColumnPreferences();
var i = 0;
if (!this.colPreferences) {
this.colPreferences = this.defaultColumnPreferences();
}
var cp;
for (cp in allColumns) {
if (allColumns.hasOwnProperty(cp)) {
sourceFields[cp] = Constants.c[cp];
if (this.colPreferences[cp]) {
selectedFields[cp] = this.colPreferences[cp].order === undefined ? i : this.colPreferences[cp].order;
}
i++;
}
}
var that = this;
if (this.columnSelectView) {
this.columnSelectView.close();
this.columnSelectView = null;
}
this.columnSelectView = new ColumnSelectView({
sourceFields: sourceFields,
selectedFields: selectedFields,
dialogCallbacks: {
saveCallback: function (preference) {
that.columnSelectView.close();
that.columnSelectView = null;
that.onColumnsChanged(preference, true);
},
cancelCallback: function () {
that.columnSelectView.close();
that.columnSelectView = null;
}
}
});
this.columnSelectView.render();
},
onColumnEdit: function (colId, $th) {
var selected = this.collection.getSelected();
if (selected.length > 0) {
var columnId = colId;
var name = Constants.c[columnId];
var length = this.captureGridItemViews.length;
var i = 0;
var editString = '<div class="searchColumnEdit">';
for (i; i < length; i++) {
if (this.captureGridItemViews[i].model === selected[0]) {
editString += this.captureGridItemViews[i].getFieldEditObject(columnId, ['']);
break;
}
}
editString += '</div>';
var $dlg;
var that = this;
var okFunc = function (cleanupFunc) {
var $editElements = $dlg.find('.searchColumnEdit input, .searchColumnEdit select');
var value = that.applyColumnEdit(cleanupFunc, selected, columnId, $editElements);
};
var setupFolderPicker = function () {
var $input = $dlg.find('input[data-colname="folder"]');
$input.off('click').on('click', function (ev) {
var callback = function (btnText, uiState, foldId, foldTitle, foldPath) {
var targ = $(ev.currentTarget);
if (btnText && btnText !== Constants.c.cancel) {
targ.attr('Id', foldId);
targ.attr('Name', foldPath);
targ.val(foldPath);
}
};
DialogsUtil.folderSelection(false, false, '', callback, this, { singleSelect: true });
});
};
var options = {
autoOpen: false,
title: Constants.c.editColumn + ' ' + name,
height: 110,
resizable: false,
modal: true,
open: function () {
if (columnId === "folder") {
setupFolderPicker();
}
},
html: editString
};
$dlg = DialogsUtil.generalPromptDialog('', okFunc, null, options);
$dlg.dialog('open');
}
},
//#region CustomGridView virtual functions
onColumnsChanged: function (preference, isColumnSelection) {
var colPreferences = Utility.tryParseJSON(Utility.GetUserPreference('capture_col_Pref')) || {}; | if (colPreferences[id]) {
if (preference[id].width === undefined) { //if width is not specified copy from existing preference.
preference[id].width = colPreferences[id].width;
}
if (preference[id].order === undefined) { //if order is not specified copy from existing preference.
preference[id].order = colPreferences[id].order;
}
totWidth += colPreferences[id].width;
}
// If there is no width specified, set it to 100 (eg. Columns made to be visible in the column chooser don't have a specified width)
if (preference[id].width === undefined) {
preference[id].width = 100;
}
}
}
if (isColumnSelection && totWidth !== 0 && totWidth < 100) {
var keys = Object.keys(preference);
var length = keys.length;
var average = (100 - totWidth) / length;
var cp;
for (cp in preference) {
if (preference.hasOwnProperty(cp)) {
preference[cp].width += average;
}
}
}
Utility.SetSingleUserPreference('capture_col_Pref', JSON.stringify(preference));
this.colPreferences = preference;
this.render();
},
onRowSelect: function (rowId, $td, ev) {
this.onGridRowSelect(rowId, $td, ev);
},
onSortGrid: function (cellId, $th) {
this.collection.sortByColumn(cellId);
},
onEdit: function (rowId, $td) {
var i = 0;
var length = this.captureGridItemViews.length;
var cellId = $td.data('cellid');
for (i; i < length; i++) {
var iv = this.captureGridItemViews[i];
if (iv.modelId === rowId) {
this.editingView = iv;
iv.model.storeOriginalValues();
iv.render(true);
this.focusElement(cellId, iv.$el);
break;
}
}
},
onExitEdit: function (rowId) {
if (!this.editingView) {
return;
}
var pref = Utility.GetUserPreference('rowEditChange') || 'restoreRow';
if (pref === 'restoreRow') {
this.editingView.model.revertChanges();
}
this.editingView.render();
},
getGridCollection: function () {
return this.collection;
}
//#endregion
}); | //Loop preference adding back values from exising preferences that are not specified while still dropping columns no longer displayed. (no extend).
var id;
var totWidth = 0;
for (id in preference) {
if (preference.hasOwnProperty(id)) { | random_line_split |
pix2pix_sat_imgs.py | # -*- coding: utf-8 -*-
"""Copie de pix2pix_sat_imgs.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1gedyO3q39-3CGiaQPvOoKASf5_SQZKjP
#Image-to-Image Translation with Conditional Adversarial Nets
In this notebook we impleted Pix2Pix by , an image-to-image translation using Conditional Adversarial Nets. The achitecture uses the conditinal version of GANs with the following specifities:
-The generator uses Unet architecture;
-And the Discrimitor implementes a Patch version called PatchGAN
"""
# Import of the required librairies
import torch
from torch import nn
from torchvision import transforms
from torchvision.utils import make_grid
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import torch.nn.functional as F
torch.manual_seed(0)
"""# Generator
The Generator uses Unet architecture:
"""
class EncoderUnit(nn.Module):
def __init__(self, input_channels, use_dropout=False, use_bn=True):
super(EncoderUnit, self).__init__()
self.conv_layer1 = nn.Conv2d(input_channels, input_channels * 2,kernel_size=3,padding=1)
self.conv_layer2 = nn.Conv2d(input_channels * 2, input_channels * 2, kernel_size=3, padding=1)
self.act_function= nn.LeakyReLU(0.2)
self.pooling_layer = nn.MaxPool2d(kernel_size=2, stride=2)
if use_bn:
self.bn = nn.BatchNorm2d(input_channels * 2)
self.use_bn = use_bn
if use_dropout:
self.dropout = nn.Dropout()
self.use_dropout = use_dropout
def forward(self, input_fm):
out = self.conv_layer1(input_fm)
if self.use_bn:
out = self.bn(out)
if self.use_dropout:
out = self.dropout(out)
out = self.act_function(out)
out = self.conv_layer2(out)
if self.use_bn:
out = self.bn(out)
if self.use_dropout:
out = self.dropout(out)
out = self.act_function(out)
out = self.pooling_layer(out)
return out
x = torch.randn((1,3,256,256))
eu = EncoderUnit(3)
y = eu(x)
y.shape
class DecoderUnit(nn.Module):
def __init__(self, input_channels, use_dropout=False, use_bn=True):
super(DecoderUnit, self).__init__()
self.upsampling_layer = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv_layer1 = nn.Conv2d(input_channels, input_channels // 2, kernel_size=2)
self.conv_layer2 = nn.Conv2d(input_channels, input_channels // 2, kernel_size=3, padding=1)
self.conv_layer3 = nn.Conv2d(input_channels // 2, input_channels // 2, kernel_size=2, padding=1)
if use_bn:
self.bn = nn.BatchNorm2d(input_channels // 2)
self.use_bn = use_bn
self.act_function = nn.ReLU()
if use_dropout:
self.dropout = nn.Dropout()
self.use_dropout = use_dropout
def forward(self, input_fm, input_fm_skip_con):
input_fm = self.upsampling_layer(input_fm)
input_fm = self.conv_layer1(input_fm)
cropped_input_fm = self.crop_image(input_fm_skip_con, input_fm.shape)
out_fm = torch.cat([input_fm, cropped_input_fm ], axis=1)
decoded_fm = self.conv_layer2(out_fm)
if self.use_bn:
decoded_fm = self.bn(decoded_fm)
if self.use_dropout:
decoded_fm = self.dropout(decoded_fm)
decoded_fm = self.act_function(decoded_fm)
decoded_fm = self.conv_layer3(decoded_fm)
if self.use_bn:
decoded_fm = self.bn(decoded_fm)
if self.use_dropout:
decoded_fm = self.dropout(decoded_fm)
decoded_fm = self.act_function(decoded_fm) | center_height = image.shape[2] // 2
center_width = image.shape[3] // 2
top_left = center_height - round(target_shape[2] / 2)
top_right = top_left + target_shape[2]
bottom_left = center_width - round(target_shape[3] / 2)
bottom_right = bottom_left + target_shape[3]
self.new_image = image[:, :, top_left:top_right, bottom_left:bottom_right]
return self.new_image
class UnetGenerator(nn.Module):
def __init__(self, input_channels, output_channels, hidden_channels=32):
super(UnetGenerator, self).__init__()
self.unet_first_layer = nn.Sequential(
nn.Conv2d(input_channels, hidden_channels, 1),
nn.LeakyReLU(0.2))
#self.unet_first_layer = FeatureMapBlock(input_channels, hidden_channels)
self.encoder1 = EncoderUnit(hidden_channels, use_dropout=True)
self.encoder2 = EncoderUnit(hidden_channels * 2, use_dropout=True)
self.encoder3 = EncoderUnit(hidden_channels * 4, use_dropout=True)
self.encoder4 = EncoderUnit(hidden_channels * 8)
self.encoder5 = EncoderUnit(hidden_channels * 16)
self.encoder6 = EncoderUnit(hidden_channels * 32)
self.decoder0 = DecoderUnit(hidden_channels * 64)
self.decoder1 = DecoderUnit(hidden_channels * 32)
self.decoder2 = DecoderUnit(hidden_channels * 16)
self.decoder3 = DecoderUnit(hidden_channels * 8)
self.decoder4 = DecoderUnit(hidden_channels * 4)
self.decoder5 = DecoderUnit(hidden_channels * 2)
self.unet_last_layer = nn.Sequential(
nn.Conv2d(hidden_channels, output_channels, 1),
nn.Tanh())
#self.sigmoid = torch.nn.Sigmoid()
def forward(self, real_input):
x0 = self.unet_first_layer(real_input)
x1 = self.encoder1(x0)
x2 = self.encoder2(x1)
x3 = self.encoder3(x2)
x4 = self.encoder4(x3)
x5 = self.encoder5(x4)
x6 = self.encoder6(x5)
x7 = self.decoder0(x6, x5)
x8 = self.decoder1(x7, x4)
x9 = self.decoder2(x8, x3)
x10 = self.decoder3(x9, x2)
x11 = self.decoder4(x10, x1)
x12 = self.decoder5(x11, x0)
gen_image = self.unet_last_layer(x12)
return gen_image
#return self.sigmoid(gen_image)
x = torch.randn(1,3,256,256)
ge = UnetGenerator(3,3)
a = ge(x)
a.shape
"""#Create the Discriminator"""
class PatchGanDis(nn.Module):
def __init__(self, input_channels, hidden_channels=8):
super(PatchGanDis, self).__init__()
self.patchGan_first_layer = nn.Sequential(
nn.Conv2d(input_channels, hidden_channels,1),
nn.LeakyReLU(0.2))
self.patchGan_layer1 = EncoderUnit(hidden_channels, use_bn=False)
self.patchGan_layer2 = EncoderUnit(hidden_channels * 2)
self.patchGan_layer3 = EncoderUnit(hidden_channels * 4)
self.patchGan_layer4 = EncoderUnit(hidden_channels * 8)
self.patchGan_final_layer = nn.Conv2d(hidden_channels * 16, 1, kernel_size=1)
def forward(self, gen_img, real_output):
x = torch.cat((gen_img, real_output), axis=1)
x0 = self.patchGan_first_layer(x)
x1 = self.patchGan_layer1(x0)
x2 = self.patchGan_layer2(x1)
x3 = self.patchGan_layer3(x2)
x4 = self.patchGan_layer4(x3)
realness_probas_marix = self.patchGan_final_layer(x4)
return realness_probas_marix
"""# upload Dataset"""
from google.colab import drive
drive.mount('/content/gdrive')
# New parameters
lCGAN_criterion = nn.BCEWithLogitsLoss()
pix_dist_criterion = nn.L1Loss()
lambda_pix_dist = 200
n_epochs = 30
input_img_channels = 3
real_img_channels = 3
display_step = 200
batch_size = 4
lr = 0.0002
target_shape = 256
device = 'cuda'
dataset_path = '/content/drive/MyDrive/Colab Notebooks/deep_learinng_projects/computer_vision/dataset/map_dataset/maps'
transform = transforms.Compose([
transforms.ToTensor(),
])
import torchvision
dataset = torchvision.datasets.ImageFolder(dataset_path, transform=transform)
gen = UnetGenerator(input_img_channels, real_img_channels).to(device)
gen_opt = torch.optim.Adam(gen.parameters(), lr=lr)
disc = PatchGanDis(input_img_channels + real_img_channels).to(device)
disc_opt = torch.optim.Adam(disc.parameters(), lr=lr)
def weights_init(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
torch.nn.init.normal_(m.weight, 0.0, 0.02)
if isinstance(m, nn.BatchNorm2d):
torch.nn.init.normal_(m.weight, 0.0, 0.02)
torch.nn.init.constant_(m.bias, 0)
gen = gen.apply(weights_init)
disc = disc.apply(weights_init)
def plot_images(image_tensor, num_images=25, size=(1, 28, 28)):
image_shifted = image_tensor
image_unflat = image_shifted.detach().cpu().view(-1, *size)
image_grid = make_grid(image_unflat[:num_images], nrow=5)
plt.imshow(image_grid.permute(1, 2, 0).squeeze())
plt.show()
from skimage import color
import numpy as np
mean_generator_loss = 0
mean_discriminator_loss = 0
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
cur_step = 0
discriminator_losses = []
generator_losses = []
for epoch in range(n_epochs):
# Dataloader returns the batches
for image, _ in dataloader:
image_width = image.shape[3]
condition = image[:, :, :, :image_width // 2]
condition = nn.functional.interpolate(condition, size=target_shape)
real = image[:, :, :, image_width // 2:]
real = nn.functional.interpolate(real, size=target_shape)
cur_batch_size = len(condition)
condition = condition.to(device)
real = real.to(device)
### Update discriminator ###
disc_opt.zero_grad()
with torch.no_grad():
fake = gen(condition)
disc_fake_hat = disc(fake.detach(), condition) # Detach generator
disc_fake_loss = lCGAN_criterion(disc_fake_hat, torch.zeros_like(disc_fake_hat))
disc_real_hat = disc(real, condition)
disc_real_loss = lCGAN_criterion(disc_real_hat, torch.ones_like(disc_real_hat))
disc_loss = (disc_fake_loss + disc_real_loss) / 2
disc_loss.backward(retain_graph=True)
disc_opt.step()
### Update generator ###
gen_opt.zero_grad()
fake = gen(condition)
disc_fake_bar = disc(fake, condition)
gen_adv_loss = lCGAN_criterion(disc_fake_bar, torch.ones_like(disc_fake_bar))
gen_rec_loss = pix_dist_criterion(real, fake)
gen_loss_tot = gen_adv_loss + lambda_pix_dist * gen_rec_loss
gen_loss_tot.backward()
gen_opt.step()
mean_discriminator_loss += disc_loss.item() / display_step
mean_generator_loss += gen_loss_tot.item() / display_step
discriminator_losses.append(mean_discriminator_loss)
generator_losses.append(mean_discriminator_loss)
#Visualization
if cur_step % display_step == 0:
if cur_step > 0:
print(f"Epoch {epoch}: Step {cur_step}: Generator (U-Net) loss: {mean_generator_loss}, Discriminator loss: {mean_discriminator_loss}")
else:
print("Pretrained initial state")
plot_images(condition, size=(input_img_channels, target_shape, target_shape))
plot_images(real, size=(input_img_channels, target_shape, target_shape))
plot_images(fake, size=(input_img_channels, target_shape, target_shape))
mean_generator_loss = 0
mean_discriminator_loss = 0
cur_step += 1 | return decoded_fm
def crop_image(self, image, target_shape):
| random_line_split |
pix2pix_sat_imgs.py | # -*- coding: utf-8 -*-
"""Copie de pix2pix_sat_imgs.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1gedyO3q39-3CGiaQPvOoKASf5_SQZKjP
#Image-to-Image Translation with Conditional Adversarial Nets
In this notebook we impleted Pix2Pix by , an image-to-image translation using Conditional Adversarial Nets. The achitecture uses the conditinal version of GANs with the following specifities:
-The generator uses Unet architecture;
-And the Discrimitor implementes a Patch version called PatchGAN
"""
# Import of the required librairies
import torch
from torch import nn
from torchvision import transforms
from torchvision.utils import make_grid
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import torch.nn.functional as F
torch.manual_seed(0)
"""# Generator
The Generator uses Unet architecture:
"""
class EncoderUnit(nn.Module):
def __init__(self, input_channels, use_dropout=False, use_bn=True):
super(EncoderUnit, self).__init__()
self.conv_layer1 = nn.Conv2d(input_channels, input_channels * 2,kernel_size=3,padding=1)
self.conv_layer2 = nn.Conv2d(input_channels * 2, input_channels * 2, kernel_size=3, padding=1)
self.act_function= nn.LeakyReLU(0.2)
self.pooling_layer = nn.MaxPool2d(kernel_size=2, stride=2)
if use_bn:
self.bn = nn.BatchNorm2d(input_channels * 2)
self.use_bn = use_bn
if use_dropout:
self.dropout = nn.Dropout()
self.use_dropout = use_dropout
def forward(self, input_fm):
out = self.conv_layer1(input_fm)
if self.use_bn:
out = self.bn(out)
if self.use_dropout:
out = self.dropout(out)
out = self.act_function(out)
out = self.conv_layer2(out)
if self.use_bn:
out = self.bn(out)
if self.use_dropout:
out = self.dropout(out)
out = self.act_function(out)
out = self.pooling_layer(out)
return out
x = torch.randn((1,3,256,256))
eu = EncoderUnit(3)
y = eu(x)
y.shape
class DecoderUnit(nn.Module):
def __init__(self, input_channels, use_dropout=False, use_bn=True):
super(DecoderUnit, self).__init__()
self.upsampling_layer = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv_layer1 = nn.Conv2d(input_channels, input_channels // 2, kernel_size=2)
self.conv_layer2 = nn.Conv2d(input_channels, input_channels // 2, kernel_size=3, padding=1)
self.conv_layer3 = nn.Conv2d(input_channels // 2, input_channels // 2, kernel_size=2, padding=1)
if use_bn:
self.bn = nn.BatchNorm2d(input_channels // 2)
self.use_bn = use_bn
self.act_function = nn.ReLU()
if use_dropout:
self.dropout = nn.Dropout()
self.use_dropout = use_dropout
def forward(self, input_fm, input_fm_skip_con):
input_fm = self.upsampling_layer(input_fm)
input_fm = self.conv_layer1(input_fm)
cropped_input_fm = self.crop_image(input_fm_skip_con, input_fm.shape)
out_fm = torch.cat([input_fm, cropped_input_fm ], axis=1)
decoded_fm = self.conv_layer2(out_fm)
if self.use_bn:
decoded_fm = self.bn(decoded_fm)
if self.use_dropout:
decoded_fm = self.dropout(decoded_fm)
decoded_fm = self.act_function(decoded_fm)
decoded_fm = self.conv_layer3(decoded_fm)
if self.use_bn:
decoded_fm = self.bn(decoded_fm)
if self.use_dropout:
decoded_fm = self.dropout(decoded_fm)
decoded_fm = self.act_function(decoded_fm)
return decoded_fm
def crop_image(self, image, target_shape):
center_height = image.shape[2] // 2
center_width = image.shape[3] // 2
top_left = center_height - round(target_shape[2] / 2)
top_right = top_left + target_shape[2]
bottom_left = center_width - round(target_shape[3] / 2)
bottom_right = bottom_left + target_shape[3]
self.new_image = image[:, :, top_left:top_right, bottom_left:bottom_right]
return self.new_image
class UnetGenerator(nn.Module):
def __init__(self, input_channels, output_channels, hidden_channels=32):
super(UnetGenerator, self).__init__()
self.unet_first_layer = nn.Sequential(
nn.Conv2d(input_channels, hidden_channels, 1),
nn.LeakyReLU(0.2))
#self.unet_first_layer = FeatureMapBlock(input_channels, hidden_channels)
self.encoder1 = EncoderUnit(hidden_channels, use_dropout=True)
self.encoder2 = EncoderUnit(hidden_channels * 2, use_dropout=True)
self.encoder3 = EncoderUnit(hidden_channels * 4, use_dropout=True)
self.encoder4 = EncoderUnit(hidden_channels * 8)
self.encoder5 = EncoderUnit(hidden_channels * 16)
self.encoder6 = EncoderUnit(hidden_channels * 32)
self.decoder0 = DecoderUnit(hidden_channels * 64)
self.decoder1 = DecoderUnit(hidden_channels * 32)
self.decoder2 = DecoderUnit(hidden_channels * 16)
self.decoder3 = DecoderUnit(hidden_channels * 8)
self.decoder4 = DecoderUnit(hidden_channels * 4)
self.decoder5 = DecoderUnit(hidden_channels * 2)
self.unet_last_layer = nn.Sequential(
nn.Conv2d(hidden_channels, output_channels, 1),
nn.Tanh())
#self.sigmoid = torch.nn.Sigmoid()
def forward(self, real_input):
x0 = self.unet_first_layer(real_input)
x1 = self.encoder1(x0)
x2 = self.encoder2(x1)
x3 = self.encoder3(x2)
x4 = self.encoder4(x3)
x5 = self.encoder5(x4)
x6 = self.encoder6(x5)
x7 = self.decoder0(x6, x5)
x8 = self.decoder1(x7, x4)
x9 = self.decoder2(x8, x3)
x10 = self.decoder3(x9, x2)
x11 = self.decoder4(x10, x1)
x12 = self.decoder5(x11, x0)
gen_image = self.unet_last_layer(x12)
return gen_image
#return self.sigmoid(gen_image)
x = torch.randn(1,3,256,256)
ge = UnetGenerator(3,3)
a = ge(x)
a.shape
"""#Create the Discriminator"""
class PatchGanDis(nn.Module):
def __init__(self, input_channels, hidden_channels=8):
super(PatchGanDis, self).__init__()
self.patchGan_first_layer = nn.Sequential(
nn.Conv2d(input_channels, hidden_channels,1),
nn.LeakyReLU(0.2))
self.patchGan_layer1 = EncoderUnit(hidden_channels, use_bn=False)
self.patchGan_layer2 = EncoderUnit(hidden_channels * 2)
self.patchGan_layer3 = EncoderUnit(hidden_channels * 4)
self.patchGan_layer4 = EncoderUnit(hidden_channels * 8)
self.patchGan_final_layer = nn.Conv2d(hidden_channels * 16, 1, kernel_size=1)
def forward(self, gen_img, real_output):
x = torch.cat((gen_img, real_output), axis=1)
x0 = self.patchGan_first_layer(x)
x1 = self.patchGan_layer1(x0)
x2 = self.patchGan_layer2(x1)
x3 = self.patchGan_layer3(x2)
x4 = self.patchGan_layer4(x3)
realness_probas_marix = self.patchGan_final_layer(x4)
return realness_probas_marix
"""# upload Dataset"""
from google.colab import drive
drive.mount('/content/gdrive')
# New parameters
lCGAN_criterion = nn.BCEWithLogitsLoss()
pix_dist_criterion = nn.L1Loss()
lambda_pix_dist = 200
n_epochs = 30
input_img_channels = 3
real_img_channels = 3
display_step = 200
batch_size = 4
lr = 0.0002
target_shape = 256
device = 'cuda'
dataset_path = '/content/drive/MyDrive/Colab Notebooks/deep_learinng_projects/computer_vision/dataset/map_dataset/maps'
transform = transforms.Compose([
transforms.ToTensor(),
])
import torchvision
dataset = torchvision.datasets.ImageFolder(dataset_path, transform=transform)
gen = UnetGenerator(input_img_channels, real_img_channels).to(device)
gen_opt = torch.optim.Adam(gen.parameters(), lr=lr)
disc = PatchGanDis(input_img_channels + real_img_channels).to(device)
disc_opt = torch.optim.Adam(disc.parameters(), lr=lr)
def weights_init(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
torch.nn.init.normal_(m.weight, 0.0, 0.02)
if isinstance(m, nn.BatchNorm2d):
torch.nn.init.normal_(m.weight, 0.0, 0.02)
torch.nn.init.constant_(m.bias, 0)
gen = gen.apply(weights_init)
disc = disc.apply(weights_init)
def plot_images(image_tensor, num_images=25, size=(1, 28, 28)):
image_shifted = image_tensor
image_unflat = image_shifted.detach().cpu().view(-1, *size)
image_grid = make_grid(image_unflat[:num_images], nrow=5)
plt.imshow(image_grid.permute(1, 2, 0).squeeze())
plt.show()
from skimage import color
import numpy as np
mean_generator_loss = 0
mean_discriminator_loss = 0
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
cur_step = 0
discriminator_losses = []
generator_losses = []
for epoch in range(n_epochs):
# Dataloader returns the batches
for image, _ in dataloader:
image_width = image.shape[3]
condition = image[:, :, :, :image_width // 2]
condition = nn.functional.interpolate(condition, size=target_shape)
real = image[:, :, :, image_width // 2:]
real = nn.functional.interpolate(real, size=target_shape)
cur_batch_size = len(condition)
condition = condition.to(device)
real = real.to(device)
### Update discriminator ###
disc_opt.zero_grad()
with torch.no_grad():
fake = gen(condition)
disc_fake_hat = disc(fake.detach(), condition) # Detach generator
disc_fake_loss = lCGAN_criterion(disc_fake_hat, torch.zeros_like(disc_fake_hat))
disc_real_hat = disc(real, condition)
disc_real_loss = lCGAN_criterion(disc_real_hat, torch.ones_like(disc_real_hat))
disc_loss = (disc_fake_loss + disc_real_loss) / 2
disc_loss.backward(retain_graph=True)
disc_opt.step()
### Update generator ###
gen_opt.zero_grad()
fake = gen(condition)
disc_fake_bar = disc(fake, condition)
gen_adv_loss = lCGAN_criterion(disc_fake_bar, torch.ones_like(disc_fake_bar))
gen_rec_loss = pix_dist_criterion(real, fake)
gen_loss_tot = gen_adv_loss + lambda_pix_dist * gen_rec_loss
gen_loss_tot.backward()
gen_opt.step()
mean_discriminator_loss += disc_loss.item() / display_step
mean_generator_loss += gen_loss_tot.item() / display_step
discriminator_losses.append(mean_discriminator_loss)
generator_losses.append(mean_discriminator_loss)
#Visualization
if cur_step % display_step == 0:
|
cur_step += 1
| if cur_step > 0:
print(f"Epoch {epoch}: Step {cur_step}: Generator (U-Net) loss: {mean_generator_loss}, Discriminator loss: {mean_discriminator_loss}")
else:
print("Pretrained initial state")
plot_images(condition, size=(input_img_channels, target_shape, target_shape))
plot_images(real, size=(input_img_channels, target_shape, target_shape))
plot_images(fake, size=(input_img_channels, target_shape, target_shape))
mean_generator_loss = 0
mean_discriminator_loss = 0 | conditional_block |
pix2pix_sat_imgs.py | # -*- coding: utf-8 -*-
"""Copie de pix2pix_sat_imgs.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1gedyO3q39-3CGiaQPvOoKASf5_SQZKjP
#Image-to-Image Translation with Conditional Adversarial Nets
In this notebook we impleted Pix2Pix by , an image-to-image translation using Conditional Adversarial Nets. The achitecture uses the conditinal version of GANs with the following specifities:
-The generator uses Unet architecture;
-And the Discrimitor implementes a Patch version called PatchGAN
"""
# Import of the required librairies
import torch
from torch import nn
from torchvision import transforms
from torchvision.utils import make_grid
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import torch.nn.functional as F
torch.manual_seed(0)
"""# Generator
The Generator uses Unet architecture:
"""
class EncoderUnit(nn.Module):
def __init__(self, input_channels, use_dropout=False, use_bn=True):
super(EncoderUnit, self).__init__()
self.conv_layer1 = nn.Conv2d(input_channels, input_channels * 2,kernel_size=3,padding=1)
self.conv_layer2 = nn.Conv2d(input_channels * 2, input_channels * 2, kernel_size=3, padding=1)
self.act_function= nn.LeakyReLU(0.2)
self.pooling_layer = nn.MaxPool2d(kernel_size=2, stride=2)
if use_bn:
self.bn = nn.BatchNorm2d(input_channels * 2)
self.use_bn = use_bn
if use_dropout:
self.dropout = nn.Dropout()
self.use_dropout = use_dropout
def | (self, input_fm):
out = self.conv_layer1(input_fm)
if self.use_bn:
out = self.bn(out)
if self.use_dropout:
out = self.dropout(out)
out = self.act_function(out)
out = self.conv_layer2(out)
if self.use_bn:
out = self.bn(out)
if self.use_dropout:
out = self.dropout(out)
out = self.act_function(out)
out = self.pooling_layer(out)
return out
x = torch.randn((1,3,256,256))
eu = EncoderUnit(3)
y = eu(x)
y.shape
class DecoderUnit(nn.Module):
def __init__(self, input_channels, use_dropout=False, use_bn=True):
super(DecoderUnit, self).__init__()
self.upsampling_layer = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv_layer1 = nn.Conv2d(input_channels, input_channels // 2, kernel_size=2)
self.conv_layer2 = nn.Conv2d(input_channels, input_channels // 2, kernel_size=3, padding=1)
self.conv_layer3 = nn.Conv2d(input_channels // 2, input_channels // 2, kernel_size=2, padding=1)
if use_bn:
self.bn = nn.BatchNorm2d(input_channels // 2)
self.use_bn = use_bn
self.act_function = nn.ReLU()
if use_dropout:
self.dropout = nn.Dropout()
self.use_dropout = use_dropout
def forward(self, input_fm, input_fm_skip_con):
input_fm = self.upsampling_layer(input_fm)
input_fm = self.conv_layer1(input_fm)
cropped_input_fm = self.crop_image(input_fm_skip_con, input_fm.shape)
out_fm = torch.cat([input_fm, cropped_input_fm ], axis=1)
decoded_fm = self.conv_layer2(out_fm)
if self.use_bn:
decoded_fm = self.bn(decoded_fm)
if self.use_dropout:
decoded_fm = self.dropout(decoded_fm)
decoded_fm = self.act_function(decoded_fm)
decoded_fm = self.conv_layer3(decoded_fm)
if self.use_bn:
decoded_fm = self.bn(decoded_fm)
if self.use_dropout:
decoded_fm = self.dropout(decoded_fm)
decoded_fm = self.act_function(decoded_fm)
return decoded_fm
def crop_image(self, image, target_shape):
center_height = image.shape[2] // 2
center_width = image.shape[3] // 2
top_left = center_height - round(target_shape[2] / 2)
top_right = top_left + target_shape[2]
bottom_left = center_width - round(target_shape[3] / 2)
bottom_right = bottom_left + target_shape[3]
self.new_image = image[:, :, top_left:top_right, bottom_left:bottom_right]
return self.new_image
class UnetGenerator(nn.Module):
def __init__(self, input_channels, output_channels, hidden_channels=32):
super(UnetGenerator, self).__init__()
self.unet_first_layer = nn.Sequential(
nn.Conv2d(input_channels, hidden_channels, 1),
nn.LeakyReLU(0.2))
#self.unet_first_layer = FeatureMapBlock(input_channels, hidden_channels)
self.encoder1 = EncoderUnit(hidden_channels, use_dropout=True)
self.encoder2 = EncoderUnit(hidden_channels * 2, use_dropout=True)
self.encoder3 = EncoderUnit(hidden_channels * 4, use_dropout=True)
self.encoder4 = EncoderUnit(hidden_channels * 8)
self.encoder5 = EncoderUnit(hidden_channels * 16)
self.encoder6 = EncoderUnit(hidden_channels * 32)
self.decoder0 = DecoderUnit(hidden_channels * 64)
self.decoder1 = DecoderUnit(hidden_channels * 32)
self.decoder2 = DecoderUnit(hidden_channels * 16)
self.decoder3 = DecoderUnit(hidden_channels * 8)
self.decoder4 = DecoderUnit(hidden_channels * 4)
self.decoder5 = DecoderUnit(hidden_channels * 2)
self.unet_last_layer = nn.Sequential(
nn.Conv2d(hidden_channels, output_channels, 1),
nn.Tanh())
#self.sigmoid = torch.nn.Sigmoid()
def forward(self, real_input):
x0 = self.unet_first_layer(real_input)
x1 = self.encoder1(x0)
x2 = self.encoder2(x1)
x3 = self.encoder3(x2)
x4 = self.encoder4(x3)
x5 = self.encoder5(x4)
x6 = self.encoder6(x5)
x7 = self.decoder0(x6, x5)
x8 = self.decoder1(x7, x4)
x9 = self.decoder2(x8, x3)
x10 = self.decoder3(x9, x2)
x11 = self.decoder4(x10, x1)
x12 = self.decoder5(x11, x0)
gen_image = self.unet_last_layer(x12)
return gen_image
#return self.sigmoid(gen_image)
x = torch.randn(1,3,256,256)
ge = UnetGenerator(3,3)
a = ge(x)
a.shape
"""#Create the Discriminator"""
class PatchGanDis(nn.Module):
def __init__(self, input_channels, hidden_channels=8):
super(PatchGanDis, self).__init__()
self.patchGan_first_layer = nn.Sequential(
nn.Conv2d(input_channels, hidden_channels,1),
nn.LeakyReLU(0.2))
self.patchGan_layer1 = EncoderUnit(hidden_channels, use_bn=False)
self.patchGan_layer2 = EncoderUnit(hidden_channels * 2)
self.patchGan_layer3 = EncoderUnit(hidden_channels * 4)
self.patchGan_layer4 = EncoderUnit(hidden_channels * 8)
self.patchGan_final_layer = nn.Conv2d(hidden_channels * 16, 1, kernel_size=1)
def forward(self, gen_img, real_output):
x = torch.cat((gen_img, real_output), axis=1)
x0 = self.patchGan_first_layer(x)
x1 = self.patchGan_layer1(x0)
x2 = self.patchGan_layer2(x1)
x3 = self.patchGan_layer3(x2)
x4 = self.patchGan_layer4(x3)
realness_probas_marix = self.patchGan_final_layer(x4)
return realness_probas_marix
"""# upload Dataset"""
from google.colab import drive
drive.mount('/content/gdrive')
# New parameters
lCGAN_criterion = nn.BCEWithLogitsLoss()
pix_dist_criterion = nn.L1Loss()
lambda_pix_dist = 200
n_epochs = 30
input_img_channels = 3
real_img_channels = 3
display_step = 200
batch_size = 4
lr = 0.0002
target_shape = 256
device = 'cuda'
dataset_path = '/content/drive/MyDrive/Colab Notebooks/deep_learinng_projects/computer_vision/dataset/map_dataset/maps'
transform = transforms.Compose([
transforms.ToTensor(),
])
import torchvision
dataset = torchvision.datasets.ImageFolder(dataset_path, transform=transform)
gen = UnetGenerator(input_img_channels, real_img_channels).to(device)
gen_opt = torch.optim.Adam(gen.parameters(), lr=lr)
disc = PatchGanDis(input_img_channels + real_img_channels).to(device)
disc_opt = torch.optim.Adam(disc.parameters(), lr=lr)
def weights_init(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
torch.nn.init.normal_(m.weight, 0.0, 0.02)
if isinstance(m, nn.BatchNorm2d):
torch.nn.init.normal_(m.weight, 0.0, 0.02)
torch.nn.init.constant_(m.bias, 0)
gen = gen.apply(weights_init)
disc = disc.apply(weights_init)
def plot_images(image_tensor, num_images=25, size=(1, 28, 28)):
image_shifted = image_tensor
image_unflat = image_shifted.detach().cpu().view(-1, *size)
image_grid = make_grid(image_unflat[:num_images], nrow=5)
plt.imshow(image_grid.permute(1, 2, 0).squeeze())
plt.show()
from skimage import color
import numpy as np
mean_generator_loss = 0
mean_discriminator_loss = 0
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
cur_step = 0
discriminator_losses = []
generator_losses = []
for epoch in range(n_epochs):
# Dataloader returns the batches
for image, _ in dataloader:
image_width = image.shape[3]
condition = image[:, :, :, :image_width // 2]
condition = nn.functional.interpolate(condition, size=target_shape)
real = image[:, :, :, image_width // 2:]
real = nn.functional.interpolate(real, size=target_shape)
cur_batch_size = len(condition)
condition = condition.to(device)
real = real.to(device)
### Update discriminator ###
disc_opt.zero_grad()
with torch.no_grad():
fake = gen(condition)
disc_fake_hat = disc(fake.detach(), condition) # Detach generator
disc_fake_loss = lCGAN_criterion(disc_fake_hat, torch.zeros_like(disc_fake_hat))
disc_real_hat = disc(real, condition)
disc_real_loss = lCGAN_criterion(disc_real_hat, torch.ones_like(disc_real_hat))
disc_loss = (disc_fake_loss + disc_real_loss) / 2
disc_loss.backward(retain_graph=True)
disc_opt.step()
### Update generator ###
gen_opt.zero_grad()
fake = gen(condition)
disc_fake_bar = disc(fake, condition)
gen_adv_loss = lCGAN_criterion(disc_fake_bar, torch.ones_like(disc_fake_bar))
gen_rec_loss = pix_dist_criterion(real, fake)
gen_loss_tot = gen_adv_loss + lambda_pix_dist * gen_rec_loss
gen_loss_tot.backward()
gen_opt.step()
mean_discriminator_loss += disc_loss.item() / display_step
mean_generator_loss += gen_loss_tot.item() / display_step
discriminator_losses.append(mean_discriminator_loss)
generator_losses.append(mean_discriminator_loss)
#Visualization
if cur_step % display_step == 0:
if cur_step > 0:
print(f"Epoch {epoch}: Step {cur_step}: Generator (U-Net) loss: {mean_generator_loss}, Discriminator loss: {mean_discriminator_loss}")
else:
print("Pretrained initial state")
plot_images(condition, size=(input_img_channels, target_shape, target_shape))
plot_images(real, size=(input_img_channels, target_shape, target_shape))
plot_images(fake, size=(input_img_channels, target_shape, target_shape))
mean_generator_loss = 0
mean_discriminator_loss = 0
cur_step += 1
| forward | identifier_name |
pix2pix_sat_imgs.py | # -*- coding: utf-8 -*-
"""Copie de pix2pix_sat_imgs.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1gedyO3q39-3CGiaQPvOoKASf5_SQZKjP
#Image-to-Image Translation with Conditional Adversarial Nets
In this notebook we impleted Pix2Pix by , an image-to-image translation using Conditional Adversarial Nets. The achitecture uses the conditinal version of GANs with the following specifities:
-The generator uses Unet architecture;
-And the Discrimitor implementes a Patch version called PatchGAN
"""
# Import of the required librairies
import torch
from torch import nn
from torchvision import transforms
from torchvision.utils import make_grid
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import torch.nn.functional as F
torch.manual_seed(0)
"""# Generator
The Generator uses Unet architecture:
"""
class EncoderUnit(nn.Module):
def __init__(self, input_channels, use_dropout=False, use_bn=True):
|
def forward(self, input_fm):
out = self.conv_layer1(input_fm)
if self.use_bn:
out = self.bn(out)
if self.use_dropout:
out = self.dropout(out)
out = self.act_function(out)
out = self.conv_layer2(out)
if self.use_bn:
out = self.bn(out)
if self.use_dropout:
out = self.dropout(out)
out = self.act_function(out)
out = self.pooling_layer(out)
return out
x = torch.randn((1,3,256,256))
eu = EncoderUnit(3)
y = eu(x)
y.shape
class DecoderUnit(nn.Module):
def __init__(self, input_channels, use_dropout=False, use_bn=True):
super(DecoderUnit, self).__init__()
self.upsampling_layer = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv_layer1 = nn.Conv2d(input_channels, input_channels // 2, kernel_size=2)
self.conv_layer2 = nn.Conv2d(input_channels, input_channels // 2, kernel_size=3, padding=1)
self.conv_layer3 = nn.Conv2d(input_channels // 2, input_channels // 2, kernel_size=2, padding=1)
if use_bn:
self.bn = nn.BatchNorm2d(input_channels // 2)
self.use_bn = use_bn
self.act_function = nn.ReLU()
if use_dropout:
self.dropout = nn.Dropout()
self.use_dropout = use_dropout
def forward(self, input_fm, input_fm_skip_con):
input_fm = self.upsampling_layer(input_fm)
input_fm = self.conv_layer1(input_fm)
cropped_input_fm = self.crop_image(input_fm_skip_con, input_fm.shape)
out_fm = torch.cat([input_fm, cropped_input_fm ], axis=1)
decoded_fm = self.conv_layer2(out_fm)
if self.use_bn:
decoded_fm = self.bn(decoded_fm)
if self.use_dropout:
decoded_fm = self.dropout(decoded_fm)
decoded_fm = self.act_function(decoded_fm)
decoded_fm = self.conv_layer3(decoded_fm)
if self.use_bn:
decoded_fm = self.bn(decoded_fm)
if self.use_dropout:
decoded_fm = self.dropout(decoded_fm)
decoded_fm = self.act_function(decoded_fm)
return decoded_fm
def crop_image(self, image, target_shape):
center_height = image.shape[2] // 2
center_width = image.shape[3] // 2
top_left = center_height - round(target_shape[2] / 2)
top_right = top_left + target_shape[2]
bottom_left = center_width - round(target_shape[3] / 2)
bottom_right = bottom_left + target_shape[3]
self.new_image = image[:, :, top_left:top_right, bottom_left:bottom_right]
return self.new_image
class UnetGenerator(nn.Module):
def __init__(self, input_channels, output_channels, hidden_channels=32):
super(UnetGenerator, self).__init__()
self.unet_first_layer = nn.Sequential(
nn.Conv2d(input_channels, hidden_channels, 1),
nn.LeakyReLU(0.2))
#self.unet_first_layer = FeatureMapBlock(input_channels, hidden_channels)
self.encoder1 = EncoderUnit(hidden_channels, use_dropout=True)
self.encoder2 = EncoderUnit(hidden_channels * 2, use_dropout=True)
self.encoder3 = EncoderUnit(hidden_channels * 4, use_dropout=True)
self.encoder4 = EncoderUnit(hidden_channels * 8)
self.encoder5 = EncoderUnit(hidden_channels * 16)
self.encoder6 = EncoderUnit(hidden_channels * 32)
self.decoder0 = DecoderUnit(hidden_channels * 64)
self.decoder1 = DecoderUnit(hidden_channels * 32)
self.decoder2 = DecoderUnit(hidden_channels * 16)
self.decoder3 = DecoderUnit(hidden_channels * 8)
self.decoder4 = DecoderUnit(hidden_channels * 4)
self.decoder5 = DecoderUnit(hidden_channels * 2)
self.unet_last_layer = nn.Sequential(
nn.Conv2d(hidden_channels, output_channels, 1),
nn.Tanh())
#self.sigmoid = torch.nn.Sigmoid()
def forward(self, real_input):
x0 = self.unet_first_layer(real_input)
x1 = self.encoder1(x0)
x2 = self.encoder2(x1)
x3 = self.encoder3(x2)
x4 = self.encoder4(x3)
x5 = self.encoder5(x4)
x6 = self.encoder6(x5)
x7 = self.decoder0(x6, x5)
x8 = self.decoder1(x7, x4)
x9 = self.decoder2(x8, x3)
x10 = self.decoder3(x9, x2)
x11 = self.decoder4(x10, x1)
x12 = self.decoder5(x11, x0)
gen_image = self.unet_last_layer(x12)
return gen_image
#return self.sigmoid(gen_image)
x = torch.randn(1,3,256,256)
ge = UnetGenerator(3,3)
a = ge(x)
a.shape
"""#Create the Discriminator"""
class PatchGanDis(nn.Module):
def __init__(self, input_channels, hidden_channels=8):
super(PatchGanDis, self).__init__()
self.patchGan_first_layer = nn.Sequential(
nn.Conv2d(input_channels, hidden_channels,1),
nn.LeakyReLU(0.2))
self.patchGan_layer1 = EncoderUnit(hidden_channels, use_bn=False)
self.patchGan_layer2 = EncoderUnit(hidden_channels * 2)
self.patchGan_layer3 = EncoderUnit(hidden_channels * 4)
self.patchGan_layer4 = EncoderUnit(hidden_channels * 8)
self.patchGan_final_layer = nn.Conv2d(hidden_channels * 16, 1, kernel_size=1)
def forward(self, gen_img, real_output):
x = torch.cat((gen_img, real_output), axis=1)
x0 = self.patchGan_first_layer(x)
x1 = self.patchGan_layer1(x0)
x2 = self.patchGan_layer2(x1)
x3 = self.patchGan_layer3(x2)
x4 = self.patchGan_layer4(x3)
realness_probas_marix = self.patchGan_final_layer(x4)
return realness_probas_marix
"""# upload Dataset"""
from google.colab import drive
drive.mount('/content/gdrive')
# New parameters
lCGAN_criterion = nn.BCEWithLogitsLoss()
pix_dist_criterion = nn.L1Loss()
lambda_pix_dist = 200
n_epochs = 30
input_img_channels = 3
real_img_channels = 3
display_step = 200
batch_size = 4
lr = 0.0002
target_shape = 256
device = 'cuda'
dataset_path = '/content/drive/MyDrive/Colab Notebooks/deep_learinng_projects/computer_vision/dataset/map_dataset/maps'
transform = transforms.Compose([
transforms.ToTensor(),
])
import torchvision
dataset = torchvision.datasets.ImageFolder(dataset_path, transform=transform)
gen = UnetGenerator(input_img_channels, real_img_channels).to(device)
gen_opt = torch.optim.Adam(gen.parameters(), lr=lr)
disc = PatchGanDis(input_img_channels + real_img_channels).to(device)
disc_opt = torch.optim.Adam(disc.parameters(), lr=lr)
def weights_init(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
torch.nn.init.normal_(m.weight, 0.0, 0.02)
if isinstance(m, nn.BatchNorm2d):
torch.nn.init.normal_(m.weight, 0.0, 0.02)
torch.nn.init.constant_(m.bias, 0)
gen = gen.apply(weights_init)
disc = disc.apply(weights_init)
def plot_images(image_tensor, num_images=25, size=(1, 28, 28)):
image_shifted = image_tensor
image_unflat = image_shifted.detach().cpu().view(-1, *size)
image_grid = make_grid(image_unflat[:num_images], nrow=5)
plt.imshow(image_grid.permute(1, 2, 0).squeeze())
plt.show()
from skimage import color
import numpy as np
mean_generator_loss = 0
mean_discriminator_loss = 0
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
cur_step = 0
discriminator_losses = []
generator_losses = []
for epoch in range(n_epochs):
# Dataloader returns the batches
for image, _ in dataloader:
image_width = image.shape[3]
condition = image[:, :, :, :image_width // 2]
condition = nn.functional.interpolate(condition, size=target_shape)
real = image[:, :, :, image_width // 2:]
real = nn.functional.interpolate(real, size=target_shape)
cur_batch_size = len(condition)
condition = condition.to(device)
real = real.to(device)
### Update discriminator ###
disc_opt.zero_grad()
with torch.no_grad():
fake = gen(condition)
disc_fake_hat = disc(fake.detach(), condition) # Detach generator
disc_fake_loss = lCGAN_criterion(disc_fake_hat, torch.zeros_like(disc_fake_hat))
disc_real_hat = disc(real, condition)
disc_real_loss = lCGAN_criterion(disc_real_hat, torch.ones_like(disc_real_hat))
disc_loss = (disc_fake_loss + disc_real_loss) / 2
disc_loss.backward(retain_graph=True)
disc_opt.step()
### Update generator ###
gen_opt.zero_grad()
fake = gen(condition)
disc_fake_bar = disc(fake, condition)
gen_adv_loss = lCGAN_criterion(disc_fake_bar, torch.ones_like(disc_fake_bar))
gen_rec_loss = pix_dist_criterion(real, fake)
gen_loss_tot = gen_adv_loss + lambda_pix_dist * gen_rec_loss
gen_loss_tot.backward()
gen_opt.step()
mean_discriminator_loss += disc_loss.item() / display_step
mean_generator_loss += gen_loss_tot.item() / display_step
discriminator_losses.append(mean_discriminator_loss)
generator_losses.append(mean_discriminator_loss)
#Visualization
if cur_step % display_step == 0:
if cur_step > 0:
print(f"Epoch {epoch}: Step {cur_step}: Generator (U-Net) loss: {mean_generator_loss}, Discriminator loss: {mean_discriminator_loss}")
else:
print("Pretrained initial state")
plot_images(condition, size=(input_img_channels, target_shape, target_shape))
plot_images(real, size=(input_img_channels, target_shape, target_shape))
plot_images(fake, size=(input_img_channels, target_shape, target_shape))
mean_generator_loss = 0
mean_discriminator_loss = 0
cur_step += 1
| super(EncoderUnit, self).__init__()
self.conv_layer1 = nn.Conv2d(input_channels, input_channels * 2,kernel_size=3,padding=1)
self.conv_layer2 = nn.Conv2d(input_channels * 2, input_channels * 2, kernel_size=3, padding=1)
self.act_function= nn.LeakyReLU(0.2)
self.pooling_layer = nn.MaxPool2d(kernel_size=2, stride=2)
if use_bn:
self.bn = nn.BatchNorm2d(input_channels * 2)
self.use_bn = use_bn
if use_dropout:
self.dropout = nn.Dropout()
self.use_dropout = use_dropout | identifier_body |
exec.rs | // Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cell::RefCell;
use std::collections::HashMap;
use std::sync::Arc;
use thread_local::CachedThreadLocal;
use syntax::{Expr, ExprBuilder, Literals};
use compile::Compiler;
use error::Error;
use input::{ByteInput, CharInput};
use pikevm;
use prog::Program;
use re_builder::RegexOptions;
use re_set;
use re_trait::{RegularExpression, Slot, Locations, as_slots};
use re_unicode;
use utf8::next_utf8;
/// `Exec` manages the execution of a regular expression.
///
/// In particular, this manages the various compiled forms of a single regular
/// expression and the choice of which matching engine to use to execute a
/// regular expression.
pub struct Exec {
/// All read only state.
ro: Arc<ExecReadOnly>,
/// Caches for the various matching engines.
cache: CachedThreadLocal<ProgramCache>,
}
/// `ExecNoSync` is like `Exec`, except it embeds a reference to a cache. This
/// means it is no longer Sync, but we can now avoid the overhead of
/// synchronization to fetch the cache.
#[derive(Debug)]
pub struct ExecNoSync<'c> {
/// All read only state.
ro: &'c Arc<ExecReadOnly>,
/// Caches for the various matching engines.
cache: &'c ProgramCache,
}
/// `ExecNoSyncStr` is like `ExecNoSync`, but matches on &str instead of &[u8].
pub struct ExecNoSyncStr<'c>(ExecNoSync<'c>);
/// `ExecReadOnly` comprises all read only state for a regex. Namely, all such
/// state is determined at compile time and never changes during search.
#[derive(Debug)]
struct ExecReadOnly {
/// The original regular expressions given by the caller to compile.
res: Vec<String>,
/// A compiled program that is used in the NFA simulation and backtracking.
/// It can be byte-based or Unicode codepoint based.
///
/// N.B. It is not possibly to make this byte-based from the public API.
/// It is only used for testing byte based programs in the NFA simulations.
nfa: Program,
/// match_type encodes as much upfront knowledge about how we're going to
/// execute a search as possible.
match_type: MatchType,
}
/// Facilitates the construction of an executor by exposing various knobs
/// to control how a regex is executed and what kinds of resources it's
/// permitted to use.
pub struct ExecBuilder {
options: RegexOptions,
match_type: Option<MatchType>,
bytes: bool,
only_utf8: bool,
}
/// Parsed represents a set of parsed regular expressions and their detected
/// literals.
struct Parsed {
exprs: Vec<Expr>,
bytes: bool,
}
impl ExecBuilder {
/// Create a regex execution builder.
///
/// This uses default settings for everything except the regex itself,
/// which must be provided. Further knobs can be set by calling methods,
/// and then finally, `build` to actually create the executor.
pub fn new(re: &str) -> Self {
Self::new_many(&[re])
}
/// Like new, but compiles the union of the given regular expressions.
///
/// Note that when compiling 2 or more regular expressions, capture groups
/// are completely unsupported. (This means both `find` and `captures`
/// wont work.)
pub fn new_many<I, S>(res: I) -> Self
where S: AsRef<str>, I: IntoIterator<Item=S> {
let mut opts = RegexOptions::default();
opts.pats = res.into_iter().map(|s| s.as_ref().to_owned()).collect();
Self::new_options(opts)
}
/// Create a regex execution builder.
pub fn new_options(opts: RegexOptions) -> Self {
ExecBuilder {
options: opts,
match_type: None,
bytes: false,
only_utf8: true,
}
}
/// Set the matching engine to be automatically determined.
///
/// This is the default state and will apply whatever optimizations are
/// possible, such as running a DFA. | ///
/// This overrides whatever was previously set via the `nfa` or
/// `bounded_backtracking` methods.
pub fn automatic(mut self) -> Self {
self.match_type = None;
self
}
/// Sets the matching engine to use the NFA algorithm no matter what
/// optimizations are possible.
///
/// This overrides whatever was previously set via the `automatic` or
/// `bounded_backtracking` methods.
pub fn nfa(mut self) -> Self {
self.match_type = Some(MatchType::Nfa);
self
}
/// Compiles byte based programs for use with the NFA matching engines.
///
/// By default, the NFA engines match on Unicode scalar values. They can
/// be made to use byte based programs instead. In general, the byte based
/// programs are slower because of a less efficient encoding of character
/// classes.
///
/// Note that this does not impact DFA matching engines, which always
/// execute on bytes.
pub fn bytes(mut self, yes: bool) -> Self {
self.bytes = yes;
self
}
/// When disabled, the program compiled may match arbitrary bytes.
///
/// When enabled (the default), all compiled programs exclusively match
/// valid UTF-8 bytes.
pub fn only_utf8(mut self, yes: bool) -> Self {
self.only_utf8 = yes;
self
}
/// Set the Unicode flag.
pub fn unicode(mut self, yes: bool) -> Self {
self.options.unicode = yes;
self
}
/// Parse the current set of patterns into their AST and extract literals.
fn parse(&self) -> Result<Parsed, Error> {
let mut exprs = Vec::with_capacity(self.options.pats.len());
let mut prefixes = Some(Literals::empty());
let mut suffixes = Some(Literals::empty());
let mut bytes = false;
let is_set = self.options.pats.len() > 1;
// If we're compiling a regex set and that set has any anchored
// expressions, then disable all literal optimizations.
for pat in &self.options.pats {
let parser =
ExprBuilder::new()
.case_insensitive(self.options.case_insensitive)
.multi_line(self.options.multi_line)
.dot_matches_new_line(self.options.dot_matches_new_line)
.swap_greed(self.options.swap_greed)
.ignore_whitespace(self.options.ignore_whitespace)
.unicode(self.options.unicode)
.allow_bytes(!self.only_utf8);
let expr = try!(parser.parse(pat));
bytes = bytes || expr.has_bytes();
if !expr.is_anchored_start() && expr.has_anchored_start() {
// Partial anchors unfortunately make it hard to use prefixes,
// so disable them.
prefixes = None;
} else if is_set && expr.is_anchored_start() {
// Regex sets with anchors do not go well with literal
// optimizations.
prefixes = None;
}
prefixes = prefixes.and_then(|mut prefixes| {
if !prefixes.union_prefixes(&expr) {
None
} else {
Some(prefixes)
}
});
if !expr.is_anchored_end() && expr.has_anchored_end() {
// Partial anchors unfortunately make it hard to use suffixes,
// so disable them.
suffixes = None;
} else if is_set && expr.is_anchored_end() {
// Regex sets with anchors do not go well with literal
// optimizations.
prefixes = None;
}
suffixes = suffixes.and_then(|mut suffixes| {
if !suffixes.union_suffixes(&expr) {
None
} else {
Some(suffixes)
}
});
exprs.push(expr);
}
Ok(Parsed {
exprs: exprs,
bytes: bytes,
})
}
/// Build an executor that can run a regular expression.
pub fn build(self) -> Result<Exec, Error> {
// Special case when we have no patterns to compile.
// This can happen when compiling a regex set.
if self.options.pats.is_empty() {
let ro = Arc::new(ExecReadOnly {
res: vec![],
nfa: Program::new(),
match_type: MatchType::Nothing,
});
return Ok(Exec { ro: ro, cache: CachedThreadLocal::new() });
}
let parsed = try!(self.parse());
let nfa = try!(
Compiler::new()
.size_limit(self.options.size_limit)
.bytes(self.bytes || parsed.bytes)
.only_utf8(self.only_utf8)
.compile(&parsed.exprs));
let mut ro = ExecReadOnly {
res: self.options.pats,
nfa: nfa,
match_type: MatchType::Nothing,
};
ro.match_type = ro.choose_match_type(self.match_type);
let ro = Arc::new(ro);
Ok(Exec { ro: ro, cache: CachedThreadLocal::new() })
}
}
impl<'c> RegularExpression for ExecNoSyncStr<'c> {
type Text = str;
fn slots_len(&self) -> usize { self.0.slots_len() }
fn next_after_empty(&self, text: &str, i: usize) -> usize {
next_utf8(text.as_bytes(), i)
}
#[inline(always)] // reduces constant overhead
fn shortest_match_at(&self, text: &str, start: usize) -> Option<usize> {
self.0.shortest_match_at(text.as_bytes(), start)
}
#[inline(always)] // reduces constant overhead
fn is_match_at(&self, text: &str, start: usize) -> bool {
self.0.is_match_at(text.as_bytes(), start)
}
#[inline(always)] // reduces constant overhead
fn find_at(&self, text: &str, start: usize) -> Option<(usize, usize)> {
self.0.find_at(text.as_bytes(), start)
}
#[inline(always)] // reduces constant overhead
fn read_captures_at(
&self,
locs: &mut Locations,
text: &str,
start: usize,
) -> Option<(usize, usize)> {
self.0.read_captures_at(locs, text.as_bytes(), start)
}
}
impl<'c> RegularExpression for ExecNoSync<'c> {
type Text = [u8];
/// Returns the number of capture slots in the regular expression. (There
/// are two slots for every capture group, corresponding to possibly empty
/// start and end locations of the capture.)
fn slots_len(&self) -> usize {
self.ro.nfa.captures.len() * 2
}
fn next_after_empty(&self, _text: &[u8], i: usize) -> usize {
i + 1
}
/// Returns the end of a match location, possibly occurring before the
/// end location of the correct leftmost-first match.
#[inline(always)] // reduces constant overhead
fn shortest_match_at(&self, text: &[u8], start: usize) -> Option<usize> {
match self.ro.match_type {
MatchType::Nfa => self.shortest_nfa(text, start),
MatchType::Nothing => None,
}
}
/// Returns true if and only if the regex matches text.
///
/// For single regular expressions, this is equivalent to calling
/// shortest_match(...).is_some().
#[inline(always)] // reduces constant overhead
fn is_match_at(&self, text: &[u8], start: usize) -> bool {
// We need to do this dance because shortest_match relies on the NFA
// filling in captures[1], but a RegexSet has no captures. In other
// words, a RegexSet can't (currently) use shortest_match. ---AG
match self.ro.match_type {
MatchType::Nfa => self.match_nfa(text, start),
MatchType::Nothing => false,
}
}
/// Finds the start and end location of the leftmost-first match, starting
/// at the given location.
#[inline(always)] // reduces constant overhead
fn find_at(&self, text: &[u8], start: usize) -> Option<(usize, usize)> {
match self.ro.match_type {
MatchType::Nfa => self.find_nfa(text, start),
MatchType::Nothing => None,
}
}
/// Finds the start and end location of the leftmost-first match and also
/// fills in all matching capture groups.
///
/// The number of capture slots given should be equal to the total number
/// of capture slots in the compiled program.
///
/// Note that the first two slots always correspond to the start and end
/// locations of the overall match.
fn read_captures_at(
&self,
locs: &mut Locations,
text: &[u8],
start: usize,
) -> Option<(usize, usize)> {
let slots = as_slots(locs);
for slot in slots.iter_mut() {
*slot = None;
}
// If the caller unnecessarily uses this, then we try to save them
// from themselves.
match slots.len() {
0 => return self.find_at(text, start),
2 => {
return self.find_at(text, start).map(|(s, e)| {
slots[0] = Some(s);
slots[1] = Some(e);
(s, e)
});
}
_ => {} // fallthrough
}
match self.ro.match_type {
MatchType::Nfa => {
self.captures_nfa(slots, text, start)
}
MatchType::Nothing => None,
}
}
}
impl<'c> ExecNoSync<'c> {
/// Executes the NFA engine to return whether there is a match or not.
///
/// Ideally, we could use shortest_nfa(...).is_some() and get the same
/// performance characteristics, but regex sets don't have captures, which
/// shortest_nfa depends on.
fn match_nfa(
&self,
text: &[u8],
start: usize,
) -> bool {
self.exec_pikevm(&mut [false], &mut [], true, text, start)
}
/// Finds the shortest match using an NFA.
fn shortest_nfa(&self, text: &[u8], start: usize) -> Option<usize> {
let mut slots = [None, None];
if self.exec_pikevm(&mut [false], &mut slots, true, text, start) {
slots[1]
} else {
None
}
}
/// Like find, but executes an NFA engine.
fn find_nfa(
&self,
text: &[u8],
start: usize,
) -> Option<(usize, usize)> {
let mut slots = [None, None];
if self.exec_pikevm(&mut [false], &mut slots, false, text, start) {
match (slots[0], slots[1]) {
(Some(s), Some(e)) => Some((s, e)),
_ => None,
}
} else {
None
}
}
/// Like find_nfa, but fills in captures.
///
/// `slots` should have length equal to `2 * nfa.captures.len()`.
fn captures_nfa(
&self,
slots: &mut [Slot],
text: &[u8],
start: usize,
) -> Option<(usize, usize)> {
if self.exec_pikevm(&mut [false], slots, false, text, start) {
match (slots[0], slots[1]) {
(Some(s), Some(e)) => Some((s, e)),
_ => None,
}
} else {
None
}
}
/// Always run the NFA algorithm.
fn exec_pikevm(
&self,
matches: &mut [bool],
slots: &mut [Slot],
quit_after_match: bool,
text: &[u8],
start: usize,
) -> bool {
use input::Input;
let cache = &mut self.cache.borrow_mut().pikevm;
if start == 0 {
cache.reset();
} else {
cache.prep_for_next_match();
}
if self.ro.nfa.uses_bytes() {
let input = ByteInput::new(text, self.ro.nfa.only_utf8);
let mut at = input.at(start);
let mut fsm = pikevm::Fsm::new(
&self.ro.nfa,
quit_after_match,
);
loop {
let stop = fsm.next(
cache,
matches,
slots,
at,
input.only_utf8(),
);
if stop || at.is_end() {
break;
}
at = input.at(at.next_pos());
}
} else {
let input = CharInput::new(text);
let mut at = input.at(start);
let mut fsm = pikevm::Fsm::new(
&self.ro.nfa,
quit_after_match,
);
loop {
let stop = fsm.next(
cache,
matches,
slots,
at,
input.only_utf8(),
);
if stop || at.is_end() {
break;
}
at = input.at(at.next_pos());
}
}
matches.iter().any(|b| *b)
}
/// Finds which regular expressions match the given text.
///
/// `matches` should have length equal to the number of regexes being
/// searched.
///
/// This is only useful when one wants to know which regexes in a set
/// match some text.
pub fn many_matches_at(
&self,
matches: &mut [bool],
text: &[u8],
start: usize,
) -> bool {
use self::MatchType::*;
match self.ro.match_type {
Nfa => self.exec_pikevm(matches, &mut [], false, text, start),
Nothing => false,
}
}
pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> {
&self.ro.nfa.capture_name_idx
}
}
impl<'c> ExecNoSyncStr<'c> {
pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> {
self.0.capture_name_idx()
}
}
impl Exec {
/// Get a searcher that isn't Sync.
#[inline(always)] // reduces constant overhead
pub fn searcher(&self) -> ExecNoSync {
let create = || Box::new(RefCell::new(ProgramCacheInner::new(&self.ro)));
ExecNoSync {
ro: &self.ro, // a clone is too expensive here! (and not needed)
cache: self.cache.get_or(create),
}
}
/// Get a searcher that isn't Sync and can match on &str.
#[inline(always)] // reduces constant overhead
pub fn searcher_str(&self) -> ExecNoSyncStr {
ExecNoSyncStr(self.searcher())
}
/// Build a Regex from this executor.
pub fn into_regex(self) -> re_unicode::Regex {
re_unicode::Regex::from(self)
}
/// Build a RegexSet from this executor.
pub fn into_regex_set(self) -> re_set::unicode::RegexSet {
re_set::unicode::RegexSet::from(self)
}
/// The original regular expressions given by the caller that were
/// compiled.
pub fn regex_strings(&self) -> &[String] {
&self.ro.res
}
/// Return a slice of capture names.
///
/// Any capture that isn't named is None.
pub fn capture_names(&self) -> &[Option<String>] {
&self.ro.nfa.captures
}
/// Return a reference to named groups mapping (from group name to
/// group position).
pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> {
&self.ro.nfa.capture_name_idx
}
}
impl Clone for Exec {
fn clone(&self) -> Exec {
Exec {
ro: self.ro.clone(),
cache: CachedThreadLocal::new(),
}
}
}
impl ExecReadOnly {
fn choose_match_type(&self, hint: Option<MatchType>) -> MatchType {
use self::MatchType::*;
if let Some(Nfa) = hint {
return hint.unwrap();
}
// If the NFA is empty, then we'll never match anything.
if self.nfa.insts.is_empty() {
return Nothing;
}
Nfa
}
}
#[derive(Clone, Copy, Debug)]
enum MatchType {
/// An NFA variant.
Nfa,
/// No match is ever possible, so don't ever try to search.
Nothing,
}
/// `ProgramCache` maintains reusable allocations for each matching engine
/// available to a particular program.
pub type ProgramCache = RefCell<ProgramCacheInner>;
#[derive(Clone, Debug)]
pub struct ProgramCacheInner {
pub pikevm: pikevm::Cache,
}
impl ProgramCacheInner {
fn new(ro: &ExecReadOnly) -> Self {
ProgramCacheInner {
pikevm: pikevm::Cache::new(&ro.nfa),
}
}
} | random_line_split | |
exec.rs | // Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cell::RefCell;
use std::collections::HashMap;
use std::sync::Arc;
use thread_local::CachedThreadLocal;
use syntax::{Expr, ExprBuilder, Literals};
use compile::Compiler;
use error::Error;
use input::{ByteInput, CharInput};
use pikevm;
use prog::Program;
use re_builder::RegexOptions;
use re_set;
use re_trait::{RegularExpression, Slot, Locations, as_slots};
use re_unicode;
use utf8::next_utf8;
/// `Exec` manages the execution of a regular expression.
///
/// In particular, this manages the various compiled forms of a single regular
/// expression and the choice of which matching engine to use to execute a
/// regular expression.
pub struct Exec {
/// All read only state.
ro: Arc<ExecReadOnly>,
/// Caches for the various matching engines.
cache: CachedThreadLocal<ProgramCache>,
}
/// `ExecNoSync` is like `Exec`, except it embeds a reference to a cache. This
/// means it is no longer Sync, but we can now avoid the overhead of
/// synchronization to fetch the cache.
#[derive(Debug)]
pub struct ExecNoSync<'c> {
/// All read only state.
ro: &'c Arc<ExecReadOnly>,
/// Caches for the various matching engines.
cache: &'c ProgramCache,
}
/// `ExecNoSyncStr` is like `ExecNoSync`, but matches on &str instead of &[u8].
pub struct | <'c>(ExecNoSync<'c>);
/// `ExecReadOnly` comprises all read only state for a regex. Namely, all such
/// state is determined at compile time and never changes during search.
#[derive(Debug)]
struct ExecReadOnly {
/// The original regular expressions given by the caller to compile.
res: Vec<String>,
/// A compiled program that is used in the NFA simulation and backtracking.
/// It can be byte-based or Unicode codepoint based.
///
/// N.B. It is not possibly to make this byte-based from the public API.
/// It is only used for testing byte based programs in the NFA simulations.
nfa: Program,
/// match_type encodes as much upfront knowledge about how we're going to
/// execute a search as possible.
match_type: MatchType,
}
/// Facilitates the construction of an executor by exposing various knobs
/// to control how a regex is executed and what kinds of resources it's
/// permitted to use.
pub struct ExecBuilder {
options: RegexOptions,
match_type: Option<MatchType>,
bytes: bool,
only_utf8: bool,
}
/// Parsed represents a set of parsed regular expressions and their detected
/// literals.
struct Parsed {
exprs: Vec<Expr>,
bytes: bool,
}
impl ExecBuilder {
/// Create a regex execution builder.
///
/// This uses default settings for everything except the regex itself,
/// which must be provided. Further knobs can be set by calling methods,
/// and then finally, `build` to actually create the executor.
pub fn new(re: &str) -> Self {
Self::new_many(&[re])
}
/// Like new, but compiles the union of the given regular expressions.
///
/// Note that when compiling 2 or more regular expressions, capture groups
/// are completely unsupported. (This means both `find` and `captures`
/// wont work.)
pub fn new_many<I, S>(res: I) -> Self
where S: AsRef<str>, I: IntoIterator<Item=S> {
let mut opts = RegexOptions::default();
opts.pats = res.into_iter().map(|s| s.as_ref().to_owned()).collect();
Self::new_options(opts)
}
/// Create a regex execution builder.
pub fn new_options(opts: RegexOptions) -> Self {
ExecBuilder {
options: opts,
match_type: None,
bytes: false,
only_utf8: true,
}
}
/// Set the matching engine to be automatically determined.
///
/// This is the default state and will apply whatever optimizations are
/// possible, such as running a DFA.
///
/// This overrides whatever was previously set via the `nfa` or
/// `bounded_backtracking` methods.
pub fn automatic(mut self) -> Self {
self.match_type = None;
self
}
/// Sets the matching engine to use the NFA algorithm no matter what
/// optimizations are possible.
///
/// This overrides whatever was previously set via the `automatic` or
/// `bounded_backtracking` methods.
pub fn nfa(mut self) -> Self {
self.match_type = Some(MatchType::Nfa);
self
}
/// Compiles byte based programs for use with the NFA matching engines.
///
/// By default, the NFA engines match on Unicode scalar values. They can
/// be made to use byte based programs instead. In general, the byte based
/// programs are slower because of a less efficient encoding of character
/// classes.
///
/// Note that this does not impact DFA matching engines, which always
/// execute on bytes.
pub fn bytes(mut self, yes: bool) -> Self {
self.bytes = yes;
self
}
/// When disabled, the program compiled may match arbitrary bytes.
///
/// When enabled (the default), all compiled programs exclusively match
/// valid UTF-8 bytes.
pub fn only_utf8(mut self, yes: bool) -> Self {
self.only_utf8 = yes;
self
}
/// Set the Unicode flag.
pub fn unicode(mut self, yes: bool) -> Self {
self.options.unicode = yes;
self
}
/// Parse the current set of patterns into their AST and extract literals.
fn parse(&self) -> Result<Parsed, Error> {
let mut exprs = Vec::with_capacity(self.options.pats.len());
let mut prefixes = Some(Literals::empty());
let mut suffixes = Some(Literals::empty());
let mut bytes = false;
let is_set = self.options.pats.len() > 1;
// If we're compiling a regex set and that set has any anchored
// expressions, then disable all literal optimizations.
for pat in &self.options.pats {
let parser =
ExprBuilder::new()
.case_insensitive(self.options.case_insensitive)
.multi_line(self.options.multi_line)
.dot_matches_new_line(self.options.dot_matches_new_line)
.swap_greed(self.options.swap_greed)
.ignore_whitespace(self.options.ignore_whitespace)
.unicode(self.options.unicode)
.allow_bytes(!self.only_utf8);
let expr = try!(parser.parse(pat));
bytes = bytes || expr.has_bytes();
if !expr.is_anchored_start() && expr.has_anchored_start() {
// Partial anchors unfortunately make it hard to use prefixes,
// so disable them.
prefixes = None;
} else if is_set && expr.is_anchored_start() {
// Regex sets with anchors do not go well with literal
// optimizations.
prefixes = None;
}
prefixes = prefixes.and_then(|mut prefixes| {
if !prefixes.union_prefixes(&expr) {
None
} else {
Some(prefixes)
}
});
if !expr.is_anchored_end() && expr.has_anchored_end() {
// Partial anchors unfortunately make it hard to use suffixes,
// so disable them.
suffixes = None;
} else if is_set && expr.is_anchored_end() {
// Regex sets with anchors do not go well with literal
// optimizations.
prefixes = None;
}
suffixes = suffixes.and_then(|mut suffixes| {
if !suffixes.union_suffixes(&expr) {
None
} else {
Some(suffixes)
}
});
exprs.push(expr);
}
Ok(Parsed {
exprs: exprs,
bytes: bytes,
})
}
/// Build an executor that can run a regular expression.
pub fn build(self) -> Result<Exec, Error> {
// Special case when we have no patterns to compile.
// This can happen when compiling a regex set.
if self.options.pats.is_empty() {
let ro = Arc::new(ExecReadOnly {
res: vec![],
nfa: Program::new(),
match_type: MatchType::Nothing,
});
return Ok(Exec { ro: ro, cache: CachedThreadLocal::new() });
}
let parsed = try!(self.parse());
let nfa = try!(
Compiler::new()
.size_limit(self.options.size_limit)
.bytes(self.bytes || parsed.bytes)
.only_utf8(self.only_utf8)
.compile(&parsed.exprs));
let mut ro = ExecReadOnly {
res: self.options.pats,
nfa: nfa,
match_type: MatchType::Nothing,
};
ro.match_type = ro.choose_match_type(self.match_type);
let ro = Arc::new(ro);
Ok(Exec { ro: ro, cache: CachedThreadLocal::new() })
}
}
impl<'c> RegularExpression for ExecNoSyncStr<'c> {
type Text = str;
fn slots_len(&self) -> usize { self.0.slots_len() }
fn next_after_empty(&self, text: &str, i: usize) -> usize {
next_utf8(text.as_bytes(), i)
}
#[inline(always)] // reduces constant overhead
fn shortest_match_at(&self, text: &str, start: usize) -> Option<usize> {
self.0.shortest_match_at(text.as_bytes(), start)
}
#[inline(always)] // reduces constant overhead
fn is_match_at(&self, text: &str, start: usize) -> bool {
self.0.is_match_at(text.as_bytes(), start)
}
#[inline(always)] // reduces constant overhead
fn find_at(&self, text: &str, start: usize) -> Option<(usize, usize)> {
self.0.find_at(text.as_bytes(), start)
}
#[inline(always)] // reduces constant overhead
fn read_captures_at(
&self,
locs: &mut Locations,
text: &str,
start: usize,
) -> Option<(usize, usize)> {
self.0.read_captures_at(locs, text.as_bytes(), start)
}
}
impl<'c> RegularExpression for ExecNoSync<'c> {
type Text = [u8];
/// Returns the number of capture slots in the regular expression. (There
/// are two slots for every capture group, corresponding to possibly empty
/// start and end locations of the capture.)
fn slots_len(&self) -> usize {
self.ro.nfa.captures.len() * 2
}
fn next_after_empty(&self, _text: &[u8], i: usize) -> usize {
i + 1
}
/// Returns the end of a match location, possibly occurring before the
/// end location of the correct leftmost-first match.
#[inline(always)] // reduces constant overhead
fn shortest_match_at(&self, text: &[u8], start: usize) -> Option<usize> {
match self.ro.match_type {
MatchType::Nfa => self.shortest_nfa(text, start),
MatchType::Nothing => None,
}
}
/// Returns true if and only if the regex matches text.
///
/// For single regular expressions, this is equivalent to calling
/// shortest_match(...).is_some().
#[inline(always)] // reduces constant overhead
fn is_match_at(&self, text: &[u8], start: usize) -> bool {
// We need to do this dance because shortest_match relies on the NFA
// filling in captures[1], but a RegexSet has no captures. In other
// words, a RegexSet can't (currently) use shortest_match. ---AG
match self.ro.match_type {
MatchType::Nfa => self.match_nfa(text, start),
MatchType::Nothing => false,
}
}
/// Finds the start and end location of the leftmost-first match, starting
/// at the given location.
#[inline(always)] // reduces constant overhead
fn find_at(&self, text: &[u8], start: usize) -> Option<(usize, usize)> {
match self.ro.match_type {
MatchType::Nfa => self.find_nfa(text, start),
MatchType::Nothing => None,
}
}
/// Finds the start and end location of the leftmost-first match and also
/// fills in all matching capture groups.
///
/// The number of capture slots given should be equal to the total number
/// of capture slots in the compiled program.
///
/// Note that the first two slots always correspond to the start and end
/// locations of the overall match.
fn read_captures_at(
&self,
locs: &mut Locations,
text: &[u8],
start: usize,
) -> Option<(usize, usize)> {
let slots = as_slots(locs);
for slot in slots.iter_mut() {
*slot = None;
}
// If the caller unnecessarily uses this, then we try to save them
// from themselves.
match slots.len() {
0 => return self.find_at(text, start),
2 => {
return self.find_at(text, start).map(|(s, e)| {
slots[0] = Some(s);
slots[1] = Some(e);
(s, e)
});
}
_ => {} // fallthrough
}
match self.ro.match_type {
MatchType::Nfa => {
self.captures_nfa(slots, text, start)
}
MatchType::Nothing => None,
}
}
}
impl<'c> ExecNoSync<'c> {
/// Executes the NFA engine to return whether there is a match or not.
///
/// Ideally, we could use shortest_nfa(...).is_some() and get the same
/// performance characteristics, but regex sets don't have captures, which
/// shortest_nfa depends on.
fn match_nfa(
&self,
text: &[u8],
start: usize,
) -> bool {
self.exec_pikevm(&mut [false], &mut [], true, text, start)
}
/// Finds the shortest match using an NFA.
fn shortest_nfa(&self, text: &[u8], start: usize) -> Option<usize> {
let mut slots = [None, None];
if self.exec_pikevm(&mut [false], &mut slots, true, text, start) {
slots[1]
} else {
None
}
}
/// Like find, but executes an NFA engine.
fn find_nfa(
&self,
text: &[u8],
start: usize,
) -> Option<(usize, usize)> {
let mut slots = [None, None];
if self.exec_pikevm(&mut [false], &mut slots, false, text, start) {
match (slots[0], slots[1]) {
(Some(s), Some(e)) => Some((s, e)),
_ => None,
}
} else {
None
}
}
/// Like find_nfa, but fills in captures.
///
/// `slots` should have length equal to `2 * nfa.captures.len()`.
fn captures_nfa(
&self,
slots: &mut [Slot],
text: &[u8],
start: usize,
) -> Option<(usize, usize)> {
if self.exec_pikevm(&mut [false], slots, false, text, start) {
match (slots[0], slots[1]) {
(Some(s), Some(e)) => Some((s, e)),
_ => None,
}
} else {
None
}
}
/// Always run the NFA algorithm.
fn exec_pikevm(
&self,
matches: &mut [bool],
slots: &mut [Slot],
quit_after_match: bool,
text: &[u8],
start: usize,
) -> bool {
use input::Input;
let cache = &mut self.cache.borrow_mut().pikevm;
if start == 0 {
cache.reset();
} else {
cache.prep_for_next_match();
}
if self.ro.nfa.uses_bytes() {
let input = ByteInput::new(text, self.ro.nfa.only_utf8);
let mut at = input.at(start);
let mut fsm = pikevm::Fsm::new(
&self.ro.nfa,
quit_after_match,
);
loop {
let stop = fsm.next(
cache,
matches,
slots,
at,
input.only_utf8(),
);
if stop || at.is_end() {
break;
}
at = input.at(at.next_pos());
}
} else {
let input = CharInput::new(text);
let mut at = input.at(start);
let mut fsm = pikevm::Fsm::new(
&self.ro.nfa,
quit_after_match,
);
loop {
let stop = fsm.next(
cache,
matches,
slots,
at,
input.only_utf8(),
);
if stop || at.is_end() {
break;
}
at = input.at(at.next_pos());
}
}
matches.iter().any(|b| *b)
}
/// Finds which regular expressions match the given text.
///
/// `matches` should have length equal to the number of regexes being
/// searched.
///
/// This is only useful when one wants to know which regexes in a set
/// match some text.
pub fn many_matches_at(
&self,
matches: &mut [bool],
text: &[u8],
start: usize,
) -> bool {
use self::MatchType::*;
match self.ro.match_type {
Nfa => self.exec_pikevm(matches, &mut [], false, text, start),
Nothing => false,
}
}
pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> {
&self.ro.nfa.capture_name_idx
}
}
impl<'c> ExecNoSyncStr<'c> {
pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> {
self.0.capture_name_idx()
}
}
impl Exec {
/// Get a searcher that isn't Sync.
#[inline(always)] // reduces constant overhead
pub fn searcher(&self) -> ExecNoSync {
let create = || Box::new(RefCell::new(ProgramCacheInner::new(&self.ro)));
ExecNoSync {
ro: &self.ro, // a clone is too expensive here! (and not needed)
cache: self.cache.get_or(create),
}
}
/// Get a searcher that isn't Sync and can match on &str.
#[inline(always)] // reduces constant overhead
pub fn searcher_str(&self) -> ExecNoSyncStr {
ExecNoSyncStr(self.searcher())
}
/// Build a Regex from this executor.
pub fn into_regex(self) -> re_unicode::Regex {
re_unicode::Regex::from(self)
}
/// Build a RegexSet from this executor.
pub fn into_regex_set(self) -> re_set::unicode::RegexSet {
re_set::unicode::RegexSet::from(self)
}
/// The original regular expressions given by the caller that were
/// compiled.
pub fn regex_strings(&self) -> &[String] {
&self.ro.res
}
/// Return a slice of capture names.
///
/// Any capture that isn't named is None.
pub fn capture_names(&self) -> &[Option<String>] {
&self.ro.nfa.captures
}
/// Return a reference to named groups mapping (from group name to
/// group position).
pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> {
&self.ro.nfa.capture_name_idx
}
}
impl Clone for Exec {
fn clone(&self) -> Exec {
Exec {
ro: self.ro.clone(),
cache: CachedThreadLocal::new(),
}
}
}
impl ExecReadOnly {
fn choose_match_type(&self, hint: Option<MatchType>) -> MatchType {
use self::MatchType::*;
if let Some(Nfa) = hint {
return hint.unwrap();
}
// If the NFA is empty, then we'll never match anything.
if self.nfa.insts.is_empty() {
return Nothing;
}
Nfa
}
}
#[derive(Clone, Copy, Debug)]
enum MatchType {
/// An NFA variant.
Nfa,
/// No match is ever possible, so don't ever try to search.
Nothing,
}
/// `ProgramCache` maintains reusable allocations for each matching engine
/// available to a particular program.
pub type ProgramCache = RefCell<ProgramCacheInner>;
#[derive(Clone, Debug)]
pub struct ProgramCacheInner {
pub pikevm: pikevm::Cache,
}
impl ProgramCacheInner {
fn new(ro: &ExecReadOnly) -> Self {
ProgramCacheInner {
pikevm: pikevm::Cache::new(&ro.nfa),
}
}
}
| ExecNoSyncStr | identifier_name |
exec.rs | // Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cell::RefCell;
use std::collections::HashMap;
use std::sync::Arc;
use thread_local::CachedThreadLocal;
use syntax::{Expr, ExprBuilder, Literals};
use compile::Compiler;
use error::Error;
use input::{ByteInput, CharInput};
use pikevm;
use prog::Program;
use re_builder::RegexOptions;
use re_set;
use re_trait::{RegularExpression, Slot, Locations, as_slots};
use re_unicode;
use utf8::next_utf8;
/// `Exec` manages the execution of a regular expression.
///
/// In particular, this manages the various compiled forms of a single regular
/// expression and the choice of which matching engine to use to execute a
/// regular expression.
pub struct Exec {
/// All read only state.
ro: Arc<ExecReadOnly>,
/// Caches for the various matching engines.
cache: CachedThreadLocal<ProgramCache>,
}
/// `ExecNoSync` is like `Exec`, except it embeds a reference to a cache. This
/// means it is no longer Sync, but we can now avoid the overhead of
/// synchronization to fetch the cache.
#[derive(Debug)]
pub struct ExecNoSync<'c> {
/// All read only state.
ro: &'c Arc<ExecReadOnly>,
/// Caches for the various matching engines.
cache: &'c ProgramCache,
}
/// `ExecNoSyncStr` is like `ExecNoSync`, but matches on &str instead of &[u8].
pub struct ExecNoSyncStr<'c>(ExecNoSync<'c>);
/// `ExecReadOnly` comprises all read only state for a regex. Namely, all such
/// state is determined at compile time and never changes during search.
#[derive(Debug)]
struct ExecReadOnly {
/// The original regular expressions given by the caller to compile.
res: Vec<String>,
/// A compiled program that is used in the NFA simulation and backtracking.
/// It can be byte-based or Unicode codepoint based.
///
/// N.B. It is not possibly to make this byte-based from the public API.
/// It is only used for testing byte based programs in the NFA simulations.
nfa: Program,
/// match_type encodes as much upfront knowledge about how we're going to
/// execute a search as possible.
match_type: MatchType,
}
/// Facilitates the construction of an executor by exposing various knobs
/// to control how a regex is executed and what kinds of resources it's
/// permitted to use.
pub struct ExecBuilder {
options: RegexOptions,
match_type: Option<MatchType>,
bytes: bool,
only_utf8: bool,
}
/// Parsed represents a set of parsed regular expressions and their detected
/// literals.
struct Parsed {
exprs: Vec<Expr>,
bytes: bool,
}
impl ExecBuilder {
/// Create a regex execution builder.
///
/// This uses default settings for everything except the regex itself,
/// which must be provided. Further knobs can be set by calling methods,
/// and then finally, `build` to actually create the executor.
pub fn new(re: &str) -> Self {
Self::new_many(&[re])
}
/// Like new, but compiles the union of the given regular expressions.
///
/// Note that when compiling 2 or more regular expressions, capture groups
/// are completely unsupported. (This means both `find` and `captures`
/// wont work.)
pub fn new_many<I, S>(res: I) -> Self
where S: AsRef<str>, I: IntoIterator<Item=S> {
let mut opts = RegexOptions::default();
opts.pats = res.into_iter().map(|s| s.as_ref().to_owned()).collect();
Self::new_options(opts)
}
/// Create a regex execution builder.
pub fn new_options(opts: RegexOptions) -> Self {
ExecBuilder {
options: opts,
match_type: None,
bytes: false,
only_utf8: true,
}
}
/// Set the matching engine to be automatically determined.
///
/// This is the default state and will apply whatever optimizations are
/// possible, such as running a DFA.
///
/// This overrides whatever was previously set via the `nfa` or
/// `bounded_backtracking` methods.
pub fn automatic(mut self) -> Self {
self.match_type = None;
self
}
/// Sets the matching engine to use the NFA algorithm no matter what
/// optimizations are possible.
///
/// This overrides whatever was previously set via the `automatic` or
/// `bounded_backtracking` methods.
pub fn nfa(mut self) -> Self {
self.match_type = Some(MatchType::Nfa);
self
}
/// Compiles byte based programs for use with the NFA matching engines.
///
/// By default, the NFA engines match on Unicode scalar values. They can
/// be made to use byte based programs instead. In general, the byte based
/// programs are slower because of a less efficient encoding of character
/// classes.
///
/// Note that this does not impact DFA matching engines, which always
/// execute on bytes.
pub fn bytes(mut self, yes: bool) -> Self {
self.bytes = yes;
self
}
/// When disabled, the program compiled may match arbitrary bytes.
///
/// When enabled (the default), all compiled programs exclusively match
/// valid UTF-8 bytes.
pub fn only_utf8(mut self, yes: bool) -> Self {
self.only_utf8 = yes;
self
}
/// Set the Unicode flag.
pub fn unicode(mut self, yes: bool) -> Self {
self.options.unicode = yes;
self
}
/// Parse the current set of patterns into their AST and extract literals.
fn parse(&self) -> Result<Parsed, Error> {
let mut exprs = Vec::with_capacity(self.options.pats.len());
let mut prefixes = Some(Literals::empty());
let mut suffixes = Some(Literals::empty());
let mut bytes = false;
let is_set = self.options.pats.len() > 1;
// If we're compiling a regex set and that set has any anchored
// expressions, then disable all literal optimizations.
for pat in &self.options.pats {
let parser =
ExprBuilder::new()
.case_insensitive(self.options.case_insensitive)
.multi_line(self.options.multi_line)
.dot_matches_new_line(self.options.dot_matches_new_line)
.swap_greed(self.options.swap_greed)
.ignore_whitespace(self.options.ignore_whitespace)
.unicode(self.options.unicode)
.allow_bytes(!self.only_utf8);
let expr = try!(parser.parse(pat));
bytes = bytes || expr.has_bytes();
if !expr.is_anchored_start() && expr.has_anchored_start() {
// Partial anchors unfortunately make it hard to use prefixes,
// so disable them.
prefixes = None;
} else if is_set && expr.is_anchored_start() {
// Regex sets with anchors do not go well with literal
// optimizations.
prefixes = None;
}
prefixes = prefixes.and_then(|mut prefixes| {
if !prefixes.union_prefixes(&expr) {
None
} else {
Some(prefixes)
}
});
if !expr.is_anchored_end() && expr.has_anchored_end() {
// Partial anchors unfortunately make it hard to use suffixes,
// so disable them.
suffixes = None;
} else if is_set && expr.is_anchored_end() {
// Regex sets with anchors do not go well with literal
// optimizations.
prefixes = None;
}
suffixes = suffixes.and_then(|mut suffixes| {
if !suffixes.union_suffixes(&expr) {
None
} else {
Some(suffixes)
}
});
exprs.push(expr);
}
Ok(Parsed {
exprs: exprs,
bytes: bytes,
})
}
/// Build an executor that can run a regular expression.
pub fn build(self) -> Result<Exec, Error> {
// Special case when we have no patterns to compile.
// This can happen when compiling a regex set.
if self.options.pats.is_empty() {
let ro = Arc::new(ExecReadOnly {
res: vec![],
nfa: Program::new(),
match_type: MatchType::Nothing,
});
return Ok(Exec { ro: ro, cache: CachedThreadLocal::new() });
}
let parsed = try!(self.parse());
let nfa = try!(
Compiler::new()
.size_limit(self.options.size_limit)
.bytes(self.bytes || parsed.bytes)
.only_utf8(self.only_utf8)
.compile(&parsed.exprs));
let mut ro = ExecReadOnly {
res: self.options.pats,
nfa: nfa,
match_type: MatchType::Nothing,
};
ro.match_type = ro.choose_match_type(self.match_type);
let ro = Arc::new(ro);
Ok(Exec { ro: ro, cache: CachedThreadLocal::new() })
}
}
impl<'c> RegularExpression for ExecNoSyncStr<'c> {
type Text = str;
fn slots_len(&self) -> usize { self.0.slots_len() }
fn next_after_empty(&self, text: &str, i: usize) -> usize {
next_utf8(text.as_bytes(), i)
}
#[inline(always)] // reduces constant overhead
fn shortest_match_at(&self, text: &str, start: usize) -> Option<usize> |
#[inline(always)] // reduces constant overhead
fn is_match_at(&self, text: &str, start: usize) -> bool {
self.0.is_match_at(text.as_bytes(), start)
}
#[inline(always)] // reduces constant overhead
fn find_at(&self, text: &str, start: usize) -> Option<(usize, usize)> {
self.0.find_at(text.as_bytes(), start)
}
#[inline(always)] // reduces constant overhead
fn read_captures_at(
&self,
locs: &mut Locations,
text: &str,
start: usize,
) -> Option<(usize, usize)> {
self.0.read_captures_at(locs, text.as_bytes(), start)
}
}
impl<'c> RegularExpression for ExecNoSync<'c> {
type Text = [u8];
/// Returns the number of capture slots in the regular expression. (There
/// are two slots for every capture group, corresponding to possibly empty
/// start and end locations of the capture.)
fn slots_len(&self) -> usize {
self.ro.nfa.captures.len() * 2
}
fn next_after_empty(&self, _text: &[u8], i: usize) -> usize {
i + 1
}
/// Returns the end of a match location, possibly occurring before the
/// end location of the correct leftmost-first match.
#[inline(always)] // reduces constant overhead
fn shortest_match_at(&self, text: &[u8], start: usize) -> Option<usize> {
match self.ro.match_type {
MatchType::Nfa => self.shortest_nfa(text, start),
MatchType::Nothing => None,
}
}
/// Returns true if and only if the regex matches text.
///
/// For single regular expressions, this is equivalent to calling
/// shortest_match(...).is_some().
#[inline(always)] // reduces constant overhead
fn is_match_at(&self, text: &[u8], start: usize) -> bool {
// We need to do this dance because shortest_match relies on the NFA
// filling in captures[1], but a RegexSet has no captures. In other
// words, a RegexSet can't (currently) use shortest_match. ---AG
match self.ro.match_type {
MatchType::Nfa => self.match_nfa(text, start),
MatchType::Nothing => false,
}
}
/// Finds the start and end location of the leftmost-first match, starting
/// at the given location.
#[inline(always)] // reduces constant overhead
fn find_at(&self, text: &[u8], start: usize) -> Option<(usize, usize)> {
match self.ro.match_type {
MatchType::Nfa => self.find_nfa(text, start),
MatchType::Nothing => None,
}
}
/// Finds the start and end location of the leftmost-first match and also
/// fills in all matching capture groups.
///
/// The number of capture slots given should be equal to the total number
/// of capture slots in the compiled program.
///
/// Note that the first two slots always correspond to the start and end
/// locations of the overall match.
fn read_captures_at(
&self,
locs: &mut Locations,
text: &[u8],
start: usize,
) -> Option<(usize, usize)> {
let slots = as_slots(locs);
for slot in slots.iter_mut() {
*slot = None;
}
// If the caller unnecessarily uses this, then we try to save them
// from themselves.
match slots.len() {
0 => return self.find_at(text, start),
2 => {
return self.find_at(text, start).map(|(s, e)| {
slots[0] = Some(s);
slots[1] = Some(e);
(s, e)
});
}
_ => {} // fallthrough
}
match self.ro.match_type {
MatchType::Nfa => {
self.captures_nfa(slots, text, start)
}
MatchType::Nothing => None,
}
}
}
impl<'c> ExecNoSync<'c> {
/// Executes the NFA engine to return whether there is a match or not.
///
/// Ideally, we could use shortest_nfa(...).is_some() and get the same
/// performance characteristics, but regex sets don't have captures, which
/// shortest_nfa depends on.
fn match_nfa(
&self,
text: &[u8],
start: usize,
) -> bool {
self.exec_pikevm(&mut [false], &mut [], true, text, start)
}
/// Finds the shortest match using an NFA.
fn shortest_nfa(&self, text: &[u8], start: usize) -> Option<usize> {
let mut slots = [None, None];
if self.exec_pikevm(&mut [false], &mut slots, true, text, start) {
slots[1]
} else {
None
}
}
/// Like find, but executes an NFA engine.
fn find_nfa(
&self,
text: &[u8],
start: usize,
) -> Option<(usize, usize)> {
let mut slots = [None, None];
if self.exec_pikevm(&mut [false], &mut slots, false, text, start) {
match (slots[0], slots[1]) {
(Some(s), Some(e)) => Some((s, e)),
_ => None,
}
} else {
None
}
}
/// Like find_nfa, but fills in captures.
///
/// `slots` should have length equal to `2 * nfa.captures.len()`.
fn captures_nfa(
&self,
slots: &mut [Slot],
text: &[u8],
start: usize,
) -> Option<(usize, usize)> {
if self.exec_pikevm(&mut [false], slots, false, text, start) {
match (slots[0], slots[1]) {
(Some(s), Some(e)) => Some((s, e)),
_ => None,
}
} else {
None
}
}
/// Always run the NFA algorithm.
fn exec_pikevm(
&self,
matches: &mut [bool],
slots: &mut [Slot],
quit_after_match: bool,
text: &[u8],
start: usize,
) -> bool {
use input::Input;
let cache = &mut self.cache.borrow_mut().pikevm;
if start == 0 {
cache.reset();
} else {
cache.prep_for_next_match();
}
if self.ro.nfa.uses_bytes() {
let input = ByteInput::new(text, self.ro.nfa.only_utf8);
let mut at = input.at(start);
let mut fsm = pikevm::Fsm::new(
&self.ro.nfa,
quit_after_match,
);
loop {
let stop = fsm.next(
cache,
matches,
slots,
at,
input.only_utf8(),
);
if stop || at.is_end() {
break;
}
at = input.at(at.next_pos());
}
} else {
let input = CharInput::new(text);
let mut at = input.at(start);
let mut fsm = pikevm::Fsm::new(
&self.ro.nfa,
quit_after_match,
);
loop {
let stop = fsm.next(
cache,
matches,
slots,
at,
input.only_utf8(),
);
if stop || at.is_end() {
break;
}
at = input.at(at.next_pos());
}
}
matches.iter().any(|b| *b)
}
/// Finds which regular expressions match the given text.
///
/// `matches` should have length equal to the number of regexes being
/// searched.
///
/// This is only useful when one wants to know which regexes in a set
/// match some text.
pub fn many_matches_at(
&self,
matches: &mut [bool],
text: &[u8],
start: usize,
) -> bool {
use self::MatchType::*;
match self.ro.match_type {
Nfa => self.exec_pikevm(matches, &mut [], false, text, start),
Nothing => false,
}
}
pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> {
&self.ro.nfa.capture_name_idx
}
}
impl<'c> ExecNoSyncStr<'c> {
pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> {
self.0.capture_name_idx()
}
}
impl Exec {
/// Get a searcher that isn't Sync.
#[inline(always)] // reduces constant overhead
pub fn searcher(&self) -> ExecNoSync {
let create = || Box::new(RefCell::new(ProgramCacheInner::new(&self.ro)));
ExecNoSync {
ro: &self.ro, // a clone is too expensive here! (and not needed)
cache: self.cache.get_or(create),
}
}
/// Get a searcher that isn't Sync and can match on &str.
#[inline(always)] // reduces constant overhead
pub fn searcher_str(&self) -> ExecNoSyncStr {
ExecNoSyncStr(self.searcher())
}
/// Build a Regex from this executor.
pub fn into_regex(self) -> re_unicode::Regex {
re_unicode::Regex::from(self)
}
/// Build a RegexSet from this executor.
pub fn into_regex_set(self) -> re_set::unicode::RegexSet {
re_set::unicode::RegexSet::from(self)
}
/// The original regular expressions given by the caller that were
/// compiled.
pub fn regex_strings(&self) -> &[String] {
&self.ro.res
}
/// Return a slice of capture names.
///
/// Any capture that isn't named is None.
pub fn capture_names(&self) -> &[Option<String>] {
&self.ro.nfa.captures
}
/// Return a reference to named groups mapping (from group name to
/// group position).
pub fn capture_name_idx(&self) -> &Arc<HashMap<String, usize>> {
&self.ro.nfa.capture_name_idx
}
}
impl Clone for Exec {
fn clone(&self) -> Exec {
Exec {
ro: self.ro.clone(),
cache: CachedThreadLocal::new(),
}
}
}
impl ExecReadOnly {
fn choose_match_type(&self, hint: Option<MatchType>) -> MatchType {
use self::MatchType::*;
if let Some(Nfa) = hint {
return hint.unwrap();
}
// If the NFA is empty, then we'll never match anything.
if self.nfa.insts.is_empty() {
return Nothing;
}
Nfa
}
}
#[derive(Clone, Copy, Debug)]
enum MatchType {
/// An NFA variant.
Nfa,
/// No match is ever possible, so don't ever try to search.
Nothing,
}
/// `ProgramCache` maintains reusable allocations for each matching engine
/// available to a particular program.
pub type ProgramCache = RefCell<ProgramCacheInner>;
#[derive(Clone, Debug)]
pub struct ProgramCacheInner {
pub pikevm: pikevm::Cache,
}
impl ProgramCacheInner {
fn new(ro: &ExecReadOnly) -> Self {
ProgramCacheInner {
pikevm: pikevm::Cache::new(&ro.nfa),
}
}
}
| {
self.0.shortest_match_at(text.as_bytes(), start)
} | identifier_body |
blob_store.rs | // Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Combines data chunks into larger blobs to be stored externally.
use std::sync::{Arc, Mutex};
use serialize::{json, Encodable, Decodable};
use serialize::hex::{ToHex};
use serialize::json::{Json, ToJson, Decoder, from_str};
use std::collections::treemap::{TreeMap};
use std::collections::lru_cache::{LruCache};
use std::io::{File};
use std::str;
use process::{Process, MsgHandler};
use blob_index;
use blob_index::{BlobIndexProcess};
#[cfg(test)]
use blob_index::{BlobIndex};
pub type BlobStoreProcess<B> = Process<Msg, Reply, BlobStore<B>>;
pub trait BlobStoreBackend {
fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String>;
fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String>;
}
#[deriving(Clone)]
pub struct FileBackend {
root: Path,
read_cache: Arc<Mutex<LruCache<Vec<u8>, Result<Vec<u8>, String>>>>,
}
impl FileBackend {
pub fn new(root: Path) -> FileBackend {
FileBackend{root: root, read_cache: Arc::new(Mutex::new(LruCache::new(10)))}
}
fn guarded_cache_get(&self, name: &Vec<u8>) -> Option<Result<Vec<u8>, String>> {
self.read_cache.lock().get(name).map(|v| v.clone())
}
fn guarded_cache_put(&mut self, name: Vec<u8>, result: Result<Vec<u8>, String>) {
self.read_cache.lock().put(name, result);
}
}
impl BlobStoreBackend for FileBackend {
fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String> {
let mut path = self.root.clone();
path.push(name.to_hex());
let mut file = match File::create(&path) {
Err(e) => return Err(e.to_string()),
Ok(f) => f,
};
match file.write(data) {
Err(e) => Err(e.to_string()),
Ok(()) => Ok(()),
}
}
fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> {
// Check for key in cache:
let name = name.into_vec();
let value_opt = self.guarded_cache_get(&name);
match value_opt {
Some(result) => return result,
None => (),
}
// Read key:
let path = { let mut p = self.root.clone();
p.push(name.as_slice().to_hex());
p };
let mut fd = File::open(&path).unwrap();
let res = fd.read_to_end().and_then(|data| {
Ok(data.into_vec()) }).or_else(|e| Err(e.to_string()));
// Update cache to contain key:
self.guarded_cache_put(name, res.clone());
return res;
}
}
#[deriving(Show, Clone, Eq, PartialEq, Encodable, Decodable)]
pub struct BlobID {
name: Vec<u8>,
begin: uint,
end: uint,
}
impl BlobID {
pub fn from_bytes(bytes: Vec<u8>) -> BlobID {
let mut decoder = Decoder::new(from_str(
str::from_utf8(bytes.as_slice()).unwrap()).unwrap());
Decodable::decode(&mut decoder).unwrap()
}
pub fn as_bytes(&self) -> Vec<u8> {
self.to_json().to_string().as_bytes().into_vec()
}
}
impl ToJson for BlobID {
fn to_json(&self) -> Json {
let mut m = TreeMap::new();
m.insert("name".to_string(), self.name.to_json());
m.insert("begin".to_string(), self.begin.to_json());
m.insert("end".to_string(), self.end.to_json());
json::Object(m).to_json()
}
}
pub enum Msg {
/// Store a new data chunk into the current blob. The callback is triggered after the blob
/// containing the chunk has been committed to persistent storage (it is then safe to use the
/// `BlobID` as persistent reference).
Store(Vec<u8>, proc(BlobID):Send -> ()),
/// Retrieve the data chunk identified by `BlobID`.
Retrieve(BlobID),
/// Flush the current blob, independent of its size.
Flush,
}
#[deriving(Eq, PartialEq, Show)]
pub enum Reply {
StoreOK(BlobID),
RetrieveOK(Vec<u8>),
FlushOK,
}
pub struct BlobStore<B> {
backend: B,
blob_index: BlobIndexProcess,
blob_desc: blob_index::BlobDesc,
buffer_data: Vec<(BlobID, Vec<u8>, proc(BlobID):Send -> ())>,
buffer_data_len: uint,
max_blob_size: uint,
}
fn empty_blob_desc() -> blob_index::BlobDesc {
blob_index::BlobDesc{name: b"".into_vec(), id: 0}
}
impl <B: BlobStoreBackend> BlobStore<B> {
pub fn new(index: BlobIndexProcess, backend: B,
max_blob_size: uint) -> BlobStore<B> {
let mut bs = BlobStore{
backend: backend,
blob_index: index,
blob_desc: empty_blob_desc(),
buffer_data: Vec::new(),
buffer_data_len: 0,
max_blob_size: max_blob_size,
};
bs.reserve_new_blob();
bs
}
#[cfg(test)]
pub fn new_for_testing(backend: B, max_blob_size: uint) -> BlobStore<B> {
let biP = Process::new(proc() { BlobIndex::new_for_testing() });
let mut bs = BlobStore{backend: backend,
blob_index: biP,
blob_desc: empty_blob_desc(),
buffer_data: Vec::new(),
buffer_data_len: 0,
max_blob_size: max_blob_size,
};
bs.reserve_new_blob();
bs
}
fn reserve_new_blob(&mut self) -> blob_index::BlobDesc {
let old_blob_desc = self.blob_desc.clone();
let res = self.blob_index.send_reply(blob_index::Reserve);
match res {
blob_index::Reserved(blob_desc) => {
self.blob_desc = blob_desc;
},
_ => fail!("Could not reserve blob."),
}
old_blob_desc
}
fn backend_store(&mut self, name: &[u8], blob: &[u8]) {
match self.backend.store(name, blob) {
Ok(()) => (),
Err(s) => fail!(s),
}
}
fn backend_read(&mut self, name: &[u8]) -> Vec<u8> {
match self.backend.retrieve(name) {
Ok(data) => data,
Err(s) => fail!(s),
}
}
fn flush(&mut self) {
if self.buffer_data_len == 0 { return }
// Replace blob id
let old_blob_desc = self.reserve_new_blob();
self.buffer_data_len = 0;
// Prepare blob
let mut ready_callback = Vec::new();
let mut blob = Vec::new();
loop {
match self.buffer_data.shift() {
Some((chunk_ref, chunk, cb)) => {
ready_callback.push((chunk_ref, cb));
blob.push_all(chunk.as_slice());
},
None => break,
}
}
self.blob_index.send_reply(blob_index::InAir(old_blob_desc.clone()));
self.backend_store(old_blob_desc.name.as_slice(), blob.as_slice());
self.blob_index.send_reply(blob_index::CommitDone(old_blob_desc));
// Go through callbacks
for (blobid, cb) in ready_callback.move_iter() {
cb(blobid);
}
}
fn maybe_flush(&mut self) {
if self.buffer_data_len >= self.max_blob_size {
self.flush();
}
}
}
impl <B: BlobStoreBackend> MsgHandler<Msg, Reply> for BlobStore<B> {
fn handle(&mut self, msg: Msg, reply: |Reply|) {
match msg {
Store(blob, cb) => {
if blob.len() == 0 {
let id = BlobID{name: vec!(0), begin: 0, end: 0};
let cb_id = id.clone();
spawn(proc(){ cb(cb_id) });
return reply(StoreOK(id));
}
let new_size = self.buffer_data_len + blob.len();
let id = BlobID{name: self.blob_desc.name.clone(),
begin: self.buffer_data_len,
end: new_size};
self.buffer_data_len = new_size;
self.buffer_data.push((id.clone(), blob.into_vec(), cb));
// To avoid unnecessary blocking, we reply with the ID *before* possibly flushing.
reply(StoreOK(id));
// Flushing can be expensive, so try not block on it.
self.maybe_flush();
},
Retrieve(id) => {
if id.begin == 0 && id.end == 0 {
return reply(RetrieveOK(vec![].into_vec()));
}
let blob = self.backend_read(id.name.as_slice());
let chunk = blob.slice(id.begin, id.end);
return reply(RetrieveOK(chunk.into_vec()));
},
Flush => {
self.flush();
return reply(FlushOK)
},
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use std::rand::{task_rng};
use quickcheck::{Config, Testable, gen};
use quickcheck::{quickcheck_config};
use process::{Process};
use std::sync::{Arc, Mutex};
use std::collections::treemap::{TreeMap};
#[deriving(Clone)]
pub struct MemoryBackend {
files: Arc<Mutex<TreeMap<Vec<u8>, Vec<u8>>>>
}
impl MemoryBackend {
pub fn new() -> MemoryBackend {
MemoryBackend{files: Arc::new(Mutex::new(TreeMap::new()))}
}
fn guarded_insert(&mut self, key: Vec<u8>, value: Vec<u8>) -> Result<(), String>{
let mut guarded_files = self.files.lock();
if guarded_files.contains_key(&key) {
return Err(format!("Key already exists: '{}'", key));
}
guarded_files.insert(key, value);
Ok(())
}
fn guarded_retrieve(&mut self, key: &[u8]) -> Result<Vec<u8>, String> {
let value_opt = self.files.lock().find(&key.into_vec()).map(|v| v.clone());
value_opt.map(|v| Ok(v)).unwrap_or_else(|| Err(format!("Unknown key: '{}'", key)))
}
}
impl BlobStoreBackend for MemoryBackend {
fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String> {
self.guarded_insert(name.to_owned(), data.into_vec())
}
fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> {
self.guarded_retrieve(name)
}
}
#[deriving(Clone)]
pub struct DevNullBackend;
impl BlobStoreBackend for DevNullBackend {
fn store(&mut self, _name: &[u8], _data: &[u8]) -> Result<(), String> {
Ok(())
}
fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> {
Err(format!("Unknown key: '{}'", name))
}
}
// QuickCheck configuration
static SIZE: uint = 100;
static CONFIG: Config = Config {
tests: 200,
max_tests: 1000,
};
// QuickCheck helpers:
fn qcheck<A: Testable>(f: A) {
quickcheck_config(CONFIG, &mut gen(task_rng(), SIZE), f)
}
#[test]
fn identity() {
fn prop(chunks: Vec<Vec<u8>>) -> bool {
let mut backend = MemoryBackend::new();
let local_backend = backend.clone();
let bsP : BlobStoreProcess<MemoryBackend> =
Process::new(proc() { BlobStore::new_for_testing(local_backend, 1024) });
let mut ids = Vec::new();
for chunk in chunks.iter() {
match bsP.send_reply(Store(chunk.as_slice().into_vec(), proc(_){})) {
StoreOK(id) => { ids.push((id, chunk)); },
_ => fail!("Unexpected reply from blob store."),
}
}
assert_eq!(bsP.send_reply(Flush), FlushOK);
// Non-empty chunks must be in the backend now:
for &(ref id, chunk) in ids.iter() {
if chunk.len() > 0 {
match backend.retrieve(id.name.as_slice()) {
Ok(_) => (),
Err(e) => fail!(e),
}
}
}
// All chunks must be available through the blob store:
for &(ref id, chunk) in ids.iter() {
match bsP.send_reply(Retrieve(id.clone())) {
RetrieveOK(found_chunk) => assert_eq!(found_chunk,
chunk.as_slice().into_vec()),
_ => fail!("Unexpected reply from blob store."),
}
}
return true;
}
qcheck(prop);
}
#[test]
fn identity_with_excessive_flushing() |
#[test]
fn blobid_identity() {
fn prop(name: Vec<u8>, begin: uint, end: uint) -> bool {
let blob_id = BlobID{name: name.into_vec(),
begin: begin, end: end};
BlobID::from_bytes(blob_id.as_bytes()) == blob_id
}
qcheck(prop);
}
}
| {
fn prop(chunks: Vec<Vec<u8>>) -> bool {
let mut backend = MemoryBackend::new();
let local_backend = backend.clone();
let bsP: BlobStoreProcess<MemoryBackend> = Process::new(proc() {
BlobStore::new_for_testing(local_backend, 1024) });
let mut ids = Vec::new();
for chunk in chunks.iter() {
match bsP.send_reply(Store(chunk.as_slice().into_vec(), proc(_){})) {
StoreOK(id) => { ids.push((id, chunk)); },
_ => fail!("Unexpected reply from blob store."),
}
assert_eq!(bsP.send_reply(Flush), FlushOK);
let &(ref id, chunk) = ids.last().unwrap();
assert_eq!(bsP.send_reply(Retrieve(id.clone())), RetrieveOK(chunk.clone()));
}
// Non-empty chunks must be in the backend now:
for &(ref id, chunk) in ids.iter() {
if chunk.len() > 0 {
match backend.retrieve(id.name.as_slice()) {
Ok(_) => (),
Err(e) => fail!(e),
}
}
}
// All chunks must be available through the blob store:
for &(ref id, chunk) in ids.iter() {
match bsP.send_reply(Retrieve(id.clone())) {
RetrieveOK(found_chunk) => assert_eq!(found_chunk,
chunk.as_slice().into_vec()),
_ => fail!("Unexpected reply from blob store."),
}
}
return true;
}
qcheck(prop);
} | identifier_body |
blob_store.rs | // Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Combines data chunks into larger blobs to be stored externally.
use std::sync::{Arc, Mutex};
use serialize::{json, Encodable, Decodable};
use serialize::hex::{ToHex};
use serialize::json::{Json, ToJson, Decoder, from_str};
use std::collections::treemap::{TreeMap};
use std::collections::lru_cache::{LruCache};
use std::io::{File};
use std::str;
use process::{Process, MsgHandler};
use blob_index;
use blob_index::{BlobIndexProcess};
#[cfg(test)]
use blob_index::{BlobIndex};
pub type BlobStoreProcess<B> = Process<Msg, Reply, BlobStore<B>>;
pub trait BlobStoreBackend {
fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String>;
fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String>;
}
#[deriving(Clone)]
pub struct FileBackend {
root: Path,
read_cache: Arc<Mutex<LruCache<Vec<u8>, Result<Vec<u8>, String>>>>,
}
impl FileBackend {
pub fn new(root: Path) -> FileBackend {
FileBackend{root: root, read_cache: Arc::new(Mutex::new(LruCache::new(10)))}
}
fn guarded_cache_get(&self, name: &Vec<u8>) -> Option<Result<Vec<u8>, String>> {
self.read_cache.lock().get(name).map(|v| v.clone())
}
fn guarded_cache_put(&mut self, name: Vec<u8>, result: Result<Vec<u8>, String>) {
self.read_cache.lock().put(name, result);
}
}
impl BlobStoreBackend for FileBackend {
fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String> {
let mut path = self.root.clone();
path.push(name.to_hex());
let mut file = match File::create(&path) {
Err(e) => return Err(e.to_string()),
Ok(f) => f,
};
match file.write(data) {
Err(e) => Err(e.to_string()),
Ok(()) => Ok(()),
}
}
fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> {
// Check for key in cache:
let name = name.into_vec();
let value_opt = self.guarded_cache_get(&name);
match value_opt {
Some(result) => return result,
None => (),
}
// Read key:
let path = { let mut p = self.root.clone();
p.push(name.as_slice().to_hex());
p };
let mut fd = File::open(&path).unwrap();
let res = fd.read_to_end().and_then(|data| {
Ok(data.into_vec()) }).or_else(|e| Err(e.to_string()));
// Update cache to contain key:
self.guarded_cache_put(name, res.clone());
return res;
}
}
#[deriving(Show, Clone, Eq, PartialEq, Encodable, Decodable)]
pub struct BlobID {
name: Vec<u8>,
begin: uint,
end: uint,
}
impl BlobID {
pub fn from_bytes(bytes: Vec<u8>) -> BlobID {
let mut decoder = Decoder::new(from_str(
str::from_utf8(bytes.as_slice()).unwrap()).unwrap());
Decodable::decode(&mut decoder).unwrap()
}
pub fn as_bytes(&self) -> Vec<u8> {
self.to_json().to_string().as_bytes().into_vec()
}
}
impl ToJson for BlobID {
fn to_json(&self) -> Json {
let mut m = TreeMap::new();
m.insert("name".to_string(), self.name.to_json());
m.insert("begin".to_string(), self.begin.to_json());
m.insert("end".to_string(), self.end.to_json());
json::Object(m).to_json()
}
}
pub enum Msg {
/// Store a new data chunk into the current blob. The callback is triggered after the blob
/// containing the chunk has been committed to persistent storage (it is then safe to use the
/// `BlobID` as persistent reference).
Store(Vec<u8>, proc(BlobID):Send -> ()),
/// Retrieve the data chunk identified by `BlobID`.
Retrieve(BlobID),
/// Flush the current blob, independent of its size.
Flush,
}
#[deriving(Eq, PartialEq, Show)]
pub enum Reply {
StoreOK(BlobID),
RetrieveOK(Vec<u8>),
FlushOK,
}
pub struct BlobStore<B> {
backend: B,
blob_index: BlobIndexProcess,
blob_desc: blob_index::BlobDesc,
buffer_data: Vec<(BlobID, Vec<u8>, proc(BlobID):Send -> ())>,
buffer_data_len: uint,
max_blob_size: uint,
}
fn | () -> blob_index::BlobDesc {
blob_index::BlobDesc{name: b"".into_vec(), id: 0}
}
impl <B: BlobStoreBackend> BlobStore<B> {
pub fn new(index: BlobIndexProcess, backend: B,
max_blob_size: uint) -> BlobStore<B> {
let mut bs = BlobStore{
backend: backend,
blob_index: index,
blob_desc: empty_blob_desc(),
buffer_data: Vec::new(),
buffer_data_len: 0,
max_blob_size: max_blob_size,
};
bs.reserve_new_blob();
bs
}
#[cfg(test)]
pub fn new_for_testing(backend: B, max_blob_size: uint) -> BlobStore<B> {
let biP = Process::new(proc() { BlobIndex::new_for_testing() });
let mut bs = BlobStore{backend: backend,
blob_index: biP,
blob_desc: empty_blob_desc(),
buffer_data: Vec::new(),
buffer_data_len: 0,
max_blob_size: max_blob_size,
};
bs.reserve_new_blob();
bs
}
fn reserve_new_blob(&mut self) -> blob_index::BlobDesc {
let old_blob_desc = self.blob_desc.clone();
let res = self.blob_index.send_reply(blob_index::Reserve);
match res {
blob_index::Reserved(blob_desc) => {
self.blob_desc = blob_desc;
},
_ => fail!("Could not reserve blob."),
}
old_blob_desc
}
fn backend_store(&mut self, name: &[u8], blob: &[u8]) {
match self.backend.store(name, blob) {
Ok(()) => (),
Err(s) => fail!(s),
}
}
fn backend_read(&mut self, name: &[u8]) -> Vec<u8> {
match self.backend.retrieve(name) {
Ok(data) => data,
Err(s) => fail!(s),
}
}
fn flush(&mut self) {
if self.buffer_data_len == 0 { return }
// Replace blob id
let old_blob_desc = self.reserve_new_blob();
self.buffer_data_len = 0;
// Prepare blob
let mut ready_callback = Vec::new();
let mut blob = Vec::new();
loop {
match self.buffer_data.shift() {
Some((chunk_ref, chunk, cb)) => {
ready_callback.push((chunk_ref, cb));
blob.push_all(chunk.as_slice());
},
None => break,
}
}
self.blob_index.send_reply(blob_index::InAir(old_blob_desc.clone()));
self.backend_store(old_blob_desc.name.as_slice(), blob.as_slice());
self.blob_index.send_reply(blob_index::CommitDone(old_blob_desc));
// Go through callbacks
for (blobid, cb) in ready_callback.move_iter() {
cb(blobid);
}
}
fn maybe_flush(&mut self) {
if self.buffer_data_len >= self.max_blob_size {
self.flush();
}
}
}
impl <B: BlobStoreBackend> MsgHandler<Msg, Reply> for BlobStore<B> {
fn handle(&mut self, msg: Msg, reply: |Reply|) {
match msg {
Store(blob, cb) => {
if blob.len() == 0 {
let id = BlobID{name: vec!(0), begin: 0, end: 0};
let cb_id = id.clone();
spawn(proc(){ cb(cb_id) });
return reply(StoreOK(id));
}
let new_size = self.buffer_data_len + blob.len();
let id = BlobID{name: self.blob_desc.name.clone(),
begin: self.buffer_data_len,
end: new_size};
self.buffer_data_len = new_size;
self.buffer_data.push((id.clone(), blob.into_vec(), cb));
// To avoid unnecessary blocking, we reply with the ID *before* possibly flushing.
reply(StoreOK(id));
// Flushing can be expensive, so try not block on it.
self.maybe_flush();
},
Retrieve(id) => {
if id.begin == 0 && id.end == 0 {
return reply(RetrieveOK(vec![].into_vec()));
}
let blob = self.backend_read(id.name.as_slice());
let chunk = blob.slice(id.begin, id.end);
return reply(RetrieveOK(chunk.into_vec()));
},
Flush => {
self.flush();
return reply(FlushOK)
},
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use std::rand::{task_rng};
use quickcheck::{Config, Testable, gen};
use quickcheck::{quickcheck_config};
use process::{Process};
use std::sync::{Arc, Mutex};
use std::collections::treemap::{TreeMap};
#[deriving(Clone)]
pub struct MemoryBackend {
files: Arc<Mutex<TreeMap<Vec<u8>, Vec<u8>>>>
}
impl MemoryBackend {
pub fn new() -> MemoryBackend {
MemoryBackend{files: Arc::new(Mutex::new(TreeMap::new()))}
}
fn guarded_insert(&mut self, key: Vec<u8>, value: Vec<u8>) -> Result<(), String>{
let mut guarded_files = self.files.lock();
if guarded_files.contains_key(&key) {
return Err(format!("Key already exists: '{}'", key));
}
guarded_files.insert(key, value);
Ok(())
}
fn guarded_retrieve(&mut self, key: &[u8]) -> Result<Vec<u8>, String> {
let value_opt = self.files.lock().find(&key.into_vec()).map(|v| v.clone());
value_opt.map(|v| Ok(v)).unwrap_or_else(|| Err(format!("Unknown key: '{}'", key)))
}
}
impl BlobStoreBackend for MemoryBackend {
fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String> {
self.guarded_insert(name.to_owned(), data.into_vec())
}
fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> {
self.guarded_retrieve(name)
}
}
#[deriving(Clone)]
pub struct DevNullBackend;
impl BlobStoreBackend for DevNullBackend {
fn store(&mut self, _name: &[u8], _data: &[u8]) -> Result<(), String> {
Ok(())
}
fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> {
Err(format!("Unknown key: '{}'", name))
}
}
// QuickCheck configuration
static SIZE: uint = 100;
static CONFIG: Config = Config {
tests: 200,
max_tests: 1000,
};
// QuickCheck helpers:
fn qcheck<A: Testable>(f: A) {
quickcheck_config(CONFIG, &mut gen(task_rng(), SIZE), f)
}
#[test]
fn identity() {
fn prop(chunks: Vec<Vec<u8>>) -> bool {
let mut backend = MemoryBackend::new();
let local_backend = backend.clone();
let bsP : BlobStoreProcess<MemoryBackend> =
Process::new(proc() { BlobStore::new_for_testing(local_backend, 1024) });
let mut ids = Vec::new();
for chunk in chunks.iter() {
match bsP.send_reply(Store(chunk.as_slice().into_vec(), proc(_){})) {
StoreOK(id) => { ids.push((id, chunk)); },
_ => fail!("Unexpected reply from blob store."),
}
}
assert_eq!(bsP.send_reply(Flush), FlushOK);
// Non-empty chunks must be in the backend now:
for &(ref id, chunk) in ids.iter() {
if chunk.len() > 0 {
match backend.retrieve(id.name.as_slice()) {
Ok(_) => (),
Err(e) => fail!(e),
}
}
}
// All chunks must be available through the blob store:
for &(ref id, chunk) in ids.iter() {
match bsP.send_reply(Retrieve(id.clone())) {
RetrieveOK(found_chunk) => assert_eq!(found_chunk,
chunk.as_slice().into_vec()),
_ => fail!("Unexpected reply from blob store."),
}
}
return true;
}
qcheck(prop);
}
#[test]
fn identity_with_excessive_flushing() {
fn prop(chunks: Vec<Vec<u8>>) -> bool {
let mut backend = MemoryBackend::new();
let local_backend = backend.clone();
let bsP: BlobStoreProcess<MemoryBackend> = Process::new(proc() {
BlobStore::new_for_testing(local_backend, 1024) });
let mut ids = Vec::new();
for chunk in chunks.iter() {
match bsP.send_reply(Store(chunk.as_slice().into_vec(), proc(_){})) {
StoreOK(id) => { ids.push((id, chunk)); },
_ => fail!("Unexpected reply from blob store."),
}
assert_eq!(bsP.send_reply(Flush), FlushOK);
let &(ref id, chunk) = ids.last().unwrap();
assert_eq!(bsP.send_reply(Retrieve(id.clone())), RetrieveOK(chunk.clone()));
}
// Non-empty chunks must be in the backend now:
for &(ref id, chunk) in ids.iter() {
if chunk.len() > 0 {
match backend.retrieve(id.name.as_slice()) {
Ok(_) => (),
Err(e) => fail!(e),
}
}
}
// All chunks must be available through the blob store:
for &(ref id, chunk) in ids.iter() {
match bsP.send_reply(Retrieve(id.clone())) {
RetrieveOK(found_chunk) => assert_eq!(found_chunk,
chunk.as_slice().into_vec()),
_ => fail!("Unexpected reply from blob store."),
}
}
return true;
}
qcheck(prop);
}
#[test]
fn blobid_identity() {
fn prop(name: Vec<u8>, begin: uint, end: uint) -> bool {
let blob_id = BlobID{name: name.into_vec(),
begin: begin, end: end};
BlobID::from_bytes(blob_id.as_bytes()) == blob_id
}
qcheck(prop);
}
}
| empty_blob_desc | identifier_name |
blob_store.rs | // Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Combines data chunks into larger blobs to be stored externally.
use std::sync::{Arc, Mutex};
use serialize::{json, Encodable, Decodable};
use serialize::hex::{ToHex};
use serialize::json::{Json, ToJson, Decoder, from_str};
use std::collections::treemap::{TreeMap};
use std::collections::lru_cache::{LruCache};
use std::io::{File};
use std::str;
use process::{Process, MsgHandler};
use blob_index;
use blob_index::{BlobIndexProcess};
#[cfg(test)]
use blob_index::{BlobIndex};
pub type BlobStoreProcess<B> = Process<Msg, Reply, BlobStore<B>>;
pub trait BlobStoreBackend {
fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String>;
fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String>;
}
#[deriving(Clone)]
pub struct FileBackend {
root: Path,
read_cache: Arc<Mutex<LruCache<Vec<u8>, Result<Vec<u8>, String>>>>,
}
impl FileBackend {
pub fn new(root: Path) -> FileBackend {
FileBackend{root: root, read_cache: Arc::new(Mutex::new(LruCache::new(10)))}
}
fn guarded_cache_get(&self, name: &Vec<u8>) -> Option<Result<Vec<u8>, String>> {
self.read_cache.lock().get(name).map(|v| v.clone())
}
fn guarded_cache_put(&mut self, name: Vec<u8>, result: Result<Vec<u8>, String>) {
self.read_cache.lock().put(name, result);
}
}
impl BlobStoreBackend for FileBackend {
fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String> {
let mut path = self.root.clone();
path.push(name.to_hex());
let mut file = match File::create(&path) {
Err(e) => return Err(e.to_string()),
Ok(f) => f,
};
match file.write(data) {
Err(e) => Err(e.to_string()),
Ok(()) => Ok(()),
}
}
fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> {
// Check for key in cache:
let name = name.into_vec();
let value_opt = self.guarded_cache_get(&name);
match value_opt {
Some(result) => return result,
None => (),
}
// Read key:
let path = { let mut p = self.root.clone();
p.push(name.as_slice().to_hex());
p };
let mut fd = File::open(&path).unwrap();
let res = fd.read_to_end().and_then(|data| {
Ok(data.into_vec()) }).or_else(|e| Err(e.to_string()));
// Update cache to contain key:
self.guarded_cache_put(name, res.clone());
return res;
}
}
#[deriving(Show, Clone, Eq, PartialEq, Encodable, Decodable)]
pub struct BlobID {
name: Vec<u8>,
begin: uint,
end: uint,
}
impl BlobID {
pub fn from_bytes(bytes: Vec<u8>) -> BlobID {
let mut decoder = Decoder::new(from_str(
str::from_utf8(bytes.as_slice()).unwrap()).unwrap());
Decodable::decode(&mut decoder).unwrap()
}
pub fn as_bytes(&self) -> Vec<u8> {
self.to_json().to_string().as_bytes().into_vec()
}
}
impl ToJson for BlobID {
fn to_json(&self) -> Json {
let mut m = TreeMap::new();
m.insert("name".to_string(), self.name.to_json());
m.insert("begin".to_string(), self.begin.to_json());
m.insert("end".to_string(), self.end.to_json());
json::Object(m).to_json()
}
}
pub enum Msg {
/// Store a new data chunk into the current blob. The callback is triggered after the blob
/// containing the chunk has been committed to persistent storage (it is then safe to use the
/// `BlobID` as persistent reference).
Store(Vec<u8>, proc(BlobID):Send -> ()),
/// Retrieve the data chunk identified by `BlobID`.
Retrieve(BlobID),
/// Flush the current blob, independent of its size.
Flush,
}
#[deriving(Eq, PartialEq, Show)]
pub enum Reply {
StoreOK(BlobID),
RetrieveOK(Vec<u8>),
FlushOK,
}
pub struct BlobStore<B> {
backend: B,
blob_index: BlobIndexProcess,
blob_desc: blob_index::BlobDesc,
buffer_data: Vec<(BlobID, Vec<u8>, proc(BlobID):Send -> ())>,
buffer_data_len: uint,
max_blob_size: uint,
}
fn empty_blob_desc() -> blob_index::BlobDesc {
blob_index::BlobDesc{name: b"".into_vec(), id: 0}
}
impl <B: BlobStoreBackend> BlobStore<B> {
pub fn new(index: BlobIndexProcess, backend: B,
max_blob_size: uint) -> BlobStore<B> {
let mut bs = BlobStore{
backend: backend,
blob_index: index,
blob_desc: empty_blob_desc(),
buffer_data: Vec::new(),
buffer_data_len: 0,
max_blob_size: max_blob_size,
};
bs.reserve_new_blob();
bs
}
#[cfg(test)]
pub fn new_for_testing(backend: B, max_blob_size: uint) -> BlobStore<B> {
let biP = Process::new(proc() { BlobIndex::new_for_testing() });
let mut bs = BlobStore{backend: backend,
blob_index: biP,
blob_desc: empty_blob_desc(),
buffer_data: Vec::new(),
buffer_data_len: 0,
max_blob_size: max_blob_size,
};
bs.reserve_new_blob();
bs
}
fn reserve_new_blob(&mut self) -> blob_index::BlobDesc {
let old_blob_desc = self.blob_desc.clone();
let res = self.blob_index.send_reply(blob_index::Reserve);
match res {
blob_index::Reserved(blob_desc) => {
self.blob_desc = blob_desc;
},
_ => fail!("Could not reserve blob."),
}
old_blob_desc
}
fn backend_store(&mut self, name: &[u8], blob: &[u8]) {
match self.backend.store(name, blob) {
Ok(()) => (),
Err(s) => fail!(s),
}
}
fn backend_read(&mut self, name: &[u8]) -> Vec<u8> {
match self.backend.retrieve(name) {
Ok(data) => data,
Err(s) => fail!(s),
}
}
fn flush(&mut self) {
if self.buffer_data_len == 0 { return }
// Replace blob id
let old_blob_desc = self.reserve_new_blob();
self.buffer_data_len = 0;
// Prepare blob
let mut ready_callback = Vec::new();
let mut blob = Vec::new();
loop {
match self.buffer_data.shift() {
Some((chunk_ref, chunk, cb)) => {
ready_callback.push((chunk_ref, cb)); | },
None => break,
}
}
self.blob_index.send_reply(blob_index::InAir(old_blob_desc.clone()));
self.backend_store(old_blob_desc.name.as_slice(), blob.as_slice());
self.blob_index.send_reply(blob_index::CommitDone(old_blob_desc));
// Go through callbacks
for (blobid, cb) in ready_callback.move_iter() {
cb(blobid);
}
}
fn maybe_flush(&mut self) {
if self.buffer_data_len >= self.max_blob_size {
self.flush();
}
}
}
impl <B: BlobStoreBackend> MsgHandler<Msg, Reply> for BlobStore<B> {
fn handle(&mut self, msg: Msg, reply: |Reply|) {
match msg {
Store(blob, cb) => {
if blob.len() == 0 {
let id = BlobID{name: vec!(0), begin: 0, end: 0};
let cb_id = id.clone();
spawn(proc(){ cb(cb_id) });
return reply(StoreOK(id));
}
let new_size = self.buffer_data_len + blob.len();
let id = BlobID{name: self.blob_desc.name.clone(),
begin: self.buffer_data_len,
end: new_size};
self.buffer_data_len = new_size;
self.buffer_data.push((id.clone(), blob.into_vec(), cb));
// To avoid unnecessary blocking, we reply with the ID *before* possibly flushing.
reply(StoreOK(id));
// Flushing can be expensive, so try not block on it.
self.maybe_flush();
},
Retrieve(id) => {
if id.begin == 0 && id.end == 0 {
return reply(RetrieveOK(vec![].into_vec()));
}
let blob = self.backend_read(id.name.as_slice());
let chunk = blob.slice(id.begin, id.end);
return reply(RetrieveOK(chunk.into_vec()));
},
Flush => {
self.flush();
return reply(FlushOK)
},
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use std::rand::{task_rng};
use quickcheck::{Config, Testable, gen};
use quickcheck::{quickcheck_config};
use process::{Process};
use std::sync::{Arc, Mutex};
use std::collections::treemap::{TreeMap};
#[deriving(Clone)]
pub struct MemoryBackend {
files: Arc<Mutex<TreeMap<Vec<u8>, Vec<u8>>>>
}
impl MemoryBackend {
pub fn new() -> MemoryBackend {
MemoryBackend{files: Arc::new(Mutex::new(TreeMap::new()))}
}
fn guarded_insert(&mut self, key: Vec<u8>, value: Vec<u8>) -> Result<(), String>{
let mut guarded_files = self.files.lock();
if guarded_files.contains_key(&key) {
return Err(format!("Key already exists: '{}'", key));
}
guarded_files.insert(key, value);
Ok(())
}
fn guarded_retrieve(&mut self, key: &[u8]) -> Result<Vec<u8>, String> {
let value_opt = self.files.lock().find(&key.into_vec()).map(|v| v.clone());
value_opt.map(|v| Ok(v)).unwrap_or_else(|| Err(format!("Unknown key: '{}'", key)))
}
}
impl BlobStoreBackend for MemoryBackend {
fn store(&mut self, name: &[u8], data: &[u8]) -> Result<(), String> {
self.guarded_insert(name.to_owned(), data.into_vec())
}
fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> {
self.guarded_retrieve(name)
}
}
#[deriving(Clone)]
pub struct DevNullBackend;
impl BlobStoreBackend for DevNullBackend {
fn store(&mut self, _name: &[u8], _data: &[u8]) -> Result<(), String> {
Ok(())
}
fn retrieve(&mut self, name: &[u8]) -> Result<Vec<u8>, String> {
Err(format!("Unknown key: '{}'", name))
}
}
// QuickCheck configuration
static SIZE: uint = 100;
static CONFIG: Config = Config {
tests: 200,
max_tests: 1000,
};
// QuickCheck helpers:
fn qcheck<A: Testable>(f: A) {
quickcheck_config(CONFIG, &mut gen(task_rng(), SIZE), f)
}
#[test]
fn identity() {
fn prop(chunks: Vec<Vec<u8>>) -> bool {
let mut backend = MemoryBackend::new();
let local_backend = backend.clone();
let bsP : BlobStoreProcess<MemoryBackend> =
Process::new(proc() { BlobStore::new_for_testing(local_backend, 1024) });
let mut ids = Vec::new();
for chunk in chunks.iter() {
match bsP.send_reply(Store(chunk.as_slice().into_vec(), proc(_){})) {
StoreOK(id) => { ids.push((id, chunk)); },
_ => fail!("Unexpected reply from blob store."),
}
}
assert_eq!(bsP.send_reply(Flush), FlushOK);
// Non-empty chunks must be in the backend now:
for &(ref id, chunk) in ids.iter() {
if chunk.len() > 0 {
match backend.retrieve(id.name.as_slice()) {
Ok(_) => (),
Err(e) => fail!(e),
}
}
}
// All chunks must be available through the blob store:
for &(ref id, chunk) in ids.iter() {
match bsP.send_reply(Retrieve(id.clone())) {
RetrieveOK(found_chunk) => assert_eq!(found_chunk,
chunk.as_slice().into_vec()),
_ => fail!("Unexpected reply from blob store."),
}
}
return true;
}
qcheck(prop);
}
#[test]
fn identity_with_excessive_flushing() {
fn prop(chunks: Vec<Vec<u8>>) -> bool {
let mut backend = MemoryBackend::new();
let local_backend = backend.clone();
let bsP: BlobStoreProcess<MemoryBackend> = Process::new(proc() {
BlobStore::new_for_testing(local_backend, 1024) });
let mut ids = Vec::new();
for chunk in chunks.iter() {
match bsP.send_reply(Store(chunk.as_slice().into_vec(), proc(_){})) {
StoreOK(id) => { ids.push((id, chunk)); },
_ => fail!("Unexpected reply from blob store."),
}
assert_eq!(bsP.send_reply(Flush), FlushOK);
let &(ref id, chunk) = ids.last().unwrap();
assert_eq!(bsP.send_reply(Retrieve(id.clone())), RetrieveOK(chunk.clone()));
}
// Non-empty chunks must be in the backend now:
for &(ref id, chunk) in ids.iter() {
if chunk.len() > 0 {
match backend.retrieve(id.name.as_slice()) {
Ok(_) => (),
Err(e) => fail!(e),
}
}
}
// All chunks must be available through the blob store:
for &(ref id, chunk) in ids.iter() {
match bsP.send_reply(Retrieve(id.clone())) {
RetrieveOK(found_chunk) => assert_eq!(found_chunk,
chunk.as_slice().into_vec()),
_ => fail!("Unexpected reply from blob store."),
}
}
return true;
}
qcheck(prop);
}
#[test]
fn blobid_identity() {
fn prop(name: Vec<u8>, begin: uint, end: uint) -> bool {
let blob_id = BlobID{name: name.into_vec(),
begin: begin, end: end};
BlobID::from_bytes(blob_id.as_bytes()) == blob_id
}
qcheck(prop);
}
} | blob.push_all(chunk.as_slice()); | random_line_split |
input_loop.go | //
// Copyright © 2018 Aljabr, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package task
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/eapache/go-resiliency/breaker"
ptypes "github.com/gogo/protobuf/types"
"github.com/rs/zerolog"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
"github.com/AljabrIO/koalja-operator/pkg/annotatedvalue"
avclient "github.com/AljabrIO/koalja-operator/pkg/annotatedvalue/client"
koalja "github.com/AljabrIO/koalja-operator/pkg/apis/koalja/v1alpha1"
"github.com/AljabrIO/koalja-operator/pkg/constants"
"github.com/AljabrIO/koalja-operator/pkg/tracking"
"github.com/AljabrIO/koalja-operator/pkg/util/retry"
"github.com/dchest/uniuri"
)
// inputLoop subscribes to all inputs of a task and build
// snapshots of incoming annotated values, according to the policy on each input.
type inputLoop struct {
log zerolog.Logger
spec *koalja.TaskSpec
inputAddressMap map[string]string // map[inputName]AnnotatedValueSourceAddress
clientID string
snapshot InputSnapshot
mutex sync.Mutex
executionCount int32
execQueue chan (*InputSnapshot)
executor Executor
snapshotService SnapshotService
statistics *tracking.TaskStatistics
}
// newInputLoop initializes a new input loop.
func newInputLoop(log zerolog.Logger, spec *koalja.TaskSpec, pod *corev1.Pod, executor Executor, snapshotService SnapshotService, statistics *tracking.TaskStatistics) (*inputLoop, error) {
inputAddressMap := make(map[string]string)
for _, tis := range spec.Inputs {
annKey := constants.CreateInputLinkAddressAnnotationName(tis.Name)
address := pod.GetAnnotations()[annKey]
if address == "" {
return nil, fmt.Errorf("No input address annotation found for input '%s'", tis.Name)
}
inputAddressMap[tis.Name] = address
}
return &inputLoop{
log: log,
spec: spec,
inputAddressMap: inputAddressMap,
clientID: uniuri.New(),
execQueue: make(chan *InputSnapshot),
executor: executor,
snapshotService: snapshotService,
statistics: statistics,
}, nil
}
// Run the input loop until the given context is canceled.
func (il *inputLoop) Run(ctx context.Context) error {
defer close(il.execQueue)
g, lctx := errgroup.WithContext(ctx)
if len(il.spec.Inputs) > 0 {
// Watch inputs
for _, tis := range il.spec.Inputs {
tis := tis // Bring in scope
stats := il.statistics.InputByName(tis.Name)
g.Go(func() error {
return il.watchInput(lctx, il.spec.SnapshotPolicy, tis, stats)
})
}
}
if il.spec.HasLaunchPolicyCustom() {
// Custom launch policy, run executor all the time
g.Go(func() error {
return il.runExecWithCustomLaunchPolicy(lctx)
})
}
g.Go(func() error {
return il.processExecQueue(lctx)
})
if err := g.Wait(); err != nil {
return err
}
return nil
}
// processExecQueue pulls snapshots from the exec queue and:
// - executes them in case of tasks with auto launch policy or
// - allows the executor to pull the snapshot in case of tasks with custom launch policy.
func (il *inputLoop) processExecQueue(ctx context.Context) error {
var lastCancel context.CancelFunc
for {
select {
case snapshot, ok := <-il.execQueue:
if !ok {
return nil
}
if il.spec.HasLaunchPolicyAuto() {
// Automatic launch policy; Go launch an executor
if err := il.execOnSnapshot(ctx, snapshot); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task")
}
} else if il.spec.HasLaunchPolicyRestart() {
// Restart launch policy;
// - Cancel existing executor
if lastCancel != nil {
lastCancel()
lastCancel = nil
}
// - Go launch a new executor
var launchCtx context.Context
launchCtx, cancel := context.WithCancel(ctx)
lastCancel = cancel
go func() {
defer cancel()
if err := il.execOnSnapshot(launchCtx, snapshot); launchCtx.Err() != nil { | else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task")
}
}()
} else if il.spec.HasLaunchPolicyCustom() {
// Custom launch policy; Make snapshot available to snapshot service
if err := il.snapshotService.Execute(ctx, snapshot); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task with custom launch policy")
}
}
case <-ctx.Done():
return ctx.Err()
}
}
}
// runExecWithCustomLaunchPolicy runs the executor continuesly without
// providing it a valid snapshot when it starts.
// The executor must use the SnapshotService to pull for new snapshots.
func (il *inputLoop) runExecWithCustomLaunchPolicy(ctx context.Context) error {
b := breaker.New(5, 1, time.Second*10)
for {
snapshot := &InputSnapshot{}
if err := b.Run(func() error {
if err := il.execOnSnapshot(ctx, snapshot); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task")
return maskAny(err)
}
return nil
}); ctx.Err() != nil {
return ctx.Err()
} else if err == breaker.ErrBreakerOpen {
// Circuit break open
select {
case <-time.After(time.Second * 5):
// Retry
case <-ctx.Done():
return ctx.Err()
}
}
}
}
// execOnSnapshot executes the task executor for the given snapshot.
func (il *inputLoop) execOnSnapshot(ctx context.Context, snapshot *InputSnapshot) error {
// Update statistics
atomic.AddInt64(&il.statistics.SnapshotsWaiting, -1)
atomic.AddInt64(&il.statistics.SnapshotsInProgress, 1)
snapshot.AddInProgressStatistics(1)
// Update statistics on return
defer func() {
snapshot.AddInProgressStatistics(-1)
snapshot.AddProcessedStatistics(1)
atomic.AddInt64(&il.statistics.SnapshotsInProgress, -1)
}()
if err := il.executor.Execute(ctx, snapshot); ctx.Err() != nil {
atomic.AddInt64(&il.statistics.SnapshotsFailed, 1)
return ctx.Err()
} else if err != nil {
atomic.AddInt64(&il.statistics.SnapshotsFailed, 1)
il.log.Debug().Err(err).Msg("executor.Execute failed")
return maskAny(err)
} else {
// Acknowledge all annotated values in the snapshot
atomic.AddInt64(&il.statistics.SnapshotsSucceeded, 1)
il.log.Debug().Msg("acknowledging all annotated values in snapshot")
if err := snapshot.AckAll(ctx); err != nil {
il.log.Error().Err(err).Msg("Failed to acknowledge annotated values")
}
return nil
}
}
// processAnnotatedValue the annotated value coming from the given input.
func (il *inputLoop) processAnnotatedValue(ctx context.Context, av *annotatedvalue.AnnotatedValue, snapshotPolicy koalja.SnapshotPolicy, tis koalja.TaskInputSpec, stats *tracking.TaskInputStatistics, ack func(context.Context, *annotatedvalue.AnnotatedValue) error) error {
il.mutex.Lock()
defer il.mutex.Unlock()
// Wait until snapshot has a place in the sequence of an annotated values for given input
for {
seqLen := il.snapshot.GetSequenceLengthForInput(tis.Name)
if seqLen < tis.GetMaxSequenceLength() {
// There is space available in the sequence to add at least 1 more annotated value
break
}
// Wait a bit
il.mutex.Unlock()
select {
case <-time.After(time.Millisecond * 50):
// Retry
il.mutex.Lock()
case <-ctx.Done():
// Context canceled
il.mutex.Lock()
return ctx.Err()
}
}
// Set the annotated value in the snapshot
if err := il.snapshot.Set(ctx, tis.Name, av, tis.GetMinSequenceLength(), tis.GetMaxSequenceLength(), stats, ack); err != nil {
return err
}
// Build list of inputs that we use in the snapshot (leave out ones with MergeInto)
snapshotInputs := il.spec.SnapshotInputs()
// See if we should execute the task now
if !il.snapshot.IsReadyForExecution(len(snapshotInputs)) {
// Not all inputs have received sufficient annotated values yet
return nil
}
// Clone the snapshot
clonedSnapshot := il.snapshot.Clone()
il.executionCount++
// Prepare snapshot for next execution
for _, inp := range snapshotInputs {
if snapshotPolicy.IsAllNew() {
// Delete annotated value
il.snapshot.Delete(inp.Name)
} else if snapshotPolicy.IsSlidingWindow() {
// Remove Slide number of values
il.snapshot.Slide(inp.Name, inp.GetSlide())
// No need to acknowledge remaining values
il.snapshot.RemoveAck(inp.Name)
} else if snapshotPolicy.IsSwapNew4Old() {
// Remove need to acknowledge annotated value
il.snapshot.RemoveAck(inp.Name)
}
}
// Update statistics
atomic.AddInt64(&il.statistics.SnapshotsWaiting, 1)
// Push snapshot into execution queue
il.mutex.Unlock()
il.execQueue <- clonedSnapshot
il.mutex.Lock()
return nil
}
// watchInput subscribes to the given input and gathers annotated values until the given context is canceled.
func (il *inputLoop) watchInput(ctx context.Context, snapshotPolicy koalja.SnapshotPolicy, tis koalja.TaskInputSpec, stats *tracking.TaskInputStatistics) error {
// Create client
address := il.inputAddressMap[tis.Name]
tisOut := tis
if tis.HasMergeInto() {
tisOut, _ = il.spec.InputByName(tis.MergeInto)
}
// Prepare loop
subscribeAndReadLoop := func(ctx context.Context, c avclient.AnnotatedValueSourceClient) error {
defer c.CloseConnection()
resp, err := c.Subscribe(ctx, &annotatedvalue.SubscribeRequest{
ClientID: il.clientID,
})
if err != nil {
return err
}
subscr := *resp.GetSubscription()
ack := func(ctx context.Context, av *annotatedvalue.AnnotatedValue) error {
if err := retry.Do(ctx, func(ctx context.Context) error {
if _, err := c.Ack(ctx, &annotatedvalue.AckRequest{
Subscription: &subscr,
AnnotatedValueID: av.GetID(),
}); err != nil {
il.log.Debug().Err(err).Msg("Ack annotated value attempt failed")
return err
}
return nil
}, retry.Timeout(constants.TimeoutAckAnnotatedValue)); err != nil {
il.log.Error().Err(err).Msg("Failed to ack annotated value")
return maskAny(err)
}
return nil
}
for {
resp, err := c.Next(ctx, &annotatedvalue.NextRequest{
Subscription: &subscr,
WaitTimeout: ptypes.DurationProto(time.Second * 30),
})
if ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
// handle err
il.log.Error().Err(err).Msg("Failed to fetch next annotated value")
} else {
// Process annotated value (if any)
if av := resp.GetAnnotatedValue(); av != nil {
atomic.AddInt64(&stats.AnnotatedValuesReceived, 1)
if err := il.processAnnotatedValue(ctx, av, snapshotPolicy, tisOut, stats, ack); err != nil {
il.log.Error().Err(err).Msg("Failed to process annotated value")
}
}
}
}
}
// Keep creating connection, subscribe and loop
for {
c, err := avclient.NewAnnotatedValueSourceClient(address)
if err == nil {
if err := subscribeAndReadLoop(ctx, c); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failure in subscribe & read annotated value loop")
}
} else if ctx.Err() != nil {
return ctx.Err()
} else {
il.log.Error().Err(err).Msg("Failed to create annotated value source client")
}
// Wait a bit
select {
case <-time.After(time.Second * 5):
// Retry
case <-ctx.Done():
return ctx.Err()
}
}
}
|
// Context canceled, ignore
} | conditional_block |
input_loop.go | //
// Copyright © 2018 Aljabr, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package task
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/eapache/go-resiliency/breaker"
ptypes "github.com/gogo/protobuf/types"
"github.com/rs/zerolog"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
"github.com/AljabrIO/koalja-operator/pkg/annotatedvalue"
avclient "github.com/AljabrIO/koalja-operator/pkg/annotatedvalue/client"
koalja "github.com/AljabrIO/koalja-operator/pkg/apis/koalja/v1alpha1"
"github.com/AljabrIO/koalja-operator/pkg/constants"
"github.com/AljabrIO/koalja-operator/pkg/tracking"
"github.com/AljabrIO/koalja-operator/pkg/util/retry"
"github.com/dchest/uniuri"
)
// inputLoop subscribes to all inputs of a task and build
// snapshots of incoming annotated values, according to the policy on each input.
type inputLoop struct {
log zerolog.Logger
spec *koalja.TaskSpec
inputAddressMap map[string]string // map[inputName]AnnotatedValueSourceAddress
clientID string
snapshot InputSnapshot
mutex sync.Mutex
executionCount int32
execQueue chan (*InputSnapshot)
executor Executor
snapshotService SnapshotService
statistics *tracking.TaskStatistics
}
// newInputLoop initializes a new input loop.
func newInputLoop(log zerolog.Logger, spec *koalja.TaskSpec, pod *corev1.Pod, executor Executor, snapshotService SnapshotService, statistics *tracking.TaskStatistics) (*inputLoop, error) {
inputAddressMap := make(map[string]string)
for _, tis := range spec.Inputs {
annKey := constants.CreateInputLinkAddressAnnotationName(tis.Name)
address := pod.GetAnnotations()[annKey]
if address == "" {
return nil, fmt.Errorf("No input address annotation found for input '%s'", tis.Name)
}
inputAddressMap[tis.Name] = address
}
return &inputLoop{
log: log,
spec: spec,
inputAddressMap: inputAddressMap,
clientID: uniuri.New(),
execQueue: make(chan *InputSnapshot),
executor: executor,
snapshotService: snapshotService,
statistics: statistics,
}, nil
}
// Run the input loop until the given context is canceled.
func (il *inputLoop) Run(ctx context.Context) error { |
// processExecQueue pulls snapshots from the exec queue and:
// - executes them in case of tasks with auto launch policy or
// - allows the executor to pull the snapshot in case of tasks with custom launch policy.
func (il *inputLoop) processExecQueue(ctx context.Context) error {
var lastCancel context.CancelFunc
for {
select {
case snapshot, ok := <-il.execQueue:
if !ok {
return nil
}
if il.spec.HasLaunchPolicyAuto() {
// Automatic launch policy; Go launch an executor
if err := il.execOnSnapshot(ctx, snapshot); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task")
}
} else if il.spec.HasLaunchPolicyRestart() {
// Restart launch policy;
// - Cancel existing executor
if lastCancel != nil {
lastCancel()
lastCancel = nil
}
// - Go launch a new executor
var launchCtx context.Context
launchCtx, cancel := context.WithCancel(ctx)
lastCancel = cancel
go func() {
defer cancel()
if err := il.execOnSnapshot(launchCtx, snapshot); launchCtx.Err() != nil {
// Context canceled, ignore
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task")
}
}()
} else if il.spec.HasLaunchPolicyCustom() {
// Custom launch policy; Make snapshot available to snapshot service
if err := il.snapshotService.Execute(ctx, snapshot); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task with custom launch policy")
}
}
case <-ctx.Done():
return ctx.Err()
}
}
}
// runExecWithCustomLaunchPolicy runs the executor continuesly without
// providing it a valid snapshot when it starts.
// The executor must use the SnapshotService to pull for new snapshots.
func (il *inputLoop) runExecWithCustomLaunchPolicy(ctx context.Context) error {
b := breaker.New(5, 1, time.Second*10)
for {
snapshot := &InputSnapshot{}
if err := b.Run(func() error {
if err := il.execOnSnapshot(ctx, snapshot); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task")
return maskAny(err)
}
return nil
}); ctx.Err() != nil {
return ctx.Err()
} else if err == breaker.ErrBreakerOpen {
// Circuit break open
select {
case <-time.After(time.Second * 5):
// Retry
case <-ctx.Done():
return ctx.Err()
}
}
}
}
// execOnSnapshot executes the task executor for the given snapshot.
func (il *inputLoop) execOnSnapshot(ctx context.Context, snapshot *InputSnapshot) error {
// Update statistics
atomic.AddInt64(&il.statistics.SnapshotsWaiting, -1)
atomic.AddInt64(&il.statistics.SnapshotsInProgress, 1)
snapshot.AddInProgressStatistics(1)
// Update statistics on return
defer func() {
snapshot.AddInProgressStatistics(-1)
snapshot.AddProcessedStatistics(1)
atomic.AddInt64(&il.statistics.SnapshotsInProgress, -1)
}()
if err := il.executor.Execute(ctx, snapshot); ctx.Err() != nil {
atomic.AddInt64(&il.statistics.SnapshotsFailed, 1)
return ctx.Err()
} else if err != nil {
atomic.AddInt64(&il.statistics.SnapshotsFailed, 1)
il.log.Debug().Err(err).Msg("executor.Execute failed")
return maskAny(err)
} else {
// Acknowledge all annotated values in the snapshot
atomic.AddInt64(&il.statistics.SnapshotsSucceeded, 1)
il.log.Debug().Msg("acknowledging all annotated values in snapshot")
if err := snapshot.AckAll(ctx); err != nil {
il.log.Error().Err(err).Msg("Failed to acknowledge annotated values")
}
return nil
}
}
// processAnnotatedValue the annotated value coming from the given input.
func (il *inputLoop) processAnnotatedValue(ctx context.Context, av *annotatedvalue.AnnotatedValue, snapshotPolicy koalja.SnapshotPolicy, tis koalja.TaskInputSpec, stats *tracking.TaskInputStatistics, ack func(context.Context, *annotatedvalue.AnnotatedValue) error) error {
il.mutex.Lock()
defer il.mutex.Unlock()
// Wait until snapshot has a place in the sequence of an annotated values for given input
for {
seqLen := il.snapshot.GetSequenceLengthForInput(tis.Name)
if seqLen < tis.GetMaxSequenceLength() {
// There is space available in the sequence to add at least 1 more annotated value
break
}
// Wait a bit
il.mutex.Unlock()
select {
case <-time.After(time.Millisecond * 50):
// Retry
il.mutex.Lock()
case <-ctx.Done():
// Context canceled
il.mutex.Lock()
return ctx.Err()
}
}
// Set the annotated value in the snapshot
if err := il.snapshot.Set(ctx, tis.Name, av, tis.GetMinSequenceLength(), tis.GetMaxSequenceLength(), stats, ack); err != nil {
return err
}
// Build list of inputs that we use in the snapshot (leave out ones with MergeInto)
snapshotInputs := il.spec.SnapshotInputs()
// See if we should execute the task now
if !il.snapshot.IsReadyForExecution(len(snapshotInputs)) {
// Not all inputs have received sufficient annotated values yet
return nil
}
// Clone the snapshot
clonedSnapshot := il.snapshot.Clone()
il.executionCount++
// Prepare snapshot for next execution
for _, inp := range snapshotInputs {
if snapshotPolicy.IsAllNew() {
// Delete annotated value
il.snapshot.Delete(inp.Name)
} else if snapshotPolicy.IsSlidingWindow() {
// Remove Slide number of values
il.snapshot.Slide(inp.Name, inp.GetSlide())
// No need to acknowledge remaining values
il.snapshot.RemoveAck(inp.Name)
} else if snapshotPolicy.IsSwapNew4Old() {
// Remove need to acknowledge annotated value
il.snapshot.RemoveAck(inp.Name)
}
}
// Update statistics
atomic.AddInt64(&il.statistics.SnapshotsWaiting, 1)
// Push snapshot into execution queue
il.mutex.Unlock()
il.execQueue <- clonedSnapshot
il.mutex.Lock()
return nil
}
// watchInput subscribes to the given input and gathers annotated values until the given context is canceled.
func (il *inputLoop) watchInput(ctx context.Context, snapshotPolicy koalja.SnapshotPolicy, tis koalja.TaskInputSpec, stats *tracking.TaskInputStatistics) error {
// Create client
address := il.inputAddressMap[tis.Name]
tisOut := tis
if tis.HasMergeInto() {
tisOut, _ = il.spec.InputByName(tis.MergeInto)
}
// Prepare loop
subscribeAndReadLoop := func(ctx context.Context, c avclient.AnnotatedValueSourceClient) error {
defer c.CloseConnection()
resp, err := c.Subscribe(ctx, &annotatedvalue.SubscribeRequest{
ClientID: il.clientID,
})
if err != nil {
return err
}
subscr := *resp.GetSubscription()
ack := func(ctx context.Context, av *annotatedvalue.AnnotatedValue) error {
if err := retry.Do(ctx, func(ctx context.Context) error {
if _, err := c.Ack(ctx, &annotatedvalue.AckRequest{
Subscription: &subscr,
AnnotatedValueID: av.GetID(),
}); err != nil {
il.log.Debug().Err(err).Msg("Ack annotated value attempt failed")
return err
}
return nil
}, retry.Timeout(constants.TimeoutAckAnnotatedValue)); err != nil {
il.log.Error().Err(err).Msg("Failed to ack annotated value")
return maskAny(err)
}
return nil
}
for {
resp, err := c.Next(ctx, &annotatedvalue.NextRequest{
Subscription: &subscr,
WaitTimeout: ptypes.DurationProto(time.Second * 30),
})
if ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
// handle err
il.log.Error().Err(err).Msg("Failed to fetch next annotated value")
} else {
// Process annotated value (if any)
if av := resp.GetAnnotatedValue(); av != nil {
atomic.AddInt64(&stats.AnnotatedValuesReceived, 1)
if err := il.processAnnotatedValue(ctx, av, snapshotPolicy, tisOut, stats, ack); err != nil {
il.log.Error().Err(err).Msg("Failed to process annotated value")
}
}
}
}
}
// Keep creating connection, subscribe and loop
for {
c, err := avclient.NewAnnotatedValueSourceClient(address)
if err == nil {
if err := subscribeAndReadLoop(ctx, c); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failure in subscribe & read annotated value loop")
}
} else if ctx.Err() != nil {
return ctx.Err()
} else {
il.log.Error().Err(err).Msg("Failed to create annotated value source client")
}
// Wait a bit
select {
case <-time.After(time.Second * 5):
// Retry
case <-ctx.Done():
return ctx.Err()
}
}
}
|
defer close(il.execQueue)
g, lctx := errgroup.WithContext(ctx)
if len(il.spec.Inputs) > 0 {
// Watch inputs
for _, tis := range il.spec.Inputs {
tis := tis // Bring in scope
stats := il.statistics.InputByName(tis.Name)
g.Go(func() error {
return il.watchInput(lctx, il.spec.SnapshotPolicy, tis, stats)
})
}
}
if il.spec.HasLaunchPolicyCustom() {
// Custom launch policy, run executor all the time
g.Go(func() error {
return il.runExecWithCustomLaunchPolicy(lctx)
})
}
g.Go(func() error {
return il.processExecQueue(lctx)
})
if err := g.Wait(); err != nil {
return err
}
return nil
}
| identifier_body |
input_loop.go | //
// Copyright © 2018 Aljabr, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package task
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/eapache/go-resiliency/breaker"
ptypes "github.com/gogo/protobuf/types"
"github.com/rs/zerolog"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
"github.com/AljabrIO/koalja-operator/pkg/annotatedvalue"
avclient "github.com/AljabrIO/koalja-operator/pkg/annotatedvalue/client"
koalja "github.com/AljabrIO/koalja-operator/pkg/apis/koalja/v1alpha1"
"github.com/AljabrIO/koalja-operator/pkg/constants"
"github.com/AljabrIO/koalja-operator/pkg/tracking"
"github.com/AljabrIO/koalja-operator/pkg/util/retry"
"github.com/dchest/uniuri"
)
// inputLoop subscribes to all inputs of a task and build
// snapshots of incoming annotated values, according to the policy on each input.
type inputLoop struct {
log zerolog.Logger
spec *koalja.TaskSpec
inputAddressMap map[string]string // map[inputName]AnnotatedValueSourceAddress
clientID string
snapshot InputSnapshot
mutex sync.Mutex
executionCount int32
execQueue chan (*InputSnapshot)
executor Executor
snapshotService SnapshotService
statistics *tracking.TaskStatistics
}
// newInputLoop initializes a new input loop.
func newInputLoop(log zerolog.Logger, spec *koalja.TaskSpec, pod *corev1.Pod, executor Executor, snapshotService SnapshotService, statistics *tracking.TaskStatistics) (*inputLoop, error) {
inputAddressMap := make(map[string]string)
for _, tis := range spec.Inputs {
annKey := constants.CreateInputLinkAddressAnnotationName(tis.Name)
address := pod.GetAnnotations()[annKey]
if address == "" {
return nil, fmt.Errorf("No input address annotation found for input '%s'", tis.Name)
}
inputAddressMap[tis.Name] = address
}
return &inputLoop{
log: log,
spec: spec,
inputAddressMap: inputAddressMap,
clientID: uniuri.New(),
execQueue: make(chan *InputSnapshot),
executor: executor,
snapshotService: snapshotService,
statistics: statistics, | // Run the input loop until the given context is canceled.
func (il *inputLoop) Run(ctx context.Context) error {
defer close(il.execQueue)
g, lctx := errgroup.WithContext(ctx)
if len(il.spec.Inputs) > 0 {
// Watch inputs
for _, tis := range il.spec.Inputs {
tis := tis // Bring in scope
stats := il.statistics.InputByName(tis.Name)
g.Go(func() error {
return il.watchInput(lctx, il.spec.SnapshotPolicy, tis, stats)
})
}
}
if il.spec.HasLaunchPolicyCustom() {
// Custom launch policy, run executor all the time
g.Go(func() error {
return il.runExecWithCustomLaunchPolicy(lctx)
})
}
g.Go(func() error {
return il.processExecQueue(lctx)
})
if err := g.Wait(); err != nil {
return err
}
return nil
}
// processExecQueue pulls snapshots from the exec queue and:
// - executes them in case of tasks with auto launch policy or
// - allows the executor to pull the snapshot in case of tasks with custom launch policy.
func (il *inputLoop) processExecQueue(ctx context.Context) error {
var lastCancel context.CancelFunc
for {
select {
case snapshot, ok := <-il.execQueue:
if !ok {
return nil
}
if il.spec.HasLaunchPolicyAuto() {
// Automatic launch policy; Go launch an executor
if err := il.execOnSnapshot(ctx, snapshot); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task")
}
} else if il.spec.HasLaunchPolicyRestart() {
// Restart launch policy;
// - Cancel existing executor
if lastCancel != nil {
lastCancel()
lastCancel = nil
}
// - Go launch a new executor
var launchCtx context.Context
launchCtx, cancel := context.WithCancel(ctx)
lastCancel = cancel
go func() {
defer cancel()
if err := il.execOnSnapshot(launchCtx, snapshot); launchCtx.Err() != nil {
// Context canceled, ignore
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task")
}
}()
} else if il.spec.HasLaunchPolicyCustom() {
// Custom launch policy; Make snapshot available to snapshot service
if err := il.snapshotService.Execute(ctx, snapshot); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task with custom launch policy")
}
}
case <-ctx.Done():
return ctx.Err()
}
}
}
// runExecWithCustomLaunchPolicy runs the executor continuesly without
// providing it a valid snapshot when it starts.
// The executor must use the SnapshotService to pull for new snapshots.
func (il *inputLoop) runExecWithCustomLaunchPolicy(ctx context.Context) error {
b := breaker.New(5, 1, time.Second*10)
for {
snapshot := &InputSnapshot{}
if err := b.Run(func() error {
if err := il.execOnSnapshot(ctx, snapshot); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task")
return maskAny(err)
}
return nil
}); ctx.Err() != nil {
return ctx.Err()
} else if err == breaker.ErrBreakerOpen {
// Circuit break open
select {
case <-time.After(time.Second * 5):
// Retry
case <-ctx.Done():
return ctx.Err()
}
}
}
}
// execOnSnapshot executes the task executor for the given snapshot.
func (il *inputLoop) execOnSnapshot(ctx context.Context, snapshot *InputSnapshot) error {
// Update statistics
atomic.AddInt64(&il.statistics.SnapshotsWaiting, -1)
atomic.AddInt64(&il.statistics.SnapshotsInProgress, 1)
snapshot.AddInProgressStatistics(1)
// Update statistics on return
defer func() {
snapshot.AddInProgressStatistics(-1)
snapshot.AddProcessedStatistics(1)
atomic.AddInt64(&il.statistics.SnapshotsInProgress, -1)
}()
if err := il.executor.Execute(ctx, snapshot); ctx.Err() != nil {
atomic.AddInt64(&il.statistics.SnapshotsFailed, 1)
return ctx.Err()
} else if err != nil {
atomic.AddInt64(&il.statistics.SnapshotsFailed, 1)
il.log.Debug().Err(err).Msg("executor.Execute failed")
return maskAny(err)
} else {
// Acknowledge all annotated values in the snapshot
atomic.AddInt64(&il.statistics.SnapshotsSucceeded, 1)
il.log.Debug().Msg("acknowledging all annotated values in snapshot")
if err := snapshot.AckAll(ctx); err != nil {
il.log.Error().Err(err).Msg("Failed to acknowledge annotated values")
}
return nil
}
}
// processAnnotatedValue the annotated value coming from the given input.
func (il *inputLoop) processAnnotatedValue(ctx context.Context, av *annotatedvalue.AnnotatedValue, snapshotPolicy koalja.SnapshotPolicy, tis koalja.TaskInputSpec, stats *tracking.TaskInputStatistics, ack func(context.Context, *annotatedvalue.AnnotatedValue) error) error {
il.mutex.Lock()
defer il.mutex.Unlock()
// Wait until snapshot has a place in the sequence of an annotated values for given input
for {
seqLen := il.snapshot.GetSequenceLengthForInput(tis.Name)
if seqLen < tis.GetMaxSequenceLength() {
// There is space available in the sequence to add at least 1 more annotated value
break
}
// Wait a bit
il.mutex.Unlock()
select {
case <-time.After(time.Millisecond * 50):
// Retry
il.mutex.Lock()
case <-ctx.Done():
// Context canceled
il.mutex.Lock()
return ctx.Err()
}
}
// Set the annotated value in the snapshot
if err := il.snapshot.Set(ctx, tis.Name, av, tis.GetMinSequenceLength(), tis.GetMaxSequenceLength(), stats, ack); err != nil {
return err
}
// Build list of inputs that we use in the snapshot (leave out ones with MergeInto)
snapshotInputs := il.spec.SnapshotInputs()
// See if we should execute the task now
if !il.snapshot.IsReadyForExecution(len(snapshotInputs)) {
// Not all inputs have received sufficient annotated values yet
return nil
}
// Clone the snapshot
clonedSnapshot := il.snapshot.Clone()
il.executionCount++
// Prepare snapshot for next execution
for _, inp := range snapshotInputs {
if snapshotPolicy.IsAllNew() {
// Delete annotated value
il.snapshot.Delete(inp.Name)
} else if snapshotPolicy.IsSlidingWindow() {
// Remove Slide number of values
il.snapshot.Slide(inp.Name, inp.GetSlide())
// No need to acknowledge remaining values
il.snapshot.RemoveAck(inp.Name)
} else if snapshotPolicy.IsSwapNew4Old() {
// Remove need to acknowledge annotated value
il.snapshot.RemoveAck(inp.Name)
}
}
// Update statistics
atomic.AddInt64(&il.statistics.SnapshotsWaiting, 1)
// Push snapshot into execution queue
il.mutex.Unlock()
il.execQueue <- clonedSnapshot
il.mutex.Lock()
return nil
}
// watchInput subscribes to the given input and gathers annotated values until the given context is canceled.
func (il *inputLoop) watchInput(ctx context.Context, snapshotPolicy koalja.SnapshotPolicy, tis koalja.TaskInputSpec, stats *tracking.TaskInputStatistics) error {
// Create client
address := il.inputAddressMap[tis.Name]
tisOut := tis
if tis.HasMergeInto() {
tisOut, _ = il.spec.InputByName(tis.MergeInto)
}
// Prepare loop
subscribeAndReadLoop := func(ctx context.Context, c avclient.AnnotatedValueSourceClient) error {
defer c.CloseConnection()
resp, err := c.Subscribe(ctx, &annotatedvalue.SubscribeRequest{
ClientID: il.clientID,
})
if err != nil {
return err
}
subscr := *resp.GetSubscription()
ack := func(ctx context.Context, av *annotatedvalue.AnnotatedValue) error {
if err := retry.Do(ctx, func(ctx context.Context) error {
if _, err := c.Ack(ctx, &annotatedvalue.AckRequest{
Subscription: &subscr,
AnnotatedValueID: av.GetID(),
}); err != nil {
il.log.Debug().Err(err).Msg("Ack annotated value attempt failed")
return err
}
return nil
}, retry.Timeout(constants.TimeoutAckAnnotatedValue)); err != nil {
il.log.Error().Err(err).Msg("Failed to ack annotated value")
return maskAny(err)
}
return nil
}
for {
resp, err := c.Next(ctx, &annotatedvalue.NextRequest{
Subscription: &subscr,
WaitTimeout: ptypes.DurationProto(time.Second * 30),
})
if ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
// handle err
il.log.Error().Err(err).Msg("Failed to fetch next annotated value")
} else {
// Process annotated value (if any)
if av := resp.GetAnnotatedValue(); av != nil {
atomic.AddInt64(&stats.AnnotatedValuesReceived, 1)
if err := il.processAnnotatedValue(ctx, av, snapshotPolicy, tisOut, stats, ack); err != nil {
il.log.Error().Err(err).Msg("Failed to process annotated value")
}
}
}
}
}
// Keep creating connection, subscribe and loop
for {
c, err := avclient.NewAnnotatedValueSourceClient(address)
if err == nil {
if err := subscribeAndReadLoop(ctx, c); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failure in subscribe & read annotated value loop")
}
} else if ctx.Err() != nil {
return ctx.Err()
} else {
il.log.Error().Err(err).Msg("Failed to create annotated value source client")
}
// Wait a bit
select {
case <-time.After(time.Second * 5):
// Retry
case <-ctx.Done():
return ctx.Err()
}
}
} | }, nil
}
| random_line_split |
input_loop.go | //
// Copyright © 2018 Aljabr, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package task
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/eapache/go-resiliency/breaker"
ptypes "github.com/gogo/protobuf/types"
"github.com/rs/zerolog"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
"github.com/AljabrIO/koalja-operator/pkg/annotatedvalue"
avclient "github.com/AljabrIO/koalja-operator/pkg/annotatedvalue/client"
koalja "github.com/AljabrIO/koalja-operator/pkg/apis/koalja/v1alpha1"
"github.com/AljabrIO/koalja-operator/pkg/constants"
"github.com/AljabrIO/koalja-operator/pkg/tracking"
"github.com/AljabrIO/koalja-operator/pkg/util/retry"
"github.com/dchest/uniuri"
)
// inputLoop subscribes to all inputs of a task and build
// snapshots of incoming annotated values, according to the policy on each input.
type inputLoop struct {
log zerolog.Logger
spec *koalja.TaskSpec
inputAddressMap map[string]string // map[inputName]AnnotatedValueSourceAddress
clientID string
snapshot InputSnapshot
mutex sync.Mutex
executionCount int32
execQueue chan (*InputSnapshot)
executor Executor
snapshotService SnapshotService
statistics *tracking.TaskStatistics
}
// newInputLoop initializes a new input loop.
func newInputLoop(log zerolog.Logger, spec *koalja.TaskSpec, pod *corev1.Pod, executor Executor, snapshotService SnapshotService, statistics *tracking.TaskStatistics) (*inputLoop, error) {
inputAddressMap := make(map[string]string)
for _, tis := range spec.Inputs {
annKey := constants.CreateInputLinkAddressAnnotationName(tis.Name)
address := pod.GetAnnotations()[annKey]
if address == "" {
return nil, fmt.Errorf("No input address annotation found for input '%s'", tis.Name)
}
inputAddressMap[tis.Name] = address
}
return &inputLoop{
log: log,
spec: spec,
inputAddressMap: inputAddressMap,
clientID: uniuri.New(),
execQueue: make(chan *InputSnapshot),
executor: executor,
snapshotService: snapshotService,
statistics: statistics,
}, nil
}
// Run the input loop until the given context is canceled.
func (il *inputLoop) Run(ctx context.Context) error {
defer close(il.execQueue)
g, lctx := errgroup.WithContext(ctx)
if len(il.spec.Inputs) > 0 {
// Watch inputs
for _, tis := range il.spec.Inputs {
tis := tis // Bring in scope
stats := il.statistics.InputByName(tis.Name)
g.Go(func() error {
return il.watchInput(lctx, il.spec.SnapshotPolicy, tis, stats)
})
}
}
if il.spec.HasLaunchPolicyCustom() {
// Custom launch policy, run executor all the time
g.Go(func() error {
return il.runExecWithCustomLaunchPolicy(lctx)
})
}
g.Go(func() error {
return il.processExecQueue(lctx)
})
if err := g.Wait(); err != nil {
return err
}
return nil
}
// processExecQueue pulls snapshots from the exec queue and:
// - executes them in case of tasks with auto launch policy or
// - allows the executor to pull the snapshot in case of tasks with custom launch policy.
func (il *inputLoop) processExecQueue(ctx context.Context) error {
var lastCancel context.CancelFunc
for {
select {
case snapshot, ok := <-il.execQueue:
if !ok {
return nil
}
if il.spec.HasLaunchPolicyAuto() {
// Automatic launch policy; Go launch an executor
if err := il.execOnSnapshot(ctx, snapshot); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task")
}
} else if il.spec.HasLaunchPolicyRestart() {
// Restart launch policy;
// - Cancel existing executor
if lastCancel != nil {
lastCancel()
lastCancel = nil
}
// - Go launch a new executor
var launchCtx context.Context
launchCtx, cancel := context.WithCancel(ctx)
lastCancel = cancel
go func() {
defer cancel()
if err := il.execOnSnapshot(launchCtx, snapshot); launchCtx.Err() != nil {
// Context canceled, ignore
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task")
}
}()
} else if il.spec.HasLaunchPolicyCustom() {
// Custom launch policy; Make snapshot available to snapshot service
if err := il.snapshotService.Execute(ctx, snapshot); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task with custom launch policy")
}
}
case <-ctx.Done():
return ctx.Err()
}
}
}
// runExecWithCustomLaunchPolicy runs the executor continuesly without
// providing it a valid snapshot when it starts.
// The executor must use the SnapshotService to pull for new snapshots.
func (il *inputLoop) runExecWithCustomLaunchPolicy(ctx context.Context) error {
b := breaker.New(5, 1, time.Second*10)
for {
snapshot := &InputSnapshot{}
if err := b.Run(func() error {
if err := il.execOnSnapshot(ctx, snapshot); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failed to execute task")
return maskAny(err)
}
return nil
}); ctx.Err() != nil {
return ctx.Err()
} else if err == breaker.ErrBreakerOpen {
// Circuit break open
select {
case <-time.After(time.Second * 5):
// Retry
case <-ctx.Done():
return ctx.Err()
}
}
}
}
// execOnSnapshot executes the task executor for the given snapshot.
func (il *inputLoop) e | ctx context.Context, snapshot *InputSnapshot) error {
// Update statistics
atomic.AddInt64(&il.statistics.SnapshotsWaiting, -1)
atomic.AddInt64(&il.statistics.SnapshotsInProgress, 1)
snapshot.AddInProgressStatistics(1)
// Update statistics on return
defer func() {
snapshot.AddInProgressStatistics(-1)
snapshot.AddProcessedStatistics(1)
atomic.AddInt64(&il.statistics.SnapshotsInProgress, -1)
}()
if err := il.executor.Execute(ctx, snapshot); ctx.Err() != nil {
atomic.AddInt64(&il.statistics.SnapshotsFailed, 1)
return ctx.Err()
} else if err != nil {
atomic.AddInt64(&il.statistics.SnapshotsFailed, 1)
il.log.Debug().Err(err).Msg("executor.Execute failed")
return maskAny(err)
} else {
// Acknowledge all annotated values in the snapshot
atomic.AddInt64(&il.statistics.SnapshotsSucceeded, 1)
il.log.Debug().Msg("acknowledging all annotated values in snapshot")
if err := snapshot.AckAll(ctx); err != nil {
il.log.Error().Err(err).Msg("Failed to acknowledge annotated values")
}
return nil
}
}
// processAnnotatedValue the annotated value coming from the given input.
func (il *inputLoop) processAnnotatedValue(ctx context.Context, av *annotatedvalue.AnnotatedValue, snapshotPolicy koalja.SnapshotPolicy, tis koalja.TaskInputSpec, stats *tracking.TaskInputStatistics, ack func(context.Context, *annotatedvalue.AnnotatedValue) error) error {
il.mutex.Lock()
defer il.mutex.Unlock()
// Wait until snapshot has a place in the sequence of an annotated values for given input
for {
seqLen := il.snapshot.GetSequenceLengthForInput(tis.Name)
if seqLen < tis.GetMaxSequenceLength() {
// There is space available in the sequence to add at least 1 more annotated value
break
}
// Wait a bit
il.mutex.Unlock()
select {
case <-time.After(time.Millisecond * 50):
// Retry
il.mutex.Lock()
case <-ctx.Done():
// Context canceled
il.mutex.Lock()
return ctx.Err()
}
}
// Set the annotated value in the snapshot
if err := il.snapshot.Set(ctx, tis.Name, av, tis.GetMinSequenceLength(), tis.GetMaxSequenceLength(), stats, ack); err != nil {
return err
}
// Build list of inputs that we use in the snapshot (leave out ones with MergeInto)
snapshotInputs := il.spec.SnapshotInputs()
// See if we should execute the task now
if !il.snapshot.IsReadyForExecution(len(snapshotInputs)) {
// Not all inputs have received sufficient annotated values yet
return nil
}
// Clone the snapshot
clonedSnapshot := il.snapshot.Clone()
il.executionCount++
// Prepare snapshot for next execution
for _, inp := range snapshotInputs {
if snapshotPolicy.IsAllNew() {
// Delete annotated value
il.snapshot.Delete(inp.Name)
} else if snapshotPolicy.IsSlidingWindow() {
// Remove Slide number of values
il.snapshot.Slide(inp.Name, inp.GetSlide())
// No need to acknowledge remaining values
il.snapshot.RemoveAck(inp.Name)
} else if snapshotPolicy.IsSwapNew4Old() {
// Remove need to acknowledge annotated value
il.snapshot.RemoveAck(inp.Name)
}
}
// Update statistics
atomic.AddInt64(&il.statistics.SnapshotsWaiting, 1)
// Push snapshot into execution queue
il.mutex.Unlock()
il.execQueue <- clonedSnapshot
il.mutex.Lock()
return nil
}
// watchInput subscribes to the given input and gathers annotated values until the given context is canceled.
func (il *inputLoop) watchInput(ctx context.Context, snapshotPolicy koalja.SnapshotPolicy, tis koalja.TaskInputSpec, stats *tracking.TaskInputStatistics) error {
// Create client
address := il.inputAddressMap[tis.Name]
tisOut := tis
if tis.HasMergeInto() {
tisOut, _ = il.spec.InputByName(tis.MergeInto)
}
// Prepare loop
subscribeAndReadLoop := func(ctx context.Context, c avclient.AnnotatedValueSourceClient) error {
defer c.CloseConnection()
resp, err := c.Subscribe(ctx, &annotatedvalue.SubscribeRequest{
ClientID: il.clientID,
})
if err != nil {
return err
}
subscr := *resp.GetSubscription()
ack := func(ctx context.Context, av *annotatedvalue.AnnotatedValue) error {
if err := retry.Do(ctx, func(ctx context.Context) error {
if _, err := c.Ack(ctx, &annotatedvalue.AckRequest{
Subscription: &subscr,
AnnotatedValueID: av.GetID(),
}); err != nil {
il.log.Debug().Err(err).Msg("Ack annotated value attempt failed")
return err
}
return nil
}, retry.Timeout(constants.TimeoutAckAnnotatedValue)); err != nil {
il.log.Error().Err(err).Msg("Failed to ack annotated value")
return maskAny(err)
}
return nil
}
for {
resp, err := c.Next(ctx, &annotatedvalue.NextRequest{
Subscription: &subscr,
WaitTimeout: ptypes.DurationProto(time.Second * 30),
})
if ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
// handle err
il.log.Error().Err(err).Msg("Failed to fetch next annotated value")
} else {
// Process annotated value (if any)
if av := resp.GetAnnotatedValue(); av != nil {
atomic.AddInt64(&stats.AnnotatedValuesReceived, 1)
if err := il.processAnnotatedValue(ctx, av, snapshotPolicy, tisOut, stats, ack); err != nil {
il.log.Error().Err(err).Msg("Failed to process annotated value")
}
}
}
}
}
// Keep creating connection, subscribe and loop
for {
c, err := avclient.NewAnnotatedValueSourceClient(address)
if err == nil {
if err := subscribeAndReadLoop(ctx, c); ctx.Err() != nil {
return ctx.Err()
} else if err != nil {
il.log.Error().Err(err).Msg("Failure in subscribe & read annotated value loop")
}
} else if ctx.Err() != nil {
return ctx.Err()
} else {
il.log.Error().Err(err).Msg("Failed to create annotated value source client")
}
// Wait a bit
select {
case <-time.After(time.Second * 5):
// Retry
case <-ctx.Done():
return ctx.Err()
}
}
}
| xecOnSnapshot( | identifier_name |
Confess.js | import React, { Component } from 'react';
import {
Container,
Icon,
} from 'semantic-ui-react';
import { Link } from 'react-router-dom';
import Tetzel from '../build/contracts/Tetzel.json';
import TetzelCrowdsale from '../build/contracts/TetzelCrowdsale.json';
import TermsAndConditionsModal from './components/TermsAndConditionsModal';
import ConfessSin from './components/ConfessSin';
import ValueSin from './components/ValueSin';
import PurchaseSin from './components/PurchaseSin';
import Forgiveness from './components/Forgiveness';
import './css/confess.css';
export default class Confess extends Component {
constructor(props) {
super(props);
this.state = {
errorMsg: 'Confession has ended. You can still view the confessional but will be unable to confess.',
tx: null,
tetzelInstance: null,
tetzelAddress: 'Loading...',
tetzelCoinAddress: 'Loading...',
account: 'Loading...',
sinText: '',
sinValueUSD: 0,
sinValueETH: 0,
sinRate: 500,
sinRecipient: '',
testSinValues: [0, 0, 0],
ethSpotPrice: null,
pending: false,
activeView: 'CONFESS_SIN',
};
}
async componentWillMount() {
if (this.props.web3) {
try {
// We don't need to worry about fetching accounts now that
// the confessional has closed. This would be otherwise uncommented.
// await this.fetchAccount();
await this.instantiateContracts();
await this.fetchEtherPrice();
} catch(e) {
this.setState({errorMsg: e.message});
}
}
}
async fetchAccount() {
const errorMsg = 'We couldn\'t find an Ethereum account. ' +
'Please check MetaMask or your Web3 provider. ' +
'You can still view the confessional but will not ' +
'be able to confess.';
try {
var accounts = await this._getAccountsPromise();
} catch(e) {
this.setState({errorMsg: errorMsg});
}
if (accounts.length === 0) {
this.setState({errorMsg: errorMsg});
} else {
// Initial SIN recipient is the current account by default
this.setState({account: accounts[0], sinRecipient: accounts[0]});
}
}
_getAccountsPromise() {
var self = this;
return new Promise(function (resolve, reject) {
self.props.web3.eth.getAccounts(function (e, accounts) {
if (e !== null) {
reject(e);
} else {
resolve(accounts);
}
});
});
}
async fetchEtherPrice() {
const priceUrl = "https://api.coinbase.com/v2/prices/ETH-USD/spot";
let response = await fetch(priceUrl);
let responseJSON = await response.json();
this.setState({ethSpotPrice: parseFloat(responseJSON.data.amount)});
}
async instantiateContracts() {
const contract = require('truffle-contract');
const tetzel = contract(Tetzel);
const tetzelCrowdsale = contract(TetzelCrowdsale);
tetzel.setProvider(this.props.web3.currentProvider)
tetzelCrowdsale.setProvider(this.props.web3.currentProvider)
var tetzelInstance = await tetzel.deployed();
var tetzelCrowdsaleInstance = await tetzelCrowdsale.deployed();
var tetzelCoinAddress = await tetzelCrowdsaleInstance.token();
this.setState({
tetzelInstance: tetzelInstance,
tetzelAddress: tetzelInstance.address,
tetzelCoinAddress: tetzelCoinAddress
});
}
sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}
async purchase() {
var sinValue = parseFloat(parseFloat(this.state.sinValueETH).toFixed(8));
var isValidSin = this.state.sinText.length > 0;
var isValidPayment = typeof sinValue === 'number' && sinValue > 0;
var isValidAddress = this.props.web3.isAddress(this.state.sinRecipient);
if (!isValidSin) {
throw new Error('Your sin is not valid. Please confess before buying SIN tokens.');
}
if (!isValidPayment) {
throw new Error('Please enter a payment amount greater than 0.');
}
if (!isValidAddress) {
throw new Error('Please enter a valid Ethereum address.');
}
try {
const txHash = await this.state.tetzelInstance.confess.sendTransaction(
this.state.sinRecipient,
this.state.sinText,
{
from: this.state.account,
value: this.props.web3.toWei(sinValue, 'ether'),
gas: 300000,
}
);
this.setState({tx: txHash, pending: true});
var isSuccess = false;
var count = 0;
while(!isSuccess && count < 24) {
var txReceipt = await this.getTransactionReceipt(txHash);
if (txReceipt !== null) {
isSuccess = this.checkTxSuccess(txReceipt);
if (isSuccess) break;
}
count += 1;
await this.sleep(5000);
}
var txStatus;
if (isSuccess) {
txStatus = {complete: true, msg: ''};
} else {
throw new Error('Transaction failed. Please check Etherscan for more details. Transaction hash: ' + this.state.tx);
}
} catch(e) {
txStatus = {complete: false, msg: e.message};
} finally {
this.setState({pending: false});
}
return txStatus;
}
/*
Checks whether or not a transaction succeeded by looking for an event (`Confess`)
triggered by the Tetzel contract. We need to do this because there's no way to tell
the difference between a transaction that failed due to out of gas errors
on internal transactions but is still successfully mined and a successful
transaction.
*/
checkTxSuccess(txReceipt) {
return txReceipt.logs.reduce((acc, logObj) => {
return logObj.address === this.state.tetzelAddress || acc
}, false);
}
/*
Promisified version of web3's getTransactionReceipt
*/
getTransactionReceipt(txHash) {
var self = this;
return new Promise(function (resolve, reject) {
self.props.web3.eth.getTransactionReceipt(txHash, function (err, result) {
if (err !== null) {
reject(err);
} else {
resolve(result);
}
});
});
}
updateSinValue(val, unit) {
if (unit === 'USD') {
this.setState({
sinValueUSD: val,
sinValueETH: val / this.state.ethSpotPrice
});
} else if (unit === 'ETH') {
this.setState({
sinValueUSD: val * this.state.ethSpotPrice,
sinValueETH: val
});
} else {
throw new Error("Invalid unit for updateSinValue");
}
}
updateSinText(txt) {
this.setState({sinText: txt});
}
validateSinText() {
return this.state.sinText.length > 0;
}
handleInvalidSinText() {
this.setState({errorMsg: 'Please confess before moving on.'});
}
updateTestSinValues(idx, val) {
var newTestSinValues = this.state.testSinValues.slice();
newTestSinValues[idx] = val;
this.setState({testSinValues: newTestSinValues});
}
updateSinRecipient(val) {
this.setState({sinRecipient: val});
} | changeActiveView(nextView) {
const validViews = ['CONFESS_SIN', 'VALUE_SIN', 'PURCHASE_SIN', 'FORGIVENESS'];
if (validViews.indexOf(nextView) === -1) {
throw new Error('Invalid view');
}
// Validating sin text input shouldn't have to occur here, but it does
// with the way this is currently set up
// TODO: Refactor this
if (this.state.activeView === 'CONFESS_SIN' && !this.validateSinText()) {
this.handleInvalidSinText();
} else {
this.setState({activeView: nextView, errorMsg: ''});
}
}
render() {
const showActiveView = () => {
switch(this.state.activeView) {
case 'CONFESS_SIN':
return (
<ConfessSin
errorMsg={ this.state.errorMsg }
sinText={ this.state.sinText }
updateSinText={ this.updateSinText.bind(this) }
onNext={ () => this.changeActiveView('VALUE_SIN') } />
);
case 'VALUE_SIN':
return (
<ValueSin
sinText={ this.state.sinText }
sinValueUSD={ this.state.sinValueUSD }
testSinValues={ this.state.testSinValues }
updateSinValue={ this.updateSinValue.bind(this) }
updateTestSinValues={ this.updateTestSinValues.bind(this) }
onNext={ () => this.changeActiveView('PURCHASE_SIN') } />
);
case 'PURCHASE_SIN':
return (
<PurchaseSin
errorMsg={ this.state.errorMsg }
tetzelAddress={ this.state.tetzelAddress }
sinRate={ this.state.sinRate }
sinValueETH={ this.state.sinValueETH }
sinText={ this.state.sinText }
sinRecipient={ this.state.sinRecipient }
ethSpotPrice={ this.state.ethSpotPrice }
updateSinRecipient={ this.updateSinRecipient.bind(this) }
updateSinValue={ this.updateSinValue.bind(this) }
pending={ this.state.pending }
tx={ this.state.tx }
onPurchase={ async () => {
this.setState({errorMsg: ''});
try {
let responseObj = await this.purchase();
if (responseObj.complete) {
this.changeActiveView('FORGIVENESS');
} else {
throw new Error(responseObj.msg);
}
} catch(e) {
this.setState({errorMsg: e.message});
}
}} />
);
case 'FORGIVENESS':
const boughtSinsForSelf = this.state.account === this.state.sinRecipient;
console.log(boughtSinsForSelf);
console.log(this.state.account);
console.log(this.state.sinRecipient);
return (
<Forgiveness
tx={ this.state.tx }
web3={ this.props.web3 }
boughtSinsForSelf={ boughtSinsForSelf }
sinRecipient={ this.state.sinRecipient }
tokenAmount={ this.state.sinValueETH * this.state.sinRate } />
);
}
}
const ConfessionNav = () => {
return(
<div className='icon-wrapper'>
<Icon
onClick={ () => this.changeActiveView('CONFESS_SIN') }
name={ this.state.activeView === 'CONFESS_SIN' ? 'circle' : 'circle thin'} />
<Icon
onClick={ () => this.changeActiveView('VALUE_SIN') }
name={ this.state.activeView === 'VALUE_SIN' ? 'circle' : 'circle thin'} />
<Icon
onClick={ () => this.changeActiveView('PURCHASE_SIN') }
name={
(this.state.activeView === 'PURCHASE_SIN'
|| this.state.activeView === 'FORGIVENESS') ? 'circle' : 'circle thin'} />
</div>
);
}
return(
<Container className='confess-wrapper'>
<TermsAndConditionsModal />
<div className='confess-top'>
<Link className='arrow-link exit-link' to='/'><Icon name='long arrow left' /> Exit Confession</Link>
</div>
<div className='confess-content'>
{ showActiveView() }
</div>
<div className='confess-footer'>
{ ConfessionNav() }
</div>
</Container>
);
}
} | random_line_split | |
Confess.js | import React, { Component } from 'react';
import {
Container,
Icon,
} from 'semantic-ui-react';
import { Link } from 'react-router-dom';
import Tetzel from '../build/contracts/Tetzel.json';
import TetzelCrowdsale from '../build/contracts/TetzelCrowdsale.json';
import TermsAndConditionsModal from './components/TermsAndConditionsModal';
import ConfessSin from './components/ConfessSin';
import ValueSin from './components/ValueSin';
import PurchaseSin from './components/PurchaseSin';
import Forgiveness from './components/Forgiveness';
import './css/confess.css';
export default class Confess extends Component {
constructor(props) {
super(props);
this.state = {
errorMsg: 'Confession has ended. You can still view the confessional but will be unable to confess.',
tx: null,
tetzelInstance: null,
tetzelAddress: 'Loading...',
tetzelCoinAddress: 'Loading...',
account: 'Loading...',
sinText: '',
sinValueUSD: 0,
sinValueETH: 0,
sinRate: 500,
sinRecipient: '',
testSinValues: [0, 0, 0],
ethSpotPrice: null,
pending: false,
activeView: 'CONFESS_SIN',
};
}
async componentWillMount() {
if (this.props.web3) {
try {
// We don't need to worry about fetching accounts now that
// the confessional has closed. This would be otherwise uncommented.
// await this.fetchAccount();
await this.instantiateContracts();
await this.fetchEtherPrice();
} catch(e) {
this.setState({errorMsg: e.message});
}
}
}
async fetchAccount() {
const errorMsg = 'We couldn\'t find an Ethereum account. ' +
'Please check MetaMask or your Web3 provider. ' +
'You can still view the confessional but will not ' +
'be able to confess.';
try {
var accounts = await this._getAccountsPromise();
} catch(e) {
this.setState({errorMsg: errorMsg});
}
if (accounts.length === 0) {
this.setState({errorMsg: errorMsg});
} else {
// Initial SIN recipient is the current account by default
this.setState({account: accounts[0], sinRecipient: accounts[0]});
}
}
_getAccountsPromise() {
var self = this;
return new Promise(function (resolve, reject) {
self.props.web3.eth.getAccounts(function (e, accounts) {
if (e !== null) {
reject(e);
} else {
resolve(accounts);
}
});
});
}
async fetchEtherPrice() {
const priceUrl = "https://api.coinbase.com/v2/prices/ETH-USD/spot";
let response = await fetch(priceUrl);
let responseJSON = await response.json();
this.setState({ethSpotPrice: parseFloat(responseJSON.data.amount)});
}
async instantiateContracts() {
const contract = require('truffle-contract');
const tetzel = contract(Tetzel);
const tetzelCrowdsale = contract(TetzelCrowdsale);
tetzel.setProvider(this.props.web3.currentProvider)
tetzelCrowdsale.setProvider(this.props.web3.currentProvider)
var tetzelInstance = await tetzel.deployed();
var tetzelCrowdsaleInstance = await tetzelCrowdsale.deployed();
var tetzelCoinAddress = await tetzelCrowdsaleInstance.token();
this.setState({
tetzelInstance: tetzelInstance,
tetzelAddress: tetzelInstance.address,
tetzelCoinAddress: tetzelCoinAddress
});
}
sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}
async purchase() {
var sinValue = parseFloat(parseFloat(this.state.sinValueETH).toFixed(8));
var isValidSin = this.state.sinText.length > 0;
var isValidPayment = typeof sinValue === 'number' && sinValue > 0;
var isValidAddress = this.props.web3.isAddress(this.state.sinRecipient);
if (!isValidSin) {
throw new Error('Your sin is not valid. Please confess before buying SIN tokens.');
}
if (!isValidPayment) {
throw new Error('Please enter a payment amount greater than 0.');
}
if (!isValidAddress) {
throw new Error('Please enter a valid Ethereum address.');
}
try {
const txHash = await this.state.tetzelInstance.confess.sendTransaction(
this.state.sinRecipient,
this.state.sinText,
{
from: this.state.account,
value: this.props.web3.toWei(sinValue, 'ether'),
gas: 300000,
}
);
this.setState({tx: txHash, pending: true});
var isSuccess = false;
var count = 0;
while(!isSuccess && count < 24) {
var txReceipt = await this.getTransactionReceipt(txHash);
if (txReceipt !== null) {
isSuccess = this.checkTxSuccess(txReceipt);
if (isSuccess) break;
}
count += 1;
await this.sleep(5000);
}
var txStatus;
if (isSuccess) {
txStatus = {complete: true, msg: ''};
} else {
throw new Error('Transaction failed. Please check Etherscan for more details. Transaction hash: ' + this.state.tx);
}
} catch(e) {
txStatus = {complete: false, msg: e.message};
} finally {
this.setState({pending: false});
}
return txStatus;
}
/*
Checks whether or not a transaction succeeded by looking for an event (`Confess`)
triggered by the Tetzel contract. We need to do this because there's no way to tell
the difference between a transaction that failed due to out of gas errors
on internal transactions but is still successfully mined and a successful
transaction.
*/
checkTxSuccess(txReceipt) {
return txReceipt.logs.reduce((acc, logObj) => {
return logObj.address === this.state.tetzelAddress || acc
}, false);
}
/*
Promisified version of web3's getTransactionReceipt
*/
getTransactionReceipt(txHash) {
var self = this;
return new Promise(function (resolve, reject) {
self.props.web3.eth.getTransactionReceipt(txHash, function (err, result) {
if (err !== null) {
reject(err);
} else {
resolve(result);
}
});
});
}
updateSinValue(val, unit) {
if (unit === 'USD') {
this.setState({
sinValueUSD: val,
sinValueETH: val / this.state.ethSpotPrice
});
} else if (unit === 'ETH') {
this.setState({
sinValueUSD: val * this.state.ethSpotPrice,
sinValueETH: val
});
} else {
throw new Error("Invalid unit for updateSinValue");
}
}
updateSinText(txt) {
this.setState({sinText: txt});
}
validateSinText() |
handleInvalidSinText() {
this.setState({errorMsg: 'Please confess before moving on.'});
}
updateTestSinValues(idx, val) {
var newTestSinValues = this.state.testSinValues.slice();
newTestSinValues[idx] = val;
this.setState({testSinValues: newTestSinValues});
}
updateSinRecipient(val) {
this.setState({sinRecipient: val});
}
changeActiveView(nextView) {
const validViews = ['CONFESS_SIN', 'VALUE_SIN', 'PURCHASE_SIN', 'FORGIVENESS'];
if (validViews.indexOf(nextView) === -1) {
throw new Error('Invalid view');
}
// Validating sin text input shouldn't have to occur here, but it does
// with the way this is currently set up
// TODO: Refactor this
if (this.state.activeView === 'CONFESS_SIN' && !this.validateSinText()) {
this.handleInvalidSinText();
} else {
this.setState({activeView: nextView, errorMsg: ''});
}
}
render() {
const showActiveView = () => {
switch(this.state.activeView) {
case 'CONFESS_SIN':
return (
<ConfessSin
errorMsg={ this.state.errorMsg }
sinText={ this.state.sinText }
updateSinText={ this.updateSinText.bind(this) }
onNext={ () => this.changeActiveView('VALUE_SIN') } />
);
case 'VALUE_SIN':
return (
<ValueSin
sinText={ this.state.sinText }
sinValueUSD={ this.state.sinValueUSD }
testSinValues={ this.state.testSinValues }
updateSinValue={ this.updateSinValue.bind(this) }
updateTestSinValues={ this.updateTestSinValues.bind(this) }
onNext={ () => this.changeActiveView('PURCHASE_SIN') } />
);
case 'PURCHASE_SIN':
return (
<PurchaseSin
errorMsg={ this.state.errorMsg }
tetzelAddress={ this.state.tetzelAddress }
sinRate={ this.state.sinRate }
sinValueETH={ this.state.sinValueETH }
sinText={ this.state.sinText }
sinRecipient={ this.state.sinRecipient }
ethSpotPrice={ this.state.ethSpotPrice }
updateSinRecipient={ this.updateSinRecipient.bind(this) }
updateSinValue={ this.updateSinValue.bind(this) }
pending={ this.state.pending }
tx={ this.state.tx }
onPurchase={ async () => {
this.setState({errorMsg: ''});
try {
let responseObj = await this.purchase();
if (responseObj.complete) {
this.changeActiveView('FORGIVENESS');
} else {
throw new Error(responseObj.msg);
}
} catch(e) {
this.setState({errorMsg: e.message});
}
}} />
);
case 'FORGIVENESS':
const boughtSinsForSelf = this.state.account === this.state.sinRecipient;
console.log(boughtSinsForSelf);
console.log(this.state.account);
console.log(this.state.sinRecipient);
return (
<Forgiveness
tx={ this.state.tx }
web3={ this.props.web3 }
boughtSinsForSelf={ boughtSinsForSelf }
sinRecipient={ this.state.sinRecipient }
tokenAmount={ this.state.sinValueETH * this.state.sinRate } />
);
}
}
const ConfessionNav = () => {
return(
<div className='icon-wrapper'>
<Icon
onClick={ () => this.changeActiveView('CONFESS_SIN') }
name={ this.state.activeView === 'CONFESS_SIN' ? 'circle' : 'circle thin'} />
<Icon
onClick={ () => this.changeActiveView('VALUE_SIN') }
name={ this.state.activeView === 'VALUE_SIN' ? 'circle' : 'circle thin'} />
<Icon
onClick={ () => this.changeActiveView('PURCHASE_SIN') }
name={
(this.state.activeView === 'PURCHASE_SIN'
|| this.state.activeView === 'FORGIVENESS') ? 'circle' : 'circle thin'} />
</div>
);
}
return(
<Container className='confess-wrapper'>
<TermsAndConditionsModal />
<div className='confess-top'>
<Link className='arrow-link exit-link' to='/'><Icon name='long arrow left' /> Exit Confession</Link>
</div>
<div className='confess-content'>
{ showActiveView() }
</div>
<div className='confess-footer'>
{ ConfessionNav() }
</div>
</Container>
);
}
}
| {
return this.state.sinText.length > 0;
} | identifier_body |
Confess.js | import React, { Component } from 'react';
import {
Container,
Icon,
} from 'semantic-ui-react';
import { Link } from 'react-router-dom';
import Tetzel from '../build/contracts/Tetzel.json';
import TetzelCrowdsale from '../build/contracts/TetzelCrowdsale.json';
import TermsAndConditionsModal from './components/TermsAndConditionsModal';
import ConfessSin from './components/ConfessSin';
import ValueSin from './components/ValueSin';
import PurchaseSin from './components/PurchaseSin';
import Forgiveness from './components/Forgiveness';
import './css/confess.css';
export default class Confess extends Component {
constructor(props) {
super(props);
this.state = {
errorMsg: 'Confession has ended. You can still view the confessional but will be unable to confess.',
tx: null,
tetzelInstance: null,
tetzelAddress: 'Loading...',
tetzelCoinAddress: 'Loading...',
account: 'Loading...',
sinText: '',
sinValueUSD: 0,
sinValueETH: 0,
sinRate: 500,
sinRecipient: '',
testSinValues: [0, 0, 0],
ethSpotPrice: null,
pending: false,
activeView: 'CONFESS_SIN',
};
}
async componentWillMount() {
if (this.props.web3) {
try {
// We don't need to worry about fetching accounts now that
// the confessional has closed. This would be otherwise uncommented.
// await this.fetchAccount();
await this.instantiateContracts();
await this.fetchEtherPrice();
} catch(e) {
this.setState({errorMsg: e.message});
}
}
}
async fetchAccount() {
const errorMsg = 'We couldn\'t find an Ethereum account. ' +
'Please check MetaMask or your Web3 provider. ' +
'You can still view the confessional but will not ' +
'be able to confess.';
try {
var accounts = await this._getAccountsPromise();
} catch(e) {
this.setState({errorMsg: errorMsg});
}
if (accounts.length === 0) {
this.setState({errorMsg: errorMsg});
} else {
// Initial SIN recipient is the current account by default
this.setState({account: accounts[0], sinRecipient: accounts[0]});
}
}
_getAccountsPromise() {
var self = this;
return new Promise(function (resolve, reject) {
self.props.web3.eth.getAccounts(function (e, accounts) {
if (e !== null) {
reject(e);
} else {
resolve(accounts);
}
});
});
}
async fetchEtherPrice() {
const priceUrl = "https://api.coinbase.com/v2/prices/ETH-USD/spot";
let response = await fetch(priceUrl);
let responseJSON = await response.json();
this.setState({ethSpotPrice: parseFloat(responseJSON.data.amount)});
}
async instantiateContracts() {
const contract = require('truffle-contract');
const tetzel = contract(Tetzel);
const tetzelCrowdsale = contract(TetzelCrowdsale);
tetzel.setProvider(this.props.web3.currentProvider)
tetzelCrowdsale.setProvider(this.props.web3.currentProvider)
var tetzelInstance = await tetzel.deployed();
var tetzelCrowdsaleInstance = await tetzelCrowdsale.deployed();
var tetzelCoinAddress = await tetzelCrowdsaleInstance.token();
this.setState({
tetzelInstance: tetzelInstance,
tetzelAddress: tetzelInstance.address,
tetzelCoinAddress: tetzelCoinAddress
});
}
sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}
async purchase() {
var sinValue = parseFloat(parseFloat(this.state.sinValueETH).toFixed(8));
var isValidSin = this.state.sinText.length > 0;
var isValidPayment = typeof sinValue === 'number' && sinValue > 0;
var isValidAddress = this.props.web3.isAddress(this.state.sinRecipient);
if (!isValidSin) {
throw new Error('Your sin is not valid. Please confess before buying SIN tokens.');
}
if (!isValidPayment) {
throw new Error('Please enter a payment amount greater than 0.');
}
if (!isValidAddress) {
throw new Error('Please enter a valid Ethereum address.');
}
try {
const txHash = await this.state.tetzelInstance.confess.sendTransaction(
this.state.sinRecipient,
this.state.sinText,
{
from: this.state.account,
value: this.props.web3.toWei(sinValue, 'ether'),
gas: 300000,
}
);
this.setState({tx: txHash, pending: true});
var isSuccess = false;
var count = 0;
while(!isSuccess && count < 24) {
var txReceipt = await this.getTransactionReceipt(txHash);
if (txReceipt !== null) {
isSuccess = this.checkTxSuccess(txReceipt);
if (isSuccess) break;
}
count += 1;
await this.sleep(5000);
}
var txStatus;
if (isSuccess) {
txStatus = {complete: true, msg: ''};
} else {
throw new Error('Transaction failed. Please check Etherscan for more details. Transaction hash: ' + this.state.tx);
}
} catch(e) {
txStatus = {complete: false, msg: e.message};
} finally {
this.setState({pending: false});
}
return txStatus;
}
/*
Checks whether or not a transaction succeeded by looking for an event (`Confess`)
triggered by the Tetzel contract. We need to do this because there's no way to tell
the difference between a transaction that failed due to out of gas errors
on internal transactions but is still successfully mined and a successful
transaction.
*/
checkTxSuccess(txReceipt) {
return txReceipt.logs.reduce((acc, logObj) => {
return logObj.address === this.state.tetzelAddress || acc
}, false);
}
/*
Promisified version of web3's getTransactionReceipt
*/
getTransactionReceipt(txHash) {
var self = this;
return new Promise(function (resolve, reject) {
self.props.web3.eth.getTransactionReceipt(txHash, function (err, result) {
if (err !== null) {
reject(err);
} else {
resolve(result);
}
});
});
}
updateSinValue(val, unit) {
if (unit === 'USD') {
this.setState({
sinValueUSD: val,
sinValueETH: val / this.state.ethSpotPrice
});
} else if (unit === 'ETH') {
this.setState({
sinValueUSD: val * this.state.ethSpotPrice,
sinValueETH: val
});
} else {
throw new Error("Invalid unit for updateSinValue");
}
}
updateSinText(txt) {
this.setState({sinText: txt});
}
validateSinText() {
return this.state.sinText.length > 0;
}
handleInvalidSinText() {
this.setState({errorMsg: 'Please confess before moving on.'});
}
updateTestSinValues(idx, val) {
var newTestSinValues = this.state.testSinValues.slice();
newTestSinValues[idx] = val;
this.setState({testSinValues: newTestSinValues});
}
updateSinRecipient(val) {
this.setState({sinRecipient: val});
}
changeActiveView(nextView) {
const validViews = ['CONFESS_SIN', 'VALUE_SIN', 'PURCHASE_SIN', 'FORGIVENESS'];
if (validViews.indexOf(nextView) === -1) {
throw new Error('Invalid view');
}
// Validating sin text input shouldn't have to occur here, but it does
// with the way this is currently set up
// TODO: Refactor this
if (this.state.activeView === 'CONFESS_SIN' && !this.validateSinText()) {
this.handleInvalidSinText();
} else {
this.setState({activeView: nextView, errorMsg: ''});
}
}
| () {
const showActiveView = () => {
switch(this.state.activeView) {
case 'CONFESS_SIN':
return (
<ConfessSin
errorMsg={ this.state.errorMsg }
sinText={ this.state.sinText }
updateSinText={ this.updateSinText.bind(this) }
onNext={ () => this.changeActiveView('VALUE_SIN') } />
);
case 'VALUE_SIN':
return (
<ValueSin
sinText={ this.state.sinText }
sinValueUSD={ this.state.sinValueUSD }
testSinValues={ this.state.testSinValues }
updateSinValue={ this.updateSinValue.bind(this) }
updateTestSinValues={ this.updateTestSinValues.bind(this) }
onNext={ () => this.changeActiveView('PURCHASE_SIN') } />
);
case 'PURCHASE_SIN':
return (
<PurchaseSin
errorMsg={ this.state.errorMsg }
tetzelAddress={ this.state.tetzelAddress }
sinRate={ this.state.sinRate }
sinValueETH={ this.state.sinValueETH }
sinText={ this.state.sinText }
sinRecipient={ this.state.sinRecipient }
ethSpotPrice={ this.state.ethSpotPrice }
updateSinRecipient={ this.updateSinRecipient.bind(this) }
updateSinValue={ this.updateSinValue.bind(this) }
pending={ this.state.pending }
tx={ this.state.tx }
onPurchase={ async () => {
this.setState({errorMsg: ''});
try {
let responseObj = await this.purchase();
if (responseObj.complete) {
this.changeActiveView('FORGIVENESS');
} else {
throw new Error(responseObj.msg);
}
} catch(e) {
this.setState({errorMsg: e.message});
}
}} />
);
case 'FORGIVENESS':
const boughtSinsForSelf = this.state.account === this.state.sinRecipient;
console.log(boughtSinsForSelf);
console.log(this.state.account);
console.log(this.state.sinRecipient);
return (
<Forgiveness
tx={ this.state.tx }
web3={ this.props.web3 }
boughtSinsForSelf={ boughtSinsForSelf }
sinRecipient={ this.state.sinRecipient }
tokenAmount={ this.state.sinValueETH * this.state.sinRate } />
);
}
}
const ConfessionNav = () => {
return(
<div className='icon-wrapper'>
<Icon
onClick={ () => this.changeActiveView('CONFESS_SIN') }
name={ this.state.activeView === 'CONFESS_SIN' ? 'circle' : 'circle thin'} />
<Icon
onClick={ () => this.changeActiveView('VALUE_SIN') }
name={ this.state.activeView === 'VALUE_SIN' ? 'circle' : 'circle thin'} />
<Icon
onClick={ () => this.changeActiveView('PURCHASE_SIN') }
name={
(this.state.activeView === 'PURCHASE_SIN'
|| this.state.activeView === 'FORGIVENESS') ? 'circle' : 'circle thin'} />
</div>
);
}
return(
<Container className='confess-wrapper'>
<TermsAndConditionsModal />
<div className='confess-top'>
<Link className='arrow-link exit-link' to='/'><Icon name='long arrow left' /> Exit Confession</Link>
</div>
<div className='confess-content'>
{ showActiveView() }
</div>
<div className='confess-footer'>
{ ConfessionNav() }
</div>
</Container>
);
}
}
| render | identifier_name |
mod.rs | #![warn(missing_docs)]
//! Contains all data structures and method to work with model resources.
//!
//! Model is an isolated scene that is used to create copies of its data - this
//! process is known as `instantiation`. Isolation in this context means that
//! such scene cannot be modified, rendered, etc. It just a data source.
//!
//! All instances will have references to resource they were created from - this
//! will help to get correct vertex and indices buffers when loading a save file,
//! loader will just take all needed data from resource so we don't need to store
//! such data in save file. Also this mechanism works perfectly when you changing
//! resource in external editor (3Ds max, Maya, Blender, etc.) engine will assign
//! correct visual data when loading a saved game.
//!
//! # Supported formats
//!
//! Currently only FBX (common format in game industry for storing complex 3d models)
//! and RGS (native Fyroxed format) formats are supported.
use crate::{
animation::Animation,
asset::{
manager::ResourceManager, options::ImportOptions, Resource, ResourceData,
MODEL_RESOURCE_UUID,
},
core::{
algebra::{UnitQuaternion, Vector3},
log::{Log, MessageKind},
pool::Handle,
reflect::prelude::*,
uuid::Uuid,
variable::mark_inheritable_properties_non_modified,
visitor::{Visit, VisitError, VisitResult, Visitor},
TypeUuidProvider,
},
engine::SerializationContext,
resource::fbx::{self, error::FbxError},
scene::{
animation::AnimationPlayer,
graph::{map::NodeHandleMap, Graph},
node::Node,
Scene, SceneLoader,
},
};
use serde::{Deserialize, Serialize};
use std::{
any::Any,
borrow::Cow,
fmt::{Display, Formatter},
path::{Path, PathBuf},
sync::Arc,
};
use strum_macros::{AsRefStr, EnumString, EnumVariantNames};
pub mod loader;
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Reflect)]
#[repr(u32)]
pub(crate) enum NodeMapping {
UseNames = 0,
UseHandles = 1,
}
/// See module docs.
#[derive(Debug, Visit, Reflect)]
pub struct Model {
pub(crate) path: PathBuf,
#[visit(skip)]
pub(crate) mapping: NodeMapping,
#[visit(skip)]
scene: Scene,
}
impl TypeUuidProvider for Model {
fn type_uuid() -> Uuid {
MODEL_RESOURCE_UUID
}
}
/// Type alias for model resources.
pub type ModelResource = Resource<Model>;
/// Extension trait for model resources.
pub trait ModelResourceExtension: Sized {
/// Tries to instantiate model from given resource.
fn instantiate_from(
model: ModelResource,
model_data: &Model,
handle: Handle<Node>,
dest_graph: &mut Graph,
) -> (Handle<Node>, NodeHandleMap);
/// Tries to instantiate model from given resource.
fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node>;
/// Instantiates a prefab and places it at specified position and orientation in global coordinates.
fn instantiate_at(
&self,
scene: &mut Scene,
position: Vector3<f32>,
orientation: UnitQuaternion<f32>,
) -> Handle<Node>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene.
///
/// Animation retargeting allows you to "transfer" animation from a model to a model
/// instance on a scene. Imagine you have a character that should have multiple animations
/// like idle, run, shoot, walk, etc. and you want to store each animation in a separate
/// file. Then when you creating a character on a level you want to have all possible
/// animations assigned to a character, this is where this function comes into play:
/// you just load a model of your character with skeleton, but without any animations,
/// then you load several "models" which have only skeleton with some animation (such
/// "models" can be considered as "animation" resources). After this you need to
/// instantiate model on your level and retarget all animations you need to that instance
/// from other "models". All you have after this is a handle to a model and bunch of
/// handles to specific animations. After this animations can be blended in any combinations
/// you need to. For example idle animation can be blended with walk animation when your
/// character starts walking.
///
/// # Notes
///
/// Most of the 3d model formats can contain only one animation, so in most cases
/// this function will return vector with only one animation.
fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically
/// adds retargetted animations to the specified animation player in the hierarchy of given `root`.
///
/// # Panic
///
/// Panics if `dest_animation_player` is invalid handle, or the node does not have [`AnimationPlayer`]
/// component.
fn retarget_animations_to_player(
&self,
root: Handle<Node>,
dest_animation_player: Handle<Node>,
graph: &mut Graph,
) -> Vec<Handle<Animation>>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically
/// adds retargetted animations to a first animation player in the hierarchy of given `root`.
///
/// # Panic
///
/// Panics if there's no animation player in the given hierarchy (descendant nodes of `root`).
fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>>;
}
impl ModelResourceExtension for ModelResource {
fn instantiate_from(
model: ModelResource,
model_data: &Model,
handle: Handle<Node>,
dest_graph: &mut Graph,
) -> (Handle<Node>, NodeHandleMap) {
let (root, old_to_new) =
model_data
.scene
.graph
.copy_node(handle, dest_graph, &mut |_, _| true);
// Notify instantiated nodes about resource they were created from.
let mut stack = vec![root];
while let Some(node_handle) = stack.pop() {
let node = &mut dest_graph[node_handle];
node.resource = Some(model.clone());
// Reset resource instance root flag, this is needed because a node after instantiation cannot
// be a root anymore.
node.is_resource_instance_root = false;
// Reset inheritable properties, so property inheritance system will take properties
// from parent objects on resolve stage.
node.as_reflect_mut(&mut |node| mark_inheritable_properties_non_modified(node));
// Continue on children.
stack.extend_from_slice(node.children());
}
// Fill original handles to instances.
for (&old, &new) in old_to_new.inner().iter() {
dest_graph[new].original_handle_in_resource = old;
}
dest_graph.update_hierarchical_data_for_descendants(root);
(root, old_to_new)
}
fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node> {
let data = self.data_ref();
let instance_root = Self::instantiate_from(
self.clone(),
&data,
data.scene.graph.get_root(),
&mut dest_scene.graph,
)
.0;
dest_scene.graph[instance_root].is_resource_instance_root = true;
std::mem::drop(data);
instance_root
}
fn instantiate_at(
&self,
scene: &mut Scene,
position: Vector3<f32>,
orientation: UnitQuaternion<f32>,
) -> Handle<Node> {
let root = self.instantiate(scene);
scene.graph[root]
.local_transform_mut()
.set_position(position)
.set_rotation(orientation);
scene.graph.update_hierarchical_data_for_descendants(root);
root
}
fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation> {
let mut retargetted_animations = Vec::new();
let data = self.data_ref();
for src_node_ref in data.scene.graph.linear_iter() {
if let Some(src_player) = src_node_ref.query_component_ref::<AnimationPlayer>() {
for src_anim in src_player.animations().iter() {
let mut anim_copy = src_anim.clone();
// Remap animation track nodes from resource to instance. This is required
// because we've made a plain copy and it has tracks with node handles mapped
// to nodes of internal scene.
for (i, ref_track) in src_anim.tracks().iter().enumerate() {
let ref_node = &data.scene.graph[ref_track.target()];
let track = &mut anim_copy.tracks_mut()[i];
// Find instantiated node that corresponds to node in resource
match graph.find_by_name(root, ref_node.name()) {
Some((instance_node, _)) => {
// One-to-one track mapping so there is [i] indexing.
track.set_target(instance_node);
}
None => {
track.set_target(Handle::NONE);
Log::writeln(
MessageKind::Error,
format!(
"Failed to retarget animation {:?} for node {}",
data.path(),
ref_node.name()
),
);
}
}
}
retargetted_animations.push(anim_copy);
}
}
}
retargetted_animations
}
fn retarget_animations_to_player(
&self,
root: Handle<Node>,
dest_animation_player: Handle<Node>,
graph: &mut Graph,
) -> Vec<Handle<Animation>> {
let mut animation_handles = Vec::new();
let animations = self.retarget_animations_directly(root, graph);
let dest_animation_player = graph[dest_animation_player]
.query_component_mut::<AnimationPlayer>()
.unwrap();
for animation in animations {
animation_handles.push(dest_animation_player.animations_mut().add(animation));
}
animation_handles
}
fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>> {
if let Some((animation_player, _)) = graph.find(root, &mut |n| {
n.query_component_ref::<AnimationPlayer>().is_some()
}) {
self.retarget_animations_to_player(root, animation_player, graph)
} else {
Default::default()
}
}
}
impl ResourceData for Model {
fn path(&self) -> Cow<Path> {
Cow::Borrowed(&self.path)
}
fn set_path(&mut self, path: PathBuf) {
self.path = path;
}
fn as_any(&self) -> &dyn Any {
self
}
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
fn type_uuid(&self) -> Uuid {
<Self as TypeUuidProvider>::type_uuid()
}
}
impl Default for Model {
fn default() -> Self {
Self {
path: PathBuf::new(),
mapping: NodeMapping::UseNames,
scene: Scene::new(),
}
}
}
/// Defines a way of searching materials when loading a model resource from foreign file format such as FBX.
///
/// # Motivation
///
/// Most 3d model file formats store paths to external resources (textures and other things) as absolute paths,
/// which makes it impossible to use with "location-independent" application like games. To fix that issue, the
/// engine provides few ways of resolving paths to external resources. The engine starts resolving by stripping
/// everything but file name from an external resource's path, then it uses one of the following methods to find
/// a texture with the file name. It could look up on folders hierarchy by using [`MaterialSearchOptions::RecursiveUp`]
/// method, or even use global search starting from the working directory of your game
/// ([`MaterialSearchOptions::WorkingDirectory`])
#[derive(
Clone,
Debug,
Visit,
PartialEq,
Eq,
Deserialize,
Serialize,
Reflect,
AsRefStr,
EnumString,
EnumVariantNames,
)]
pub enum MaterialSearchOptions {
/// Search in specified materials directory. It is suitable for cases when
/// your model resource use shared textures.
///
/// # Platform specific
///
/// Works on every platform.
MaterialsDirectory(PathBuf),
/// Recursive-up search. It is suitable for cases when textures are placed
/// near your model resource. This is **default** option.
///
/// # Platform specific
///
/// Works on every platform.
RecursiveUp,
/// Global search starting from working directory. Slowest option with a lot of ambiguities -
/// it may load unexpected file in cases when there are two or more files with same name but
/// lying in different directories.
///
/// # Platform specific
///
/// WebAssembly - **not supported** due to lack of file system.
WorkingDirectory,
/// Try to use paths stored in the model resource directly. This options has limited usage,
/// it is suitable to load animations, or any other model which does not have any materials.
///
/// # Important notes
///
/// RGS (native engine scenes) files should be loaded with this option by default, otherwise
/// the engine won't be able to correctly find materials.
UsePathDirectly,
}
impl Default for MaterialSearchOptions {
fn default() -> Self {
Self::RecursiveUp
}
}
impl MaterialSearchOptions {
/// A helper to create MaterialsDirectory variant.
pub fn materials_directory<P: AsRef<Path>>(path: P) -> Self { | /// A set of options that will be applied to a model resource when loading it from external source.
///
/// # Details
///
/// The engine has a convenient way of storing import options in a `.options` files. For example you may
/// have a `foo.fbx` 3d model, to change import options create a new file with additional `.options`
/// extension: `foo.fbx.options`. The content of an options file could be something like this:
///
/// ```text
/// (
/// material_search_options: RecursiveUp
/// )
/// ```
///
/// Check documentation of the field of the structure for more info about each parameter.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default, Reflect, Eq)]
pub struct ModelImportOptions {
/// See [`MaterialSearchOptions`] docs for more info.
#[serde(default)]
pub material_search_options: MaterialSearchOptions,
}
impl ImportOptions for ModelImportOptions {}
/// All possible errors that may occur while trying to load model from some
/// data source.
#[derive(Debug)]
pub enum ModelLoadError {
/// An error occurred while reading a data source.
Visit(VisitError),
/// Format is not supported.
NotSupported(String),
/// An error occurred while loading FBX file.
Fbx(FbxError),
}
impl Display for ModelLoadError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
ModelLoadError::Visit(v) => {
write!(f, "An error occurred while reading a data source {v:?}")
}
ModelLoadError::NotSupported(v) => {
write!(f, "Model format is not supported: {v}")
}
ModelLoadError::Fbx(v) => v.fmt(f),
}
}
}
impl From<FbxError> for ModelLoadError {
fn from(fbx: FbxError) -> Self {
ModelLoadError::Fbx(fbx)
}
}
impl From<VisitError> for ModelLoadError {
fn from(e: VisitError) -> Self {
ModelLoadError::Visit(e)
}
}
impl Model {
pub(crate) async fn load<P: AsRef<Path>>(
path: P,
serialization_context: Arc<SerializationContext>,
resource_manager: ResourceManager,
model_import_options: ModelImportOptions,
) -> Result<Self, ModelLoadError> {
let extension = path
.as_ref()
.extension()
.unwrap_or_default()
.to_string_lossy()
.as_ref()
.to_lowercase();
let (scene, mapping) = match extension.as_ref() {
"fbx" => {
let mut scene = Scene::new();
if let Some(filename) = path.as_ref().file_name() {
let root = scene.graph.get_root();
scene.graph[root].set_name(&filename.to_string_lossy());
}
fbx::load_to_scene(
&mut scene,
resource_manager,
path.as_ref(),
&model_import_options,
)
.await?;
// Set NodeMapping::UseNames as mapping here because FBX does not have
// any persistent unique ids, and we have to use names.
(scene, NodeMapping::UseNames)
}
// Scene can be used directly as model resource. Such scenes can be created in
// Fyroxed.
"rgs" => (
SceneLoader::from_file(
path.as_ref(),
serialization_context,
resource_manager.clone(),
)
.await?
.finish()
.await,
NodeMapping::UseHandles,
),
// TODO: Add more formats.
_ => {
return Err(ModelLoadError::NotSupported(format!(
"Unsupported model resource format: {}",
extension
)))
}
};
Ok(Self {
path: path.as_ref().to_owned(),
scene,
mapping,
})
}
/// Returns shared reference to internal scene, there is no way to obtain
/// mutable reference to inner scene because resource is immutable source
/// of data.
pub fn get_scene(&self) -> &Scene {
&self.scene
}
/// Searches the model's scene graph for a node with the given name, starting from
/// the root. Returns a tuple with a handle and a reference to the found node.
/// If nothing is found, it returns [`None`].
pub fn find_node_by_name(&self, name: &str) -> Option<(Handle<Node>, &Node)> {
self.scene.graph.find_by_name_from_root(name)
}
pub(crate) fn get_scene_mut(&mut self) -> &mut Scene {
&mut self.scene
}
} | Self::MaterialsDirectory(path.as_ref().to_path_buf())
}
}
| random_line_split |
mod.rs | #![warn(missing_docs)]
//! Contains all data structures and method to work with model resources.
//!
//! Model is an isolated scene that is used to create copies of its data - this
//! process is known as `instantiation`. Isolation in this context means that
//! such scene cannot be modified, rendered, etc. It just a data source.
//!
//! All instances will have references to resource they were created from - this
//! will help to get correct vertex and indices buffers when loading a save file,
//! loader will just take all needed data from resource so we don't need to store
//! such data in save file. Also this mechanism works perfectly when you changing
//! resource in external editor (3Ds max, Maya, Blender, etc.) engine will assign
//! correct visual data when loading a saved game.
//!
//! # Supported formats
//!
//! Currently only FBX (common format in game industry for storing complex 3d models)
//! and RGS (native Fyroxed format) formats are supported.
use crate::{
animation::Animation,
asset::{
manager::ResourceManager, options::ImportOptions, Resource, ResourceData,
MODEL_RESOURCE_UUID,
},
core::{
algebra::{UnitQuaternion, Vector3},
log::{Log, MessageKind},
pool::Handle,
reflect::prelude::*,
uuid::Uuid,
variable::mark_inheritable_properties_non_modified,
visitor::{Visit, VisitError, VisitResult, Visitor},
TypeUuidProvider,
},
engine::SerializationContext,
resource::fbx::{self, error::FbxError},
scene::{
animation::AnimationPlayer,
graph::{map::NodeHandleMap, Graph},
node::Node,
Scene, SceneLoader,
},
};
use serde::{Deserialize, Serialize};
use std::{
any::Any,
borrow::Cow,
fmt::{Display, Formatter},
path::{Path, PathBuf},
sync::Arc,
};
use strum_macros::{AsRefStr, EnumString, EnumVariantNames};
pub mod loader;
/// Defines how nodes of a model instance are matched back to the nodes of the
/// source resource (used when resolving saved scenes / retargeting data).
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Reflect)]
#[repr(u32)]
pub(crate) enum NodeMapping {
// Match by node name — used for formats without persistent unique ids (FBX),
// see `Model::load`.
UseNames = 0,
// Match by node handle — used for native RGS scenes where handles are stable.
UseHandles = 1,
}
/// See module docs.
#[derive(Debug, Visit, Reflect)]
pub struct Model {
// Path the model was loaded from; also exposed via `ResourceData::path`.
pub(crate) path: PathBuf,
// How instance nodes map back to resource nodes; not serialized — it is
// re-established by `Model::load` based on the source format.
#[visit(skip)]
pub(crate) mapping: NodeMapping,
// The isolated source scene that instances are copied from; not serialized.
#[visit(skip)]
scene: Scene,
}
impl TypeUuidProvider for Model {
// Stable type id by which the resource system identifies model resources.
fn type_uuid() -> Uuid {
MODEL_RESOURCE_UUID
}
}
/// Type alias for model resources.
pub type ModelResource = Resource<Model>;
/// Extension trait for model resources.
pub trait ModelResourceExtension: Sized {
/// Tries to instantiate model from given resource.
fn instantiate_from(
model: ModelResource,
model_data: &Model,
handle: Handle<Node>,
dest_graph: &mut Graph,
) -> (Handle<Node>, NodeHandleMap);
/// Tries to instantiate model from given resource.
fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node>;
/// Instantiates a prefab and places it at specified position and orientation in global coordinates.
fn instantiate_at(
&self,
scene: &mut Scene,
position: Vector3<f32>,
orientation: UnitQuaternion<f32>,
) -> Handle<Node>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene.
///
/// Animation retargeting allows you to "transfer" animation from a model to a model
/// instance on a scene. Imagine you have a character that should have multiple animations
/// like idle, run, shoot, walk, etc. and you want to store each animation in a separate
/// file. Then when you creating a character on a level you want to have all possible
/// animations assigned to a character, this is where this function comes into play:
/// you just load a model of your character with skeleton, but without any animations,
/// then you load several "models" which have only skeleton with some animation (such
/// "models" can be considered as "animation" resources). After this you need to
/// instantiate model on your level and retarget all animations you need to that instance
/// from other "models". All you have after this is a handle to a model and bunch of
/// handles to specific animations. After this animations can be blended in any combinations
/// you need to. For example idle animation can be blended with walk animation when your
/// character starts walking.
///
/// # Notes
///
/// Most of the 3d model formats can contain only one animation, so in most cases
/// this function will return vector with only one animation.
fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically
/// adds retargetted animations to the specified animation player in the hierarchy of given `root`.
///
/// # Panic
///
/// Panics if `dest_animation_player` is invalid handle, or the node does not have [`AnimationPlayer`]
/// component.
fn retarget_animations_to_player(
&self,
root: Handle<Node>,
dest_animation_player: Handle<Node>,
graph: &mut Graph,
) -> Vec<Handle<Animation>>;
/// Tries to retarget animations from given model resource to a node hierarchy starting
/// from `root` on a given scene. Unlike [`Self::retarget_animations_directly`], it automatically
/// adds retargetted animations to a first animation player in the hierarchy of given `root`.
///
/// # Panic
///
/// Panics if there's no animation player in the given hierarchy (descendant nodes of `root`).
fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>>;
}
impl ModelResourceExtension for ModelResource {
// Copies the sub-graph rooted at `handle` from the resource scene into
// `dest_graph`, marking every copied node as belonging to `model`.
// Returns the instance root plus the resource->instance handle map.
fn instantiate_from(
model: ModelResource,
model_data: &Model,
handle: Handle<Node>,
dest_graph: &mut Graph,
) -> (Handle<Node>, NodeHandleMap) {
// Deep-copy the hierarchy; the filter closure accepts every descendant.
let (root, old_to_new) =
model_data
.scene
.graph
.copy_node(handle, dest_graph, &mut |_, _| true);
// Notify instantiated nodes about resource they were created from.
let mut stack = vec![root];
while let Some(node_handle) = stack.pop() {
let node = &mut dest_graph[node_handle];
node.resource = Some(model.clone());
// Reset resource instance root flag, this is needed because a node after instantiation cannot
// be a root anymore.
node.is_resource_instance_root = false;
// Reset inheritable properties, so property inheritance system will take properties
// from parent objects on resolve stage.
node.as_reflect_mut(&mut |node| mark_inheritable_properties_non_modified(node));
// Continue on children.
stack.extend_from_slice(node.children());
}
// Fill original handles to instances.
for (&old, &new) in old_to_new.inner().iter() {
dest_graph[new].original_handle_in_resource = old;
}
dest_graph.update_hierarchical_data_for_descendants(root);
(root, old_to_new)
}
// Instantiates the entire resource scene into `dest_scene`; the returned
// root is flagged as a resource instance root.
fn instantiate(&self, dest_scene: &mut Scene) -> Handle<Node> {
let data = self.data_ref();
let instance_root = Self::instantiate_from(
self.clone(),
&data,
data.scene.graph.get_root(),
&mut dest_scene.graph,
)
.0;
dest_scene.graph[instance_root].is_resource_instance_root = true;
// Explicitly release the resource data guard before returning.
std::mem::drop(data);
instance_root
}
// Instantiates the model and places it at the given global position and
// orientation.
fn instantiate_at(
&self,
scene: &mut Scene,
position: Vector3<f32>,
orientation: UnitQuaternion<f32>,
) -> Handle<Node> {
let root = self.instantiate(scene);
scene.graph[root]
.local_transform_mut()
.set_position(position)
.set_rotation(orientation);
// Refresh cached hierarchical data so global transforms are valid immediately.
scene.graph.update_hierarchical_data_for_descendants(root);
root
}
// Clones every animation found in the resource's animation players and remaps
// each track's target to the matching (by name) node under `root`.
fn retarget_animations_directly(&self, root: Handle<Node>, graph: &Graph) -> Vec<Animation> {
let mut retargetted_animations = Vec::new();
let data = self.data_ref();
for src_node_ref in data.scene.graph.linear_iter() {
if let Some(src_player) = src_node_ref.query_component_ref::<AnimationPlayer>() {
for src_anim in src_player.animations().iter() {
let mut anim_copy = src_anim.clone();
// Remap animation track nodes from resource to instance. This is required
// because we've made a plain copy and it has tracks with node handles mapped
// to nodes of internal scene.
for (i, ref_track) in src_anim.tracks().iter().enumerate() {
let ref_node = &data.scene.graph[ref_track.target()];
let track = &mut anim_copy.tracks_mut()[i];
// Find instantiated node that corresponds to node in resource
// (matched by node name within `root`'s hierarchy).
match graph.find_by_name(root, ref_node.name()) {
Some((instance_node, _)) => {
// One-to-one track mapping so there is [i] indexing.
track.set_target(instance_node);
}
None => {
// No matching node: detach the track and log instead of
// failing the whole retargeting pass.
track.set_target(Handle::NONE);
Log::writeln(
MessageKind::Error,
format!(
"Failed to retarget animation {:?} for node {}",
data.path(),
ref_node.name()
),
);
}
}
}
retargetted_animations.push(anim_copy);
}
}
}
retargetted_animations
}
// Retargets animations and registers them in the given player node.
// Panics (unwrap) on an invalid handle or missing AnimationPlayer component,
// as documented on the trait.
fn retarget_animations_to_player(
&self,
root: Handle<Node>,
dest_animation_player: Handle<Node>,
graph: &mut Graph,
) -> Vec<Handle<Animation>> {
let mut animation_handles = Vec::new();
let animations = self.retarget_animations_directly(root, graph);
let dest_animation_player = graph[dest_animation_player]
.query_component_mut::<AnimationPlayer>()
.unwrap();
for animation in animations {
animation_handles.push(dest_animation_player.animations_mut().add(animation));
}
animation_handles
}
// NOTE(review): the trait docs say this panics when the hierarchy has no
// animation player, but this implementation returns an empty Vec instead —
// docs and code disagree; confirm which behavior is intended.
fn retarget_animations(&self, root: Handle<Node>, graph: &mut Graph) -> Vec<Handle<Animation>> {
if let Some((animation_player, _)) = graph.find(root, &mut |n| {
n.query_component_ref::<AnimationPlayer>().is_some()
}) {
self.retarget_animations_to_player(root, animation_player, graph)
} else {
Default::default()
}
}
}
impl ResourceData for Model {
// Path of the file this resource was loaded from.
fn path(&self) -> Cow<Path> {
Cow::Borrowed(&self.path)
}
// Replaces the stored source path (used when a resource is moved/renamed).
fn set_path(&mut self, path: PathBuf) {
self.path = path;
}
// Type-erasure helpers required by the resource system.
fn as_any(&self) -> &dyn Any {
self
}
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
// Delegates to the inherent TypeUuidProvider impl so both ids stay in sync.
fn type_uuid(&self) -> Uuid {
<Self as TypeUuidProvider>::type_uuid()
}
}
impl Default for Model {
fn default() -> Self {
Self {
path: PathBuf::new(),
mapping: NodeMapping::UseNames,
scene: Scene::new(),
}
}
}
/// Defines a way of searching materials when loading a model resource from foreign file format such as FBX.
///
/// # Motivation
///
/// Most 3d model file formats store paths to external resources (textures and other things) as absolute paths,
/// which makes it impossible to use with "location-independent" application like games. To fix that issue, the
/// engine provides few ways of resolving paths to external resources. The engine starts resolving by stripping
/// everything but file name from an external resource's path, then it uses one of the following methods to find
/// a texture with the file name. It could look up on folders hierarchy by using [`MaterialSearchOptions::RecursiveUp`]
/// method, or even use global search starting from the working directory of your game
/// ([`MaterialSearchOptions::WorkingDirectory`])
#[derive(
Clone,
Debug,
Visit,
PartialEq,
Eq,
Deserialize,
Serialize,
Reflect,
AsRefStr,
EnumString,
EnumVariantNames,
)]
pub enum MaterialSearchOptions {
/// Search in specified materials directory. It is suitable for cases when
/// your model resource use shared textures.
///
/// # Platform specific
///
/// Works on every platform.
MaterialsDirectory(PathBuf),
/// Recursive-up search. It is suitable for cases when textures are placed
/// near your model resource. This is **default** option.
///
/// # Platform specific
///
/// Works on every platform.
RecursiveUp,
/// Global search starting from working directory. Slowest option with a lot of ambiguities -
/// it may load unexpected file in cases when there are two or more files with same name but
/// lying in different directories.
///
/// # Platform specific
///
/// WebAssembly - **not supported** due to lack of file system.
WorkingDirectory,
/// Try to use paths stored in the model resource directly. This options has limited usage,
/// it is suitable to load animations, or any other model which does not have any materials.
///
/// # Important notes
///
/// RGS (native engine scenes) files should be loaded with this option by default, otherwise
/// the engine won't be able to correctly find materials.
UsePathDirectly,
}
impl Default for MaterialSearchOptions {
fn default() -> Self {
Self::RecursiveUp
}
}
impl MaterialSearchOptions {
    /// Constructs the [`MaterialSearchOptions::MaterialsDirectory`] variant from
    /// anything convertible to a path.
    pub fn materials_directory<P: AsRef<Path>>(path: P) -> Self {
        let dir = path.as_ref().to_owned();
        Self::MaterialsDirectory(dir)
    }
}
/// A set of options that will be applied to a model resource when loading it from external source.
///
/// # Details
///
/// The engine has a convenient way of storing import options in a `.options` files. For example you may
/// have a `foo.fbx` 3d model, to change import options create a new file with additional `.options`
/// extension: `foo.fbx.options`. The content of an options file could be something like this:
///
/// ```text
/// (
/// material_search_options: RecursiveUp
/// )
/// ```
///
/// Check documentation of the field of the structure for more info about each parameter.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default, Reflect, Eq)]
pub struct ModelImportOptions {
/// See [`MaterialSearchOptions`] docs for more info.
// `#[serde(default)]` keeps older `.options` files (without this field) loadable.
#[serde(default)]
pub material_search_options: MaterialSearchOptions,
}
// Marker impl wiring these options into the asset import pipeline;
// NOTE(review): behavior comes entirely from `ImportOptions` defaults — confirm
// against the trait definition.
impl ImportOptions for ModelImportOptions {}
/// All possible errors that may occur while trying to load model from some
/// data source.
#[derive(Debug)]
pub enum ModelLoadError {
/// An error occurred while reading a data source.
Visit(VisitError),
/// Format is not supported.
NotSupported(String),
/// An error occurred while loading FBX file.
Fbx(FbxError),
}
impl Display for ModelLoadError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
ModelLoadError::Visit(v) => |
ModelLoadError::NotSupported(v) => {
write!(f, "Model format is not supported: {v}")
}
ModelLoadError::Fbx(v) => v.fmt(f),
}
}
}
impl From<FbxError> for ModelLoadError {
fn from(fbx: FbxError) -> Self {
ModelLoadError::Fbx(fbx)
}
}
impl From<VisitError> for ModelLoadError {
fn from(e: VisitError) -> Self {
ModelLoadError::Visit(e)
}
}
impl Model {
// Loads a model from `path`, choosing the loader by file extension
// (case-insensitive). Supported: "fbx" (foreign format, name-based node
// mapping) and "rgs" (native scene, handle-based mapping); anything else
// yields ModelLoadError::NotSupported.
pub(crate) async fn load<P: AsRef<Path>>(
path: P,
serialization_context: Arc<SerializationContext>,
resource_manager: ResourceManager,
model_import_options: ModelImportOptions,
) -> Result<Self, ModelLoadError> {
// Normalize the extension so `.FBX` and `.fbx` are treated alike.
let extension = path
.as_ref()
.extension()
.unwrap_or_default()
.to_string_lossy()
.as_ref()
.to_lowercase();
let (scene, mapping) = match extension.as_ref() {
"fbx" => {
let mut scene = Scene::new();
// Name the scene root after the file for easier debugging/lookup.
if let Some(filename) = path.as_ref().file_name() {
let root = scene.graph.get_root();
scene.graph[root].set_name(&filename.to_string_lossy());
}
fbx::load_to_scene(
&mut scene,
resource_manager,
path.as_ref(),
&model_import_options,
)
.await?;
// Set NodeMapping::UseNames as mapping here because FBX does not have
// any persistent unique ids, and we have to use names.
(scene, NodeMapping::UseNames)
}
// Scene can be used directly as model resource. Such scenes can be created in
// Fyroxed.
"rgs" => (
SceneLoader::from_file(
path.as_ref(),
serialization_context,
resource_manager.clone(),
)
.await?
.finish()
.await,
NodeMapping::UseHandles,
),
// TODO: Add more formats.
_ => {
return Err(ModelLoadError::NotSupported(format!(
"Unsupported model resource format: {}",
extension
)))
}
};
Ok(Self {
path: path.as_ref().to_owned(),
scene,
mapping,
})
}
/// Returns shared reference to internal scene, there is no way to obtain
/// mutable reference to inner scene because resource is immutable source
/// of data.
pub fn get_scene(&self) -> &Scene {
&self.scene
}
/// Searches the model's scene graph for a node with the given name, starting from
/// the root. Returns a tuple with a handle and a reference to the found node.
/// If nothing is found, it returns [`None`].
pub fn find_node_by_name(&self, name: &str) -> Option<(Handle<Node>, &Node)> {
self.scene.graph.find_by_name_from_root(name)
}
// Crate-private mutable access to the scene for loaders/engine internals.
pub(crate) fn get_scene_mut(&mut self) -> &mut Scene {
&mut self.scene
}
}
| {
write!(f, "An error occurred while reading a data source {v:?}")
} | conditional_block |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.